text stringlengths 0 1.05M | meta dict |
|---|---|
from functools import lru_cache
import sass
from django.conf import settings
from django.contrib.staticfiles.finders import get_finders
from pipeline.compilers import CompilerBase
from .sass_functions.svg_to_data_uri import svg_to_data_uri
def svg_to_data_uri_(file_):
    """Adapter exposing svg_to_data_uri as a Sass custom function.

    Binds the Django-derived include paths so stylesheets only need to
    pass the SVG file name.
    """
    include_paths = get_include_paths()
    return svg_to_data_uri(file_, include_paths)
# Sass output style passed to libsass; overridable via settings.LIBSASS_OUTPUT_STYLE.
OUTPUT_STYLE = getattr(settings, "LIBSASS_OUTPUT_STYLE", "nested")
# Whether compiled CSS includes source comments; defaults to settings.DEBUG.
SOURCE_COMMENTS = getattr(settings, "LIBSASS_SOURCE_COMMENTS", settings.DEBUG)
# Custom Sass functions made available to stylesheets; by default exposes
# the svg_data_uri() helper defined above.
CUSTOM_FUNCTIONS = getattr(
    settings, "LIBSASS_CUSTOM_FUNCTIONS", {"svg_data_uri": svg_to_data_uri_}
)
@lru_cache()
def get_include_paths():
    """
    Collect the filesystem paths libsass should search when resolving
    @import lines, derived from the staticfiles finders (cached).
    """
    paths = []
    for finder in get_finders():
        # Only finders exposing a 'storages' mapping can contribute paths.
        try:
            finder_storages = finder.storages
        except AttributeError:
            continue
        for storage in finder_storages.values():
            try:
                paths.append(storage.path("."))
            except NotImplementedError:  # pragma: no cover
                # Storages without 'path' do not keep files locally, so
                # they cannot provide an include path.
                pass
    return paths
def compile(**kwargs):
    """Run sass.compile with Django include paths and configured custom
    functions merged in.

    Caller-supplied 'include_paths' come first; caller-supplied
    'custom_functions' override the configured defaults.
    """
    options = dict(kwargs)
    options["include_paths"] = (options.get("include_paths") or []) + get_include_paths()
    merged_functions = dict(CUSTOM_FUNCTIONS)
    merged_functions.update(options.get("custom_functions", {}))
    options["custom_functions"] = merged_functions
    return sass.compile(**options)
class SassCompiler(CompilerBase):
    """django-pipeline compiler that turns .scss sources into CSS via libsass."""

    output_extension = "css"

    def match_file(self, filename):
        # Handle only SCSS-syntax Sass sources.
        return filename.endswith(".scss")

    def compile_file(self, infile, outfile, outdated=False, force=False):
        """Compile *infile* to CSS and write it to *outfile*.

        'outdated'/'force' are accepted for pipeline API compatibility but
        intentionally ignored: the file is always recompiled.
        """
        # Compile BEFORE opening the output file: opening with "w" truncates,
        # so a compile error previously left an empty/corrupt outfile behind.
        css = compile(
            filename=infile,
            output_style=OUTPUT_STYLE,
            source_comments=SOURCE_COMMENTS,
        )
        with open(outfile, "w") as f:
            f.write(css)
        return outfile
| {
"repo_name": "bulv1ne/django-utils",
"path": "utils/pipeline_compilers.py",
"copies": "1",
"size": "2345",
"license": "mit",
"hash": -1349512970398780700,
"line_mean": 29.0641025641,
"line_max": 87,
"alpha_frac": 0.6341151386,
"autogenerated": false,
"ratio": 4.114035087719298,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5248150226319298,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import sublime
import sublime_plugin
__all__ = (
    "ProjectFileChanges",
)
# Typing imports are only evaluated under mypy; MYPY stays False at runtime
# so the Sublime plugin host never pays for them.
MYPY = False
if MYPY:
    from typing import Callable, Dict, Optional, TypeVar
    T = TypeVar("T")
class GitSavvySettings:
    """Settings view layering project settings over the global GitSavvy ones.

    Reads try the window's project settings first, then fall back to the
    cached global settings object; writes always go to the global settings.
    """

    def __init__(self, window=None):
        # type: (sublime.Window) -> None
        self._window = window if window else sublime.active_window()
        self._global_settings = get_global_settings()

    def get(self, key, default=None):
        project_settings = get_project_settings(self._window)
        if key in project_settings:
            return project_settings[key]
        return self._global_settings.get(key, default)

    def set(self, key, value):
        # Deliberately never writes into the project file.
        self._global_settings.set(key, value)
# Monotonic counter bumped whenever a .sublime-project file is saved; it is
# part of the _get_project_settings cache key, so saves bust stale entries.
CHANGE_COUNT = 0
class ProjectFileChanges(sublime_plugin.EventListener):
    """Bump CHANGE_COUNT whenever a .sublime-project file gets saved,
    invalidating the cached project settings."""

    def on_post_save(self, view):
        # type: (sublime.View) -> None
        global CHANGE_COUNT
        path = view.file_name()
        if not path:
            return
        if path.endswith(".sublime-project"):
            CHANGE_COUNT += 1
def get_project_settings(window):
    # type: (sublime.Window) -> Dict
    """Return the GitSavvy settings dict for *window*'s project (cached)."""
    # CHANGE_COUNT is forwarded purely as a cache-busting key component.
    return _get_project_settings(window.id(), CHANGE_COUNT)
@lru_cache(maxsize=16)
def _get_project_settings(wid, _counter):
    # type: (sublime.WindowId, int) -> Dict
    """Read the "GitSavvy" block from the window's project data."""
    project_data = sublime.Window(wid).project_data()
    if project_data:
        return project_data.get("settings", {}).get("GitSavvy", {})
    return {}
@lru_cache(maxsize=1)
def get_global_settings():
    """Return the process-wide GlobalSettings singleton (created lazily)."""
    return GlobalSettings("GitSavvy.sublime-settings")
class GlobalSettings:
    """Caching wrapper around a sublime.Settings object.

    Values read via `get` are memoized; the memo is flushed whenever
    Sublime reports the underlying settings file changed.
    """

    def __init__(self, name):
        settings = sublime.load_settings(name)
        self._settings = settings
        # Re-register the change handler idempotently under our own key.
        settings.clear_on_change(name)
        settings.add_on_change(name, self._on_update)
        self._cache = {}

    def get(self, name, default=None):
        if name not in self._cache:
            self._cache[name] = self._settings.get(name, default)
        return self._cache[name]

    def set(self, name, value):
        # The add_on_change hook clears the cache once Sublime applies this.
        self._settings.set(name, value)

    def _on_update(self):
        self._cache.clear()
class SettingsMixin:
    """Mixin giving commands lazy access to a GitSavvySettings instance.

    Resolves a window from `self.window`, then `self.view.window()`,
    finally falling back to the active window.
    """
    _savvy_settings = None

    @property
    def savvy_settings(self):
        if self._savvy_settings is None:
            window = (
                maybe(lambda: self.window)  # type: ignore[attr-defined]
                or maybe(lambda: self.view.window())  # type: ignore[attr-defined]
                or sublime.active_window()
            )
            self._savvy_settings = GitSavvySettings(window)
        return self._savvy_settings
def maybe(fn):
    # type: (Callable[[], T]) -> Optional[T]
    """Call *fn* and return its result, or None if it raises anything."""
    try:
        result = fn()
    except Exception:
        result = None
    return result
| {
"repo_name": "divmain/GitSavvy",
"path": "core/settings.py",
"copies": "1",
"size": "2736",
"license": "mit",
"hash": 6872041300600098000,
"line_mean": 24.5700934579,
"line_max": 82,
"alpha_frac": 0.6096491228,
"autogenerated": false,
"ratio": 3.590551181102362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4700200303902362,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
@lru_cache()
def assay_term_name(*assay_names):
""" file.dataset.assay_term_name auditor condition factory
"""
def assay_name_condition(value, system):
context = system['context']
assay_uuid = context.upgrade_properties()['dataset']
assay_name = _assay_name(assay_uuid, system['root'])
return assay_name in assay_names
return assay_name_condition
@lru_cache()
def rfa(*rfa_names):
    """ award.rfa auditor condition factory
    """
    def rfa_condition(value, system):
        # Look up the award's RFA through the root resolver at audit time.
        award_uuid = system['context'].upgrade_properties()['award']
        return _award_rfa(award_uuid, system['root']) in rfa_names

    return rfa_condition
@lru_cache()
def _award_rfa(award_uuid, root):
award = root.get_by_uuid(award_uuid)
return award.upgrade_properties().get('rfa')
@lru_cache()
def _assay_name(assay_uuid, root):
assay = root.get_by_uuid(assay_uuid)
return assay.upgrade_properties().get('assay_term_name')
| {
"repo_name": "ENCODE-DCC/encoded",
"path": "src/encoded/audit/conditions.py",
"copies": "1",
"size": "1052",
"license": "mit",
"hash": -4455436042704699000,
"line_mean": 26.6842105263,
"line_max": 62,
"alpha_frac": 0.6549429658,
"autogenerated": false,
"ratio": 3.187878787878788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4342821753678788,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
# Puzzle input: the office designer's favorite number.
magic = 1362
# test data
# magic = 10
# goal_x = 7
# goal_y = 4
# part a: coordinates to reach
goal_x = 31
goal_y = 39
# part b: stop once a node at this step count is popped.
# NOTE(review): the puzzle asks for "at most 50 steps"; 51 here presumably
# compensates for the pop-before-count ordering in the loop below -- confirm.
max_steps = 51
# Sparse visited map: visited[y][x] -> True for every expanded square.
visited = {}
@lru_cache(None)
def is_wall(x, y):
    """Return 1 if (x, y) is a wall, 0 if open space.

    A square is a wall iff the popcount of
    magic + x^2 + 3x + 2xy + y + y^2 is odd.
    """
    value = magic + x * x + 3 * x + 2 * x * y + y + y * y
    return bin(value).count('1') % 2
def is_visited(x, y):
    """True if (x, y) has already been expanded by the search."""
    return x in visited.get(y, ())
def visit(x, y):
    """Record (x, y) in the sparse visited map."""
    visited.setdefault(y, {})[x] = True
def cnt():
    """Number of distinct squares visited so far."""
    return sum(len(row) for row in visited.values())
def draw():
    """Print a small ASCII rendering of the maze ('# ' wall, '. ' open)."""
    for y in range(0, 7):
        # Two-character cells reproduce the original trailing-space layout.
        line = ''.join('# ' if is_wall(x, y) else '. ' for x in range(0, 10))
        print(line)
# Breadth-first search from (1, 1); steps counts moves taken so far.
queue = [(1, 1, 0)]
while queue:
    x, y, steps = queue.pop(0)
    # Part b: stop when the first node at the step limit is popped and report
    # how many distinct squares were reached before it.
    if max_steps and steps == max_steps:
        print("We could have visited " + str(cnt()) + " locations before we got here")
        break
    visit(x, y)
    # Part a: BFS guarantees the first arrival at the goal uses a shortest path.
    if x == goal_x and y == goal_y:
        print("We did it! We're like .. the best: " + str(steps))
        break
    # Enqueue the four orthogonal neighbours; x/y never go negative.
    # NOTE(review): neighbours are not marked visited on enqueue, so the queue
    # may hold duplicates; visit() dedupes on pop, keeping cnt() correct.
    if not is_wall(x+1, y) and not is_visited(x+1, y):
        queue.append((x+1, y, steps+1))
    if not is_wall(x, y+1) and not is_visited(x, y+1):
        queue.append((x, y+1, steps+1))
    if x > 0 and not is_wall(x-1, y) and not is_visited(x-1, y):
        queue.append((x-1, y, steps+1))
    if y > 0 and not is_wall(x, y-1) and not is_visited(x, y-1):
        queue.append((x, y-1, steps+1))
| {
"repo_name": "matslindh/codingchallenges",
"path": "adventofcode2016/13.py",
"copies": "1",
"size": "1553",
"license": "mit",
"hash": -1083301737922663000,
"line_mean": 16.0659340659,
"line_max": 86,
"alpha_frac": 0.5054732775,
"autogenerated": false,
"ratio": 2.8185117967332123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38239850742332127,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
class Fibonacci:
    """Fibonacci numbers computed four ways: naive recursion, iteration,
    hand-rolled memoization and functools.lru_cache."""

    # Shared memo for fabonacci_explicit_momization (class-level, so all
    # instances reuse previously computed values).
    fibonacci_cache = {}

    def _validate(self, n, allow_zero=False):
        """Common argument checking: reject non-ints and non-positive values
        (zero is allowed only when *allow_zero* is set)."""
        if not isinstance(n, int):
            raise TypeError("Please only enter integer")
        if n < 0 or (n == 0 and not allow_zero):
            raise ValueError("Kindly Enter positive integer only ")

    def fibonacci_value(self, n):
        """Return the n-th Fibonacci number (1-indexed) by naive recursion.

        Exponential time; fine for small n.
        Raises TypeError for non-ints, ValueError for n <= 0.
        """
        self._validate(n)
        if n <= 2:
            return 1
        return self.fibonacci_value(n - 1) + self.fibonacci_value(n - 2)

    def fibonacci_iter(self, n):
        """Print the Fibonacci series up to the n-th term, iteratively.

        BUG FIX: the original fell through after the n == 1 case and also
        printed "1\\n1"; chaining with elif prints each prefix exactly once.
        (The old unreachable n == 0 branch -- dead after the n <= 0 check --
        was removed.)
        """
        self._validate(n)
        if n == 1:
            print("1 ")
        elif n == 2:
            print("1\n1")
        else:
            print("1\n1")
            previous, current = 1, 1
            for _ in range(n - 2):
                previous, current = current, previous + current
                print(current)

    def fabonacci_explicit_momization(self, n):
        """Return the n-th Fibonacci number using the explicit shared memo.

        FIX: the cache-hit path no longer prints the value as a side
        effect -- callers that want output print the returned value.
        """
        self._validate(n)
        if n in self.fibonacci_cache:
            return self.fibonacci_cache[n]
        if n <= 2:
            return 1
        value = self.fabonacci_explicit_momization(n - 1) + self.fabonacci_explicit_momization(n - 2)
        self.fibonacci_cache[n] = value
        return value

    @lru_cache(maxsize=None)
    def fabonacci_using_functoolcache(self, n):
        """Return fib(n) (with fib(0) == 0) memoized by functools.lru_cache.

        NOTE: lru_cache on an instance method keys on self and keeps every
        instance alive for the lifetime of the cache; acceptable for this
        demo script.
        """
        self._validate(n, allow_zero=True)
        if n < 2:
            return n
        return self.fabonacci_using_functoolcache(n - 1) + self.fabonacci_using_functoolcache(n - 2)
if __name__ == "__main__":
    # Interactive demo: pick an algorithm, then a term count.
    fibonacci = Fibonacci()
    print("""Enter your Choice
1 - Printing nth term of fibonacci
2 - Printing whole Series till nth term iterative Way (Fast).
3 - Printing Whole series till nth term in recursive way
4 - Printing Whole series till nth term in recursive way(Fast Memorization)""")
    choice = int(input())
    print("Enter the number")
    number = int(input())
    if choice == 1:
        print(fibonacci.fibonacci_value(number))
    if choice == 2:
        fibonacci.fibonacci_iter(number)
    # Choices 3/4 print the whole series by asking for every term; the
    # memo/lru_cache keeps the repeated calls cheap.
    if choice == 3:
        for x in range(1, number+1):
            print(fibonacci.fabonacci_explicit_momization(x))
    if choice == 4:
        for x in range(1, number + 1):
            print(fibonacci.fabonacci_using_functoolcache(x))
| {
"repo_name": "rvsingh011/NitK_Assignments",
"path": "Sem1/Algorithm/fabonacci.py",
"copies": "1",
"size": "3333",
"license": "mit",
"hash": 2572596408504585700,
"line_mean": 30.4433962264,
"line_max": 105,
"alpha_frac": 0.5538553855,
"autogenerated": false,
"ratio": 3.862108922363847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4915964307863847,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
class Solution:
    """LeetCode 97 'Interleaving String', solved bottom-up and top-down."""

    def isInterleaveBU(self, s1: str, s2: str, s3: str) -> bool:
        """Bottom-up DP: reachable[i][j] means s3[:i+j] can be woven from
        s1[:i] and s2[:j]."""
        n1, n2, n3 = len(s1), len(s2), len(s3)
        if n1 + n2 != n3:
            return False
        reachable = [[False] * (n2 + 1) for _ in range(n1 + 1)]
        reachable[0][0] = True
        # First row: s3 prefix built from s2 alone.
        for j in range(1, n2 + 1):
            reachable[0][j] = reachable[0][j - 1] and s2[j - 1] == s3[j - 1]
        for i in range(1, n1 + 1):
            # First column: s3 prefix built from s1 alone.
            reachable[i][0] = reachable[i - 1][0] and s1[i - 1] == s3[i - 1]
            for j in range(1, n2 + 1):
                reachable[i][j] = (
                    (reachable[i][j - 1] and s2[j - 1] == s3[i + j - 1])
                    or (reachable[i - 1][j] and s1[i - 1] == s3[i + j - 1])
                )
        return reachable[n1][n2]

    def isInterleaveTD(self, s1: str, s2: str, s3: str) -> bool:
        """Top-down memoized recursion over string suffixes."""
        @lru_cache(None)
        def can_weave(a: str, b: str, c: str) -> bool:
            if len(a) + len(b) != len(c):
                return False
            if not c:
                # Lengths match, so a and b are empty too.
                return not a and not b
            return (
                bool(a) and a[0] == c[0] and can_weave(a[1:], b, c[1:])
            ) or (bool(b) and b[0] == c[0] and can_weave(a, b[1:], c[1:]))

        return can_weave(s1, s2, s3)
# TESTS
cases = [
    ("aabcc", "dbbca", "aadbbcbcac", True),
    ("aabcc", "dbbca", "aadbbbaccc", False),
    ("", "", "", True),
]
# Exercise both implementations against the same expectations.
for s1, s2, s3, expected in cases:
    sol = Solution()
    actual = sol.isInterleaveBU(s1, s2, s3)
    print(f"{s3} is formed by an interleaving of '{s1}'' and '{s2}' -> {actual}")
    assert actual == expected
    assert expected == sol.isInterleaveTD(s1, s2, s3)
| {
"repo_name": "l33tdaima/l33tdaima",
"path": "p097m/is_interleave.py",
"copies": "1",
"size": "1775",
"license": "mit",
"hash": -6008792071698867000,
"line_mean": 35.2244897959,
"line_max": 86,
"alpha_frac": 0.4304225352,
"autogenerated": false,
"ratio": 2.730769230769231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3661191765969231,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
@lru_cache()
def generate_map(serial):
    """Build the full 300x300 grid of power levels as nested tuples.

    Tuples are immutable, so the result is safe to keep in the lru_cache.
    """
    return tuple(
        tuple(powerlevel(serial, x + 1, y + 1) for x in range(300))
        for y in range(300)
    )
@lru_cache(maxsize=1280000)
def sum_square(serial, x, y, kernel_size):
    """Sum of the kernel_size x kernel_size square with top-left (x, y).

    Computed incrementally: take the (k-1)-square's cached sum and add the
    new rightmost column and bottom row (shared corner counted once).
    """
    grid = generate_map(serial)
    if kernel_size == 1:
        return grid[y][x]
    total = sum_square(serial, x, y, kernel_size - 1)
    right = x + kernel_size - 1
    bottom = y + kernel_size - 1
    # New rightmost column, full height of the square.
    for row in range(y, y + kernel_size):
        total += grid[row][right]
    # New bottom row, excluding the corner already added above.
    for col in range(x, x + kernel_size - 1):
        total += grid[bottom][col]
    return total
def fuel_cell(serial, kernel_size=3):
    """Find the kernel_size-square with the highest total power.

    Returns (x, y, total) in 1-based coordinates, or () when no square
    was scanned.
    NOTE(review): the ranges stop one origin short of the edge
    (300 - kernel_size rather than + 1); kept as-is to preserve results.
    """
    best = ()
    best_total = None
    for y in range(0, 300 - kernel_size):
        for x in range(0, 300 - kernel_size):
            total = sum_square(serial, x, y, kernel_size)
            if best_total is None or total > best_total:
                best = (x + 1, y + 1, total)
                best_total = total
    return best
def fuel_cell_varying_kernel(serial):
    """Search kernel sizes 1..300 for the best square overall.

    Returns (x, y, kernel_size, total).
    """
    best_result = None
    best_size = None
    for size in range(1, 301):
        candidate = fuel_cell(serial, kernel_size=size)
        if not candidate:
            # Kernel too large: no squares were scanned at this size.
            continue
        if best_result is None or best_result[2] < candidate[2]:
            best_result = candidate
            best_size = size
    return best_result[0], best_result[1], best_size, best_result[2]
def powerlevel(serial, x, y):
    """Fuel-cell power level (AoC 2018 day 11) for the 1-based cell (x, y).

    rack id = x + 10; power is the hundreds digit of
    (rack_id * y + serial) * rack_id, minus 5.
    """
    rack_id = x + 10
    level = (rack_id * y + serial) * rack_id
    # Hundreds digit by arithmetic instead of the original int(str(pl)[-3])
    # string slice; // 100 % 10 is equivalent for non-negative values and
    # yields 0 for levels below 100 without a special case.
    return (level // 100) % 10 - 5
def test_powerlevel():
    # Worked examples from the puzzle description.
    assert powerlevel(57, 122, 79) == -5
    assert powerlevel(39, 217, 196) == 0
    assert powerlevel(71, 101, 153) == 4
def test_sum_square():
    # 3x3 square at 0-based origin (0, 0) for the serial-18 example grid.
    assert sum_square(18, 0, 0, 3) == -3
def test_fuel_cell():
    # Part-1 examples: best 3x3 square for serials 18 and 42.
    assert fuel_cell(18) == (33, 45, 29)
    assert fuel_cell(42) == (21, 61, 30)
def test_fuel_cell_varying_kernel():
    # Part-2 examples: best square of any size (x, y, size, total).
    assert fuel_cell_varying_kernel(18) == (90, 269, 16, 113)
    assert fuel_cell_varying_kernel(42) == (232, 251, 12, 119)
if __name__ == '__main__':
    # Part 2 (best square of any size) first, then part 1 (3x3 square).
    print(fuel_cell_varying_kernel(5034))
    print(fuel_cell(5034))
| {
"repo_name": "matslindh/codingchallenges",
"path": "adventofcode2018/11.py",
"copies": "1",
"size": "2226",
"license": "mit",
"hash": -9193964186040557000,
"line_mean": 20,
"line_max": 62,
"alpha_frac": 0.5489667565,
"autogenerated": false,
"ratio": 3,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40489667565,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from custom import rewards
from stats.models import Award, Reward
# Award querysets partitioned by type. Django querysets are lazy: these are
# evaluated on first iteration and their results cached for reuse by the
# reward_* functions below.
_awards_tour = Award.objects.filter(type='tour')
_awards_mission = Award.objects.filter(type='mission')
_awards_sortie = Award.objects.filter(type='sortie')
_awards_vlife = Award.objects.filter(type='vlife')
@lru_cache(maxsize=32)
def get_reward_func(func_name):
    """Resolve a reward-condition callable from the custom rewards module (cached)."""
    return getattr(rewards, func_name)
@lru_cache(maxsize=512)
def rewarding(award_id, player_id):
    """Create the (award, player) Reward row if it does not exist.

    lru_cache short-circuits repeated grants for the same pair, sparing
    DB round-trips within the cache window.
    """
    return Reward.objects.get_or_create(player_id=player_id, award_id=award_id)
def reward_tour(player):
    """Grant every tour-type award whose condition holds for *player*."""
    for award in _awards_tour:
        condition = get_reward_func(award.func)
        if condition(player=player):
            rewarding(award_id=award.id, player_id=player.id)
def reward_mission(player_mission):
    """Grant mission-type awards earned by *player_mission*'s player."""
    player = player_mission.player
    for award in _awards_mission:
        condition = get_reward_func(award.func)
        if condition(player_mission=player_mission):
            rewarding(award_id=award.id, player_id=player.id)
def reward_sortie(sortie):
    """Grant sortie-type awards earned by *sortie*'s player."""
    player = sortie.player
    for award in _awards_sortie:
        condition = get_reward_func(award.func)
        if condition(sortie=sortie):
            rewarding(award_id=award.id, player_id=player.id)
def reward_vlife(vlife):
    """Grant virtual-life-type awards earned by *vlife*'s player."""
    player = vlife.player
    for award in _awards_vlife:
        condition = get_reward_func(award.func)
        if condition(vlife=vlife):
            rewarding(award_id=award.id, player_id=player.id)
| {
"repo_name": "vaal-/il2_stats",
"path": "src/stats/rewards.py",
"copies": "1",
"size": "1415",
"license": "mit",
"hash": -685535736879680900,
"line_mean": 28.1063829787,
"line_max": 79,
"alpha_frac": 0.6770318021,
"autogenerated": false,
"ratio": 3.036480686695279,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9213512488795279,
"avg_score": 0,
"num_lines": 47
} |
from functools import lru_cache
import cv2
import math
import numpy as np
import scipy
import scipy.integrate as integrate
from scipy.interpolate import interp1d
from visnav.algo import tools
from visnav.algo.image import ImageProc
from visnav.algo.model import Camera
from visnav.missions.didymos import DidymosSystemModel
from visnav.render.stars import Stars
class Sun:
    """Solar constants plus per-pixel solar flux modelling for a camera.

    Static helpers accumulate direct light, diffraction, scattering and
    (not yet implemented) lens ghosts into a flux-density image.
    """

    RADIUS = 695510e3  # in meters
    FLUX_DENSITY_AT_1AU = 1360.8  # in W/m2
    SOLID_ANGLE_AT_1AU = 6.807e-5  # in steradians
    AU = 1.495978707e11  # in meters
    TEMPERATURE = 5778  # in K
    METALLICITY = 0.012
    LOG_SURFACE_G = 4.43
    MAG_V = -26.7

    # Sample count of the cached diffraction interpolator.
    _DIFFRACTION_INTERPOLATION_N = 400
    # Per-camera-configuration interpolator cache, keyed by a param hash.
    _diffraction_relative_intensity_fun = {}
    # Lazily built interpolator over SOLAR_SPECTRAL_IRRADIANCE.
    _ssi_interp = None
    # Lazily computed normalization integral for ssr_simple.
    _tot_ssr_simple = None

    @staticmethod
    def sun_radius_rad(dist):
        """Angular radius [rad] of the Sun seen from distance *dist* [m]."""
        return math.atan(Sun.RADIUS/dist)

    @staticmethod
    def _unit_circle_segment_area(r):
        # NOTE(review): approximate area fraction of a unit circle cut by a
        # chord at relative offset r (sun-diameter units) -- verify formula.
        alpha = math.acos(r-1)
        unit_circle_area = np.pi
        if r > 0.5:
            return unit_circle_area - unit_circle_area*alpha/np.pi + (r-0.5)**2*math.tan(alpha)
        else:
            return unit_circle_area * alpha/np.pi - (0.5-r)**2*math.tan(np.pi-alpha)

    @staticmethod
    def flux_density(cam, sun_cf, mask=None):
        """
        calculate flux density from sun hitting the camera, sun_cf in camera frame and in meters

        Returns a (cam.height, cam.width) float32 array combining direct
        light, diffraction, scattering and ghosts, scaled by the
        inverse-square law relative to 1 AU.
        """
        flux_density = np.zeros((cam.height, cam.width), dtype=np.float32)
        sun_dist = np.linalg.norm(sun_cf)
        sun_cf_n = sun_cf/sun_dist
        lat, lon, _ = tools.cartesian2spherical(*sun_cf_n)
        if abs(tools.wrap_rads(lon)) > math.radians(cam.exclusion_angle_x) or abs(lat) > math.radians(cam.exclusion_angle_y):
            # baffle protected from all effects
            return flux_density
        visible_ratio, theta = Sun.direct(flux_density, cam, sun_cf, mask)
        Sun.diffraction(flux_density, cam, sun_cf, mask, visible_ratio, theta)
        # TODO: for visible_ratio take into account if cam shadowed by something while sun out of fov
        visible_ratio = 1
        Sun.scattering(flux_density, cam, sun_cf_n, visible_ratio, theta)
        Sun.ghosts(flux_density, cam, sun_cf_n, visible_ratio, theta)
        return (Sun.FLUX_DENSITY_AT_1AU * (Sun.AU / sun_dist)**2) * flux_density

    @staticmethod
    def direct(flux_density, cam, sun_cf, mask, accurate_theta=False):
        """Add direct solar illumination into *flux_density* in place.

        Returns (visible_ratio, theta), theta being each pixel's angle [rad]
        to the sun direction.
        """
        sun_dist = np.linalg.norm(sun_cf)
        sun_rad = Sun.sun_radius_rad(sun_dist)
        lat, lon, _ = tools.cartesian2spherical(*sun_cf)
        lon = tools.wrap_rads(lon)
        hfx, hfy = math.radians(cam.x_fov / 2), math.radians(cam.y_fov / 2)
        # Per-pixel view directions across the field of view.
        lats, lons = np.meshgrid(np.linspace(hfy, -hfy, cam.height), np.linspace(hfx, -hfx, cam.width))
        px_v_s = np.stack((lats.squeeze().T, lons.squeeze().T), axis=2)
        px_v_c = tools.spherical2cartesian_arr(px_v_s.reshape((-1, 2)), r=1)  # expensive
        theta = tools.angle_between_v_mx(sun_cf.reshape((-1, 1))/sun_dist, px_v_c, normalize=False) \
            .reshape((cam.height, cam.width))  # expensive
        # Pixels whose center falls inside the solar disk.
        direct = (theta < sun_rad).astype(np.float32)
        full = np.sum(direct)
        if abs(lon)+sun_rad > hfx or abs(lat)+sun_rad > hfy or full == 0:
            # sun out of fov or completely behind asteroid
            return 0, theta
        if mask is not None:
            direct[np.logical_not(mask)] = 0
        # update flux_density array
        flux_density += direct
        # Fraction of the solar disk inside the fov along each axis.
        x_ratio = 1 if abs(lon)-sun_rad < hfx else Sun._unit_circle_segment_area((hfx-abs(lon)+sun_rad)/(2*sun_rad))
        y_ratio = 1 if abs(lat)-sun_rad < hfy else Sun._unit_circle_segment_area((hfy-abs(lat)+sun_rad)/(2*sun_rad))
        # not strictly true as sun might not be in fov at all in case both x- and y-ratios are very small
        not_obscured = np.sum(direct)
        visible_ratio = not_obscured/full * x_ratio * y_ratio
        if 0 < not_obscured < full and accurate_theta:
            # Recompute theta against the visible part of the disk only.
            px_sun_v = px_v_c[(direct.reshape((-1, 1)) > 0).squeeze(), :]
            theta = np.min(tools.angle_between_mx(px_sun_v, px_v_c), axis=1).reshape((cam.height, cam.width))
        return visible_ratio, theta

    @staticmethod
    def diffraction(flux_density, cam, sun_cf, mask, visible_ratio, theta):
        """Add the Airy-pattern diffraction halo into *flux_density* in place."""
        # DIFFRACTION
        # from https://en.wikipedia.org/wiki/Airy_disk
        if visible_ratio == 0:
            # sun behind asteroid or out of fov, forget diffraction
            return
        sun_dist = np.linalg.norm(sun_cf)
        sun_rad = Sun.sun_radius_rad(sun_dist)
        diffraction = Sun.diffraction_relative_intensity(cam, sun_rad, theta)
        diffraction[flux_density > 0] = 0  # dont add diffraction on top of direct observation
        # if mask is not None:
        #     diffraction[np.logical_not(mask)] = 0
        # update flux_density
        flux_density += diffraction * visible_ratio

    @staticmethod
    def scattering(flux_density, cam, sun_cf_n, visible_ratio, theta):
        """Add a simple angular scattering term into *flux_density* in place."""
        # Using Rayleigh scattering, https://en.wikipedia.org/wiki/Rayleigh_scattering
        # ~ 1+cos(theta)**2 * "some coef"
        scattering = cam.scattering_coef * (1 + np.cos(theta)**2) * visible_ratio
        flux_density += scattering

    @staticmethod
    def ghosts(flux_density, cam, sun_cf_n, visible_ratio, theta):
        """Placeholder for lens-ghost modelling; currently a no-op."""
        # TODO
        pass

    @staticmethod
    def _diffraction(lam, aperture, theta):
        """Airy-pattern relative intensity at angle *theta* for wavelength *lam*.

        NOTE(review): aperture is scaled by 1e-3 here (mm -> m?) -- confirm
        the expected unit of *aperture*.
        """
        x = 2*np.pi/lam * aperture/2 * 1e-3 * np.sin(theta)
        return (2*scipy.special.j1(x)/x)**2

    @staticmethod
    def diffraction_relative_intensity(cam, sun_angular_radius, theta):
        """Vectorized diffraction intensity via a cached 1-D interpolator."""
        shape = theta.shape
        # Cache key covers every camera parameter that affects the curve.
        key = hash((cam.x_fov, cam.y_fov, cam.aperture, cam.lambda_min, cam.lambda_max, sun_angular_radius) + tuple(cam.qeff_coefs))
        if key not in Sun._diffraction_relative_intensity_fun:
            lim = math.radians(np.linalg.norm((cam.x_fov, cam.y_fov))) + sun_angular_radius*2
            examples = np.linspace(-lim, lim, Sun._DIFFRACTION_INTERPOLATION_N)
            values = np.array([Sun.diffraction_relative_intensity_single(cam.aperture, tuple(cam.qeff_coefs),
                                                                         cam.lambda_min, cam.lambda_max, th)
                               for th in examples])
            Sun._diffraction_relative_intensity_fun[key] = scipy.interpolate.interp1d(examples, values)
        return Sun._diffraction_relative_intensity_fun[key](theta.reshape((-1,))).reshape(shape)

    @staticmethod
    @lru_cache(maxsize=_DIFFRACTION_INTERPOLATION_N + 1)
    def diffraction_relative_intensity_single(aperture, qeff_coefs, lambda_min, lambda_max, theta):
        """
        Returns relative intensity for diffraction integrated over the spectra of the sun and the sensor
        """
        if theta <= 0:
            return 1
        # h = 6.626e-34  # planck constant (m2kg/s)
        # c = 3e8  # speed of light
        # k = 1.380649e-23  # Boltzmann constant
        # sun_sr = 6.807e-5  # sun steradians from earth
        # def phi(lam):
        #     # planck's law of black body radiation [W/m3/sr]
        #     r = 2*h*c**2/lam**5/(math.exp(h*c/lam/k/Sun.TEMPERATURE) - 1)
        #     return r
        qeff, _ = Camera.qeff_fn(qeff_coefs, lambda_min, lambda_max)

        def total(lam):
            # Integrand: sensor quantum efficiency x solar radiance x Airy pattern.
            return qeff(lam) * Sun.ssr_simple(lam) * Sun._diffraction(lam, aperture, theta)

        if Sun._tot_ssr_simple is None:
            tphi = integrate.quad(Sun.ssr_simple, 1.2e-7, 1e-3, limit=200)
            Sun._tot_ssr_simple = tphi[0]
        tint = integrate.quad(total, lambda_min, lambda_max, limit=50)
        return tint[0]/Sun._tot_ssr_simple

    @staticmethod
    def ssr(lam, ssi_table=None):
        """ solar spectral radiance in W/m3/sr """
        if ssi_table is None:
            ssi_table = SOLAR_SPECTRAL_IRRADIANCE
        if Sun._ssi_interp is None:
            # NOTE(review): the interpolator is cached from the first table
            # used; a later call with a different ssi_table reuses the old one.
            Sun._ssi_interp = interp1d(ssi_table[:, 0], ssi_table[:, 1], kind='linear')
        return Sun._ssi_interp(lam) / Sun.SOLID_ANGLE_AT_1AU

    @staticmethod
    def ssr_simple(lam):
        """ Approximation of solar spectral radiance [W/m3/sr] using planck's law of black body radiation """
        return Stars.black_body_radiation(Sun.TEMPERATURE, lam)
# 2000 ASTM Standard Extraterrestrial Spectrum Reference E-490-00 ([m], [W m-2 m-1])
# E490_00a_AM0.xls, https://rredc.nrel.gov/solar//spectra/am0/ASTM2000.html,
# @article{astm490,
# title={490. 2000 American Society for Testing \& Materials (ASTM) standard extraterrestrial solar spec-trum reference E-490-00 (2000)},
# author={ASTM, E},
# journal={from http://rredc.nrel.gov/solar/spectra/am0}
# }
# NOTE: FOR MORE ACCURACY, could use daily solar spectral irradiance from http://doi.org/10.7289/V51J97P6
SOLAR_SPECTRAL_IRRADIANCE = np.array((
(1.195e-07, 6.190e+04),
(1.205e-07, 5.614e+05),
(1.215e-07, 4.901e+06),
(1.225e-07, 1.184e+06),
(1.235e-07, 4.770e+04),
(1.245e-07, 3.430e+04),
(1.255e-07, 2.880e+04),
(1.265e-07, 3.520e+04),
(1.275e-07, 2.130e+04),
(1.285e-07, 1.730e+04),
(1.295e-07, 3.990e+04),
(1.305e-07, 1.206e+05),
(1.315e-07, 3.980e+04),
(1.325e-07, 4.130e+04),
(1.335e-07, 1.680e+05),
(1.345e-07, 4.570e+04),
(1.355e-07, 3.800e+04),
(1.365e-07, 3.090e+04),
(1.375e-07, 2.920e+04),
(1.385e-07, 3.970e+04),
(1.395e-07, 7.560e+04),
(1.405e-07, 6.080e+04),
(1.415e-07, 4.210e+04),
(1.425e-07, 4.680e+04),
(1.435e-07, 5.110e+04),
(1.445e-07, 5.090e+04),
(1.455e-07, 5.540e+04),
(1.465e-07, 7.090e+04),
(1.475e-07, 8.490e+04),
(1.485e-07, 8.200e+04),
(1.495e-07, 7.960e+04),
(1.505e-07, 8.700e+04),
(1.515e-07, 9.270e+04),
(1.525e-07, 1.163e+05),
(1.535e-07, 1.299e+05),
(1.545e-07, 2.059e+05),
(1.555e-07, 2.144e+05),
(1.565e-07, 1.847e+05),
(1.575e-07, 1.717e+05),
(1.585e-07, 1.675e+05),
(1.595e-07, 1.754e+05),
(1.605e-07, 1.934e+05),
(1.615e-07, 2.228e+05),
(1.625e-07, 2.519e+05),
(1.635e-07, 2.841e+05),
(1.645e-07, 2.973e+05),
(1.655e-07, 4.302e+05),
(1.665e-07, 3.989e+05),
(1.675e-07, 3.875e+05),
(1.685e-07, 4.556e+05),
(1.695e-07, 5.877e+05),
(1.705e-07, 6.616e+05),
(1.715e-07, 6.880e+05),
(1.725e-07, 7.252e+05),
(1.735e-07, 7.645e+05),
(1.745e-07, 9.067e+05),
(1.755e-07, 1.079e+06),
(1.765e-07, 1.220e+06),
(1.775e-07, 1.403e+06),
(1.785e-07, 1.538e+06),
(1.795e-07, 1.576e+06),
(1.805e-07, 1.831e+06),
(1.815e-07, 2.233e+06),
(1.825e-07, 2.243e+06),
(1.835e-07, 2.244e+06),
(1.845e-07, 2.066e+06),
(1.855e-07, 2.311e+06),
(1.865e-07, 2.700e+06),
(1.875e-07, 3.009e+06),
(1.885e-07, 3.291e+06),
(1.895e-07, 3.569e+06),
(1.905e-07, 3.764e+06),
(1.915e-07, 4.165e+06),
(1.925e-07, 4.113e+06),
(1.935e-07, 3.808e+06),
(1.945e-07, 5.210e+06),
(1.955e-07, 5.427e+06),
(1.965e-07, 6.008e+06),
(1.975e-07, 6.191e+06),
(1.985e-07, 6.187e+06),
(1.995e-07, 6.664e+06),
(2.005e-07, 7.326e+06),
(2.015e-07, 8.023e+06),
(2.025e-07, 8.261e+06),
(2.035e-07, 9.217e+06),
(2.045e-07, 1.025e+07),
(2.055e-07, 1.054e+07),
(2.065e-07, 1.108e+07),
(2.075e-07, 1.265e+07),
(2.085e-07, 1.505e+07),
(2.095e-07, 2.138e+07),
(2.105e-07, 2.792e+07),
(2.115e-07, 3.354e+07),
(2.125e-07, 3.130e+07),
(2.135e-07, 3.315e+07),
(2.145e-07, 4.003e+07),
(2.155e-07, 3.615e+07),
(2.165e-07, 3.227e+07),
(2.175e-07, 3.529e+07),
(2.185e-07, 4.437e+07),
(2.195e-07, 4.692e+07),
(2.205e-07, 4.733e+07),
(2.215e-07, 3.958e+07),
(2.225e-07, 4.965e+07),
(2.235e-07, 6.301e+07),
(2.245e-07, 5.897e+07),
(2.255e-07, 5.229e+07),
(2.265e-07, 3.940e+07),
(2.275e-07, 3.992e+07),
(2.285e-07, 5.195e+07),
(2.295e-07, 4.771e+07),
(2.305e-07, 5.212e+07),
(2.315e-07, 5.097e+07),
(2.325e-07, 5.326e+07),
(2.335e-07, 4.474e+07),
(2.345e-07, 3.897e+07),
(2.355e-07, 5.142e+07),
(2.365e-07, 4.859e+07),
(2.375e-07, 4.844e+07),
(2.385e-07, 4.196e+07),
(2.395e-07, 4.412e+07),
(2.405e-07, 3.956e+07),
(2.415e-07, 5.148e+07),
(2.425e-07, 7.060e+07),
(2.435e-07, 6.653e+07),
(2.445e-07, 6.097e+07),
(2.455e-07, 4.939e+07),
(2.465e-07, 5.040e+07),
(2.475e-07, 5.550e+07),
(2.485e-07, 4.565e+07),
(2.495e-07, 5.638e+07),
(2.505e-07, 6.010e+07),
(2.515e-07, 4.601e+07),
(2.525e-07, 4.155e+07),
(2.535e-07, 5.155e+07),
(2.545e-07, 5.957e+07),
(2.555e-07, 7.930e+07),
(2.565e-07, 1.018e+08),
(2.575e-07, 1.254e+08),
(2.585e-07, 1.251e+08),
(2.595e-07, 1.040e+08),
(2.605e-07, 8.551e+07),
(2.615e-07, 8.980e+07),
(2.625e-07, 1.036e+08),
(2.635e-07, 1.658e+08),
(2.645e-07, 2.497e+08),
(2.655e-07, 2.527e+08),
(2.665e-07, 2.494e+08),
(2.675e-07, 2.508e+08),
(2.685e-07, 2.438e+08),
(2.695e-07, 2.389e+08),
(2.705e-07, 2.673e+08),
(2.715e-07, 2.244e+08),
(2.725e-07, 1.974e+08),
(2.735e-07, 1.965e+08),
(2.745e-07, 1.326e+08),
(2.755e-07, 1.751e+08),
(2.765e-07, 2.428e+08),
(2.775e-07, 2.338e+08),
(2.785e-07, 1.593e+08),
(2.795e-07, 8.555e+07),
(2.805e-07, 9.463e+07),
(2.815e-07, 2.083e+08),
(2.825e-07, 2.941e+08),
(2.835e-07, 3.135e+08),
(2.845e-07, 2.353e+08),
(2.855e-07, 1.631e+08),
(2.865e-07, 3.227e+08),
(2.875e-07, 3.363e+08),
(2.885e-07, 3.222e+08),
(2.895e-07, 4.727e+08),
(2.905e-07, 6.013e+08),
(2.915e-07, 5.808e+08),
(2.925e-07, 5.219e+08),
(2.935e-07, 5.355e+08),
(2.945e-07, 5.088e+08),
(2.955e-07, 5.532e+08),
(2.965e-07, 5.096e+08),
(2.975e-07, 5.073e+08),
(2.985e-07, 4.655e+08),
(2.995e-07, 4.840e+08),
(3.005e-07, 4.200e+08),
(3.015e-07, 4.555e+08),
(3.025e-07, 4.890e+08),
(3.035e-07, 6.206e+08),
(3.045e-07, 6.025e+08),
(3.055e-07, 5.948e+08),
(3.065e-07, 5.557e+08),
(3.075e-07, 6.150e+08),
(3.085e-07, 6.114e+08),
(3.095e-07, 4.965e+08),
(3.105e-07, 6.224e+08),
(3.115e-07, 7.292e+08),
(3.125e-07, 6.559e+08),
(3.135e-07, 6.999e+08),
(3.145e-07, 6.629e+08),
(3.155e-07, 6.330e+08),
(3.165e-07, 6.332e+08),
(3.175e-07, 7.739e+08),
(3.185e-07, 6.649e+08),
(3.195e-07, 7.105e+08),
(3.205e-07, 8.051e+08),
(3.215e-07, 6.995e+08),
(3.225e-07, 6.886e+08),
(3.235e-07, 6.613e+08),
(3.245e-07, 7.608e+08),
(3.255e-07, 8.758e+08),
(3.265e-07, 9.795e+08),
(3.275e-07, 9.527e+08),
(3.285e-07, 9.176e+08),
(3.295e-07, 1.061e+09),
(3.305e-07, 1.016e+09),
(3.315e-07, 9.657e+08),
(3.325e-07, 9.549e+08),
(3.335e-07, 9.216e+08),
(3.345e-07, 9.589e+08),
(3.355e-07, 9.434e+08),
(3.365e-07, 8.095e+08),
(3.375e-07, 8.418e+08),
(3.385e-07, 9.215e+08),
(3.395e-07, 9.581e+08),
(3.405e-07, 1.007e+09),
(3.415e-07, 9.238e+08),
(3.425e-07, 9.930e+08),
(3.435e-07, 9.506e+08),
(3.445e-07, 7.957e+08),
(3.455e-07, 9.392e+08),
(3.465e-07, 9.264e+08),
(3.475e-07, 9.017e+08),
(3.485e-07, 8.972e+08),
(3.495e-07, 8.898e+08),
(3.505e-07, 1.050e+09),
(3.515e-07, 9.795e+08),
(3.525e-07, 9.079e+08),
(3.535e-07, 1.033e+09),
(3.545e-07, 1.111e+09),
(3.555e-07, 1.045e+09),
(3.565e-07, 9.123e+08),
(3.575e-07, 7.960e+08),
(3.585e-07, 6.936e+08),
(3.595e-07, 9.911e+08),
(3.605e-07, 9.708e+08),
(3.615e-07, 8.781e+08),
(3.625e-07, 9.978e+08),
(3.635e-07, 9.969e+08),
(3.645e-07, 1.013e+09),
(3.655e-07, 1.152e+09),
(3.665e-07, 1.233e+09),
(3.675e-07, 1.180e+09),
(3.685e-07, 1.101e+09),
(3.695e-07, 1.226e+09),
(3.705e-07, 1.139e+09),
(3.715e-07, 1.175e+09),
(3.725e-07, 1.054e+09),
(3.735e-07, 9.202e+08),
(3.745e-07, 9.004e+08),
(3.755e-07, 1.062e+09),
(3.765e-07, 1.085e+09),
(3.775e-07, 1.282e+09),
(3.785e-07, 1.327e+09),
(3.795e-07, 1.066e+09),
(3.805e-07, 1.202e+09),
(3.815e-07, 1.082e+09),
(3.825e-07, 7.913e+08),
(3.835e-07, 6.841e+08),
(3.845e-07, 9.597e+08),
(3.855e-07, 1.008e+09),
(3.865e-07, 1.007e+09),
(3.875e-07, 1.004e+09),
(3.885e-07, 9.843e+08),
(3.895e-07, 1.174e+09),
(3.905e-07, 1.247e+09),
(3.915e-07, 1.342e+09),
(3.925e-07, 1.019e+09),
(3.935e-07, 5.823e+08),
(3.945e-07, 1.026e+09),
(3.955e-07, 1.314e+09),
(3.965e-07, 8.545e+08),
(3.975e-07, 9.288e+08),
(3.985e-07, 1.522e+09),
(3.995e-07, 1.663e+09),
(4.005e-07, 1.682e+09),
(4.015e-07, 1.746e+09),
(4.025e-07, 1.759e+09),
(4.035e-07, 1.684e+09),
(4.045e-07, 1.674e+09),
(4.055e-07, 1.667e+09),
(4.065e-07, 1.589e+09),
(4.075e-07, 1.628e+09),
(4.085e-07, 1.735e+09),
(4.095e-07, 1.715e+09),
(4.105e-07, 1.532e+09),
(4.115e-07, 1.817e+09),
(4.125e-07, 1.789e+09),
(4.135e-07, 1.756e+09),
(4.145e-07, 1.737e+09),
(4.155e-07, 1.734e+09),
(4.165e-07, 1.842e+09),
(4.175e-07, 1.665e+09),
(4.185e-07, 1.684e+09),
(4.195e-07, 1.701e+09),
(4.205e-07, 1.757e+09),
(4.215e-07, 1.797e+09),
(4.225e-07, 1.582e+09),
(4.235e-07, 1.711e+09),
(4.245e-07, 1.767e+09),
(4.255e-07, 1.695e+09),
(4.265e-07, 1.698e+09),
(4.275e-07, 1.569e+09),
(4.285e-07, 1.587e+09),
(4.295e-07, 1.475e+09),
(4.305e-07, 1.135e+09),
(4.315e-07, 1.686e+09),
(4.325e-07, 1.646e+09),
(4.335e-07, 1.731e+09),
(4.345e-07, 1.670e+09),
(4.355e-07, 1.723e+09),
(4.365e-07, 1.929e+09),
(4.375e-07, 1.806e+09),
(4.385e-07, 1.567e+09),
(4.395e-07, 1.825e+09),
(4.405e-07, 1.713e+09),
(4.415e-07, 1.931e+09),
(4.425e-07, 1.980e+09),
(4.435e-07, 1.909e+09),
(4.445e-07, 1.973e+09),
(4.455e-07, 1.821e+09),
(4.465e-07, 1.891e+09),
(4.475e-07, 2.077e+09),
(4.485e-07, 1.973e+09),
(4.495e-07, 2.027e+09),
(4.505e-07, 2.144e+09),
(4.515e-07, 2.109e+09),
(4.525e-07, 1.941e+09),
(4.535e-07, 1.970e+09),
(4.545e-07, 1.979e+09),
(4.555e-07, 2.034e+09),
(4.565e-07, 2.077e+09),
(4.575e-07, 2.100e+09),
(4.585e-07, 1.971e+09),
(4.595e-07, 2.009e+09),
(4.605e-07, 2.040e+09),
(4.615e-07, 2.055e+09),
(4.625e-07, 2.104e+09),
(4.635e-07, 2.040e+09),
(4.645e-07, 1.976e+09),
(4.655e-07, 2.042e+09),
(4.665e-07, 1.921e+09),
(4.675e-07, 2.015e+09),
(4.685e-07, 1.994e+09),
(4.695e-07, 1.990e+09),
(4.705e-07, 1.877e+09),
(4.715e-07, 2.018e+09),
(4.725e-07, 2.041e+09),
(4.735e-07, 1.991e+09),
(4.745e-07, 2.051e+09),
(4.755e-07, 2.016e+09),
(4.765e-07, 1.956e+09),
(4.775e-07, 2.075e+09),
(4.785e-07, 2.009e+09),
(4.795e-07, 2.076e+09),
(4.805e-07, 2.035e+09),
(4.815e-07, 2.090e+09),
(4.825e-07, 2.023e+09),
(4.835e-07, 2.019e+09),
(4.845e-07, 1.969e+09),
(4.855e-07, 1.830e+09),
(4.865e-07, 1.625e+09),
(4.875e-07, 1.830e+09),
(4.885e-07, 1.914e+09),
(4.895e-07, 1.960e+09),
(4.905e-07, 2.007e+09),
(4.915e-07, 1.896e+09),
(4.925e-07, 1.896e+09),
(4.935e-07, 1.888e+09),
(4.945e-07, 2.058e+09),
(4.955e-07, 1.926e+09),
(4.965e-07, 2.017e+09),
(4.975e-07, 2.018e+09),
(4.985e-07, 1.866e+09),
(4.995e-07, 1.970e+09),
(5.005e-07, 1.857e+09),
(5.015e-07, 1.812e+09),
(5.025e-07, 1.894e+09),
(5.035e-07, 1.934e+09),
(5.045e-07, 1.869e+09),
(5.055e-07, 1.993e+09),
(5.065e-07, 1.961e+09),
(5.075e-07, 1.906e+09),
(5.085e-07, 1.919e+09),
(5.095e-07, 1.916e+09),
(5.105e-07, 1.947e+09),
(5.115e-07, 1.997e+09),
(5.125e-07, 1.867e+09),
(5.135e-07, 1.861e+09),
(5.145e-07, 1.874e+09),
(5.155e-07, 1.900e+09),
(5.165e-07, 1.669e+09),
(5.175e-07, 1.726e+09),
(5.185e-07, 1.654e+09),
(5.195e-07, 1.828e+09),
(5.205e-07, 1.831e+09),
(5.215e-07, 1.906e+09),
(5.225e-07, 1.823e+09),
(5.235e-07, 1.894e+09),
(5.245e-07, 1.958e+09),
(5.255e-07, 1.930e+09),
(5.265e-07, 1.674e+09),
(5.275e-07, 1.828e+09),
(5.285e-07, 1.897e+09),
(5.295e-07, 1.918e+09),
(5.305e-07, 1.952e+09),
(5.315e-07, 1.963e+09),
(5.325e-07, 1.770e+09),
(5.335e-07, 1.923e+09),
(5.345e-07, 1.858e+09),
(5.355e-07, 1.990e+09),
(5.365e-07, 1.871e+09),
(5.375e-07, 1.882e+09),
(5.385e-07, 1.904e+09),
(5.395e-07, 1.832e+09),
(5.405e-07, 1.769e+09),
(5.415e-07, 1.881e+09),
(5.425e-07, 1.825e+09),
(5.435e-07, 1.879e+09),
(5.445e-07, 1.879e+09),
(5.455e-07, 1.901e+09),
(5.465e-07, 1.879e+09),
(5.475e-07, 1.833e+09),
(5.485e-07, 1.863e+09),
(5.495e-07, 1.895e+09),
(5.505e-07, 1.862e+09),
(5.515e-07, 1.871e+09),
(5.525e-07, 1.846e+09),
(5.535e-07, 1.882e+09),
(5.545e-07, 1.898e+09),
(5.555e-07, 1.897e+09),
(5.565e-07, 1.821e+09),
(5.575e-07, 1.846e+09),
(5.585e-07, 1.787e+09),
(5.595e-07, 1.808e+09),
(5.605e-07, 1.843e+09),
(5.615e-07, 1.824e+09),
(5.625e-07, 1.850e+09),
(5.635e-07, 1.861e+09),
(5.645e-07, 1.854e+09),
(5.655e-07, 1.798e+09),
(5.665e-07, 1.829e+09),
(5.675e-07, 1.887e+09),
(5.685e-07, 1.810e+09),
(5.695e-07, 1.860e+09),
(5.705e-07, 1.769e+09),
(5.715e-07, 1.823e+09),
(5.725e-07, 1.892e+09),
(5.735e-07, 1.876e+09),
(5.745e-07, 1.867e+09),
(5.755e-07, 1.830e+09),
(5.765e-07, 1.846e+09),
(5.775e-07, 1.857e+09),
(5.785e-07, 1.783e+09),
(5.795e-07, 1.828e+09),
(5.805e-07, 1.838e+09),
(5.815e-07, 1.853e+09),
(5.825e-07, 1.873e+09),
(5.835e-07, 1.857e+09),
(5.845e-07, 1.860e+09),
(5.855e-07, 1.783e+09),
(5.865e-07, 1.830e+09),
(5.875e-07, 1.848e+09),
(5.885e-07, 1.750e+09),
(5.895e-07, 1.612e+09),
(5.905e-07, 1.813e+09),
(5.915e-07, 1.787e+09),
(5.925e-07, 1.808e+09),
(5.935e-07, 1.796e+09),
(5.945e-07, 1.773e+09),
(5.955e-07, 1.782e+09),
(5.965e-07, 1.805e+09),
(5.975e-07, 1.780e+09),
(5.985e-07, 1.757e+09),
(5.995e-07, 1.774e+09),
(6.005e-07, 1.746e+09),
(6.015e-07, 1.751e+09),
(6.025e-07, 1.719e+09),
(6.035e-07, 1.787e+09),
(6.045e-07, 1.776e+09),
(6.055e-07, 1.763e+09),
(6.065e-07, 1.759e+09),
(6.075e-07, 1.757e+09),
(6.085e-07, 1.743e+09),
(6.095e-07, 1.744e+09),
(6.105e-07, 1.703e+09),
(6.115e-07, 1.746e+09),
(6.125e-07, 1.705e+09),
(6.135e-07, 1.683e+09),
(6.145e-07, 1.713e+09),
(6.155e-07, 1.713e+09),
(6.165e-07, 1.609e+09),
(6.175e-07, 1.707e+09),
(6.185e-07, 1.724e+09),
(6.195e-07, 1.707e+09),
(6.205e-07, 1.734e+09),
(6.215e-07, 1.690e+09),
(6.225e-07, 1.713e+09),
(6.235e-07, 1.666e+09),
(6.245e-07, 1.656e+09),
(6.255e-07, 1.632e+09),
(6.265e-07, 1.697e+09),
(6.275e-07, 1.697e+09),
(6.285e-07, 1.697e+09),
(6.295e-07, 1.677e+09),
(6.310e-07, 1.639e+09),
(6.330e-07, 1.651e+09),
(6.350e-07, 1.656e+09),
(6.370e-07, 1.654e+09),
(6.390e-07, 1.651e+09),
(6.410e-07, 1.614e+09),
(6.430e-07, 1.621e+09),
(6.450e-07, 1.627e+09),
(6.470e-07, 1.603e+09),
(6.490e-07, 1.558e+09),
(6.510e-07, 1.606e+09),
(6.530e-07, 1.599e+09),
(6.550e-07, 1.532e+09),
(6.570e-07, 1.384e+09),
(6.590e-07, 1.549e+09),
(6.610e-07, 1.571e+09),
(6.630e-07, 1.555e+09),
(6.650e-07, 1.560e+09),
(6.670e-07, 1.535e+09),
(6.690e-07, 1.546e+09),
(6.710e-07, 1.516e+09),
(6.730e-07, 1.521e+09),
(6.750e-07, 1.510e+09),
(6.770e-07, 1.508e+09),
(6.790e-07, 1.498e+09),
(6.810e-07, 1.492e+09),
(6.830e-07, 1.479e+09),
(6.850e-07, 1.455e+09),
(6.870e-07, 1.467e+09),
(6.890e-07, 1.461e+09),
(6.910e-07, 1.448e+09),
(6.930e-07, 1.448e+09),
(6.950e-07, 1.436e+09),
(6.970e-07, 1.416e+09),
(6.990e-07, 1.425e+09),
(7.010e-07, 1.386e+09),
(7.030e-07, 1.388e+09),
(7.050e-07, 1.415e+09),
(7.070e-07, 1.400e+09),
(7.090e-07, 1.384e+09),
(7.110e-07, 1.385e+09),
(7.130e-07, 1.373e+09),
(7.150e-07, 1.366e+09),
(7.170e-07, 1.354e+09),
(7.190e-07, 1.328e+09),
(7.210e-07, 1.331e+09),
(7.230e-07, 1.348e+09),
(7.250e-07, 1.350e+09),
(7.270e-07, 1.346e+09),
(7.290e-07, 1.319e+09),
(7.310e-07, 1.326e+09),
(7.330e-07, 1.318e+09),
(7.350e-07, 1.309e+09),
(7.370e-07, 1.307e+09),
(7.390e-07, 1.278e+09),
(7.410e-07, 1.258e+09),
(7.430e-07, 1.286e+09),
(7.450e-07, 1.279e+09),
(7.470e-07, 1.283e+09),
(7.490e-07, 1.270e+09),
(7.510e-07, 1.262e+09),
(7.530e-07, 1.259e+09),
(7.550e-07, 1.255e+09),
(7.570e-07, 1.248e+09),
(7.590e-07, 1.240e+09),
(7.610e-07, 1.237e+09),
(7.630e-07, 1.241e+09),
(7.650e-07, 1.221e+09),
(7.670e-07, 1.185e+09),
(7.690e-07, 1.203e+09),
(7.710e-07, 1.204e+09),
(7.730e-07, 1.208e+09),
(7.750e-07, 1.188e+09),
(7.770e-07, 1.196e+09),
(7.790e-07, 1.187e+09),
(7.810e-07, 1.187e+09),
(7.830e-07, 1.176e+09),
(7.850e-07, 1.180e+09),
(7.870e-07, 1.177e+09),
(7.890e-07, 1.174e+09),
(7.910e-07, 1.158e+09),
(7.930e-07, 1.143e+09),
(7.950e-07, 1.134e+09),
(7.970e-07, 1.152e+09),
(7.990e-07, 1.135e+09),
(8.010e-07, 1.142e+09),
(8.030e-07, 1.129e+09),
(8.050e-07, 1.115e+09),
(8.070e-07, 1.120e+09),
(8.090e-07, 1.095e+09),
(8.110e-07, 1.114e+09),
(8.130e-07, 1.115e+09),
(8.150e-07, 1.107e+09),
(8.170e-07, 1.104e+09),
(8.190e-07, 1.063e+09),
(8.210e-07, 1.080e+09),
(8.230e-07, 1.073e+09),
(8.250e-07, 1.075e+09),
(8.260e-07, 1.080e+09),
(8.280e-07, 1.081e+09),
(8.300e-07, 1.063e+09),
(8.320e-07, 1.051e+09),
(8.340e-07, 1.041e+09),
(8.360e-07, 1.052e+09),
(8.380e-07, 1.044e+09),
(8.400e-07, 1.040e+09),
(8.420e-07, 1.036e+09),
(8.440e-07, 1.024e+09),
(8.460e-07, 1.028e+09),
(8.480e-07, 1.023e+09),
(8.500e-07, 9.660e+08),
(8.520e-07, 9.961e+08),
(8.540e-07, 8.780e+08),
(8.560e-07, 9.755e+08),
(8.580e-07, 1.005e+09),
(8.600e-07, 9.969e+08),
(8.620e-07, 9.949e+08),
(8.640e-07, 9.993e+08),
(8.660e-07, 8.862e+08),
(8.680e-07, 9.395e+08),
(8.700e-07, 9.747e+08),
(8.720e-07, 9.833e+08),
(8.740e-07, 9.713e+08),
(8.760e-07, 9.640e+08),
(8.780e-07, 9.749e+08),
(8.800e-07, 9.554e+08),
(8.820e-07, 9.511e+08),
(8.840e-07, 9.579e+08),
(8.860e-07, 9.383e+08),
(8.880e-07, 9.443e+08),
(8.900e-07, 9.530e+08),
(8.920e-07, 9.394e+08),
(8.940e-07, 9.332e+08),
(8.960e-07, 9.387e+08),
(8.980e-07, 9.339e+08),
(9.000e-07, 9.158e+08),
(9.020e-07, 8.916e+08),
(9.040e-07, 9.285e+08),
(9.060e-07, 9.176e+08),
(9.080e-07, 9.025e+08),
(9.100e-07, 8.916e+08),
(9.120e-07, 8.967e+08),
(9.140e-07, 9.071e+08),
(9.160e-07, 9.004e+08),
(9.180e-07, 8.951e+08),
(9.200e-07, 8.908e+08),
(9.220e-07, 8.630e+08),
(9.240e-07, 8.585e+08),
(9.260e-07, 8.612e+08),
(9.280e-07, 8.769e+08),
(9.300e-07, 8.677e+08),
(9.320e-07, 8.651e+08),
(9.340e-07, 8.641e+08),
(9.360e-07, 8.547e+08),
(9.380e-07, 8.580e+08),
(9.400e-07, 8.438e+08),
(9.420e-07, 8.250e+08),
(9.440e-07, 8.324e+08),
(9.460e-07, 8.375e+08),
(9.480e-07, 8.407e+08),
(9.500e-07, 8.369e+08),
(9.520e-07, 8.317e+08),
(9.540e-07, 8.080e+08),
(9.560e-07, 8.082e+08),
(9.580e-07, 8.188e+08),
(9.600e-07, 8.151e+08),
(9.620e-07, 8.089e+08),
(9.640e-07, 8.013e+08),
(9.660e-07, 7.947e+08),
(9.680e-07, 7.969e+08),
(9.700e-07, 7.959e+08),
(9.720e-07, 7.936e+08),
(9.740e-07, 7.815e+08),
(9.760e-07, 7.825e+08),
(9.780e-07, 7.779e+08),
(9.800e-07, 7.746e+08),
(9.820e-07, 7.764e+08),
(9.840e-07, 7.698e+08),
(9.860e-07, 7.661e+08),
(9.880e-07, 7.615e+08),
(9.900e-07, 7.541e+08),
(9.920e-07, 7.567e+08),
(9.940e-07, 7.556e+08),
(9.960e-07, 7.525e+08),
(9.980e-07, 7.510e+08),
(1.000e-06, 7.479e+08),
(1.002e-06, 7.469e+08),
(1.004e-06, 7.261e+08),
(1.006e-06, 7.136e+08),
(1.008e-06, 7.335e+08),
(1.010e-06, 7.313e+08),
(1.012e-06, 7.262e+08),
(1.014e-06, 7.210e+08),
(1.016e-06, 7.139e+08),
(1.018e-06, 7.107e+08),
(1.020e-06, 7.041e+08),
(1.022e-06, 7.021e+08),
(1.024e-06, 7.054e+08),
(1.026e-06, 7.027e+08),
(1.028e-06, 6.989e+08),
(1.030e-06, 6.937e+08),
(1.032e-06, 6.905e+08),
(1.034e-06, 6.817e+08),
(1.036e-06, 6.840e+08),
(1.038e-06, 6.772e+08),
(1.040e-06, 6.761e+08),
(1.042e-06, 6.746e+08),
(1.044e-06, 6.714e+08),
(1.046e-06, 6.600e+08),
(1.048e-06, 6.644e+08),
(1.050e-06, 6.622e+08),
(1.052e-06, 6.586e+08),
(1.054e-06, 6.549e+08),
(1.056e-06, 6.557e+08),
(1.058e-06, 6.451e+08),
(1.060e-06, 6.415e+08),
(1.062e-06, 6.438e+08),
(1.064e-06, 6.459e+08),
(1.066e-06, 6.395e+08),
(1.068e-06, 6.317e+08),
(1.070e-06, 6.241e+08),
(1.072e-06, 6.326e+08),
(1.074e-06, 6.276e+08),
(1.076e-06, 6.280e+08),
(1.078e-06, 6.272e+08),
(1.080e-06, 6.247e+08),
(1.082e-06, 6.099e+08),
(1.084e-06, 6.180e+08),
(1.086e-06, 6.208e+08),
(1.088e-06, 6.103e+08),
(1.090e-06, 6.199e+08),
(1.092e-06, 6.159e+08),
(1.094e-06, 5.849e+08),
(1.096e-06, 5.983e+08),
(1.098e-06, 5.961e+08),
(1.100e-06, 6.042e+08),
(1.102e-06, 5.932e+08),
(1.104e-06, 5.974e+08),
(1.106e-06, 5.945e+08),
(1.108e-06, 5.916e+08),
(1.110e-06, 5.906e+08),
(1.112e-06, 5.843e+08),
(1.114e-06, 5.844e+08),
(1.116e-06, 5.831e+08),
(1.118e-06, 5.815e+08),
(1.120e-06, 5.741e+08),
(1.122e-06, 5.796e+08),
(1.124e-06, 5.769e+08),
(1.126e-06, 5.655e+08),
(1.128e-06, 5.700e+08),
(1.130e-06, 5.653e+08),
(1.132e-06, 5.678e+08),
(1.134e-06, 5.638e+08),
(1.136e-06, 5.658e+08),
(1.138e-06, 5.569e+08),
(1.140e-06, 5.530e+08),
(1.142e-06, 5.531e+08),
(1.144e-06, 5.514e+08),
(1.146e-06, 5.548e+08),
(1.148e-06, 5.525e+08),
(1.150e-06, 5.489e+08),
(1.152e-06, 5.458e+08),
(1.154e-06, 5.479e+08),
(1.156e-06, 5.455e+08),
(1.158e-06, 5.435e+08),
(1.160e-06, 5.320e+08),
(1.162e-06, 5.325e+08),
(1.164e-06, 5.332e+08),
(1.166e-06, 5.303e+08),
(1.168e-06, 5.312e+08),
(1.170e-06, 5.276e+08),
(1.172e-06, 5.315e+08),
(1.174e-06, 5.273e+08),
(1.176e-06, 5.184e+08),
(1.178e-06, 5.190e+08),
(1.180e-06, 5.239e+08),
(1.182e-06, 5.159e+08),
(1.184e-06, 5.103e+08),
(1.186e-06, 5.187e+08),
(1.188e-06, 5.075e+08),
(1.190e-06, 5.085e+08),
(1.192e-06, 5.161e+08),
(1.194e-06, 5.145e+08),
(1.196e-06, 5.084e+08),
(1.198e-06, 4.943e+08),
(1.200e-06, 5.003e+08),
(1.202e-06, 5.068e+08),
(1.204e-06, 4.948e+08),
(1.206e-06, 5.039e+08),
(1.208e-06, 4.890e+08),
(1.210e-06, 4.882e+08),
(1.212e-06, 4.933e+08),
(1.214e-06, 4.942e+08),
(1.216e-06, 4.930e+08),
(1.218e-06, 4.897e+08),
(1.220e-06, 4.875e+08),
(1.222e-06, 4.854e+08),
(1.224e-06, 4.846e+08),
(1.226e-06, 4.817e+08),
(1.228e-06, 4.771e+08),
(1.230e-06, 4.792e+08),
(1.232e-06, 4.750e+08),
(1.234e-06, 4.729e+08),
(1.236e-06, 4.719e+08),
(1.238e-06, 4.703e+08),
(1.240e-06, 4.653e+08),
(1.242e-06, 4.642e+08),
(1.244e-06, 4.619e+08),
(1.246e-06, 4.635e+08),
(1.248e-06, 4.633e+08),
(1.250e-06, 4.624e+08),
(1.252e-06, 4.571e+08),
(1.254e-06, 4.574e+08),
(1.256e-06, 4.551e+08),
(1.258e-06, 4.533e+08),
(1.260e-06, 4.530e+08),
(1.262e-06, 4.497e+08),
(1.264e-06, 4.478e+08),
(1.266e-06, 4.467e+08),
(1.268e-06, 4.417e+08),
(1.270e-06, 4.453e+08),
(1.272e-06, 4.452e+08),
(1.274e-06, 4.431e+08),
(1.276e-06, 4.451e+08),
(1.278e-06, 4.440e+08),
(1.280e-06, 4.356e+08),
(1.282e-06, 4.014e+08),
(1.284e-06, 4.259e+08),
(1.286e-06, 4.328e+08),
(1.288e-06, 4.314e+08),
(1.290e-06, 4.255e+08),
(1.292e-06, 4.254e+08),
(1.294e-06, 4.223e+08),
(1.296e-06, 4.224e+08),
(1.298e-06, 4.184e+08),
(1.300e-06, 4.186e+08),
(1.302e-06, 4.139e+08),
(1.304e-06, 4.111e+08),
(1.306e-06, 4.136e+08),
(1.308e-06, 4.123e+08),
(1.310e-06, 4.106e+08),
(1.312e-06, 4.033e+08),
(1.314e-06, 4.022e+08),
(1.316e-06, 3.979e+08),
(1.318e-06, 4.017e+08),
(1.320e-06, 4.016e+08),
(1.322e-06, 3.986e+08),
(1.324e-06, 3.981e+08),
(1.326e-06, 3.949e+08),
(1.328e-06, 3.908e+08),
(1.330e-06, 3.878e+08),
(1.332e-06, 3.863e+08),
(1.334e-06, 3.892e+08),
(1.336e-06, 3.866e+08),
(1.338e-06, 3.832e+08),
(1.340e-06, 3.790e+08),
(1.342e-06, 3.805e+08),
(1.344e-06, 3.798e+08),
(1.346e-06, 3.772e+08),
(1.348e-06, 3.766e+08),
(1.350e-06, 3.724e+08),
(1.352e-06, 3.742e+08),
(1.354e-06, 3.722e+08),
(1.356e-06, 3.675e+08),
(1.358e-06, 3.688e+08),
(1.360e-06, 3.673e+08),
(1.362e-06, 3.677e+08),
(1.364e-06, 3.657e+08),
(1.366e-06, 3.657e+08),
(1.368e-06, 3.628e+08),
(1.370e-06, 3.599e+08),
(1.372e-06, 3.621e+08),
(1.374e-06, 3.611e+08),
(1.376e-06, 3.561e+08),
(1.378e-06, 3.580e+08),
(1.380e-06, 3.579e+08),
(1.382e-06, 3.545e+08),
(1.384e-06, 3.547e+08),
(1.386e-06, 3.532e+08),
(1.388e-06, 3.530e+08),
(1.390e-06, 3.506e+08),
(1.392e-06, 3.513e+08),
(1.394e-06, 3.488e+08),
(1.396e-06, 3.487e+08),
(1.398e-06, 3.492e+08),
(1.400e-06, 3.427e+08),
(1.402e-06, 3.439e+08),
(1.404e-06, 3.428e+08),
(1.406e-06, 3.431e+08),
(1.408e-06, 3.427e+08),
(1.410e-06, 3.418e+08),
(1.412e-06, 3.348e+08),
(1.414e-06, 3.377e+08),
(1.416e-06, 3.385e+08),
(1.418e-06, 3.386e+08),
(1.420e-06, 3.357e+08),
(1.422e-06, 3.315e+08),
(1.424e-06, 3.311e+08),
(1.426e-06, 3.281e+08),
(1.428e-06, 3.285e+08),
(1.430e-06, 3.257e+08),
(1.432e-06, 3.300e+08),
(1.434e-06, 3.284e+08),
(1.436e-06, 3.285e+08),
(1.438e-06, 3.283e+08),
(1.440e-06, 3.188e+08),
(1.442e-06, 3.186e+08),
(1.444e-06, 3.197e+08),
(1.446e-06, 3.216e+08),
(1.448e-06, 3.216e+08),
(1.450e-06, 3.187e+08),
(1.452e-06, 3.154e+08),
(1.454e-06, 3.143e+08),
(1.456e-06, 3.131e+08),
(1.458e-06, 3.167e+08),
(1.460e-06, 3.156e+08),
(1.462e-06, 3.121e+08),
(1.464e-06, 3.105e+08),
(1.466e-06, 3.108e+08),
(1.468e-06, 3.114e+08),
(1.470e-06, 3.102e+08),
(1.472e-06, 3.073e+08),
(1.474e-06, 3.034e+08),
(1.476e-06, 3.048e+08),
(1.478e-06, 3.044e+08),
(1.480e-06, 3.068e+08),
(1.482e-06, 3.044e+08),
(1.484e-06, 3.039e+08),
(1.486e-06, 3.033e+08),
(1.488e-06, 2.855e+08),
(1.490e-06, 3.015e+08),
(1.492e-06, 3.018e+08),
(1.494e-06, 3.033e+08),
(1.496e-06, 2.972e+08),
(1.498e-06, 2.994e+08),
(1.500e-06, 3.011e+08),
(1.502e-06, 2.924e+08),
(1.504e-06, 2.799e+08),
(1.506e-06, 2.848e+08),
(1.508e-06, 2.919e+08),
(1.510e-06, 2.947e+08),
(1.512e-06, 2.913e+08),
(1.514e-06, 2.883e+08),
(1.516e-06, 2.882e+08),
(1.518e-06, 2.884e+08),
(1.520e-06, 2.866e+08),
(1.522e-06, 2.824e+08),
(1.524e-06, 2.835e+08),
(1.526e-06, 2.846e+08),
(1.528e-06, 2.846e+08),
(1.530e-06, 2.765e+08),
(1.532e-06, 2.823e+08),
(1.534e-06, 2.784e+08),
(1.536e-06, 2.806e+08),
(1.538e-06, 2.773e+08),
(1.540e-06, 2.730e+08),
(1.542e-06, 2.753e+08),
(1.544e-06, 2.778e+08),
(1.546e-06, 2.772e+08),
(1.548e-06, 2.711e+08),
(1.550e-06, 2.713e+08),
(1.552e-06, 2.731e+08),
(1.554e-06, 2.676e+08),
(1.556e-06, 2.671e+08),
(1.558e-06, 2.689e+08),
(1.560e-06, 2.683e+08),
(1.562e-06, 2.697e+08),
(1.564e-06, 2.669e+08),
(1.566e-06, 2.654e+08),
(1.568e-06, 2.633e+08),
(1.570e-06, 2.645e+08),
(1.572e-06, 2.673e+08),
(1.574e-06, 2.610e+08),
(1.576e-06, 2.536e+08),
(1.578e-06, 2.547e+08),
(1.580e-06, 2.650e+08),
(1.582e-06, 2.590e+08),
(1.584e-06, 2.591e+08),
(1.586e-06, 2.599e+08),
(1.588e-06, 2.490e+08),
(1.590e-06, 2.405e+08),
(1.592e-06, 2.526e+08),
(1.594e-06, 2.583e+08),
(1.596e-06, 2.506e+08),
(1.598e-06, 2.545e+08),
(1.600e-06, 2.512e+08),
(1.602e-06, 2.489e+08),
(1.604e-06, 2.497e+08),
(1.606e-06, 2.477e+08),
(1.608e-06, 2.491e+08),
(1.610e-06, 2.400e+08),
(1.612e-06, 2.430e+08),
(1.614e-06, 2.449e+08),
(1.616e-06, 2.374e+08),
(1.618e-06, 2.423e+08),
(1.620e-06, 2.369e+08),
(1.622e-06, 2.383e+08),
(1.624e-06, 2.416e+08),
(1.626e-06, 2.402e+08),
(1.628e-06, 2.418e+08),
(1.630e-06, 2.393e+08),
(1.632e-06, 2.387e+08),
(1.634e-06, 2.359e+08),
(1.636e-06, 2.357e+08),
(1.638e-06, 2.274e+08),
(1.640e-06, 2.262e+08),
(1.642e-06, 2.266e+08),
(1.644e-06, 2.278e+08),
(1.646e-06, 2.294e+08),
(1.648e-06, 2.292e+08),
(1.650e-06, 2.272e+08),
(1.652e-06, 2.268e+08),
(1.654e-06, 2.262e+08),
(1.656e-06, 2.260e+08),
(1.658e-06, 2.252e+08),
(1.660e-06, 2.245e+08),
(1.662e-06, 2.246e+08),
(1.664e-06, 2.227e+08),
(1.666e-06, 2.212e+08),
(1.668e-06, 2.193e+08),
(1.670e-06, 2.225e+08),
(1.672e-06, 2.173e+08),
(1.674e-06, 2.193e+08),
(1.676e-06, 2.161e+08),
(1.678e-06, 2.168e+08),
(1.680e-06, 2.080e+08),
(1.682e-06, 2.054e+08),
(1.684e-06, 2.129e+08),
(1.686e-06, 2.131e+08),
(1.688e-06, 2.120e+08),
(1.690e-06, 2.105e+08),
(1.692e-06, 2.123e+08),
(1.694e-06, 2.112e+08),
(1.696e-06, 2.100e+08),
(1.698e-06, 2.089e+08),
(1.700e-06, 2.063e+08),
(1.702e-06, 2.047e+08),
(1.704e-06, 2.052e+08),
(1.706e-06, 2.050e+08),
(1.708e-06, 2.017e+08),
(1.710e-06, 2.013e+08),
(1.712e-06, 1.982e+08),
(1.714e-06, 2.037e+08),
(1.716e-06, 2.022e+08),
(1.718e-06, 2.010e+08),
(1.720e-06, 1.993e+08),
(1.722e-06, 1.975e+08),
(1.724e-06, 1.954e+08),
(1.726e-06, 1.982e+08),
(1.728e-06, 1.971e+08),
(1.730e-06, 1.984e+08),
(1.732e-06, 1.936e+08),
(1.734e-06, 1.874e+08),
(1.736e-06, 1.827e+08),
(1.738e-06, 1.863e+08),
(1.740e-06, 1.905e+08),
(1.742e-06, 1.902e+08),
(1.744e-06, 1.907e+08),
(1.746e-06, 1.867e+08),
(1.748e-06, 1.872e+08),
(1.750e-06, 1.858e+08),
(1.752e-06, 1.850e+08),
(1.754e-06, 1.856e+08),
(1.756e-06, 1.849e+08),
(1.758e-06, 1.843e+08),
(1.760e-06, 1.831e+08),
(1.762e-06, 1.793e+08),
(1.764e-06, 1.807e+08),
(1.766e-06, 1.817e+08),
(1.768e-06, 1.802e+08),
(1.770e-06, 1.791e+08),
(1.772e-06, 1.794e+08),
(1.774e-06, 1.792e+08),
(1.776e-06, 1.763e+08),
(1.778e-06, 1.747e+08),
(1.780e-06, 1.756e+08),
(1.782e-06, 1.747e+08),
(1.784e-06, 1.735e+08),
(1.786e-06, 1.739e+08),
(1.788e-06, 1.747e+08),
(1.790e-06, 1.733e+08),
(1.792e-06, 1.721e+08),
(1.794e-06, 1.709e+08),
(1.796e-06, 1.706e+08),
(1.798e-06, 1.703e+08),
(1.800e-06, 1.699e+08),
(1.802e-06, 1.672e+08),
(1.804e-06, 1.688e+08),
(1.806e-06, 1.688e+08),
(1.808e-06, 1.685e+08),
(1.810e-06, 1.686e+08),
(1.812e-06, 1.675e+08),
(1.814e-06, 1.658e+08),
(1.816e-06, 1.605e+08),
(1.818e-06, 1.520e+08),
(1.820e-06, 1.596e+08),
(1.822e-06, 1.598e+08),
(1.824e-06, 1.624e+08),
(1.826e-06, 1.628e+08),
(1.828e-06, 1.611e+08),
(1.830e-06, 1.606e+08),
(1.832e-06, 1.593e+08),
(1.834e-06, 1.585e+08),
(1.836e-06, 1.581e+08),
(1.838e-06, 1.562e+08),
(1.840e-06, 1.562e+08),
(1.842e-06, 1.540e+08),
(1.844e-06, 1.541e+08),
(1.846e-06, 1.535e+08),
(1.848e-06, 1.510e+08),
(1.850e-06, 1.546e+08),
(1.852e-06, 1.534e+08),
(1.854e-06, 1.525e+08),
(1.856e-06, 1.509e+08),
(1.858e-06, 1.525e+08),
(1.860e-06, 1.503e+08),
(1.862e-06, 1.504e+08),
(1.864e-06, 1.509e+08),
(1.866e-06, 1.494e+08),
(1.868e-06, 1.492e+08),
(1.870e-06, 1.508e+08),
(1.872e-06, 1.473e+08),
(1.874e-06, 1.401e+08),
(1.876e-06, 1.299e+08),
(1.878e-06, 1.441e+08),
(1.880e-06, 1.462e+08),
(1.882e-06, 1.474e+08),
(1.884e-06, 1.464e+08),
(1.886e-06, 1.439e+08),
(1.888e-06, 1.453e+08),
(1.890e-06, 1.424e+08),
(1.892e-06, 1.408e+08),
(1.894e-06, 1.396e+08),
(1.896e-06, 1.373e+08),
(1.898e-06, 1.390e+08),
(1.900e-06, 1.397e+08),
(1.902e-06, 1.409e+08),
(1.904e-06, 1.386e+08),
(1.906e-06, 1.390e+08),
(1.908e-06, 1.377e+08),
(1.910e-06, 1.378e+08),
(1.912e-06, 1.354e+08),
(1.914e-06, 1.370e+08),
(1.916e-06, 1.360e+08),
(1.918e-06, 1.353e+08),
(1.920e-06, 1.333e+08),
(1.922e-06, 1.350e+08),
(1.924e-06, 1.341e+08),
(1.926e-06, 1.344e+08),
(1.928e-06, 1.322e+08),
(1.930e-06, 1.313e+08),
(1.932e-06, 1.308e+08),
(1.934e-06, 1.320e+08),
(1.936e-06, 1.328e+08),
(1.938e-06, 1.321e+08),
(1.940e-06, 1.299e+08),
(1.942e-06, 1.294e+08),
(1.944e-06, 1.203e+08),
(1.946e-06, 1.192e+08),
(1.948e-06, 1.271e+08),
(1.950e-06, 1.261e+08),
(1.952e-06, 1.255e+08),
(1.954e-06, 1.286e+08),
(1.956e-06, 1.276e+08),
(1.958e-06, 1.271e+08),
(1.960e-06, 1.261e+08),
(1.962e-06, 1.240e+08),
(1.964e-06, 1.222e+08),
(1.966e-06, 1.231e+08),
(1.968e-06, 1.240e+08),
(1.970e-06, 1.239e+08),
(1.972e-06, 1.213e+08),
(1.974e-06, 1.208e+08),
(1.976e-06, 1.224e+08),
(1.978e-06, 1.194e+08),
(1.980e-06, 1.196e+08),
(1.982e-06, 1.205e+08),
(1.984e-06, 1.197e+08),
(1.986e-06, 1.178e+08),
(1.988e-06, 1.195e+08),
(1.990e-06, 1.198e+08),
(1.992e-06, 1.180e+08),
(1.994e-06, 1.162e+08),
(1.996e-06, 1.173e+08),
(1.998e-06, 1.159e+08),
(2.000e-06, 1.170e+08),
(2.002e-06, 1.161e+08),
(2.004e-06, 1.148e+08),
(2.006e-06, 1.147e+08),
(2.008e-06, 1.154e+08),
(2.010e-06, 1.149e+08),
(2.012e-06, 1.145e+08),
(2.014e-06, 1.138e+08),
(2.016e-06, 1.137e+08),
(2.018e-06, 1.134e+08),
(2.020e-06, 1.116e+08),
(2.022e-06, 1.107e+08),
(2.024e-06, 1.116e+08),
(2.026e-06, 1.115e+08),
(2.028e-06, 1.107e+08),
(2.030e-06, 1.086e+08),
(2.032e-06, 1.098e+08),
(2.034e-06, 1.092e+08),
(2.036e-06, 1.083e+08),
(2.038e-06, 1.064e+08),
(2.040e-06, 1.078e+08),
(2.042e-06, 1.076e+08),
(2.044e-06, 1.076e+08),
(2.046e-06, 1.071e+08),
(2.048e-06, 1.063e+08),
(2.050e-06, 1.059e+08),
(2.052e-06, 1.047e+08),
(2.054e-06, 1.046e+08),
(2.056e-06, 1.046e+08),
(2.058e-06, 1.040e+08),
(2.060e-06, 1.028e+08),
(2.062e-06, 1.023e+08),
(2.064e-06, 1.005e+08),
(2.066e-06, 1.025e+08),
(2.068e-06, 1.019e+08),
(2.070e-06, 1.003e+08),
(2.072e-06, 1.004e+08),
(2.074e-06, 1.009e+08),
(2.076e-06, 1.006e+08),
(2.078e-06, 1.000e+08),
(2.080e-06, 9.878e+07),
(2.082e-06, 9.864e+07),
(2.084e-06, 9.772e+07),
(2.086e-06, 9.852e+07),
(2.088e-06, 9.835e+07),
(2.090e-06, 9.788e+07),
(2.092e-06, 9.567e+07),
(2.094e-06, 9.593e+07),
(2.096e-06, 9.580e+07),
(2.098e-06, 9.620e+07),
(2.100e-06, 9.606e+07),
(2.102e-06, 9.577e+07),
(2.104e-06, 9.559e+07),
(2.106e-06, 9.574e+07),
(2.108e-06, 9.513e+07),
(2.110e-06, 9.396e+07),
(2.112e-06, 9.452e+07),
(2.114e-06, 9.436e+07),
(2.116e-06, 9.331e+07),
(2.118e-06, 9.311e+07),
(2.120e-06, 9.275e+07),
(2.122e-06, 9.275e+07),
(2.124e-06, 9.189e+07),
(2.126e-06, 9.208e+07),
(2.128e-06, 9.225e+07),
(2.130e-06, 9.209e+07),
(2.132e-06, 9.210e+07),
(2.134e-06, 9.155e+07),
(2.136e-06, 9.012e+07),
(2.138e-06, 9.110e+07),
(2.140e-06, 9.083e+07),
(2.142e-06, 9.064e+07),
(2.144e-06, 9.006e+07),
(2.146e-06, 8.939e+07),
(2.148e-06, 8.979e+07),
(2.150e-06, 8.957e+07),
(2.152e-06, 8.913e+07),
(2.154e-06, 8.878e+07),
(2.156e-06, 8.874e+07),
(2.158e-06, 8.842e+07),
(2.160e-06, 8.781e+07),
(2.162e-06, 8.686e+07),
(2.164e-06, 8.456e+07),
(2.166e-06, 7.849e+07),
(2.168e-06, 8.300e+07),
(2.170e-06, 8.557e+07),
(2.172e-06, 8.591e+07),
(2.174e-06, 8.592e+07),
(2.176e-06, 8.532e+07),
(2.178e-06, 8.425e+07),
(2.180e-06, 8.497e+07),
(2.182e-06, 8.425e+07),
(2.184e-06, 8.457e+07),
(2.186e-06, 8.465e+07),
(2.188e-06, 8.277e+07),
(2.190e-06, 8.304e+07),
(2.192e-06, 8.377e+07),
(2.194e-06, 8.349e+07),
(2.196e-06, 8.318e+07),
(2.198e-06, 8.299e+07),
(2.200e-06, 8.265e+07),
(2.202e-06, 8.230e+07),
(2.204e-06, 8.211e+07),
(2.206e-06, 7.966e+07),
(2.208e-06, 7.966e+07),
(2.210e-06, 8.080e+07),
(2.212e-06, 8.105e+07),
(2.214e-06, 8.072e+07),
(2.216e-06, 7.994e+07),
(2.218e-06, 7.970e+07),
(2.220e-06, 7.997e+07),
(2.222e-06, 7.962e+07),
(2.224e-06, 7.926e+07),
(2.226e-06, 7.811e+07),
(2.228e-06, 7.826e+07),
(2.230e-06, 7.831e+07),
(2.232e-06, 7.815e+07),
(2.234e-06, 7.802e+07),
(2.236e-06, 7.758e+07),
(2.238e-06, 7.648e+07),
(2.240e-06, 7.639e+07),
(2.242e-06, 7.642e+07),
(2.244e-06, 7.624e+07),
(2.246e-06, 7.612e+07),
(2.248e-06, 7.520e+07),
(2.250e-06, 7.541e+07),
(2.252e-06, 7.512e+07),
(2.254e-06, 7.402e+07),
(2.256e-06, 7.422e+07),
(2.258e-06, 7.441e+07),
(2.260e-06, 7.421e+07),
(2.262e-06, 7.299e+07),
(2.264e-06, 7.329e+07),
(2.266e-06, 7.315e+07),
(2.268e-06, 7.327e+07),
(2.270e-06, 7.297e+07),
(2.272e-06, 7.277e+07),
(2.274e-06, 7.252e+07),
(2.276e-06, 7.239e+07),
(2.278e-06, 7.242e+07),
(2.280e-06, 7.165e+07),
(2.282e-06, 7.007e+07),
(2.284e-06, 7.125e+07),
(2.286e-06, 7.124e+07),
(2.288e-06, 7.127e+07),
(2.290e-06, 7.110e+07),
(2.292e-06, 7.067e+07),
(2.294e-06, 6.920e+07),
(2.296e-06, 6.908e+07),
(2.298e-06, 6.919e+07),
(2.300e-06, 6.953e+07),
(2.302e-06, 6.955e+07),
(2.304e-06, 6.931e+07),
(2.306e-06, 6.923e+07),
(2.308e-06, 6.901e+07),
(2.310e-06, 6.870e+07),
(2.312e-06, 6.867e+07),
(2.314e-06, 6.826e+07),
(2.316e-06, 6.779e+07),
(2.318e-06, 6.745e+07),
(2.320e-06, 6.768e+07),
(2.322e-06, 6.675e+07),
(2.324e-06, 6.536e+07),
(2.326e-06, 6.559e+07),
(2.328e-06, 6.629e+07),
(2.330e-06, 6.616e+07),
(2.332e-06, 6.584e+07),
(2.334e-06, 6.571e+07),
(2.336e-06, 6.536e+07),
(2.338e-06, 6.496e+07),
(2.340e-06, 6.520e+07),
(2.342e-06, 6.539e+07),
(2.344e-06, 6.509e+07),
(2.346e-06, 6.486e+07),
(2.348e-06, 6.472e+07),
(2.350e-06, 6.453e+07),
(2.352e-06, 6.289e+07),
(2.354e-06, 6.239e+07),
(2.356e-06, 6.282e+07),
(2.358e-06, 6.266e+07),
(2.360e-06, 6.308e+07),
(2.362e-06, 6.305e+07),
(2.364e-06, 6.295e+07),
(2.366e-06, 6.284e+07),
(2.368e-06, 6.263e+07),
(2.370e-06, 6.211e+07),
(2.372e-06, 6.207e+07),
(2.374e-06, 6.066e+07),
(2.376e-06, 6.164e+07),
(2.378e-06, 6.192e+07),
(2.380e-06, 6.172e+07),
(2.382e-06, 6.098e+07),
(2.384e-06, 5.885e+07),
(2.386e-06, 5.908e+07),
(2.388e-06, 6.004e+07),
(2.390e-06, 6.029e+07),
(2.392e-06, 6.008e+07),
(2.394e-06, 6.003e+07),
(2.396e-06, 5.996e+07),
(2.398e-06, 5.989e+07),
(2.400e-06, 5.944e+07),
(2.402e-06, 5.965e+07),
(2.404e-06, 5.945e+07),
(2.406e-06, 5.919e+07),
(2.408e-06, 5.915e+07),
(2.410e-06, 5.902e+07),
(2.412e-06, 5.894e+07),
(2.414e-06, 5.734e+07),
(2.416e-06, 5.599e+07),
(2.418e-06, 5.748e+07),
(2.420e-06, 5.770e+07),
(2.422e-06, 5.767e+07),
(2.424e-06, 5.726e+07),
(2.426e-06, 5.717e+07),
(2.428e-06, 5.712e+07),
(2.430e-06, 5.712e+07),
(2.432e-06, 5.702e+07),
(2.434e-06, 5.641e+07),
(2.436e-06, 5.618e+07),
(2.438e-06, 5.599e+07),
(2.440e-06, 5.639e+07),
(2.442e-06, 5.617e+07),
(2.444e-06, 5.603e+07),
(2.446e-06, 5.498e+07),
(2.448e-06, 5.457e+07),
(2.450e-06, 5.462e+07),
(2.452e-06, 5.432e+07),
(2.454e-06, 5.455e+07),
(2.456e-06, 5.370e+07),
(2.458e-06, 5.392e+07),
(2.460e-06, 5.457e+07),
(2.462e-06, 5.442e+07),
(2.464e-06, 5.435e+07),
(2.466e-06, 5.405e+07),
(2.468e-06, 5.390e+07),
(2.470e-06, 5.285e+07),
(2.472e-06, 5.330e+07),
(2.474e-06, 5.313e+07),
(2.476e-06, 5.343e+07),
(2.478e-06, 5.303e+07),
(2.480e-06, 5.177e+07),
(2.482e-06, 5.140e+07),
(2.484e-06, 5.219e+07),
(2.486e-06, 5.160e+07),
(2.488e-06, 5.169e+07),
(2.490e-06, 5.225e+07),
(2.492e-06, 5.198e+07),
(2.494e-06, 5.175e+07),
(2.496e-06, 5.152e+07),
(2.498e-06, 5.154e+07),
(2.500e-06, 5.155e+07),
(2.520e-06, 4.984e+07),
(2.540e-06, 4.814e+07),
(2.560e-06, 4.672e+07),
(2.580e-06, 4.550e+07),
(2.600e-06, 4.457e+07),
(2.620e-06, 4.305e+07),
(2.640e-06, 4.211e+07),
(2.660e-06, 4.079e+07),
(2.680e-06, 3.968e+07),
(2.700e-06, 3.867e+07),
(2.720e-06, 3.763e+07),
(2.740e-06, 3.663e+07),
(2.760e-06, 3.546e+07),
(2.780e-06, 3.468e+07),
(2.800e-06, 3.385e+07),
(2.820e-06, 3.297e+07),
(2.840e-06, 3.209e+07),
(2.860e-06, 3.119e+07),
(2.880e-06, 3.032e+07),
(2.900e-06, 2.969e+07),
(2.920e-06, 2.890e+07),
(2.940e-06, 2.817e+07),
(2.960e-06, 2.750e+07),
(2.980e-06, 2.682e+07),
(3.000e-06, 2.612e+07),
(3.020e-06, 2.547e+07),
(3.040e-06, 2.465e+07),
(3.060e-06, 2.422e+07),
(3.080e-06, 2.364e+07),
(3.100e-06, 2.306e+07),
(3.120e-06, 2.246e+07),
(3.140e-06, 2.198e+07),
(3.160e-06, 2.144e+07),
(3.180e-06, 2.096e+07),
(3.200e-06, 2.048e+07),
(3.220e-06, 2.000e+07),
(3.240e-06, 1.951e+07),
(3.260e-06, 1.907e+07),
(3.280e-06, 1.858e+07),
(3.300e-06, 1.802e+07),
(3.320e-06, 1.768e+07),
(3.340e-06, 1.737e+07),
(3.360e-06, 1.697e+07),
(3.380e-06, 1.659e+07),
(3.400e-06, 1.615e+07),
(3.420e-06, 1.584e+07),
(3.440e-06, 1.554e+07),
(3.460e-06, 1.520e+07),
(3.480e-06, 1.486e+07),
(3.500e-06, 1.456e+07),
(3.520e-06, 1.425e+07),
(3.540e-06, 1.393e+07),
(3.560e-06, 1.362e+07),
(3.580e-06, 1.334e+07),
(3.600e-06, 1.307e+07),
(3.620e-06, 1.281e+07),
(3.640e-06, 1.251e+07),
(3.660e-06, 1.222e+07),
(3.680e-06, 1.193e+07),
(3.700e-06, 1.162e+07),
(3.720e-06, 1.145e+07),
(3.740e-06, 1.108e+07),
(3.760e-06, 1.096e+07),
(3.780e-06, 1.078e+07),
(3.800e-06, 1.057e+07),
(3.820e-06, 1.038e+07),
(3.840e-06, 1.019e+07),
(3.860e-06, 9.983e+06),
(3.880e-06, 9.782e+06),
(3.900e-06, 9.599e+06),
(3.920e-06, 9.427e+06),
(3.940e-06, 9.233e+06),
(3.960e-06, 9.032e+06),
(3.980e-06, 8.857e+06),
(4.000e-06, 8.669e+06),
(4.020e-06, 8.557e+06),
(4.040e-06, 8.385e+06),
(4.060e-06, 8.217e+06),
(4.080e-06, 8.054e+06),
(4.100e-06, 7.894e+06),
(4.120e-06, 7.739e+06),
(4.140e-06, 7.587e+06),
(4.160e-06, 7.439e+06),
(4.180e-06, 7.294e+06),
(4.200e-06, 7.153e+06),
(4.220e-06, 7.015e+06),
(4.240e-06, 6.881e+06),
(4.260e-06, 6.749e+06),
(4.280e-06, 6.621e+06),
(4.300e-06, 6.496e+06),
(4.320e-06, 6.374e+06),
(4.340e-06, 6.254e+06),
(4.360e-06, 6.138e+06),
(4.380e-06, 6.024e+06),
(4.400e-06, 5.913e+06),
(4.420e-06, 5.804e+06),
(4.440e-06, 5.698e+06),
(4.460e-06, 5.594e+06),
(4.480e-06, 5.492e+06),
(4.500e-06, 5.393e+06),
(4.520e-06, 5.296e+06),
(4.540e-06, 5.201e+06),
(4.560e-06, 5.108e+06),
(4.580e-06, 5.018e+06),
(4.600e-06, 4.929e+06),
(4.620e-06, 4.842e+06),
(4.640e-06, 4.757e+06),
(4.660e-06, 4.674e+06),
(4.680e-06, 4.593e+06),
(4.700e-06, 4.514e+06),
(4.720e-06, 4.436e+06),
(4.740e-06, 4.360e+06),
(4.760e-06, 4.285e+06),
(4.780e-06, 4.212e+06),
(4.800e-06, 4.141e+06),
(4.820e-06, 4.071e+06),
(4.840e-06, 4.003e+06),
(4.860e-06, 3.936e+06),
(4.880e-06, 3.870e+06),
(4.900e-06, 3.806e+06),
(4.920e-06, 3.743e+06),
(4.940e-06, 3.681e+06),
(4.960e-06, 3.621e+06),
(4.980e-06, 3.562e+06),
(5.000e-06, 3.504e+06),
(5.050e-06, 3.394e+06),
(5.100e-06, 3.267e+06),
(5.150e-06, 3.146e+06),
(5.200e-06, 3.030e+06),
(5.250e-06, 2.920e+06),
(5.300e-06, 2.815e+06),
(5.350e-06, 2.715e+06),
(5.400e-06, 2.619e+06),
(5.450e-06, 2.527e+06),
(5.500e-06, 2.439e+06),
(5.550e-06, 2.355e+06),
(5.600e-06, 2.275e+06),
(5.650e-06, 2.198e+06),
(5.700e-06, 2.124e+06),
(5.750e-06, 2.054e+06),
(5.800e-06, 1.986e+06),
(5.850e-06, 1.921e+06),
(5.900e-06, 1.859e+06),
(5.950e-06, 1.799e+06),
(6.000e-06, 1.742e+06),
(6.050e-06, 1.687e+06),
(6.100e-06, 1.634e+06),
(6.150e-06, 1.583e+06),
(6.200e-06, 1.534e+06),
(6.250e-06, 1.487e+06),
(6.300e-06, 1.442e+06),
(6.350e-06, 1.399e+06),
(6.400e-06, 1.357e+06),
(6.450e-06, 1.317e+06),
(6.500e-06, 1.278e+06),
(6.550e-06, 1.240e+06),
(6.600e-06, 1.204e+06),
(6.650e-06, 1.170e+06),
(6.700e-06, 1.136e+06),
(6.750e-06, 1.104e+06),
(6.800e-06, 1.073e+06),
(6.850e-06, 1.043e+06),
(6.900e-06, 1.014e+06),
(6.950e-06, 9.862e+05),
(7.000e-06, 9.592e+05),
(7.050e-06, 9.331e+05),
(7.100e-06, 9.080e+05),
(7.150e-06, 8.836e+05),
(7.200e-06, 8.601e+05),
(7.250e-06, 8.374e+05),
(7.300e-06, 8.154e+05),
(7.350e-06, 7.942e+05),
(7.400e-06, 7.736e+05),
(7.450e-06, 7.537e+05),
(7.500e-06, 7.344e+05),
(7.550e-06, 7.158e+05),
(7.600e-06, 6.977e+05),
(7.650e-06, 6.802e+05),
(7.700e-06, 6.633e+05),
(7.750e-06, 6.469e+05),
(7.800e-06, 6.310e+05),
(7.850e-06, 6.156e+05),
(7.900e-06, 6.006e+05),
(7.950e-06, 5.862e+05),
(8.000e-06, 5.721e+05),
(8.050e-06, 5.585e+05),
(8.100e-06, 5.453e+05),
(8.150e-06, 5.324e+05),
(8.200e-06, 5.200e+05),
(8.250e-06, 5.079e+05),
(8.300e-06, 4.961e+05),
(8.350e-06, 4.847e+05),
(8.400e-06, 4.737e+05),
(8.450e-06, 4.629e+05),
(8.500e-06, 4.525e+05),
(8.550e-06, 4.423e+05),
(8.600e-06, 4.324e+05),
(8.650e-06, 4.228e+05),
(8.700e-06, 4.135e+05),
(8.750e-06, 4.044e+05),
(8.800e-06, 3.956e+05),
(8.850e-06, 3.870e+05),
(8.900e-06, 3.787e+05),
(8.950e-06, 3.706e+05),
(9.000e-06, 3.627e+05),
(9.050e-06, 3.550e+05),
(9.100e-06, 3.475e+05),
(9.150e-06, 3.402e+05),
(9.200e-06, 3.331e+05),
(9.250e-06, 3.262e+05),
(9.300e-06, 3.195e+05),
(9.350e-06, 3.129e+05),
(9.400e-06, 3.065e+05),
(9.450e-06, 3.003e+05),
(9.500e-06, 2.942e+05),
(9.550e-06, 2.883e+05),
(9.600e-06, 2.825e+05),
(9.650e-06, 2.769e+05),
(9.700e-06, 2.714e+05),
(9.750e-06, 2.661e+05),
(9.800e-06, 2.608e+05),
(9.850e-06, 2.558e+05),
(9.900e-06, 2.508e+05),
(9.950e-06, 2.460e+05),
(1.000e-05, 2.412e+05),
(1.100e-05, 1.635e+05),
(1.200e-05, 1.152e+05),
(1.300e-05, 8.340e+04),
(1.400e-05, 6.190e+04),
(1.500e-05, 4.690e+04),
(1.600e-05, 3.610e+04),
(1.700e-05, 2.830e+04),
(1.800e-05, 2.250e+04),
(1.900e-05, 1.810e+04),
(2.000e-05, 1.470e+04),
(2.500e-05, 6.050e+03),
(3.000e-05, 2.930e+03),
(3.500e-05, 1.580e+03),
(4.000e-05, 9.310e+02),
(5.000e-05, 3.830e+02),
(6.000e-05, 1.850e+02),
(8.000e-05, 5.880e+01),
(1.000e-04, 2.420e+01),
(1.200e-04, 1.170e+01),
(1.500e-04, 4.810e+00),
(2.000e-04, 1.530e+00),
(2.500e-04, 6.280e-01),
(3.000e-04, 2.950e-01),
(4.000e-04, 1.010e-01),
(1.000e-03, 3.380e-03),
))
if __name__ == '__main__':
    # Demo: render the Sun as seen by the Didymos mission camera.
    # Alternative camera: RosettaSystemModel(focused_attenuated=False).cam
    cam = DidymosSystemModel(use_narrow_cam=False).cam

    # Viewing direction: the +x axis pitched up by 10 degrees.
    view_dir = tools.q_times_v(tools.ypr_to_q(0, math.radians(10), 0), [1, 0, 0])
    print('v: %s' % (view_dir,))

    # Incident flux at 1.2 AU along the viewing direction, sensed with a
    # short exposure, then scaled into an 8-bit image.
    flux = Sun.flux_density(cam, view_dir * Sun.AU * 1.2)
    frame = cam.sense(flux, exposure=0.001, gain=1)
    frame = np.clip(frame * 255, 0, 255).astype('uint8')
    # frame = ImageProc.adjust_gamma(frame, 1.8)

    # Fit the preview window into 768x768 pixels.
    scale = min(768 / cam.width, 768 / cam.height)
    cv2.imshow('sun', cv2.resize(frame, None, fx=scale, fy=scale))
    cv2.waitKey()
| {
"repo_name": "oknuutti/visnav-py",
"path": "visnav/render/sun.py",
"copies": "1",
"size": "58918",
"license": "mit",
"hash": 9176452827914252000,
"line_mean": 28.5591286307,
"line_max": 139,
"alpha_frac": 0.503089039,
"autogenerated": false,
"ratio": 1.9368815542917255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.2939970593291725,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
@lru_cache(maxsize=None)
def R(i, j, m):
    """Memoised recurrence over sub-grids, modulo m.

    Starts from pof2[i*j] and subtracts, for every strictly smaller
    (rows, cols) sub-grid, the product of binomial choices, the power of
    two for the remaining cells, and the recursive R value.  Requires the
    pof2 and nCr tables built by precompute().
    """
    total = pof2[i * j]
    for rows in range(1, i + 1):
        for cols in range(j + 1):
            if (rows, cols) == (i, j):
                continue
            term = nCr[i - 1][rows - 1] * nCr[j][cols]
            term *= pof2[(i - rows) * (j - cols)]
            total = (total - term * R(rows, cols, m)) % m
    return total
def precompute(N, m):
    """Build the lookup tables used by R().

    Fills two module-level tables (kept global for backward compatibility
    with R and S):

      pof2[k]   = 2**k % m      for 0 <= k < (N+1)*(N+1)
      nCr[n][r] = C(n, r) % m   for 0 <= n, r <= N

    Also returns the ``(pof2, nCr)`` pair so callers and tests can use
    the tables without reaching into module globals.
    """
    global pof2, nCr
    # 2^k mod m for every exponent R() can index (k = i*j <= N*N).
    size = (N + 1) * (N + 1)
    pof2 = [1] * size
    for k in range(1, size):
        pof2[k] = (pof2[k - 1] * 2) % m
    # Pascal's triangle modulo m.
    nCr = [[0] * (N + 1) for _ in range(N + 1)]
    for n in range(N + 1):
        nCr[n][0] = 1
    for n in range(1, N + 1):
        for r in range(1, N + 1):
            nCr[n][r] = (nCr[n - 1][r - 1] + nCr[n - 1][r]) % m
    return pof2, nCr
def S(N, m):
    """Sum R(i, j, m) over all 1 <= i, j <= N, exploiting R(i,j) == R(j,i)."""
    total = 0
    for i in range(1, N + 1):
        # Diagonal term counted once...
        total += R(i, i, m) % m
        # ...off-diagonal pairs twice (symmetry).
        for j in range(i + 1, N + 1):
            total += 2 * R(i, j, m) % m
    return total % m
if __name__=="__main__":
    # Driver: build the lookup tables, print S(N, m) and the elapsed time.
    m = 1000000033
    N = 100
    from datetime import datetime
    startTime = datetime.now()
    # Tables (pof2, nCr) must exist before S() recurses into R().
    precompute(N,m)
    print(S(N,m))
    print("runtime: {0}".format( datetime.now()-startTime) )
| {
"repo_name": "baderj/project-euler",
"path": "434/euler_434.py",
"copies": "1",
"size": "1311",
"license": "mit",
"hash": 6166072626842428000,
"line_mean": 25.3125,
"line_max": 60,
"alpha_frac": 0.4515636918,
"autogenerated": false,
"ratio": 2.6115537848605577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3563117476660557,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache, singledispatch
from typing import Any, Callable, List, Tuple, Union
import attr
from .errors import StructureHandlerNotFoundError
@attr.s
class _DispatchNotFound:
    """A dummy object to help signify a dispatch not found."""

    # Registered as the default of the internal singledispatch registry so
    # "no specific handler" can be detected by identity (see _dispatch).
    pass
class MultiStrategyDispatch:
    """
    MultiStrategyDispatch uses a combination of exact-match dispatch,
    singledispatch, and FunctionDispatch.

    Lookup order in _dispatch is: singledispatch, then the exact-match
    (direct) table, then the predicate-based FunctionDispatch.  Results
    are memoised by the lru_cache wrapper stored on ``dispatch``.
    """

    __slots__ = (
        "_direct_dispatch",
        "_function_dispatch",
        "_single_dispatch",
        "_generators",
        "dispatch",
    )

    def __init__(self, fallback_func):
        self._direct_dispatch = {}
        self._function_dispatch = FunctionDispatch()
        # The fallback predicate matches everything, so dispatch() always
        # resolves to something.
        self._function_dispatch.register(lambda _: True, fallback_func)
        # _DispatchNotFound doubles as the "no handler registered" sentinel.
        self._single_dispatch = singledispatch(_DispatchNotFound)
        # Memoised entry point; cleared whenever registrations change.
        self.dispatch = lru_cache(maxsize=None)(self._dispatch)

    def _dispatch(self, cl):
        try:
            dispatch = self._single_dispatch.dispatch(cl)
            if dispatch is not _DispatchNotFound:
                return dispatch
        except Exception:
            # singledispatch can choke on odd "classes"; fall through to
            # the other strategies.
            pass
        direct_dispatch = self._direct_dispatch.get(cl)
        if direct_dispatch is not None:
            return direct_dispatch
        return self._function_dispatch.dispatch(cl)

    def register_cls_list(self, cls_and_handler, direct: bool = False):
        """register a class to direct or singledispatch"""
        for cls, handler in cls_and_handler:
            if direct:
                self._direct_dispatch[cls] = handler
            else:
                self._single_dispatch.register(cls, handler)
                # NOTE(review): indentation reconstructed -- clearing the
                # direct table on singledispatch registrations matches
                # upstream cattrs; confirm placement.
                self.clear_direct()
        self.dispatch.cache_clear()

    def register_func_list(
        self,
        func_and_handler: List[
            Union[
                Tuple[Callable[[Any], bool], Any],
                Tuple[Callable[[Any], bool], Any, bool],
            ]
        ],
    ):
        """register a function to determine if the handle
        should be used for the type
        """
        for tup in func_and_handler:
            if len(tup) == 2:
                func, handler = tup
                self._function_dispatch.register(func, handler)
            else:
                # 3-tuples additionally carry an is_generator flag.
                func, handler, is_gen = tup
                self._function_dispatch.register(
                    func, handler, is_generator=is_gen
                )
        self.clear_direct()
        self.dispatch.cache_clear()

    def clear_direct(self):
        """Clear the direct dispatch."""
        self._direct_dispatch.clear()
class FunctionDispatch:
    """
    FunctionDispatch is similar to functools.singledispatch, but
    instead dispatches based on functions that take the type of the
    first argument in the method, and return True or False.

    objects that help determine dispatch should be instantiated objects.
    """

    __slots__ = ("_handler_pairs",)

    def __init__(self):
        self._handler_pairs = []

    def register(
        self, can_handle: Callable[[Any], bool], func, is_generator=False
    ):
        # Newest registrations win: prepend so they are tried first.
        self._handler_pairs.insert(0, (can_handle, func, is_generator))

    def dispatch(self, typ):
        """
        returns the appropriate handler, for the object passed.
        """
        for predicate, handler, makes_handler in self._handler_pairs:
            try:
                matched = predicate(typ)
            except Exception:
                # Predicates may blow up on unexpected inputs (e.g.
                # issubclass called on a non-class); treat as "no match".
                continue
            if not matched:
                continue
            # Generator entries produce the handler from the type;
            # otherwise the stored handler is returned directly.
            return handler(typ) if makes_handler else handler
        raise StructureHandlerNotFoundError(
            f"unable to find handler for {typ}", type_=typ
        )
| {
"repo_name": "Tinche/cattrs",
"path": "src/cattr/dispatch.py",
"copies": "1",
"size": "3900",
"license": "mit",
"hash": 3024851061255141400,
"line_mean": 29.46875,
"line_max": 73,
"alpha_frac": 0.5761538462,
"autogenerated": false,
"ratio": 4.659498207885305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5735652054085305,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache, wraps
from weakref import ref
from colour_runner.django_runner import ColourRunnerMixin
from django.test.runner import DiscoverRunner
from django.utils.crypto import get_random_string as _get_random_string
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.text import slugify as django_slugify
from unidecode import unidecode
# Translation table used by slugify(): maps '.', '/' and '_' to '-'.
SLUG_TRANSTABLE = str.maketrans('./_', '---')
def slugify(input_data):
    """
    Powerup version of the original django slugify: transliterates to
    ASCII first and normalises '.', '/' and '_' to dashes.
    """
    text = force_text(input_data)
    ascii_text = unidecode(text)
    dashed = ascii_text.translate(SLUG_TRANSTABLE).strip('-')
    return mark_safe(django_slugify(value=dashed))
def memoized_method(*lru_args, **lru_kwargs):
    """Per-instance lru_cache for methods.

    Based on http://stackoverflow.com/a/33672499/1067833.  The instance is
    captured only by weak reference so the cache never keeps it alive; on
    the first call the cached wrapper is installed on the instance itself,
    shadowing the class attribute for all later lookups.
    """
    def decorator(func):
        @wraps(func)
        def first_call(self, *args, **kwargs):
            # Hold the instance weakly so caching does not block GC.
            weak_self = ref(self)

            @wraps(func)
            @lru_cache(*lru_args, **lru_kwargs)
            def bound_cached(*inner_args, **inner_kwargs):
                return func(weak_self(), *inner_args, **inner_kwargs)

            # Subsequent attribute lookups on this instance hit the cached
            # wrapper directly, bypassing this bootstrap.
            setattr(self, func.__name__, bound_cached)
            return bound_cached(*args, **kwargs)

        return first_call

    return decorator
return decorator
class DjangoTestRunner(ColourRunnerMixin, DiscoverRunner):
    """
    Colorized test runner for the project:
    """

    # ColourRunnerMixin adds coloured output on top of Django's standard
    # DiscoverRunner; no further customisation is needed.
    pass
def get_random_safestring(
        length=10, allowed_chars=(
            'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789')):
    """
    Generate a random password with the given length and given
    allowed_chars. The default value of allowed_chars does not have "I"
    or "O" or letters and digits that look similar -- just to avoid
    confusion.
    """
    # Thin wrapper delegating to Django's get_random_string.
    return _get_random_string(length=length, allowed_chars=allowed_chars)
| {
"repo_name": "karolyi/forum-django",
"path": "backend/forum/utils/__init__.py",
"copies": "1",
"size": "2061",
"license": "mit",
"hash": 6230680658972882000,
"line_mean": 32.2419354839,
"line_max": 77,
"alpha_frac": 0.6695778748,
"autogenerated": false,
"ratio": 3.9482758620689653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 62
} |
# namedtuple lives in the collections module, not functools:
# "from functools import namedtuple" raises ImportError at load time.
from collections import namedtuple

# A domino tile with 'top' and 'bottom' pip values.
Tile = namedtuple('Tile', ['top', 'bottom'])
def placeDominoTile(tiles, newTile):
    """Insert newTile into the chain *tiles* (mutated in place) at a legal
    position and return the chain, or None if it cannot be placed.

    A tile fits between two neighbours when its top matches the previous
    tile's bottom AND its bottom matches the next tile's top; at either
    end only the touching value needs to match.

    Fixes over the previous version: prepending now works for chains of
    any length (before, only 1-tile chains could be prepended), and the
    "cannot place" result is an explicit ``return None``.
    """
    if not tiles:
        return [newTile]
    # Scan left to right for a slot, same order as before: interior seams
    # first, with a plain append when the last tile's bottom matches.
    last = len(tiles) - 1
    for index, tile in enumerate(tiles):
        if tile.bottom == newTile.top:
            if index == last:
                tiles.append(newTile)
                return tiles
            if newTile.bottom == tiles[index + 1].top:
                tiles.insert(index + 1, newTile)
                return tiles
    # Fall back to prepending (previously only handled for 1-tile chains).
    if tiles[0].top == newTile.bottom:
        tiles.insert(0, newTile)
        return tiles
    return None
# Demo: exercise placeDominoTile on empty, single-tile and longer chains.
print(placeDominoTile([], Tile(1, 1)))
print(placeDominoTile([Tile(1,1)], Tile(3,2)))
print(placeDominoTile([Tile(2,1)], Tile(3,2)))
print(placeDominoTile([Tile(1,3)], Tile(3,2)))
print(placeDominoTile([Tile(2,1), Tile(1,3), Tile(3,5), Tile(5,6)], Tile(3,3)))
print(placeDominoTile([Tile(2,1), Tile(1,3), Tile(3,5), Tile(5,6)], Tile(1,1)))
print(placeDominoTile([Tile(2,1), Tile(1,3), Tile(3,5), Tile(5,6)], Tile(6,4)))
def calculateDominoTiles(tiles):
    """Return the total pip count of *tiles*, or -1 when any adjacent pair
    fails to match bottom-to-top (i.e. the chain is invalid)."""
    # Validate every seam first; any mismatch invalidates the whole chain.
    for left, right in zip(tiles, tiles[1:]):
        if left.bottom != right.top:
            return -1
    return sum(tile.top + tile.bottom for tile in tiles)
# Demo: pip totals for valid chains.
print(calculateDominoTiles([Tile(3,3)]))
print(calculateDominoTiles([Tile(3,3), Tile(3,5)]))
print(calculateDominoTiles([Tile(3,3), Tile(3,4), Tile(5,1)])) | {
"repo_name": "adrianbeloqui/Python",
"path": "domino_tiles_reworked.py",
"copies": "1",
"size": "1632",
"license": "mit",
"hash": -441264587387693500,
"line_mean": 33.7446808511,
"line_max": 89,
"alpha_frac": 0.5808823529,
"autogenerated": false,
"ratio": 3.1445086705202314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9165710180457577,
"avg_score": 0.011936168592530933,
"num_lines": 47
} |
from functools import *
class INHooker:
    '''The IN Hook system.

    Maintains a registry of hook implementations (functions decorated with
    :meth:`hook`) keyed by function name, plus callbacks that are notified
    whenever a new implementation of a hook is registered.
    '''

    # hook name -> list of registered implementations (class-level, shared)
    __In_hooks__ = {}

    # hook name -> callbacks notified when a new implementation appears
    hook_callbacks = {}

    ########################## __ hooks __ #######################

    def hook(self, func):
        '''Decorator registering *func* as an implementation of the hook
        named after the function.

        All implementations registered under the same name are invoked by
        :meth:`hook_invoke` / :meth:`hook_invoke_yield`.
        '''
        @wraps(func)
        def hook(*args, **kwds):
            return func(*args, **kwds)

        fun_name = func.__name__
        try:
            In_hooks = self.__In_hooks__[fun_name]
        except KeyError:
            In_hooks = self.__In_hooks__[fun_name] = []
        In_hooks.append(hook)
        # notify the callbacks interested in this hook name
        self.notfy_hook_callbacks(fun_name, func)
        return hook

    def registered_hooks(self, hook, sort=False):
        '''Returns the IN hooks registered for *hook*.

        TODO: order the hooks based on the module weight (the *sort* flag
        is currently unused).
        '''
        return self.__In_hooks__.get(hook, [])

    def hook_invoke(self, hook, *args, __max_hooks__=None, __inc_result__=None,
                    __error_handler__=None, __inc_module__=None, **kargs):
        '''Invoke all hook methods registered for *hook*; return the list
        of their results.

        __inc_module__: wrap each result as ``{module_name: result}``.
        __inc_result__: expose the results accumulated so far to each hook
            via the '__result__' keyword argument.
        __max_hooks__: invoke at most this many implementations.
        __error_handler__: error policy, see __function_invoke__.
        '''
        result = []
        hooks = self.registered_hooks(hook)
        if not hooks:
            return result
        funs_to_call = hooks if __max_hooks__ is None else hooks[:__max_hooks__]
        invoke_one = self.__function_invoke__
        if __inc_result__:
            # Accumulate incrementally so hooks really do see the partial
            # results (previously '__result__' always stayed empty).
            kargs['__result__'] = result
            for fun in funs_to_call:
                value = invoke_one(fun, args, kargs, __error_handler__)
                result.append({fun.__module__: value} if __inc_module__ else value)
            return result
        if __inc_module__:
            return [{fun.__module__: invoke_one(fun, args, kargs, __error_handler__)}
                    for fun in funs_to_call]
        return [invoke_one(fun, args, kargs, __error_handler__)
                for fun in funs_to_call]

    def hook_invoke_yield(self, hook, *args, __max_hooks__=None, __inc_result__=None,
                          __error_handler__=None, __inc_module__=None, **kargs):
        '''Like :meth:`hook_invoke`, but yields results one by one so the
        caller can stop early.  Keyword arguments as in hook_invoke.
        '''
        hooks = self.registered_hooks(hook)
        if not hooks:
            return
        funs_to_call = hooks if __max_hooks__ is None else hooks[:__max_hooks__]
        collected = []
        if __inc_result__:
            # BUG FIX: 'result' was previously undefined here (NameError);
            # expose the values yielded so far instead.
            kargs['__result__'] = collected
        invoke_one = self.__function_invoke__
        for fun in funs_to_call:
            value = invoke_one(fun, args, kargs, __error_handler__)
            out = {fun.__module__: value} if __inc_module__ else value
            collected.append(out)
            yield out

    def __function_invoke__(self, fun, args, kargs, __error_handler__=None):
        '''Invoke a single hook implementation, applying the error policy.

        __error_handler__ may be: None (re-raise), 'ignore' (log, return
        None), 'return' (log, return the exception object), or a callable
        invoked with the failing function and its arguments.
        '''
        try:
            return fun(*args, **kargs)
        except Exception as e:
            IN.logger.debug('Callback error: Function: {f}, Error handler: {err}', {'err' : str(__error_handler__), 'f' : str(fun)})
            if __error_handler__ is None:  # default policy: propagate
                raise
            if type(__error_handler__) is str:
                if __error_handler__ == 'ignore':  # swallow the error
                    IN.logger.debug()
                    return
                if __error_handler__ == 'return':
                    # hand the exception object back as the result
                    IN.logger.debug()
                    return e
                raise
            if hasattr(__error_handler__, '__call__'):
                # Notify the handler callable about the failure.
                try:
                    __error_handler__(fun, *args, **kargs)
                    return
                except Exception as err:
                    # Error inside the error handler: log and propagate.
                    IN.logger.debug()
                    raise
            raise

    def clear_hooks(self):
        # TODO: rebuilding requires re-importing the decorated modules;
        # needed if a module is uninstalled (registry re-instance?).
        pass

    def add_hook_callback(self, hook_name, callback):
        '''Register *callback* to be notified when a new implementation of
        *hook_name* is registered.'''
        try:
            hook_callbacks = self.hook_callbacks[hook_name]
        except KeyError:
            hook_callbacks = self.hook_callbacks[hook_name] = []
        hook_callbacks.append(callback)

    def notfy_hook_callbacks(self, hook_name, hook):
        '''Notify all callbacks registered for *hook_name*; callback
        errors are logged, never propagated.'''
        try:
            hooks = self.hook_callbacks[hook_name]
        except KeyError:
            return
        for func in hooks:
            try:
                func(hook_name, hook)
            except Exception as e:
                IN.logger.debug()
################################## X Invoker ################
class XHook:
    '''Context manager that automatically invokes hooks around a call.

    before / after:
        Lists of either strings (hook names, invoked via IN.hook_invoke)
        or callables (invoked directly with self.args / self.kargs).
    callthis:
        Hook name to invoke; defaults to the wrapped function's name.
    suffix:
        Whether the 'before'/'after' string is appended to the hook name.
    ignoreerror:
        If True, exceptions raised inside the with-block are suppressed.

    @example
        @IN.hooker.xhook()
        def fun():
            pass

        @IN.hook
        def fun_before():
            # called before fun
            pass

        @IN.hook
        def fun_after():
            # called after fun
            pass
    '''

    # NOTE: the shared mutable defaults are safe here -- they are never
    # mutated, only iterated.
    def __init__(self, func, before = ['before'], after = ['after'], callthis = '', suffix = False, ignoreerror = False, args=None, kargs=None):
        self.before = before
        self.after = after
        self.func = func
        self.callthis = callthis
        self.suffix = suffix
        self.ignoreerror = ignoreerror
        self.args = [] if args is None else args
        self.kargs = {} if kargs is None else kargs
        self.func_result = None

    def call_hook(self, when):
        '''Return the hook name to invoke for phase *when*, or '' when a
        callable was supplied (hook_invoke is not used then).'''
        if not self.callthis:
            hook = self.func.__name__
        elif type(self.callthis) is str:
            hook = self.callthis
        else:
            hook = ''  # because invokeall is not to be called
        if hook and self.suffix:
            hook = '_'.join((hook, when))
        return hook

    def __call__(self):
        '''Return an xhook decorator configured like this instance.

        BUG FIX: previously called the non-existent ``self.invoke``; the
        module-level xhook() factory has the matching signature.
        '''
        return xhook(self.before, self.after, self.callthis, self.suffix, self.ignoreerror, self.args, self.kargs)

    def __invoke_calls__(self, calls, when=''):
        # BUG FIX: *when* used to be a required (but unused) parameter,
        # while __enter__/__exit__ called this with a single argument,
        # raising TypeError; it is now optional.
        for call in calls:
            if type(call) is str:
                # Reuse call_hook for the name computation (was inlined).
                hook = self.call_hook(call)
                if hook:
                    IN.hook_invoke(hook, *self.args, **self.kargs)
            else:
                # BUG FIX: was call(*args, **kargs) with undefined names.
                call(*self.args, **self.kargs)

    def __enter__(self):
        '''Context enter: run the 'before' hooks/callables.'''
        self.__invoke_calls__(self.before)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        '''Context exit: run the 'after' hooks/callables; a truthy
        ignoreerror suppresses any exception from the block.'''
        self.__invoke_calls__(self.after)
        return self.ignoreerror
def xhook(before = ['before'], after = ['after'], callthis = '', suffix = True, ignoreerror = False, args=None, kargs=None):
    '''Decorator for XInvoker
    @see class XInvoker:

    Wraps the decorated function so that it runs inside an XHook context,
    recording its return value on the invoker as ``func_result``.
    '''
    if args is None:
        args = []
    if kargs is None:
        kargs = {}
    def hookfunc(func):
        @wraps(func)
        def hook(*args, **kargs):
            # NOTE(review): these *args/**kargs shadow the outer 'args' /
            # 'kargs' parameters, so the values normalised above are never
            # used and the call-time arguments are handed to XHook instead
            # -- confirm this is intentional.
            with XHook(func, before, after, callthis, suffix, ignoreerror, args, kargs) as __invoker__:
                res = func(*args, **kargs)
                __invoker__.func_result = res
                return res
        return hook
    return hookfunc
def function_invoke(f, *args, **keys):
    '''Helper function: call *f* with the given arguments; on failure log
    via IN.logger and return None instead of raising.'''
    try:
        return f(*args, **keys)
    except Exception as exc:
        IN.logger.debug(
            'Invoke : {f} {e} in module {m}',
            {'f': f.__name__, 'e': str(exc), 'm': f.__module__},
        )
def invoke(m, hook, *args, **keys):
    '''Call attribute *hook* of module/object *m* if it exists; returns
    None when the hook is absent.  Errors are logged, not raised.'''
    try:
        if not hasattr(m, hook):
            return None
        f = getattr(m, hook)
        return f(*args, **keys)
    except Exception as e:
        IN.logger.debug('Invoke : {hook} {e} in module {m}', {'hook' : hook, 'e' : str(e), 'm' : f.__module__})
| {
"repo_name": "vinoth3v/In",
"path": "In/core/hooker.py",
"copies": "1",
"size": "11208",
"license": "apache-2.0",
"hash": 227922586459780960,
"line_mean": 25.4964539007,
"line_max": 145,
"alpha_frac": 0.6372234118,
"autogenerated": false,
"ratio": 3.316957679786919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9188735841529749,
"avg_score": 0.0530890500114339,
"num_lines": 423
} |
from functools import partial as curry
from django import forms
from django.utils import timezone
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from pinax.images.models import ImageSet
from .conf import settings
from .models import Post, Revision, Section
from .signals import post_published
from .utils import load_path_attr
# Post model fields exposed on the admin form (AdminPostForm.Meta.fields).
FIELDS = [
    "section",
    "author",
    "markup",
    "title",
    "slug",
    "teaser",
    "content",
    "description",
    "state"
]
class PostFormMixin:
    """Shared behaviour for post forms: seeds teaser/content initials from
    the latest revision and persists a Post plus a new Revision on save."""

    @property
    def markup_choice(self):
        # Parser key looked up in PINAX_BLOG_MARKUP_CHOICE_MAP; read from
        # the submitted form data (PostForm overrides this with a fixed
        # class attribute instead).
        return self.cleaned_data["markup"]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        post = self.instance
        latest_revision = post.latest()
        if latest_revision:
            # set initial data from the latest revision
            self.fields["teaser"].initial = latest_revision.teaser
            self.fields["content"].initial = latest_revision.content

    def save_post(self, post):
        """Render the markup, stamp publication state, save *post*, record
        a Revision snapshot, and emit post_published on first publication."""
        published = False
        # Only stamp a publication time for new or not-yet-published posts.
        if post.pk is None or Post.objects.filter(pk=post.pk, published=None).count():
            if self.cleaned_data["state"] == Post.STATE_CHOICES[-1][0]:
                post.published = timezone.now()
                published = True
        # Resolve the configured parser for the selected markup.
        render_func = curry(
            load_path_attr(
                settings.PINAX_BLOG_MARKUP_CHOICE_MAP[self.markup_choice]["parser"]
            )
        )
        post.teaser_html = render_func(self.cleaned_data["teaser"])
        post.content_html = render_func(self.cleaned_data["content"])
        post.updated = timezone.now()
        post.save()
        # Snapshot this version of the post as a Revision.
        r = Revision()
        r.post = post
        r.title = post.title
        r.teaser = self.cleaned_data["teaser"]
        r.content = self.cleaned_data["content"]
        r.author = post.author
        r.updated = post.updated
        r.published = post.published
        r.save()
        if published:
            post_published.send(sender=Post, post=post)
        return post
class AdminPostForm(PostFormMixin, forms.ModelForm):
    """Admin-side post form: explicit title/slug fields plus wide teaser,
    content and description textareas; markup is chosen in the form data."""

    title = forms.CharField(
        label=_("Title"),
        max_length=90,
        widget=forms.TextInput(attrs={"style": "width: 50%;"}),
    )
    slug = forms.CharField(
        label=_("Slug"),
        widget=forms.TextInput(attrs={"style": "width: 50%;"})
    )
    teaser = forms.CharField(
        label=_("Teaser"),
        widget=forms.Textarea(attrs={"style": "width: 80%;"}),
    )
    content = forms.CharField(
        label=_("Content"),
        widget=forms.Textarea(attrs={"style": "width: 80%; height: 300px;"})
    )
    description = forms.CharField(
        label=_("Description"),
        widget=forms.Textarea(attrs={"style": "width: 80%;"}),
        required=False
    )

    class Meta:
        model = Post
        fields = FIELDS

    class Media:
        # Extra admin JavaScript configured in settings.
        js = settings.PINAX_BLOG_ADMIN_JS

    def save(self, blog=None):
        # Attach the post to a blog if given, then run the shared
        # markup-rendering / revision logic from PostFormMixin.
        post = super().save(commit=False)
        if blog:
            post.blog = blog
        return self.save_post(post)
class PostForm(PostFormMixin, forms.ModelForm):
    """Front-end post form: markdown-only, slug derived from the title,
    section auto-selected when fewer than two Sections exist."""

    # Fixed parser key; overrides the mixin's form-data-backed property.
    markup_choice = "markdown"

    teaser = forms.CharField(widget=forms.Textarea())
    content = forms.CharField(widget=forms.Textarea())

    class Meta:
        model = Post
        fields = [
            "section",
            "title",
            "teaser",
            "content",
            "description",
            "state"
        ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if Section.objects.count() < 2:
            # Zero or one section: pick it automatically and hide the field.
            self.section = Section.objects.first()
            del self.fields["section"]
        else:
            self.section = None

    def save(self, blog=None, author=None):
        post = super().save(commit=False)
        if blog:
            post.blog = blog
        if author:
            post.author = author
            # Each authored post gets its own image set.
            post.image_set = ImageSet.objects.create(created_by=author)
        if self.section:
            post.section = self.section
        post.slug = slugify(post.title)
        post.markup = self.markup_choice
        return self.save_post(post)
| {
"repo_name": "pinax/pinax-blog",
"path": "pinax/blog/forms.py",
"copies": "1",
"size": "4178",
"license": "mit",
"hash": 2936923382800509400,
"line_mean": 26.1298701299,
"line_max": 86,
"alpha_frac": 0.5753949258,
"autogenerated": false,
"ratio": 3.956439393939394,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001519416544047086,
"num_lines": 154
} |
from functools import partial as _partial
from .formatstring import fmtstr
def _styled(name):
    """Return fmtstr pre-bound to the given style name."""
    return _partial(fmtstr, style=name)

# Foreground colours.
black = _styled("black")
red = _styled("red")
green = _styled("green")
yellow = _styled("yellow")
blue = _styled("blue")
magenta = _styled("magenta")
cyan = _styled("cyan")
gray = _styled("gray")

# Background colours.
on_black = _styled("on_black")
on_dark = on_black  # deprecated, old name of on_black
on_red = _styled("on_red")
on_green = _styled("on_green")
on_yellow = _styled("on_yellow")
on_blue = _styled("on_blue")
on_magenta = _styled("on_magenta")
on_cyan = _styled("on_cyan")
on_gray = _styled("on_gray")

# Text attributes.
bold = _styled("bold")
dark = _styled("dark")
underline = _styled("underline")
blink = _styled("blink")
invert = _styled("invert")

# No styling applied.
plain = _partial(fmtstr)
| {
"repo_name": "sebastinas/curtsies",
"path": "curtsies/fmtfuncs.py",
"copies": "1",
"size": "1045",
"license": "mit",
"hash": -2312427595971407000,
"line_mean": 35.0344827586,
"line_max": 54,
"alpha_frac": 0.7062200957,
"autogenerated": false,
"ratio": 2.9353932584269664,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4141613354126966,
"avg_score": null,
"num_lines": null
} |
from functools import partial, lru_cache
from Levenshtein import hamming
# Order in which distinctive features are serialised; character positions
# in the strings produced by feature_string() follow this list.
feature_order = ['syllabic',
                 'stress',
                 'long',
                 'consonantal',
                 'sonorant',
                 'continuant',
                 'delayedrelease',
                 'approximant',
                 'tap',
                 'trill',
                 'nasal',
                 'voice',
                 'spreadglottis',
                 'constrictedglottis',
                 'labial',
                 'round',
                 'labiodental',
                 'coronal',
                 'anterior',
                 'distributed',
                 'strident',
                 'lateral',
                 'dorsal',
                 'high',
                 'low',
                 'front',
                 'back',
                 'tense']
@lru_cache(maxsize=None)
def feature_string(segment):
    '''Convert a Segment object into a feature string.

    One character per entry of feature_order: '+' if the feature is in
    segment.positive, '-' if in segment.negative, '0' otherwise.  The
    result is cached, so segments must be hashable.
    '''
    # Build with join instead of repeated string += (and avoid shadowing
    # this function's own name with a local variable, as the old code did).
    chars = []
    for feature in feature_order:
        if feature in segment.positive:
            chars.append('+')
        elif feature in segment.negative:
            chars.append('-')
        else:
            chars.append('0')
    return ''.join(chars)
def segment_match(feature_strings, target_segment):
    '''Returns the best match for the IPA string of the given Segment, from the
    given list of tuples containing feature strings. The first item in each
    tuple is the phoneme and the second is the feature string.

    Requires initialise_cache() to have been called (matches are memoised
    in the module-level deparse_cache).  Distance is Levenshtein.hamming,
    so every candidate feature string must have the same length as the
    target's -- TODO confirm callers guarantee this.
    '''
    target_feature_string = feature_string(target_segment)
    # If the segment has previously been matched, return the cached value
    if target_feature_string in deparse_cache:
        return deparse_cache[target_feature_string]
    # Find the distance of the initial candidate to serve as a benchmark.
    best_distance = hamming(target_feature_string, feature_strings[0][1])
    best_strings = [feature_strings[0][0]]
    # Loop through the rest of the available strings. If the distance between
    # the string and the target is greater than the current best, jump to the
    # next string. Otherwise, if it's the same add it to best_strings, or if
    # it's less overwrite best_strings.
    for string in feature_strings[1:]:
        new_distance = hamming(target_feature_string, string[1])
        if new_distance > best_distance:
            continue
        elif new_distance < best_distance:
            best_distance = new_distance
            best_strings = [string[0]]
        else:
            best_strings.append(string[0])
    # Find the shortest of these strings, because we want to deparse
    # into the simplest segments possible.
    deparsed_segment = min(best_strings, key=len)
    # Add the new match to the cache.
    deparse_cache[target_feature_string] = deparsed_segment
    return deparsed_segment
def initialise_cache():
    '''Creates (or resets) the global cache for deparsing, where segment
    matches will be stored.  Must run before segment_match is used.'''
    global deparse_cache
    deparse_cache = {}
def deparse_words(words, segments, feature_strings):
    '''Given a list of Words, return a list of IPA strings, one for each
    word.  (The *segments* argument is currently unused.)'''
    # Fresh cache for this deparse run.
    initialise_cache()
    # Bind the candidate list once so each segment lookup is a single call.
    deparse = partial(segment_match, feature_strings)
    result = []
    for word in words:
        result.append(''.join(deparse(seg) for seg in word.segments))
    return result
| {
"repo_name": "kdelwat/LangEvolve",
"path": "engine/deparse.py",
"copies": "1",
"size": "3568",
"license": "mit",
"hash": 9019424864553604000,
"line_mean": 30.8571428571,
"line_max": 79,
"alpha_frac": 0.5877242152,
"autogenerated": false,
"ratio": 4.421313506815365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5509037722015365,
"avg_score": null,
"num_lines": null
} |
from functools import partialmethod as P
from unittest.mock import MagicMock
from ..CPU import CPU32
from ..FPU import binary80
from ..util import Instruction
if __debug__:
import logging
logger = logging.getLogger(__name__)
# FLD
class FLD(Instruction):
    """FLD: push a floating-point value from memory onto the FPU stack.

    The register form FLD ST(i) (opcodes 0xD9C0+i) is stubbed out with a
    MagicMock that always reports "not handled".
    """

    # Stub: register-source FLD is not implemented yet.
    m_st = MagicMock(return_value=False)

    def __init__(self):
        self.opcodes = {
            # FLD m32fp / m64fp / m80fp (selected by the ModRM /reg field)
            0xD9: P(self.m_fp, bits=32, REG=0),
            0xDD: P(self.m_fp, bits=64, REG=0),
            0xDB: P(self.m_fp, bits=80, REG=5),
            **{
                0xD9C0 + i: P(self.m_st, i=i)
                for i in range(8)
            }
        }

    def m_fp(vm: CPU32, bits: int, REG: int):
        # NOTE(review): no 'self' parameter -- 'vm' occupies that slot;
        # confirm against how Instruction dispatches these handlers.
        ModRM = vm.mem.get_eip(vm.eip, 1)
        _REG = (ModRM & 0b00111000) >> 3
        if _REG != REG:
            # The /reg field selects a different instruction sharing this
            # opcode byte; decline so another handler can claim it.
            return False
        # sz = vm.operand_size
        RM, R = vm.process_ModRM()
        _, loc = RM
        flt80 = vm.mem.get_float(loc, bits)
        vm.fpu.push(flt80)
        logger.debug('fld%d 0x%08x = %s', bits // 8, loc, flt80)
        return True
# FILD
class FILD(Instruction):
    """FILD: load a signed integer from memory, convert it to binary80
    and push it onto the FPU stack (16-, 32- or 64-bit sources)."""

    def __init__(self):
        self.opcodes = {
            0xDB: P(self.m_int, is32bit=True, REG=0),   # FILD m32int
            0xDF: [
                P(self.m_int, is32bit=False, REG=0),    # FILD m16int
                P(self.m_int, is32bit=False, REG=5),    # FILD m64int
            ]
        }

    def m_int(vm: CPU32, is32bit: bool, REG: int) -> bool:
        ModRM = vm.mem.get_eip(vm.eip, 1)
        _REG = (ModRM & 0b00111000) >> 3
        if _REG != REG:
            return False
        # Source size in bytes, determined by the opcode/REG combination.
        sz = {
            (False, 0): 2,
            (True, 0): 4,
            (False, 5): 8
        }[(is32bit, REG)]
        RM, R = vm.process_ModRM()
        _, loc = RM
        imm = vm.mem.get(loc, sz)
        flt80 = binary80.from_int(imm)
        vm.fpu.push(flt80)
        logger.debug('fild%d %d', sz * 8, imm)
        return True
# FST / FSTP
class FST(Instruction):
    """FST/FSTP: store ST(0) to memory, optionally popping the stack.

    The register forms (0xDDD0+i / 0xDDD8+i) are stubbed out with a
    MagicMock that always reports "not handled".
    """

    # Stub: register-destination FST/FSTP is not implemented yet.
    st = MagicMock(return_value=False)

    def __init__(self):
        self.opcodes = {
            0xD9: [
                P(self.m_fp, bits=32, REG=2),  # FST m32fp
                P(self.m_fp, bits=32, REG=3),  # FSTP m32fp
            ],
            0xDD: [
                P(self.m_fp, bits=64, REG=2),  # FST m64fp
                P(self.m_fp, bits=64, REG=3),  # FSTP m64fp
            ],
            0xDB: P(self.m_fp, bits=80, REG=7),  # FSTP m80fp
            **{
                0xDDD0 + i: P(self.st, i=i, pop=False)
                for i in range(8)
            },
            **{
                0xDDD8 + i: P(self.st, i=i, pop=True)
                for i in range(8)
            }
        }

    def m_fp(vm, bits: int, REG: int):
        ModRM = vm.mem.get_eip(vm.eip, 1)
        _REG = (ModRM & 0b00111000) >> 3
        if _REG != REG:
            return False
        RM, R = vm.process_ModRM()
        _, loc = RM
        data = vm.fpu.ST(0)
        vm.mem.set_float(loc, bits // 8, data)
        if R[1] == 2:
            # /2 encodes FST: leave ST(0) on the stack.
            logger.debug('fst 0x%08x := %s', loc, data)
        else:
            # /3 (or /7 for m80) encodes FSTP: pop after storing.
            vm.fpu.pop()
            logger.debug('fstp 0x%08x := %s', loc, data)
        return True
# FIST / FISTP
class FIST(Instruction):
    """FIST/FISTP: convert ST(0) to a signed integer and store it to
    memory; the FISTP forms pop the FPU stack afterwards."""

    def __init__(self):
        self.opcodes = {
            0xDF: [
                P(self.fist, size=2, REG=2),  # FIST m16int
                P(self.fist, size=2, REG=3),  # FISTP m16int
                P(self.fist, size=8, REG=7),  # FISTP m64int
            ],
            0xDB: [
                P(self.fist, size=4, REG=2),  # FIST m32int
                P(self.fist, size=4, REG=3),  # FISTP m32int
            ]
        }

    def fist(vm, size: int, REG: int) -> bool:
        ModRM = vm.mem.get_eip(vm.eip, 1)
        _REG = (ModRM & 0b00111000) >> 3
        if _REG != REG:
            return False
        RM, R = vm.process_ModRM()
        _, loc = RM
        # Convert ST(0) to int (rounding behaviour is whatever
        # binary80.__int__ implements -- TODO confirm it honours RC bits).
        SRC = int(vm.fpu.ST(0))
        vm.mem.set(loc, size, SRC)
        if REG != 2:
            # /3 and /7 are the popping (FISTP) variants.
            vm.fpu.pop()
            logger.debug('fistp 0x%08x := %d', loc, SRC)
        else:
            logger.debug('fist 0x%08x := %d', loc, SRC)
        return True
# FMUL/FMULP/FIMUL
class FMUL(Instruction):
    """Floating-point multiply.

    Despite the FMUL/FMULP/FIMUL heading, only the register form
    FMULP ST(i), ST(0) (opcodes 0xDEC8+i) is implemented here.
    """

    def __init__(self):
        self.opcodes = {
            **{
                0xDEC8 + i: P(self.fmulp, i=i)
                for i in range(8)
            }
        }

    def fmulp(vm, i: int) -> bool:
        # ST(i) := ST(i) * ST(0), then pop the stack.
        res = vm.fpu.mul(i, 0)
        vm.fpu.pop()
        logger.debug('fmulp (ST(%d) = %s)', i + 1, res)
        return True
# FADDP
class FADDP(Instruction):
    """FADDP ST(i), ST(0): add ST(0) into ST(i), then pop the stack
    (opcodes 0xDEC0+i)."""

    def __init__(self):
        self.opcodes = {
            **{
                0xDEC0 + i: P(self.faddp, i=i)
                for i in range(8)
            }
        }

    def faddp(vm, i: int) -> bool:
        res = vm.fpu.add(i, 0)
        logger.debug('faddp ST(%d), ST(0) (ST(%d) := %s)', i, i + 1, res)
        vm.fpu.pop()
        return True
# FDIV/FDIVP
class FDIV(Instruction):
    """Floating-point divide (register forms only)."""

    def __init__(self):
        self.opcodes = {
            # FDIV ST(0), ST(i)
            **{
                0xD8F0 + i: P(self.fdiv, i=i, reverse=True)
                for i in range(8)
            },
            # FDIV ST(i), ST(0)
            **{
                0xDCF8 + i: P(self.fdiv, i=i, reverse=False)
                for i in range(8)
            },
            # FDIVP ST(i), ST(0)
            **{
                0xDEF8 + i: P(self.fdivp, i=i)
                for i in range(8)
            },
        }

    def fdiv(vm, i: int, reverse: bool) -> bool:
        """
        Divide register by register.

        :param i: FPU stack index of the second operand.
        :param reverse: If `False`, compute ST0 = ST0 / STi, otherwise STi = STi / ST0
        :return: always True (opcode handled)
        """
        # NOTE(review): the branches look swapped relative to the docstring
        # -- here reverse=True performs div(0, i); confirm which argument
        # order vm.fpu.div uses for dividend/divisor.
        if reverse:
            res = vm.fpu.div(0, i)
            logger.debug('fdiv ST(0), ST(%d) (ST(0) := %s)', i, res)
        else:
            res = vm.fpu.div(i, 0)
            logger.debug('fdiv ST(%d), ST(0) (ST(%d) := %s)', i, i, res)
        return True

    def fdivp(vm, i: int) -> bool:
        # ST(i) := ST(i) / ST(0), then pop.
        res = vm.fpu.div(i, 0)
        vm.fpu.pop()
        logger.debug('fdivp ST(%d), ST(0) (ST(%d) := %s)', i, i + 1, res)
        return True
# FUCOM/FUCOMP/FUCOMPP/FCOMI/FCOMIP/FUCOMIP/FUCOMIPP
class FCOMP(Instruction):
    """
    FUCOM/FUCOMP/FUCOMPP/FCOMI/FCOMIP/FUCOMI/FUCOMIP: compare ST(0) with ST(i).

    The plain variants report the result in the FPU condition codes
    (C3/C2/C0); the *I variants write ZF/PF/CF in EFLAGS instead.
    `pop` tells how many stack pops follow the comparison (0, 1 or 2).
    """
    def __init__(self):
        self.opcodes = {
            # FUCOM ST(i).
            # BUGFIX: this group was registered at 0xD0E0 + i, but 0xD0 is not
            # an x87 escape byte (those are 0xD8-0xDF); the encoding of
            # FUCOM ST(i) is 0xDD 0xE0+i.
            **{
                0xDDE0 + i: P(self.fucom, pop=0, i=i, set_eflags=False)
                for i in range(8)
            },
            # FUCOMP ST(i)
            **{
                0xDDE8 + i: P(self.fucom, pop=1, i=i, set_eflags=False)
                for i in range(8)
            },
            # FUCOMPP: compare ST(0) with ST(1), pop twice
            0xDAE9: P(self.fucom, pop=2, i=1, set_eflags=False),
            # FCOMI
            **{
                0xDBF0 + i: P(self.fcom, pop=0, i=i, set_eflags=True)
                for i in range(8)
            },
            # FCOMIP
            **{
                0xDFF0 + i: P(self.fcom, pop=1, i=i, set_eflags=True)
                for i in range(8)
            },
            # FUCOMI
            **{
                0xDBE8 + i: P(self.fucom, pop=0, i=i, set_eflags=True)
                for i in range(8)
            },
            # FUCOMIP
            **{
                0xDFE8 + i: P(self.fucom, pop=1, i=i, set_eflags=True)
                for i in range(8)
            },
        }
    def fucom(vm, pop: int, i: int, set_eflags: bool) -> True:
        # Vol. 2A FUCOM/FUCOMP/FUCOMPP — Unordered Compare Floating Point Values
        # TODO: the unordered case should raise an FPU invalid-operation
        # exception for the signalling (non-U) variants.
        ST0, STi = vm.fpu.ST(0), vm.fpu.ST(i)
        if ST0 > STi:
            flags = 0, 0, 0  # ST0 > STi
        elif ST0 < STi:
            flags = 0, 0, 1  # ST0 < STi
        elif ST0 == STi:
            flags = 1, 0, 0  # equal
        else:
            # unordered: at least one operand is NaN
            flags = 1, 1, 1
        if set_eflags:
            # *I variants report through EFLAGS (ZF, PF, CF)...
            vm.reg.eflags.ZF, vm.reg.eflags.PF, vm.reg.eflags.CF = flags
        else:
            # ...the rest through the FPU condition codes (C3, C2, C0).
            vm.fpu.status.C3, vm.fpu.status.C2, vm.fpu.status.C0 = flags
        for _ in range(pop):
            vm.fpu.pop()
        return True
    # FCOMI/FCOMIP differ from FUCOMI/FUCOMIP only in how NaNs are reported
    # to the (unmodelled) exception machinery, so the ordered flavour shares
    # the unordered implementation instead of duplicating it.
    fcom = fucom
# FLDCW
class FLDCW(Instruction):
    """FLDCW m2byte: load the FPU control word from memory."""
    def __init__(self):
        self.opcodes = {
            0xD9: self.m2byte
        }
    def m2byte(vm, REG=5) -> bool:
        # 0xD9 is an opcode group; FLDCW is the member with ModRM.REG == 5.
        # Peek at ModRM and decline (consuming nothing) if this is another
        # member of the group.
        ModRM = vm.mem.get_eip(vm.eip, 1)
        _REG = (ModRM & 0b00111000) >> 3
        if _REG != REG:
            return False
        RM, R = vm.process_ModRM()
        _, loc = RM
        # The control word is a 16-bit value.
        control = vm.mem.get(loc, 2)
        vm.fpu.control.value = control
        logger.debug('fldcw 0x%08x := %04x', loc, control)
        return True
# FSTCW/FNSTCW
class FSTCW(Instruction):
    """
    FSTCW/FNSTCW m2byte: store the FPU control word to memory.

    FNSTCW (0xD9 /7) stores without checking for pending FPU exceptions;
    the 0x9B (FWAIT) prefixed form is FSTCW, which should check first.
    """
    def __init__(self):
        self.opcodes = {
            0xD9: P(self.m2byte, check=False),   # FNSTCW
            0x9BD9: P(self.m2byte, check=True)   # FSTCW (FWAIT-prefixed)
        }
    def m2byte(vm, check: bool, REG=7) -> bool:
        # FSTCW is the ModRM.REG == 7 member of the 0xD9 opcode group;
        # decline without consuming anything if this is another member.
        ModRM = vm.mem.get_eip(vm.eip, 1)
        _REG = (ModRM & 0b00111000) >> 3
        if _REG != REG:
            return False
        RM, R = vm.process_ModRM()
        _, loc = RM
        vm.mem.set(loc, 2, vm.fpu.control.value)
        # Consistency fix: the control word is 16 bits wide, so pad the log
        # to four hex digits (%04x), matching FLDCW above.
        if check:
            # TODO: FSTCW should first check for pending unmasked exceptions.
            logger.debug('fstcw 0x%08x := %04x', loc, vm.fpu.control.value)
        else:
            logger.debug('fnstcw 0x%08x := %04x', loc, vm.fpu.control.value)
        return True
# FXCH
class FXCH(Instruction):
    """FXCH ST(i): exchange ST(0) with ST(i) on the FPU register stack."""
    def __init__(self):
        # One opcode per stack register: 0xD9C8 .. 0xD9CF.
        self.opcodes = {
            0xD9C8 + st: P(self.fxch, i=st)
            for st in range(8)
        }
    def fxch(vm, i: int) -> True:
        top, other = vm.fpu.ST(0), vm.fpu.ST(i)
        vm.fpu.store(0, other)
        vm.fpu.store(i, top)
        # FXCH clears C1.
        vm.fpu.status.C1 = 0
        logger.debug('fxch ST(%d)', i)
        return True
| {
"repo_name": "ForceBru/PyVM",
"path": "VM/instructions/floating.py",
"copies": "1",
"size": "10127",
"license": "mit",
"hash": -5023852498283869000,
"line_mean": 23.3389423077,
"line_max": 86,
"alpha_frac": 0.4462222222,
"autogenerated": false,
"ratio": 2.992021276595745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8938105346717937,
"avg_score": 0.000027630415561450043,
"num_lines": 416
} |
from functools import partialmethod as P
from unittest.mock import MagicMock
from ..CPU import CPU32
from ..util import Instruction, to_int, to_signed, byteorder
if __debug__:
from ..debug import debug_operand, debug_register_operand
import logging
logger = logging.getLogger(__name__)
# MAXVALS[n] is the maximum value of an unsigned n-byte number.
MAXVALS = [None, (1 << 8) - 1, (1 << 16) - 1, None, (1 << 32) - 1]
# SIGNS[n] is the sign-bit mask of an n-byte number (equivalently, the maximum
# absolute value of a signed n-byte number).  The shifts are parenthesized
# explicitly: the previous `1 << 8 - 1` relied on `-` binding tighter than
# `<<` and was easy to misread as `(1 << 8) - 1`.
SIGNS = [None, 1 << (8 - 1), 1 << (16 - 1), None, 1 << (32 - 1)]
class NOP(Instruction):
    """NOP (0x90) and the multi-byte NOP r/m (0x0F1F): do nothing."""
    def __init__(self):
        self.opcodes = {0x90: self.nop, 0x0F1F: self.rm}
    def nop(vm: CPU32) -> True:
        # Single-byte NOP: no operands, no effect.
        if __debug__:
            logger.debug('nop')
        return True
    def rm(vm: CPU32) -> True:
        # Multi-byte NOP: decode (and discard) the ModRM operand so the
        # instruction stream stays in sync, then do nothing.
        vm.process_ModRM()
        if __debug__:
            logger.debug('nop')
        return True
####################
# JMP
####################
# Pre-compiled condition expressions for Jcc/SETcc/CMOVcc.  Each one is
# evaluated with eval() inside a handler, where the local names `vm` (and,
# for JCXZ, `sz`) are in scope.
#
# NOTE: the second argument of compile() is the "filename"; it is abused
# here to smuggle the mnemonic suffix into the code object, so handlers can
# log e.g. 'j' + jump.co_filename == 'jz' without a separate lookup table.
JO = compile('vm.reg.eflags.OF', 'o', 'eval')
JNO = compile('not vm.reg.eflags.OF', 'no', 'eval')
JB = compile('vm.reg.eflags.CF', 'b', 'eval')
JNB = compile('not vm.reg.eflags.CF', 'nb', 'eval')
JZ = compile('vm.reg.eflags.ZF', 'z', 'eval')
JNZ = compile('not vm.reg.eflags.ZF', 'nz', 'eval')
JBE = compile('vm.reg.eflags.CF or vm.reg.eflags.ZF', 'be', 'eval')
JNBE = compile('not vm.reg.eflags.CF and not vm.reg.eflags.ZF', 'nbe', 'eval')
JS = compile('vm.reg.eflags.SF', 's', 'eval')
JNS = compile('not vm.reg.eflags.SF', 'ns', 'eval')
JP = compile('vm.reg.eflags.PF', 'p', 'eval')
JNP = compile('not vm.reg.eflags.PF', 'np', 'eval')
JL = compile('vm.reg.eflags.SF != vm.reg.eflags.OF', 'l', 'eval')
JNL = compile('vm.reg.eflags.SF == vm.reg.eflags.OF', 'nl', 'eval')
JLE = compile('vm.reg.eflags.ZF or vm.reg.eflags.SF != vm.reg.eflags.OF', 'le', 'eval')
JNLE = compile('not vm.reg.eflags.ZF and vm.reg.eflags.SF == vm.reg.eflags.OF', 'nle', 'eval')
# Indexed by the low nibble of the opcode (0x70+n, 0x0F80+n, 0x0F90+n, 0x0F40+n).
JUMPS = [JO, JNO, JB, JNB, JZ, JNZ, JBE, JNBE, JS, JNS, JP, JNP, JL, JNL, JLE, JNLE]
# Unconditional jump ('j' + 'mp' == 'jmp' in the log).
_JMP = compile('True', 'mp', 'eval')
# JCXZ/JECXZ: jump if (E)CX is zero; `sz` is supplied by the handler.
JCXZ = compile('not vm.reg.get(0, sz)', 'cxz', 'eval')
class JMP(Instruction):
    """
    Jump to a memory address (conditionally or unconditionally).

    Operation:
        EIP = destination

    The condition code objects from JUMPS above are evaluated with eval();
    they rely on the local names `vm` and `sz` used in the handlers.
    """
    def __init__(self):
        self.opcodes = {
            0xEB: P(self.rel, _8bit=True, jump=_JMP),
            0xE9: P(self.rel, _8bit=False, jump=_JMP),
            0xFF: self.rm_m,
            0xEA: P(self.ptr, _8bit=False),
            0xE3: P(self.rel, _8bit=True, jump=JCXZ),
            # Jcc rel8: 0x70 + condition nibble
            **{
                opcode: P(self.rel, _8bit=True, jump=JUMPS[opcode % 0x70])
                for opcode in range(0x70, 0x80)
            },
            # Jcc rel16/32: 0x0F80 + condition nibble
            **{
                opcode: P(self.rel, _8bit=False, jump=JUMPS[opcode % 0x0F80])
                for opcode in range(0x0F80, 0x0F90)
            }
        }
    def rel(vm: CPU32, _8bit, jump) -> True:
        # Read the signed displacement; EIP then points past the instruction.
        sz = 1 if _8bit else vm.operand_size
        d = vm.mem.get(vm.eip, sz, True)
        vm.eip += sz
        # `jump` is a compiled condition expression; it reads the local `vm`
        # (and `sz` for JCXZ).  Falling through still counts as handled.
        if not eval(jump):
            return True
        tmpEIP = vm.eip + d
        if vm.operand_size == 2:
            # 16-bit operand size: the target wraps around 64K.
            tmpEIP &= MAXVALS[vm.operand_size]
        assert tmpEIP < vm.mem.size
        vm.eip = tmpEIP
        if __debug__:
            # co_filename carries the mnemonic suffix (see JUMPS above).
            logger.debug('j%s rel%d 0x%08x', jump.co_filename, sz * 8, vm.eip)
        return True
    def rm_m(vm: CPU32) -> bool:
        # 0xFF is an opcode group; /4 is JMP r/m, /5 is far JMP m.
        # Remember EIP so we can decline cleanly for other group members.
        old_eip = vm.eip
        sz = vm.operand_size
        RM, R = vm.process_ModRM()
        if R[1] == 4:  # this is jmp r/m
            type, loc = RM
            tmpEIP = (type).get(loc, vm.address_size)
            vm.eip = tmpEIP & MAXVALS[vm.address_size]
            assert vm.eip < vm.mem.size
            if __debug__:
                logger.debug('jmp rm%d 0x%x', sz * 8, vm.eip)
            return True
        elif R[1] == 5:  # this is jmp m (far jump through memory)
            # NOTE(review): this reads the pointer's addresses from the
            # instruction stream rather than from the ModRM memory operand
            # decoded above — confirm against callers before relying on it.
            segment_selector_address = to_int(vm.mem.get(vm.eip, vm.address_size), True)
            vm.eip += vm.address_size
            offset_address = to_int(vm.mem.get(vm.eip, vm.address_size), True)
            vm.eip += vm.address_size
            sz = 4 if vm.operand_size == 4 else 2
            segment_selector = to_int(vm.mem.get(segment_selector_address, 2), True)
            offset = to_int(vm.mem.get(offset_address, sz))
            tempEIP = offset
            assert vm.eip in vm.mem.bounds
            vm.reg.CS = segment_selector  # TODO: do something with CS
            if vm.operand_size == 4:
                vm.eip = tempEIP
            else:
                vm.eip = tempEIP & 0x0000FFFF
            if __debug__:
                logger.debug('jmp m%d 0x%x', sz * 8, vm.eip)
            return True
        # Not our group member: rewind so another handler can re-decode.
        vm.eip = old_eip
        return False
    def ptr(vm: CPU32) -> True:
        # JMP ptr16:16/32 — direct far jump: selector then offset inline.
        segment_selector = to_int(vm.mem.get(vm.eip, 2), True)
        vm.eip += 2
        sz = 4 if vm.operand_size == 4 else 2
        offset = to_int(vm.mem.get(vm.eip, sz), True)
        vm.eip += sz
        tempEIP = offset
        assert vm.eip in vm.mem.bounds
        vm.reg.CS = segment_selector  # TODO: do something with CS
        if vm.operand_size == 4:
            vm.eip = tempEIP
        else:
            # 16-bit operand size: only the low 16 bits of the target count.
            vm.eip = tempEIP & 0x0000FFFF
        if __debug__:
            logger.debug('jmp m%d 0x%x', sz * 8, vm.eip)
        return True
####################
# SETcc
####################
class SETcc(Instruction):
    """SETcc r/m8: store 1 in the byte operand if the condition holds, else 0."""
    def __init__(self):
        self.opcodes = {
            # 0x0F90 + condition nibble, same ordering as JUMPS.
            opcode: P(self.rm8, cond=JUMPS[opcode % 0x0F90])
            for opcode in range(0x0F90, 0x0FA0)
        }
    def rm8(vm: CPU32, cond) -> True:
        sz = 1  # we know it's 1 byte
        RM, R = vm.process_ModRM()
        # NOTE: `type` shadows the builtin; kept as-is to match the file.
        type, loc = RM
        # `cond` is a compiled flags expression that reads the local `vm`.
        byte = eval(cond)
        (type).set(loc, sz, byte)
        if __debug__:
            # co_filename carries the mnemonic suffix (see JUMPS).
            logger.debug('set%s %s := %d', cond.co_filename, debug_operand(RM, sz), byte)
        return True
####################
# CMOVcc
####################
class CMOVCC(Instruction):
    """CMOVcc r, r/m: move only if the condition holds."""
    def __init__(self):
        self.opcodes = {
            # 0x0F40 + condition nibble, same ordering as JUMPS.
            opcode: P(self.r_rm, cond=JUMPS[opcode % 0x0F40])
            for opcode in range(0x0F40, 0x0F50)
        }
    def r_rm(vm: CPU32, cond) -> True:
        sz = vm.operand_size
        # Decode operands unconditionally so EIP advances either way.
        RM, R = vm.process_ModRM()
        # `cond` is a compiled flags expression that reads the local `vm`.
        if not eval(cond):
            return True
        type, loc = RM
        data = (type).get(loc, sz)
        vm.reg.set(R[1], sz, data)
        if __debug__:
            logger.debug(
                'cmov%s %s, %s=0x%x',
                cond.co_filename,
                debug_operand(R, sz), debug_operand(RM, sz),
                data
            )
        return True
####################
# BT
####################
class BT(Instruction):
    """
    BT r/m, r (0x0FA3) and BT r/m, imm8 (0x0FBA /4): bit test.

    Copies the selected bit of the first operand into EFLAGS.CF.
    """
    def __init__(self):
        self.opcodes = {
            0x0FBA: self.rm_imm,
            0x0FA3: self.rm_r
        }
    def rm_r(vm: CPU32) -> True:
        sz = vm.operand_size
        RM, R = vm.process_ModRM()
        _type, loc = RM
        offset = vm.reg.get(R[1], sz)
        if isinstance(_type, type(vm.reg)):
            # Register operand: the bit offset wraps around the register width.
            offset %= sz * 8
            base = (_type).get(loc, sz)
        else:
            # Memory operand: the offset addresses the byte containing the
            # bit (BUGFIX: previously the full, unreduced register offset was
            # used to shift a fixed sz-byte read at `loc`, so any offset
            # >= sz*8 silently produced 0).  Assumes little-endian memory,
            # as befits an x86 emulator.
            base = vm.mem.get(loc + (offset >> 3), 1)
            offset &= 7
        vm.reg.eflags.CF = (base >> offset) & 1
        if __debug__:
            logger.debug(
                'bt %s, 0x%02x',
                debug_operand(RM, sz),
                offset
            )
        return True
    def rm_imm(vm: CPU32) -> bool:
        # 0x0FBA is an opcode group (/4=BT, /5=BTS, /6=BTR, /7=BTC); remember
        # EIP so we can decline cleanly if this is not BT.
        old_eip = vm.eip
        sz = vm.operand_size
        RM, R = vm.process_ModRM()
        _type, loc = RM
        if R[1] != 4:  # this is not bt
            # BUGFIX: rewind EIP before declining — process_ModRM() above has
            # already advanced it, and the next handler expects to re-decode
            # the ModRM byte itself (same pattern as JMP.rm_m / CALL.rm_m).
            vm.eip = old_eip
            return False
        offset = vm.mem.get_eip(vm.eip, 1)  # the bit offset is always imm8
        vm.eip += 1
        # The immediate offset is taken modulo the operand size.
        offset %= sz * 8
        if isinstance(_type, type(vm.mem)):
            # Memory operand: read the single byte that contains the bit.
            base = vm.mem.get(loc + (offset >> 3), 1)
            offset &= 7
        else:
            base = vm.reg.get(loc, sz)
        vm.reg.eflags.CF = (base >> offset) & 1
        if __debug__:
            logger.debug(
                'bt %s, 0x%02x',
                debug_operand(RM, sz),
                offset
            )
        return True
####################
# INT
####################
class INT(Instruction):
    """
    Call to interrupt procedure: INT3 (0xCC) and INT imm8 (0xCD).
    """
    def __init__(self):
        self.opcodes = {0xCC: self._3, 0xCD: self.imm}
    def _3(vm: CPU32) -> True:
        # INT3 is the debugger breakpoint; report it on descriptor 2 (stderr).
        vm.descriptors[2].write("[!] It's a trap! (literally)")
        return True
    def imm(vm: CPU32) -> True:
        # The interrupt vector is always an 8-bit immediate.
        vector = vm.mem.get_eip(vm.eip, 1)
        vm.eip += 1
        vm.interrupt(vector)
        if __debug__:
            logger.debug('int 0x%x', vector)
        return True
####################
# CALL
####################
class CALL(Instruction):
    """
    Call a procedure: push the return address, then jump.
    """
    def __init__(self):
        self.opcodes = {
            0xE8: self.rel,
            0xFF: self.rm_m,
            0x9A: self.ptr
        }
    # TODO: implement far calls
    # rm_m = MagicMock(return_value=False)
    # Far CALL is stubbed out: the mock always declines the opcode.
    ptr = MagicMock(return_value=False)
    def rm_m(vm: CPU32) -> bool:
        # 0xFF is an opcode group; /2 is CALL r/m, /3 is far CALL m
        # (unimplemented).  Remember EIP so we can decline cleanly.
        old_eip = vm.eip
        sz = vm.operand_size
        RM, R = vm.process_ModRM()
        if R[1] == 2:  # this is call r/m
            type, loc = RM
            data = (type).get(loc, sz)
            tmpEIP = data & MAXVALS[sz]
            # TODO: check whether tmpEIP is OK
            # Push the return address (EIP now points past the instruction).
            vm.stack_push(vm.eip)
            vm.eip = tmpEIP
            if __debug__:
                logger.debug(
                    'call %s=0x%08x => 0x%08x',
                    debug_operand(RM, sz),
                    data, vm.eip
                )
            return True
        elif R[1] == 3:  # this is call m (far call) -- not implemented
            vm.eip = old_eip
            return False
        # Not our group member: rewind so another handler can re-decode.
        vm.eip = old_eip
        return False
    def rel(vm: CPU32) -> True:
        # CALL rel16/32: signed displacement relative to the next instruction.
        sz = vm.operand_size
        dest = vm.mem.get(vm.eip, sz, True)
        vm.eip += sz
        tmpEIP = vm.eip + dest
        # Push the return address, then transfer control.
        vm.stack_push(vm.eip)
        vm.eip = tmpEIP
        if __debug__:
            logger.debug('call 0x%08x => 0x%08x', dest, vm.eip)
        return True
####################
# RET
####################
class RET(Instruction):
    """
    Return to calling procedure: pop the return address into EIP.
    """
    def __init__(self):
        self.opcodes = {
            0xC3: self.near,
            0xCB: self.far,
            0xC2: self.near_imm,
            0xCA: self.far_imm,
        }
    # TODO: implement far returns
    # Far returns are stubbed out: the mocks always decline the opcode.
    far = MagicMock(return_value=False)
    far_imm = MagicMock(return_value=False)
    def near(vm: CPU32) -> True:
        sz = vm.operand_size
        # NOTE(review): the popped return address goes through to_signed —
        # presumably to match how CALL pushed it; confirm against stack_push.
        vm.eip = to_signed(vm.stack_pop(sz), sz)
        if __debug__:
            logger.debug('ret 0x%08x', vm.eip)
        return True
    def near_imm(vm: CPU32) -> True:
        sz = vm.operand_size
        # RET imm16: the byte count to release is always a 16-bit immediate.
        imm = vm.mem.get(vm.eip, 2)
        vm.eip = to_signed(vm.stack_pop(sz), sz)
        esp = 4
        # Release `imm` additional bytes of stack (the callee's arguments).
        vm.reg.set(esp, vm.stack_address_size, vm.reg.get(esp, vm.stack_address_size) + imm)
        if __debug__:
            logger.debug('ret 0x%08x', vm.eip)
        return True
####################
# ENTER
####################
class ENTER(Instruction):
    """ENTER imm16, imm8: make a stack frame for a procedure."""
    def __init__(self):
        self.opcodes = {
            0xC8: self.enter
        }
    def enter(vm: CPU32):
        # Operands: 16-bit frame size, then 8-bit nesting level (mod 32).
        AllocSize = vm.mem.get_eip(vm.eip, 2)
        vm.eip += 2
        NestingLevel = vm.mem.get_eip(vm.eip, 1) % 32
        vm.eip += 1
        # Save the caller's frame pointer and remember the new frame base.
        ebp = vm.reg.get(5, vm.operand_size)
        vm.stack_push(ebp)
        FrameTemp = vm.reg.get(4, vm.operand_size)  # ESP
        if NestingLevel == 0:
            ...
        elif NestingLevel == 1:
            # BUGFIX: stack_push takes an integer everywhere else in this
            # file (cf. `vm.stack_push(ebp)` above and CALL); the previous
            # code passed `FrameTemp.to_bytes(...)` here instead.
            vm.stack_push(FrameTemp)
        else:
            # Display copying for nesting levels > 1 is not supported yet.
            raise RuntimeError(f"Instruction 'enter {AllocSize}, {NestingLevel}' is not implemented yet")
        # New frame: EBP = frame base, ESP moves down by the allocation size.
        vm.reg.ebp = FrameTemp & MAXVALS[vm.operand_size]
        vm.reg.esp = vm.reg.get(4, vm.operand_size) - AllocSize
        if __debug__:
            logger.debug('enter 0x%04x, 0x%02x', AllocSize, NestingLevel)
        return True
####################
# LEAVE
####################
class LEAVE(Instruction):
    def __init__(self):
        self.opcodes = {0xC9: self.leave}
    def leave(vm: CPU32) -> True:
        """
        High-level procedure exit.
        Operation:
        1) ESP <- EBP
        2) EBP <- pop()
        """
        # Register numbers; they depend on 'self.address_size'/'self.operand_size'.
        esp_id, ebp_id = 4, 5
        frame_base = vm.reg.get(ebp_id, vm.address_size)
        vm.reg.set(esp_id, vm.address_size, frame_base)
        vm.reg.set(ebp_id, vm.operand_size, vm.stack_pop(vm.operand_size))
        if __debug__:
            logger.debug('leave')
        return True
####################
# CPUID
####################
class CPUID(Instruction):
    """CPUID (0x0FA2): processor identification (see https://en.wikipedia.org/wiki/CPUID)."""
    def __init__(self):
        self.opcodes = {0x0FA2: self.cpuid}
    def cpuid(vm: CPU32) -> True:
        EAX = 0
        max_input_value = 0x01
        leaf = vm.reg.get(EAX, 4)
        if leaf == 0x00:
            # Leaf 0: highest supported leaf in EAX plus the 'GenuineIntel'
            # vendor string split across EBX, EDX, ECX (in that order).
            vm.reg.eax = max_input_value
            vm.reg.ebx = int.from_bytes(b'Genu', byteorder)
            vm.reg.edx = int.from_bytes(b'ineI', byteorder)
            vm.reg.ecx = int.from_bytes(b'ntel', byteorder)
        elif leaf == 0x01:
            # Leaf 1: Processor Model, Family, Stepping in EAX
            # (https://en.wikichip.org/wiki/intel/cpuid).
            # Family 4, core 80486DX:
            #   reserved 0b0000 | extended family 0b0000_0000
            #   | extended model 0b0000 | reserved+type 0b00 00 (original OEM)
            #   | family 0b0100 | model 0b0001 | stepping 0b0000
            vm.reg.eax = 0b0000_0000_0000_0000_0000_0100_0001_0000
            vm.reg.ebx = 0
            vm.reg.ecx = 0
            # Feature bits in EDX: bit 15 (CMOV) and bit 0 (FPU) are set.
            vm.reg.edx = 0b0000_0000_0000_0000_1000_0000_0000_0001
        else:
            raise RuntimeError(f'Unsupported EAX value for CPUID: 0x{leaf:08X}')
        if __debug__:
            logger.debug('cpuid')
        return True
####################
# HLT
####################
class HLT(Instruction):
    """HLT (0xF4): halt emulation by raising."""
    def __init__(self):
        self.opcodes = {0xF4: self.hlt}
    def hlt(vm: CPU32):
        # EIP already points past the opcode, so step back one byte to
        # report the address of the HLT instruction itself.
        raise RuntimeError(f'HALT @ 0x{vm.eip - 1:08x}')
| {
"repo_name": "ForceBru/PyVM",
"path": "VM/instructions/control.py",
"copies": "1",
"size": "14663",
"license": "mit",
"hash": 8536883855305437000,
"line_mean": 24.4124783362,
"line_max": 130,
"alpha_frac": 0.4838027689,
"autogenerated": false,
"ratio": 3.2001309471846353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4183933716084635,
"avg_score": null,
"num_lines": null
} |
from functools import partialmethod
from flask import flash, url_for, redirect
from wtforms import fields, validators
import wtforms.fields.html5 as html5_fields
from flask_wtf import FlaskForm
from flask_wtf.file import FileAllowed
from fernet import db, images
from fernet.models import User, UserTag
from fernet.util import get_redirect_target, is_safe_url
def flash_errors(form):
    """Flash every validation error of every field in *form* at 'error' level."""
    for field in form:
        for problem in field.errors:
            text = "Error in {} field: {}".format(field.label.text, problem)
            flash(text, 'error')
class TagForm:
    """Namespace for tag forms.

    Do not use this as an object, it is only the methods that are
    interesting.

    WTForms does not allow adding fields after initialization, so we
    use a method to extend a form with a FormField that contains the
    tag fields, checked and ready to go.

    We make sure not to setattr the base as this would modify it
    process wide; instead we let a new class inherit from the base.
    """
    @classmethod
    def extend_form(cls, base, tags, user=None):
        """Return an extended form with tag fields and user modifying method.

        Arguments:
        base: the form to extend
        tags: the tags to extend the form with
        user: a user to check tags against (optional)

        If a user is passed, check the fields of the tags that the user
        has, and set set_user_tags as a method with arguments attached.
        Also set user as an attribute of the extended form.
        """
        # Subclass rather than mutate: setattr on `base` would leak into
        # every other use of that form class in the process.
        class ExtendedBase(base):
            pass
        Tags = cls.tag_form(tags, user)
        ExtendedBase.tags = fields.FormField(Tags)
        if user:
            ExtendedBase.user = user
            # Add TagForm.set_user_tags to Tags together with arguments.
            ExtendedBase.set_user_tags = partialmethod(cls.set_user_tags, user)
        return ExtendedBase
    @classmethod
    def tag_form(cls, tags, user=None):
        """Build a FlaskForm subclass with one BooleanField per tag."""
        class Tags(FlaskForm):
            pass
        for tag in tags:
            # If user has this tag, set its value to checked
            if user and user.has_tag(tag.name):
                field = fields.BooleanField(tag.name, default=True)
            else:
                field = fields.BooleanField(tag.name)
            # Add the field to this class with the name of the tag
            setattr(Tags, tag.name, field)
        Tags.tags = tags
        # Bound on Tags so instances can ask for their checked tag names.
        Tags.checked_tags = partialmethod(cls.checked_tags)
        return Tags
    def checked_tags(self):
        """Get list of checked tags.

        Note: `self` here is an instance of the generated Tags form,
        not of TagForm (see tag_form above).
        """
        tags = self.tags
        checked = []
        for tag in tags:
            tag_field = getattr(self, tag.name)
            if tag_field.data:
                checked.append(tag.name)
        return checked
    @staticmethod
    def set_user_tags(form, user):
        """Update user with new and removed tags."""
        tag_form = form.tags
        for tag in tag_form.tags:
            tag_field = getattr(tag_form, tag.name)
            # If tag field is checked and the user did not already
            # have that tag, give user tag
            if tag_field.data and not user.has_tag(tag.name):
                user_tag = UserTag()
                user_tag.tag = tag
                user.tags.append(user_tag)
            # If tag field isn't checked but the user has that tag,
            # remove it.
            elif not tag_field.data and user.has_tag(tag.name):
                user_tag = UserTag.query.filter(UserTag.user == user,
                                                UserTag.tag == tag,
                                                UserTag.is_active == True
                                                ).one()
                user_tag.end_association()
        db.session.commit()
class LowercaseEmailField(html5_fields.EmailField):
    """EmailField that normalizes its input to lowercase."""
    def process_formdata(self, valuelist):
        """Lowercase the submitted value (if any), then process normally.

        BUGFIX: WTForms passes an empty list when the field is absent from
        the submitted form data, so guard against IndexError.
        """
        if valuelist:
            valuelist[0] = valuelist[0].lower()
        super().process_formdata(valuelist)
class Unique:
    """WTForms validator: fail if the value already exists in the model column."""
    def __init__(self, model, field, message='This element already exists.'):
        self.model = model
        self.field = field
        self.message = message
    def __call__(self, form, field):
        query = db.session.query(self.model).filter(self.field == field.data)
        if query.scalar():
            raise validators.ValidationError(self.message)
class Exists:
    """Validate that the field's value exists in the model.

    (Counterpart of Unique: fails when no matching row is found.)
    """
    def __init__(self, model, field, message='This element does not exist.'):
        self.model = model
        self.field = field
        self.message = message
    def __call__(self, form, field):
        if not (db.session.query(self.model)
                .filter(self.field == field.data).scalar()):
            raise validators.ValidationError(self.message)
class RedirectForm(FlaskForm):
    """Form with a hidden ``next`` field for safe post-submit redirects."""
    next = fields.HiddenField()
    def __init__(self, *args, **kwargs):
        FlaskForm.__init__(self, *args, **kwargs)
        if not self.next.data:
            self.next.data = get_redirect_target() or ''
    def redirect(self, endpoint='index', **values):
        """Redirect to ``next`` if it is safe, else fall back to *endpoint*."""
        destination = self.next.data
        if destination and is_safe_url(destination):
            return redirect(destination)
        fallback = get_redirect_target()
        return redirect(fallback or url_for(endpoint, **values))
class EmailForm(FlaskForm):
    """Form asking for a required, syntactically valid (lowercased) email."""
    email = LowercaseEmailField('Email', validators=[
        validators.InputRequired(),
        validators.Email()
    ])
class ExistingEmailForm(FlaskForm):
    """Form asking for an email address that must belong to a known user."""
    email = LowercaseEmailField('Email', validators=[
        validators.InputRequired(),
        validators.Email(),
        Exists(
            User,
            User.email,
            message='Unknown email')
    ])
class PasswordForm(FlaskForm):
    """Form asking for the user's current password."""
    password = fields.PasswordField(
        'Password',
        validators=[validators.InputRequired()],
        description="Required, your current password"
    )
class NewPasswordForm(FlaskForm):
    """Form asking for a new password of at least 8 characters."""
    new_password = fields.PasswordField(
        'New password',
        validators=[validators.InputRequired(), validators.Length(min=8)],
        description="Required, your new password. At least 8 characters long."
    )
class ChangePasswordForm(PasswordForm, NewPasswordForm):
    """Ask for the current and the new password; verify the current one."""
    def __init__(self, user, *args, **kwargs):
        self.user = user
        super().__init__(*args, **kwargs)
    def validate(self):
        """Run the standard validators, then check the current password."""
        if not FlaskForm.validate(self):
            return False
        if self.user.verify_password(self.password.data):
            return True
        self.password.errors.append("Wrong password.")
        return False
class LoginForm(RedirectForm, EmailForm, PasswordForm):
    """Get login details."""
    remember = fields.BooleanField('Remember me')
    def __init__(self, *args, **kwargs):
        self.user = None
        super().__init__(*args, **kwargs)
    def validate(self):
        """Validate fields, then authenticate; stash the user on success."""
        if not FlaskForm.validate(self):
            return False
        authenticated = User.authenticate(self.email.data, self.password.data)
        if not authenticated:
            return False
        self.user = authenticated
        return True
class AddUserForm(FlaskForm):
    """Form for creating a new user account."""
    first_name = fields.StringField('First Name', validators=[
        validators.InputRequired()
    ])
    last_name = fields.StringField('Last Name', validators=[
        validators.InputRequired()
    ])
    # The address must not collide with an existing account.
    email = LowercaseEmailField('Email', validators=[
        validators.InputRequired(),
        validators.Email(),
        Unique(
            User,
            User.email,
            message="This email is already in use")
    ])
    # Digits with an optional leading '+'; the regex also matches empty input.
    phone = html5_fields.TelField('Phone', validators=[
        validators.Regexp(r'^\+?[0-9]*$')
    ])
class EditUserForm(FlaskForm):
    """Let a user edit their own contact details."""
    email = LowercaseEmailField(
        'Email',
        validators=[
            validators.InputRequired(),
            validators.Email()
        ],
        description="Required, a valid email address"
    )
    phone = html5_fields.TelField(
        'Phone',
        validators=[
            validators.InputRequired(),
            validators.Regexp(r'^\+?[0-9]*$')
        ],
        description="Required, a phone number like 0701234567"
    )
    def __init__(self, user, *args, **kwargs):
        self.user = user
        super().__init__(obj=user, *args, **kwargs)
    def validate(self):
        """Standard validation plus a uniqueness check that tolerates the
        user keeping their own current address."""
        if not FlaskForm.validate(self):
            return False
        taken = db.session.query(User.id).filter_by(email=self.email.data).scalar()
        if taken and self.email.data != self.user.email:
            self.email.errors.append("This email is already in use")
            return False
        return True
class FullEditUserForm(EditUserForm):
    """EditUserForm extended with name fields (full profile edit)."""
    first_name = fields.StringField(
        'First Name',
        validators=[validators.InputRequired()],
        description="Required, user's first name"
    )
    last_name = fields.StringField(
        'Last Name',
        validators=[validators.InputRequired()],
        description="Required, user's last/family name"
    )
class UploadForm(FlaskForm):
    """Form with a single, image-only file upload field."""
    upload = fields.FileField('Image', validators=[
        FileAllowed(images, 'Images only!')
    ])
class EditPostForm(UploadForm):
    """Create/edit a post; Swedish content is required, English optional."""
    content_sv = fields.TextAreaField(validators=[
        validators.InputRequired()
    ])
    content_en = fields.TextAreaField()
    readmore_sv = fields.TextAreaField()
    readmore_en = fields.TextAreaField()
    title = fields.StringField('Title', validators=[
        validators.InputRequired()
    ])
    published = fields.BooleanField('Publish')
class EditEventForm(EditPostForm):
    """EditPostForm plus a start time and location for events."""
    # Expected in e.g. "2017-05-01 18:30" format.
    start_time = fields.DateTimeField(format='%Y-%m-%d %H:%M', validators=[
        validators.InputRequired()
    ])
    location = fields.StringField('Location', validators=[
        validators.InputRequired()
    ])
class UpdateContactsForm(FlaskForm):
    """Empty form: only supplies the CSRF token for the update action."""
    pass
class EditScoreForm(FlaskForm):
    """Create/edit a sheet-music score; only the name is mandatory."""
    name = fields.StringField('Score name', validators=[
        validators.InputRequired()
    ])
    composer = fields.StringField('Composer')
    lyricist = fields.StringField('Lyricist')
    arrangement = fields.StringField('Arrangement')
    comments = fields.TextAreaField('Comments')
    place = fields.StringField('Place')
    amount = fields.IntegerField('Amount', validators=[
        validators.Optional()
    ])
| {
"repo_name": "teknologkoren/fernet",
"path": "fernet/forms.py",
"copies": "1",
"size": "10537",
"license": "mpl-2.0",
"hash": -6312307037982863000,
"line_mean": 28.6816901408,
"line_max": 79,
"alpha_frac": 0.6004555376,
"autogenerated": false,
"ratio": 4.3559322033898304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000038066235249333845,
"num_lines": 355
} |
from functools import partialmethod
import itertools
import logging
import random
import pandas as pd
import numpy as np
__author__ = 'michael.p.schroeder@gmail.com'
import multiprocessing as mp
class MutExResult(object):
    """Holds the outcome of one mutual-exclusivity permutation analysis."""
    def __init__(self, coverage, signal, higher_coverage_count, lower_coverage_count, permutations,
                 mean_sim_coverage, stdev_sim_coverage,
                 sample_size, items):
        # Raw inputs.
        self.coverage = coverage
        self.signal = signal
        self.higher_coverages = higher_coverage_count
        self.lower_coverages = lower_coverage_count
        self.permutations = permutations
        self.mean_sim_coverage = mean_sim_coverage
        self.stdev_sim_coverage = stdev_sim_coverage
        self.sample_size = sample_size
        self.items = items
        # Derived statistics.
        self.signal_coverage_ratio = coverage / signal
        self.mutex_pvalue = higher_coverage_count / permutations
        self.co_occurence_pvalue = lower_coverage_count / permutations
        self.zscore = (coverage - mean_sim_coverage) / stdev_sim_coverage
    def __str__(self):
        return "MuTexResult\n" \
               " Zscore: {}\n" \
               " Mutual Exclusive p-value: {}\n" \
               " Co-occurence p-value: {}\n" \
               " Permutations: {}\n" \
               " Sample Coverage: {}\n" \
               " Signal: {}".format(
            self.zscore, self.mutex_pvalue, self.co_occurence_pvalue, self.permutations, self.coverage, self.signal
        )
    def __repr__(self):
        return self.__str__()
class MutEx(object):
    """Permutation test for mutual exclusivity / co-occurrence of observations."""
    def __init__(self, background: pd.DataFrame, permutations: int=100):
        """
        :param background: A data frame containing all the observations as binary data 1 and 0 or True and False where
                  rows represent observations and columns represent samples.
        :param permutations: how many permutations by default
        :return:
        """
        self.permutations = permutations
        self.background = background
        # A sample's weight is its share of all positive entries.
        self.sample_weights = background.apply(sum) / background.apply(sum).pipe(sum)
        self.cummulative_sum = np.cumsum(self.sample_weights)
        self.sample_indices = [x for x in range(0, background.shape[1])]
    def calculate(self, indices: list, n=None, parallel=True, cores=0) -> MutExResult:
        """
        :param indices: A list of indices for which to test the MutEx. The indices refer to the background-data row-ids.
        :param n: number of permutations (defaults to self.permutations)
        :param parallel: run permutations in a process pool
        :param cores: pool size; <1 means use all CPUs (ignored if not parallel)
        :return: MutExResult
        """
        if not all([x in self.background.index for x in indices]):
            raise Exception("Not all indices found in background")
        target = self.background.loc[indices]
        # Coverage: samples hit by at least one observation; signal: total hits.
        coverage = target.apply(max).pipe(sum)
        observation_signal = target.apply(sum, axis=1)
        signal = sum(observation_signal)
        if n is None:  # identity comparison instead of the previous `n == None`
            n = self.permutations
        logging.info("running {} permutations".format(n))
        if not parallel:
            cores = 1
        pool = mp.Pool(processes=mp.cpu_count() if cores < 1 else cores)
        logging.info('permutation with {} cores'.format(pool._processes))
        # FIX: hand the bound method to starmap directly.  The previous code
        # wrapped it in functools.partialmethod (which is meant for class
        # bodies, not call sites) only to immediately unwrap it via `.func`.
        simulated_results = pool.starmap(self._one_permutation,
                                         zip(itertools.repeat(coverage, n), itertools.repeat(observation_signal, n)))
        pool.close()  # we are not adding any more processes
        pool.join()   # wait until all workers are done before going on
        logging.info('calculate result')
        sim_coverages = [x[0] for x in simulated_results]
        higher_coverage = [x[1] for x in simulated_results]
        lower_coverage = [x[2] for x in simulated_results]
        return MutExResult(coverage=coverage, signal=signal,
                           higher_coverage_count=np.sum(higher_coverage),
                           lower_coverage_count=np.sum(lower_coverage), permutations=n,
                           mean_sim_coverage=np.mean(sim_coverages),
                           stdev_sim_coverage=np.std(sim_coverages),
                           sample_size=len(self.sample_weights),
                           items=indices
                           )
    def _one_permutation(self, coverage, observation_signal):
        """Run one simulation; report its coverage and how it compares."""
        sim = self._simulate_observations(observation_signal)
        sim_cov = sim.apply(max).pipe(sum)
        higher_cov = sim_cov >= coverage
        lower_cov = sim_cov <= coverage
        return sim_cov, higher_cov, lower_cov
    def _simulate_observations(self, observation_signal):
        """Redraw each observation's positives onto weighted random samples."""
        simulations = []
        for observations in observation_signal:
            simulations.append(self._weighted_choice(observations))
        return pd.DataFrame.from_records(simulations).fillna(0)
    def _weighted_choice(self, amount: int):
        # Draw `amount` distinct samples, weighted by overall sample signal.
        return {x: 1 for x in np.random.choice(self.sample_indices, amount, False, self.sample_weights)}
def test():
    """Demonstrate MutEx on a sparse random background with planted patterns.

    Rows 0-2 are overwritten with structured patterns (two overlapping
    blocks and one disjoint block) so that mutual exclusivity and
    co-occurrence both show up in the output.
    """
    import scipy.sparse as sparse
    row, col = 100, 100
    np.random.seed(77)
    # ~15% dense random binary background.
    df = pd.DataFrame(sparse.random(row, col, density=0.15).A).apply(np.ceil)
    df.loc[0] = [1 if x < 20 else 0 for x in range(0, df.shape[1])]
    df.loc[1] = [1 if x > 13 and x < 35 else 0 for x in range(0, df.shape[1])]
    df.loc[2] = [1 if x > 80 else 0 for x in range(0, df.shape[1])]
    m = MutEx(background=df, permutations=1000)
    pd.set_option('display.max_columns', 1000)
    print(df.loc[[0, 1, 2]])
    print("\nExample - 1 thread \n----------")
    r = m.calculate([4, 5, 6], parallel=False)
    print(r)
    print("\nExample - multi-threaded \n----------")
    r = m.calculate([0, 1, 2])
    print(r)
    # Batch usage: random groups of 2-4 rows, results collected in a frame.
    random.seed(18)
    group_generator = (random.sample(df.index.tolist(), random.sample([2, 3, 4], 1)[0]) for x in range(10))
    result_list = [m.calculate(g) for g in group_generator]
    print(pd.DataFrame.from_records([r.__dict__ for r in result_list]))
if __name__ == "__main__":
    test()
| {
"repo_name": "mpschr/mutex",
"path": "mutex/mutex.py",
"copies": "1",
"size": "6093",
"license": "mit",
"hash": -654653638130358700,
"line_mean": 37.08125,
"line_max": 121,
"alpha_frac": 0.5995404563,
"autogenerated": false,
"ratio": 3.8784213876511777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9968879599235105,
"avg_score": 0.0018164489432143413,
"num_lines": 160
} |
from functools import partialmethod
# Root URL of the NTHU iLMS service (note: plain HTTP).
base = 'http://lms.nthu.edu.tw'
class CourseRoute():
    """URL builder for course-scoped iLMS pages.

    `rules` maps a logical function name to the `f=` query value; the
    `_detail` variants additionally carry an item id whose query-parameter
    name comes from `rule_key`.
    """
    rules = {
        'forum': 'forumlist',
        'homework': 'hwlist',
        'document': 'doclist',
        'forum_detail': 'forumlist',
        'homework_detail': 'hw',
        'document_detail': 'doc',
        'homework_handin_list_detail': 'hw_doclist',
    }
    # Query-parameter name used to attach the item id per detail page.
    rule_key = {
        'forum_detail': 'tid',
        'homework_detail': 'hw',
        'document_detail': 'cid',
        'homework_handin_list_detail': 'hw',
    }
    def __init__(self, course_id):
        self.base = '{}/course.php?courseID={}'.format(base, course_id)
    def gen_rule(self, func, uid=None):
        """Build the URL for `func`; with `uid`, build the detail-page URL.

        NOTE(review): calling homework_handin_list() WITHOUT a uid would
        raise KeyError, since only 'homework_handin_list_detail' exists in
        `rules` — presumably it is only ever used with a uid; confirm.
        """
        if uid:
            func += '_detail'
        path = '%s&f=%s' % (self.base, self.rules[func])
        if uid:
            path += '&{}={}'.format(self.rule_key[func], uid)
        return path
    # Convenience accessors: route.homework(uid), route.document(), ...
    homework = partialmethod(gen_rule, 'homework')
    homework_handin_list = partialmethod(gen_rule, 'homework_handin_list')
    document = partialmethod(gen_rule, 'document')
    forum = partialmethod(gen_rule, 'forum')
class Routes():
    """Absolute URLs of the site-wide iLMS endpoints."""
    rules = {
        'home': 'home.php',
        'profile': 'home/profile.php',
        'attach': 'sys/read_attach.php?id={attach_id}',
        'login_submit': 'sys/lib/ajax/login_submit.php',
        'score': 'course/http_hw_score.php',
        'query_group': 'course/hw_group_score.php?courseID={course_id}&folderID={folder_id}&teamID={team_id}',
        'group_score': 'update_group_score',
        'post': 'sys/lib/ajax/post.php'
    }
    def __init__(self):
        # Materialize every rule as an absolute-URL attribute on the instance.
        for name, path in self.rules.items():
            setattr(self, name, '%s/%s' % (base, path))
        # Course-scoped routes are built per course id via this factory.
        self.course = CourseRoute
# Module-level singleton used by the rest of the package.
route = Routes()
| {
"repo_name": "leVirve/iLms-nthu-API",
"path": "ilms/route.py",
"copies": "1",
"size": "1743",
"license": "mit",
"hash": -2449156344603244000,
"line_mean": 27.5737704918,
"line_max": 110,
"alpha_frac": 0.5576592083,
"autogenerated": false,
"ratio": 3.2824858757062145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4340145084006215,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from acousticsim.main import analyze_file_segments
from acousticsim.analysis.intensity import signal_to_intensity_praat as PraatIntensity_signal, \
file_to_intensity_praat as PraatIntensity_file
from .segments import generate_utterance_segments
from ..exceptions import AcousticError, SpeakerAttributeError
from .utils import PADDING
def analyze_intensity(corpus_context,
                      call_back=None,
                      stop_check=None):
    """
    Analyze intensity of an entire utterance, and save the resulting intensity tracks into the database.

    Utterance segments are grouped per speaker; if the speaker has a known
    gender, it is forwarded to the intensity function factory.

    Parameters
    ----------
    corpus_context : :class:`~polyglot.corpus.context.CorpusContext`
        corpus context to use
    call_back : callable
        call back function, optional
    stop_check : function
        stop check function, optional
    """
    segment_mapping = generate_utterance_segments(corpus_context).grouped_mapping('speaker')
    if call_back is not None:
        call_back('Analyzing files...')
    for i, (speaker, v) in enumerate(segment_mapping.items()):
        gender = None
        try:
            # Look the speaker's gender up; not all corpora store it.
            q = corpus_context.query_speakers().filter(corpus_context.speaker.name == speaker)
            q = q.columns(corpus_context.speaker.gender.column_name('Gender'))
            gender = q.all()[0]['Gender']
        except SpeakerAttributeError:
            # Gender attribute not defined for this corpus; proceed without it.
            pass
        if gender is not None:
            intensity_function = generate_base_intensity_function(corpus_context, signal=True, gender=gender)
        else:
            intensity_function = generate_base_intensity_function(corpus_context, signal=True)
        output = analyze_file_segments(v, intensity_function, padding=PADDING, stop_check=stop_check)
        corpus_context.save_intensity_tracks(output, speaker)
def generate_base_intensity_function(corpus_context, signal=False, gender=None):
    """Build the Praat intensity-analysis callable for this corpus.

    Returns a `functools.partial` over the signal- or file-based Praat
    intensity function, pre-bound with the configured Praat path and a
    10 ms time step.

    NOTE(review): `gender` is accepted but not used here — TODO confirm
    whether callers expect gender-specific settings.

    Raises
    ------
    NotImplementedError
        if the configured intensity source is not 'praat'.
    AcousticError
        if no Praat executable path is configured.
    """
    algorithm = corpus_context.config.intensity_source
    if algorithm != 'praat':
        raise NotImplementedError('Only function for intensity currently implemented is Praat.')
    if getattr(corpus_context.config, 'praat_path', None) is None:
        raise AcousticError('Could not find the Praat executable')
    praat_function = PraatIntensity_signal if signal else PraatIntensity_file
    return partial(praat_function,
                   praat_path=corpus_context.config.praat_path,
                   time_step=0.01)
| {
"repo_name": "samihuc/PolyglotDB",
"path": "polyglotdb/acoustics/intensity.py",
"copies": "1",
"size": "2582",
"license": "mit",
"hash": 7939694592654586000,
"line_mean": 42.0333333333,
"line_max": 109,
"alpha_frac": 0.6673121611,
"autogenerated": false,
"ratio": 4.14446227929374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5311774440393741,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from aiohttp_session import get_session
from .cfg import cfg
def message(request, message, level='info'):
    """Queue a flash `message` at `level` on the current request.

    Messages accumulate under cfg.REQUEST_FLASH_OUTGOING_KEY; the middleware
    in this module moves them into the session after the handler runs.
    """
    outgoing = request.setdefault(cfg.REQUEST_FLASH_OUTGOING_KEY, [])
    outgoing.append((message, level))
# Level-specific shortcuts around message().
debug, info, success, warning, error = (
    partial(message, level=lvl)
    for lvl in ('debug', 'info', 'success', 'warning', 'error')
)
async def context_processor(request):
    """Template context processor exposing `get_flashed_messages`.

    Calling the returned function drains (pops) the incoming flash messages
    from the request, so each message is rendered at most once.
    """
    def get_flashed_messages():
        return request.pop(cfg.REQUEST_FLASH_INCOMING_KEY, [])

    return {'get_flashed_messages': get_flashed_messages}
async def middleware(app, handler):
    """aiohttp middleware: shuttle flash messages between session and request.

    Before the handler runs, pending messages are moved from the session into
    the request (incoming). After the handler, any incoming messages left
    unconsumed plus newly queued outgoing ones are written back to the
    session, capped at cfg.FLASH_QUEUE_LIMIT.
    """
    async def process(request):
        session = await get_session(request)
        # Pop so a message delivered to this request is removed from the
        # session; context_processor consumes it during template rendering.
        request[cfg.REQUEST_FLASH_INCOMING_KEY] = session.pop(
            cfg.SESSION_FLASH_KEY, [])
        response = await handler(request)
        # Unrendered incoming messages survive to the next request; messages
        # queued via message() during this request are appended after them.
        session[cfg.SESSION_FLASH_KEY] = (
            request.get(cfg.REQUEST_FLASH_INCOMING_KEY, []) +
            request.get(cfg.REQUEST_FLASH_OUTGOING_KEY, [])
        )[:cfg.FLASH_QUEUE_LIMIT]
        return response
    return process
| {
"repo_name": "imbolc/aiohttp-login",
"path": "aiohttp_login/flash.py",
"copies": "1",
"size": "1100",
"license": "isc",
"hash": -5513786838899696000,
"line_mean": 28.7297297297,
"line_max": 68,
"alpha_frac": 0.6609090909,
"autogenerated": false,
"ratio": 3.678929765886288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9839838856786288,
"avg_score": 0,
"num_lines": 37
} |
from functools import partial
from base import DuplicateLabel
from pyll.base import Apply
from pyll import scope
from pyll import as_apply
#
# Hyperparameter Types
#
@scope.define
def hyperopt_param(label, obj):
    """A pass-through graph node used purely for annotation.

    VectorizeHelper looks out for these nodes and optimizes subgraphs of the
    form: hyperopt_param(<stochastic_expression>(...)). At evaluation time it
    simply returns `obj` unchanged.
    """
    return obj
def hp_pchoice(label, p_options):
    """Weighted choice hyperparameter.

    label: string
    p_options: list of (probability, option) pairs
    """
    if not isinstance(label, basestring):
        raise TypeError('require string label')
    probabilities, options = zip(*p_options)
    choice_node = scope.hyperopt_param(
        label,
        scope.categorical(probabilities, upper=len(options)))
    return scope.switch(choice_node, *options)
def hp_choice(label, options):
    """Uniform choice among `options`, labelled `label`."""
    if not isinstance(label, basestring):
        raise TypeError('require string label')
    index_node = scope.hyperopt_param(label, scope.randint(len(options)))
    return scope.switch(index_node, *options)
def hp_randint(label, *args, **kwargs):
    """Random-integer hyperparameter labelled `label`."""
    if not isinstance(label, basestring):
        raise TypeError('require string label')
    return scope.hyperopt_param(label, scope.randint(*args, **kwargs))
def _validate_label(label):
    # Shared guard: every hyperparameter must carry a string label.
    if not isinstance(label, basestring):
        raise TypeError('require string label')


def _float_param(label, dist_node):
    # Wrap a stochastic node in a labelled hyperopt_param, cast to float.
    return scope.float(scope.hyperopt_param(label, dist_node))


def hp_uniform(label, *args, **kwargs):
    """Uniform-distributed float hyperparameter."""
    _validate_label(label)
    return _float_param(label, scope.uniform(*args, **kwargs))


def hp_quniform(label, *args, **kwargs):
    """Quantized uniform float hyperparameter."""
    _validate_label(label)
    return _float_param(label, scope.quniform(*args, **kwargs))


def hp_loguniform(label, *args, **kwargs):
    """Log-uniform float hyperparameter."""
    _validate_label(label)
    return _float_param(label, scope.loguniform(*args, **kwargs))


def hp_qloguniform(label, *args, **kwargs):
    """Quantized log-uniform float hyperparameter."""
    _validate_label(label)
    return _float_param(label, scope.qloguniform(*args, **kwargs))


def hp_normal(label, *args, **kwargs):
    """Normal-distributed float hyperparameter."""
    _validate_label(label)
    return _float_param(label, scope.normal(*args, **kwargs))


def hp_qnormal(label, *args, **kwargs):
    """Quantized normal float hyperparameter."""
    _validate_label(label)
    return _float_param(label, scope.qnormal(*args, **kwargs))


def hp_lognormal(label, *args, **kwargs):
    """Log-normal float hyperparameter."""
    _validate_label(label)
    return _float_param(label, scope.lognormal(*args, **kwargs))


def hp_qlognormal(label, *args, **kwargs):
    """Quantized log-normal float hyperparameter."""
    _validate_label(label)
    return _float_param(label, scope.qlognormal(*args, **kwargs))
#
# Tools for extracting a search space from a Pyll graph
#
class Cond(object):
    """A hyperparameter-participation condition: `name <op> val`.

    Instances are hashable and compared by value so they can be collected
    into sets of conditions by expr_to_config.
    """
    def __init__(self, name, val, op):
        self.op = op
        self.name = name
        self.val = val

    def __str__(self):
        return 'Cond{%s %s %s}' % (self.name, self.op, self.val)

    def __eq__(self, other):
        # Fixed: comparing against a non-Cond used to raise AttributeError;
        # return NotImplemented so Python falls back to the other operand.
        if not isinstance(other, Cond):
            return NotImplemented
        return (self.op == other.op
                and self.name == other.name
                and self.val == other.val)

    def __ne__(self, other):
        # Explicit __ne__: under Python 2 `!=` otherwise falls back to
        # identity comparison, disagreeing with __eq__.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        return hash((self.op, self.name, self.val))

    def __repr__(self):
        return str(self)


# Convenience constructor for equality conditions: EQ(name, val).
EQ = partial(Cond, op='=')
def expr_to_config(expr, conditions, hps):
    """
    Populate dictionary `hps` with the hyperparameters in pyll graph `expr`
    and conditions for participation in the evaluation of `expr`.
    Arguments:
    expr - a pyll expression root.
    conditions - a tuple of conditions (`Cond`) that must be True for
        `expr` to be evaluated.
    hps - dictionary to populate
    Creates `hps` dictionary:
        label -> { 'node': apply node of hyperparameter distribution,
                   'conditions': `conditions` + tuple,
                   'label': label
                 }
    """
    expr = as_apply(expr)
    if conditions is None:
        conditions = ()
    assert isinstance(expr, Apply)
    if expr.name == 'switch':
        # A switch encodes hp.choice / hp.pchoice: input 0 selects which of
        # the remaining inputs is evaluated.
        idx = expr.inputs()[0]
        options = expr.inputs()[1:]
        assert idx.name == 'hyperopt_param'
        assert idx.arg['obj'].name in (
            'randint',  # -- in case of hp.choice
            'categorical',  # -- in case of hp.pchoice
        )
        expr_to_config(idx, conditions, hps)
        # Each option only participates when the selector equals its index,
        # so extend the condition tuple accordingly before recursing.
        for ii, opt in enumerate(options):
            expr_to_config(opt,
                           conditions + (EQ(idx.arg['label'].obj, ii),),
                           hps)
    elif expr.name == 'hyperopt_param':
        label = expr.arg['label'].obj
        if label in hps:
            # A label may appear several times (under different conditions)
            # but must always wrap the same distribution node.
            if hps[label]['node'] != expr.arg['obj']:
                raise DuplicateLabel(label)
            hps[label]['conditions'].add(conditions)
        else:
            hps[label] = {'node': expr.arg['obj'],
                          'conditions': set((conditions,)),
                          'label': label,
                          }
    else:
        # Any other node: recurse into all inputs with unchanged conditions.
        for ii in expr.inputs():
            expr_to_config(ii, conditions, hps)
| {
"repo_name": "jaberg/hyperopt",
"path": "hyperopt/pyll_utils.py",
"copies": "3",
"size": "5716",
"license": "bsd-3-clause",
"hash": 809872000066592400,
"line_mean": 29.0842105263,
"line_max": 88,
"alpha_frac": 0.5787263821,
"autogenerated": false,
"ratio": 4.068327402135231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007879344084559555,
"num_lines": 190
} |
from functools import partial
from bluesky.standard_config import *
from bluesky.scans import *
from bluesky.callbacks import *
from bluesky.broker_callbacks import *
from bluesky.suspenders import *
# Set up required metadata fields with dummy values for now.
# NOTE(review): 'group' and 'config' are placeholders — presumably they must
# be filled with real values before production scans; confirm with beamline
# operations.
gs.RE.md['group'] = ''
gs.RE.md['config'] = {}
gs.RE.md['beamline_id'] = 'SRX'
def print_scan_ids(start_doc):
    """Print the transient scan ID and persistent UID of a run-start document."""
    scan_id = start_doc['scan_id']
    uid = start_doc['uid']
    print("Transient Scan ID: %s" % scan_id)
    print("Persistent Unique Scan ID: '%s'" % uid)
# Announce scan IDs at the start of every run.
gs.RE.subscribe('start', print_scan_ids)
# RE.logbook = olog_wrapper(olog_client, ['Data Acquisitioning'])
# Pre-scan checklist: presumably checks channel-archiver reachability,
# free disk space, and the listed PV conditions — confirm against
# basic_checklist's signature.
checklist = partial(basic_checklist, ca_url='http://xf05id-ca1.cs.nsls2.local:4800',
                    disk_storage=[('/', 1e9)],
                    # pv_names=['XF:23ID1-ES{Dif-Ax:SY}Pos-SP'],
                    pv_conditions=[('XF:05ID-PPS{Sh:WB}Sts:Cls-Sts', 'front-end shutter is open', assert_pv_equal, 0),
                                   ('SR:C03-BI{DCCT:1}I:Real-I', 'beam current is above 50mA', assert_pv_greater, 50),]
                    )
# Suspenders: pause the RunEngine on low ring current (below 50, resume at
# 100) and on the front-end shutter status PV; sleep=300 presumably delays
# resumption by 300 s — verify against the suspender classes' docs.
PVSuspendFloor(gs.RE, 'SR:C03-BI{DCCT:1}I:Real-I', 50, resume_thresh=100, sleep=300)
PVSuspendBoolHigh(gs.RE, 'XF:05ID-PPS{Sh:WB}Sts:Cls-Sts', 50, sleep=300)
| {
"repo_name": "NSLS-II-SRX/ipython_ophyd",
"path": "profile_bs/startup/01-bluesky.py",
"copies": "1",
"size": "1235",
"license": "bsd-2-clause",
"hash": 3936228929463587000,
"line_mean": 41.5862068966,
"line_max": 119,
"alpha_frac": 0.6372469636,
"autogenerated": false,
"ratio": 2.8654292343387473,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4002676197938747,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from boardFrame import BoardFrame
from board import Board
from Tkinter import *
from pubsub import pub
from ai import AI
class BoardController:
    """Controller wiring the solitaire Board model to the Tk view and the AI.

    Responsibilities:
    - build the Tk window and the BoardFrame view,
    - translate pub/sub events (card clicks, refresh requests) into Board calls,
    - bind every card button so a click publishes a 'cardClicked' message.
    """

    def __init__(self):
        self.root = Tk()
        # Use half the screen width and the full screen height.
        w, h = self.root.winfo_screenwidth()/2, self.root.winfo_screenheight()
        self.root.geometry("%dx%d+0+0" % (w, h))
        self.boardFrame = BoardFrame(self.root)
        self.board = Board()
        self.boardFrame.pack(side=TOP, fill=BOTH, expand=True)
        self.ai = AI()
        # Define commands linked to the view buttons
        self.defineCardsActions()
        self.boardFrame.possibleMovesButton.configure(command=self.showPossibleMoves)
        # Subscribe to board events so we know when to refresh the GUI
        pub.subscribe(self.listenerGui, 'refreshGUITopic')
        # Subscribe to card-click events
        pub.subscribe(self.listenerClick, 'cardClicked')
        # Two-click selection state: first click picks a card, second its
        # destination.
        self.clickCount = 0
        self.prevClickedCard = None
        # Show board for the first time
        pub.sendMessage('refreshGUITopic')

    # Listener for GUI refresh.
    # When the GUI is refreshed we also need to re-bind the card buttons
    # to the right actions.
    def listenerGui(self):
        self.refreshGui()
        self.defineCardsActions()
        if (self.ai.willWin(self.board)):
            # Fixed typo in the printed message ("PLayer" -> "Player").
            print("Player will win")

    # Listen for cards which are clicked and either keep the card in memory
    # or call the board method to choose what to do
    def listenerClick(self, cardClicked):
        # If the stock card is clicked, don't wait for a second card
        if (cardClicked == "stock"):
            self.clickCount = 0
            self.board.chooseMovement(cardClicked)
            return 0
        self.clickCount += 1
        if (self.clickCount == 1):
            self.prevClickedCard = cardClicked
        elif (self.clickCount == 2):
            self.clickCount = 0
            self.board.chooseMovement(self.prevClickedCard, cardClicked)
        else:
            return 0

    def startGame(self):
        """Enter the Tk main loop (blocks until the window closes)."""
        self.root.mainloop()

    def refreshGui(self):
        """Redraw the view from the current board state."""
        self.boardFrame.updateGUI(self.board)

    def showPossibleMoves(self):
        """Ask the AI for legal moves and highlight them in the view."""
        possibleMoves = self.ai.possibleMoves(self.board)
        movesButtons = {}
        for origin, destination in possibleMoves:
            # Get the button corresponding to the card origin: foundation
            # tops, waste top, or a tableau card button.
            if (len(self.board.H) > 0 and origin == self.board.H[-1]):
                buttonOrigin = self.boardFrame.HButton
            elif (len(self.board.S) > 0 and origin == self.board.S[-1]):
                buttonOrigin = self.boardFrame.SButton
            elif (len(self.board.C) > 0 and origin == self.board.C[-1]):
                buttonOrigin = self.boardFrame.CButton
            elif (len(self.board.D) > 0 and origin == self.board.D[-1]):
                buttonOrigin = self.boardFrame.DButton
            elif (len(self.board.waste) > 0 and origin == self.board.waste[-1]):
                buttonOrigin = self.boardFrame.wasteButton
            else:
                buttonOrigin = self.boardFrame.cardButtons[origin]
            # Get the button corresponding to the card destination.
            if (destination == "H"):
                buttonDestination = self.boardFrame.HButton
            elif (destination == "S"):
                buttonDestination = self.boardFrame.SButton
            elif (destination == "C"):
                buttonDestination = self.boardFrame.CButton
            elif (destination == "D"):
                buttonDestination = self.boardFrame.DButton
            else:
                buttonDestination = self.boardFrame.cardButtons[destination]
            if buttonOrigin not in movesButtons:
                movesButtons[buttonOrigin] = []
            movesButtons[buttonOrigin].append(buttonDestination)
        self.boardFrame.showPossibleMoves(movesButtons)

    def defineCardsActions(self):
        """(Re)bind every view button to publish a 'cardClicked' message."""
        # This dictionary maps view buttons to the command to bind.
        cardActions = {}
        bf = self.boardFrame
        # Cards from stock and waste
        cardActions[self.boardFrame.stockButton] = partial(pub.sendMessage, 'cardClicked', cardClicked="stock")
        cardActions[self.boardFrame.wasteButton] = partial(pub.sendMessage, 'cardClicked', cardClicked="W")
        # Cards from foundations
        cardActions[bf.HButton] = partial(pub.sendMessage, 'cardClicked', cardClicked="H")
        cardActions[bf.CButton] = partial(pub.sendMessage, 'cardClicked', cardClicked="C")
        cardActions[bf.SButton] = partial(pub.sendMessage, 'cardClicked', cardClicked="S")
        cardActions[bf.DButton] = partial(pub.sendMessage, 'cardClicked', cardClicked="D")
        # Cards from the tableau.
        # (Removed a dead computation of the pile index: it was computed for
        # every card and never used.)
        for card in bf.cardButtons.keys():
            cardActions[bf.cardButtons[card]] = partial(pub.sendMessage, 'cardClicked', cardClicked=card)
        # Empty tableau piles
        for frame, button in bf.tableauFirstCardButtons.items():
            cardActions[button] = partial(pub.sendMessage, 'cardClicked', cardClicked=frame)
        # Actually bind the buttons
        for button in cardActions:
            button.configure(command=cardActions[button])
        return 0
| {
"repo_name": "statox/pylitaire",
"path": "src/boardController.py",
"copies": "1",
"size": "5496",
"license": "mit",
"hash": 3706025641421150000,
"line_mean": 39.4117647059,
"line_max": 109,
"alpha_frac": 0.624636099,
"autogenerated": false,
"ratio": 4.151057401812689,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004291101884935041,
"num_lines": 136
} |
from functools import partial
from broca.tokenize import Tokenizer
from broca.common.shared import spacy
from broca.common.util import parallel, penn_to_wordnet
from broca.tokenize.keyword.rake import RAKETokenizer
from gensim.models import Phrases
from nltk.stem.wordnet import WordNetLemmatizer
class OverkillTokenizer(Tokenizer):
    """Keyword tokenizer pipeline: RAKE keywords -> lemmatized tokens ->
    learned bigram/trigram keyphrases (gensim Phrases).

    Fixes over the previous version:
    - removed a leftover debug loop that printed every RAKE token starting
      with 'one';
    - `lem` is now always bound: previously `lemmatize=False` raised a
      NameError because the lemmatizer was only created when the flag was
      True yet was passed to pre_tokenize unconditionally.
    """

    def __init__(self, lemmatize=True, n_jobs=1, bigram=None, trigram=None,
                 min_count=5, threshold=10.):
        self.lemmatize = lemmatize
        self.n_jobs = n_jobs
        self.bigram = bigram
        self.trigram = trigram
        self.min_count = min_count
        self.threshold = threshold

    def tokenize(self, docs):
        # NOTE(review): pre_tokenize always lemmatizes keyword tokens, so the
        # lemmatizer is required regardless of self.lemmatize — the flag is
        # currently effectively always True; confirm intended semantics.
        lem = WordNetLemmatizer()

        # First pass: extract keyword candidates with RAKE.
        pre_tdocs = RAKETokenizer(n_jobs=self.n_jobs).tokenize(docs)

        # Second pass: keep only (lemmatized) tokens that match 1-gram
        # pieces of the RAKE keywords.
        if self.n_jobs == 1:
            tdocs = [pre_tokenize(doc, tdoc, lem=lem) for doc, tdoc in zip(docs, pre_tdocs)]
        else:
            tdocs = parallel(partial(pre_tokenize, lem=lem), zip(docs, pre_tdocs),
                             self.n_jobs, expand_args=True)

        # Learn (or extend) bigram and trigram phrase models.
        if self.bigram is None:
            self.bigram = Phrases(tdocs,
                                  min_count=self.min_count,
                                  threshold=self.threshold,
                                  delimiter=b' ')
        else:
            self.bigram.add_vocab(tdocs)

        if self.trigram is None:
            self.trigram = Phrases(self.bigram[tdocs],
                                   min_count=self.min_count,
                                   threshold=self.threshold,
                                   delimiter=b' ')
        else:
            self.trigram.add_vocab(self.bigram[tdocs])

        return [tdoc for tdoc in self.trigram[self.bigram[tdocs]]]
def pre_tokenize(doc, tdoc, lem):
    """Tokenize `doc`, keeping lemmatized tokens that occur among the 1-gram
    pieces of the RAKE keywords in `tdoc`.

    Phrase keywords are split into 1-gram keywords only for the membership
    check — keyphrases themselves are learned later on.
    """
    keywords_1g = {kw for phrase in tdoc for kw in phrase.split(' ')}
    tokens = spacy(doc, tag=True, parse=False, entity=False)
    kept = []
    for token in tokens:
        text = token.lower_.strip()
        if text not in keywords_1g:
            continue
        wn_tag = penn_to_wordnet(token.tag_)
        if wn_tag is not None:
            kept.append(lem.lemmatize(text, wn_tag))
    return kept
| {
"repo_name": "ftzeng/broca",
"path": "broca/tokenize/keyword/overkill.py",
"copies": "2",
"size": "2658",
"license": "mit",
"hash": 1445650539983239200,
"line_mean": 33.9736842105,
"line_max": 113,
"alpha_frac": 0.568096313,
"autogenerated": false,
"ratio": 3.553475935828877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017138368997563886,
"num_lines": 76
} |
from functools import partial
from celery.schedules import crontab
from celery.task import periodic_task
from corehq.apps.commtrack.models import CommTrackUser, SupplyPointCase
from corehq.apps.sms.api import send_sms_to_verified_number
from custom.ilsgateway.models import SupplyPointStatus, SupplyPointStatusTypes, SupplyPointStatusValues
from custom.ilsgateway.reminders import REMINDER_DELIVERY_FACILITY, REMINDER_DELIVERY_DISTRICT, update_statuses
from custom.ilsgateway.utils import send_for_day, get_current_group, get_groups
import settings
def send_delivery_reminder(domain, date, loc_type='FACILITY'):
    """Send delivery-reminder SMS to verified users at supply points of the
    given location type that have no delivery status recorded since `date`,
    then mark those supply points as REMINDER_SENT.

    loc_type is 'FACILITY' or 'DISTRICT'; anything else is a no-op.
    """
    if loc_type == 'FACILITY':
        status_type = SupplyPointStatusTypes.DELIVERY_FACILITY
        sms_text = REMINDER_DELIVERY_FACILITY
    elif loc_type == 'DISTRICT':
        status_type = SupplyPointStatusTypes.DELIVERY_DISTRICT
        sms_text = REMINDER_DELIVERY_DISTRICT
    else:
        return
    current_group = get_current_group()
    sp_ids = set()
    for user in CommTrackUser.by_domain(domain):
        if user.is_active and user.location and user.location.location_type == loc_type:
            sp = SupplyPointCase.get_by_location(user.location)
            # Remind only supply points in the current delivery group that
            # have not already reported a delivery status since `date`.
            if sp and current_group in get_groups(sp.location.metadata.get('groups', None)) and not \
                SupplyPointStatus.objects.filter(supply_point=sp._id,
                                                 status_type=status_type,
                                                 status_date__gte=date).exists():
                if user.get_verified_number():
                    send_sms_to_verified_number(user.get_verified_number(), sms_text)
                    sp_ids.add(sp._id)
    update_statuses(sp_ids, status_type, SupplyPointStatusValues.REMINDER_SENT)
# Day-of-month driven wrappers around send_delivery_reminder; `cutoff` is
# presumably the last day on which the reminder may still fire — confirm
# against send_for_day.
facility_partial = partial(send_for_day, cutoff=15, f=send_delivery_reminder)
district_partial = partial(send_for_day, cutoff=13, f=send_delivery_reminder, loc_type='DISTRICT')

# Each periodic task runs on a small day-of-month window and targets one
# reminder date within that window.
@periodic_task(run_every=crontab(day_of_month="13-15", hour=14, minute=0), queue=getattr(settings, 'CELERY_PERIODIC_QUEUE', 'celery'))
def first_facility_delivery_task():
    facility_partial(15)

@periodic_task(run_every=crontab(day_of_month="20-22", hour=14, minute=0), queue=getattr(settings, 'CELERY_PERIODIC_QUEUE', 'celery'))
def second_facility_delivery_task():
    facility_partial(22)

@periodic_task(run_every=crontab(day_of_month="26-30", hour=14, minute=0), queue=getattr(settings, 'CELERY_PERIODIC_QUEUE', 'celery'))
def third_facility_delivery_task():
    facility_partial(30)

@periodic_task(run_every=crontab(day_of_month="11-13", hour=8, minute=0), queue=getattr(settings, 'CELERY_PERIODIC_QUEUE', 'celery'))
def first_district_delivery_task():
    district_partial(13)

@periodic_task(run_every=crontab(day_of_month="18-20", hour=14, minute=0), queue=getattr(settings, 'CELERY_PERIODIC_QUEUE', 'celery'))
def second_district_delivery_task():
    district_partial(20)
@periodic_task(run_every=crontab(day_of_month="26-28", hour=14, minute=0), queue=getattr(settings, 'CELERY_PERIODIC_QUEUE', 'celery'))
def third_district_delivery_task():
district_partial(28) | {
"repo_name": "SEL-Columbia/commcare-hq",
"path": "custom/ilsgateway/reminders/delivery.py",
"copies": "1",
"size": "3141",
"license": "bsd-3-clause",
"hash": -598844753103776100,
"line_mean": 45.8955223881,
"line_max": 134,
"alpha_frac": 0.7026424706,
"autogenerated": false,
"ratio": 3.3097997892518443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45124422598518443,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections.abc import Iterator
from toolz.compatibility import iteritems, map
from toolz import assoc
from .utils import transitive_get as walk
from .variable import Var, var, isvar
from .dispatch import dispatch
###############
# Reification #
###############

# Type-dispatched helpers: rebuild a container with all variables replaced
# via the substitution `s`. The generic object case is the identity.

@dispatch(Iterator, dict)
def _reify(t, s):
    # Lazily reify each element of an iterator.
    return map(partial(reify, s=s), t)
    # return (reify(arg, s) for arg in t)

@dispatch(tuple, dict)
def _reify(t, s):
    return tuple(reify(iter(t), s))

@dispatch(list, dict)
def _reify(t, s):
    return list(reify(iter(t), s))

@dispatch(dict, dict)
def _reify(d, s):
    # Only values are reified; keys are left untouched.
    return dict((k, reify(v, s)) for k, v in d.items())

@dispatch(object, dict)
def _reify(o, s):
    return o  # catch all, just return the object
def reify(e, s):
    """ Replace variables of expression with substitution
    >>> x, y = var(), var()
    >>> e = (1, x, (3, y))
    >>> s = {x: 2, y: 4}
    >>> reify(e, s)
    (1, 2, (3, 4))
    >>> e = {1: x, 3: (y, 5)}
    >>> reify(e, s)
    {1: 2, 3: (4, 5)}
    """
    if not isvar(e):
        # Non-variables are handled structurally by the dispatched helpers.
        return _reify(e, s)
    # Bound variables are chased recursively; unbound ones stay as-is.
    return reify(s[e], s) if e in s else e
###############
# Unification #
###############

# Sequence types unified element-wise.
seq = tuple, list, Iterator

@dispatch(seq, seq, dict)
def _unify(u, v, s):
    # Sequences unify pairwise when lengths match; thread the substitution.
    if len(u) != len(v):
        return False
    for uu, vv in zip(u, v):  # avoiding recursion
        s = unify(uu, vv, s)
        if s is False:
            return False
    return s

@dispatch((set, frozenset), (set, frozenset), dict)
def _unify(u, v, s):
    # Remove the common elements, then unify the sorted leftovers pairwise.
    i = u & v
    u = u - i
    v = v - i
    return _unify(sorted(u), sorted(v), s)

@dispatch(dict, dict, dict)
def _unify(u, v, s):
    # Dicts unify when they have the same keys and each value pair unifies.
    if len(u) != len(v):
        return False
    for key, uval in iteritems(u):
        if key not in v:
            return False
        s = unify(uval, v[key], s)
        if s is False:
            return False
    return s

@dispatch(object, object, dict)
def _unify(u, v, s):
    return False  # catch all
@dispatch(object, object, dict)
def unify(u, v, s):  # no check at the moment
    """ Find substitution so that u == v while satisfying s
    >>> x = var('x')
    >>> unify((1, x), (1, 2), {})
    {~x: 2}
    """
    # Resolve both sides through the current substitution first.
    u = walk(u, s)
    v = walk(v, s)
    if u == v:
        return s
    # An unbound variable unifies by extending the substitution.
    if isvar(u):
        return assoc(s, u, v)
    if isvar(v):
        return assoc(s, v, u)
    # Otherwise fall through to structural unification.
    return _unify(u, v, s)

@dispatch(object, object)
def unify(u, v):
    # Convenience overload: start from an empty substitution.
    return unify(u, v, {})
| {
"repo_name": "mrocklin/unification",
"path": "unification/core.py",
"copies": "1",
"size": "2452",
"license": "bsd-3-clause",
"hash": -8888234438738700000,
"line_mean": 20.1379310345,
"line_max": 59,
"alpha_frac": 0.5383360522,
"autogenerated": false,
"ratio": 2.8578088578088576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3896144910008858,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections.abc import MutableSequence
class BasePoolArray(BaseBuiltinWithGDObjOwnership, MutableSequence):
    """Common base for the Godot Pool*Array bindings.

    Wraps a C `godot_pool_*_array` pointer (`self._gd_ptr`) and exposes it as
    a Python mutable sequence. Concrete subclasses are produced by
    `_generate_pool_array`, which supplies the `_gd_array_*` C functions and
    the contained element class.

    Fix: the index bound check used to be `abs(idx) >= size`, which accepted
    indices below -size (e.g. idx=-5 on a 3-element array normalized to -2
    and slipped through to the C getter); indices are now range-checked
    properly in `_normalize_index`.
    """
    __slots__ = ()
    # Godot variant type id; filled in on generated subclasses.
    GD_TYPE = None

    def _gd_to_py(self, value):
        # Wrap a raw C element in its Python element class.
        return self._contained_cls.build_from_gdobj(value)

    def _py_to_gd(self, value):
        # Validate and unwrap a Python element into its raw C representation.
        self._check_param_type('value', value, self._contained_cls)
        return value._gd_ptr

    def __init__(self, items=None):
        if isinstance(items, self._cls):
            # Copy-construct from another array of the same type.
            self._gd_ptr = self._gd_array_alloc()
            self._gd_array_new_copy(self._gd_ptr, items._gd_ptr)
        else:
            self._gd_ptr = self._gd_array_alloc()
            self._gd_array_new(self._gd_ptr)
            # TODO: use godot_pool_*_array_new_with_array
            if items:
                self += items

    def __repr__(self):
        return "<%s(%s)>" % (type(self).__name__, [x for x in self])

    def __eq__(self, other):
        # TODO: should be able to optimize this...
        try:
            return list(self) == list(other)
        except TypeError:
            # `other` is not iterable.
            return False

    def __ne__(self, other):
        return not self == other

    def _normalize_index(self, idx):
        # Translate a possibly-negative index into a bounds-checked,
        # non-negative one; raises IndexError outside [-size, size).
        size = len(self)
        if idx < 0:
            idx += size
        if not 0 <= idx < size:
            raise IndexError('list index out of range')
        return idx

    def __iadd__(self, items):
        if isinstance(items, (str, bytes)):
            return NotImplemented
        # TODO: use godot_pool_vector3_array_append_array
        for x in items:
            self.append(x)
        return self

    def __radd__(self, items):
        return self._cls(items) + self

    def __add__(self, items):
        if isinstance(items, (str, bytes)):
            return NotImplemented
        arr = self._cls()
        # TODO: use godot_pool_vector3_array_append_array
        for x in self:
            arr.append(x)
        for x in items:
            arr.append(x)
        return arr

    def __iter__(self):
        # TODO: mid iteration mutation should throw exception ?
        for c in range(len(self)):
            yield self[c]

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            # Materialize, slice in Python, rewrap as the same array type.
            return self._cls([x for x in self][idx])
        idx = self._normalize_index(idx)
        raw = self._gd_array_get(self._gd_ptr, idx)
        return self._gd_to_py(raw)

    def __setitem__(self, idx, value):
        idx = self._normalize_index(idx)
        value = self._py_to_gd(value)
        self._gd_array_set(self._gd_ptr, idx, value)

    def __delitem__(self, idx):
        idx = self._normalize_index(idx)
        self._gd_array_remove(self._gd_ptr, idx)

    def __len__(self):
        return self._gd_array_size(self._gd_ptr)

    # Methods

    def append(self, value):
        value = self._py_to_gd(value)
        self._gd_array_append(self._gd_ptr, value)

    def insert(self, pos, value):
        value = self._py_to_gd(value)
        if self._gd_array_insert(self._gd_ptr, pos, value) != lib.GODOT_OK:
            raise IndexError("list assignment index out of range")

    def invert(self):
        # Reverse the array in place (Godot naming).
        self._gd_array_invert(self._gd_ptr)

    def push_back(self, value):
        value = self._py_to_gd(value)
        self._gd_array_push_back(self._gd_ptr, value)

    def resize(self, size):
        self._check_param_type('size', size, int)
        self._gd_array_resize(self._gd_ptr, size)
def _generate_pool_array(clsname, pycls, gdname, py_to_gd=None, gd_to_py=None):
    """Build a concrete Pool*Array class from BasePoolArray.

    clsname: Python class name to create
    pycls: Python element class stored in the array
    gdname: C-level name stem, e.g. 'pool_int_array', used to look up the
        `godot_<gdname>_*` functions on the cffi `lib`
    py_to_gd / gd_to_py: optional element-conversion overrides
    """
    godot_x_alloc = globals()['godot_%s_alloc' % gdname]
    godot_x_new_copy = getattr(lib, 'godot_%s_new_copy' % gdname)
    def _copy_gdobj(gdobj):
        # Allocate and copy-construct a fresh C array object.
        cpy_gdobj = godot_x_alloc()
        godot_x_new_copy(cpy_gdobj, gdobj)
        return cpy_gdobj
    # Class namespace: bind each godot_<gdname>_<op> C function as a
    # `_gd_array_<op>` attribute used by BasePoolArray.
    nmspc = {
        '__slots__': (),
        'GD_TYPE': getattr(lib, 'GODOT_VARIANT_TYPE_%s' % gdname.upper()),
        '_copy_gdobj': staticmethod(_copy_gdobj),
        '_contained_cls': pycls,
        '_gd_array_alloc': staticmethod(godot_x_alloc)
    }
    for suffix in ('new', 'new_copy', 'get', 'set', 'remove', 'size',
                   'append', 'insert', 'invert', 'push_back', 'resize'):
        nmspc['_gd_array_%s' % suffix] = getattr(lib, 'godot_%s_%s' % (gdname, suffix))
    if py_to_gd:
        nmspc['_py_to_gd'] = py_to_gd
    if gd_to_py:
        nmspc['_gd_to_py'] = gd_to_py
    cls = type(clsname, (BasePoolArray, ), nmspc)
    # Self-reference used by BasePoolArray for copy/slice construction.
    cls._cls = cls
    return cls
# Arrays of wrapped Godot value types use the default conversions.
PoolColorArray = _generate_pool_array('PoolColorArray', Color, 'pool_color_array')
PoolVector2Array = _generate_pool_array('PoolVector2Array', Vector2, 'pool_vector2_array')
PoolVector3Array = _generate_pool_array('PoolVector3Array', Vector3, 'pool_vector3_array')

# Conversion overrides for arrays of primitive element types.

def _identity(self, value):
    # No conversion needed (raw C value maps directly to the Python value).
    return value

def _byte_py_to_gd(self, value):
    # Bytes must be ints in range(0, 256), mirroring Python bytes semantics.
    if not isinstance(value, int):
        raise TypeError("'%s' object cannot be interpreted as an integer" % type(value).__name__)
    if not 0 <= int(value) < 256:
        raise ValueError('bytes must be in range(0, 256)')
    return value

def _int_py_to_gd(self, value):
    self._check_param_type('value', value, int)
    return value

def _real_py_to_gd(self, value):
    self._check_param_float('value', value)
    return value

def _string_gd_to_py(self, value):
    # Decode a godot_string element back into a Python str.
    return godot_string_to_pyobj(ffi.addressof(value))

def _string_py_to_gd(self, value):
    self._check_param_type('value', value, str)
    return pyobj_to_gdobj(value)

PoolByteArray = _generate_pool_array('PoolByteArray', int, 'pool_byte_array', py_to_gd=_byte_py_to_gd, gd_to_py=_identity)
PoolIntArray = _generate_pool_array('PoolIntArray', int, 'pool_int_array', py_to_gd=_int_py_to_gd, gd_to_py=_identity)
PoolRealArray = _generate_pool_array('PoolRealArray', float, 'pool_real_array', py_to_gd=_real_py_to_gd, gd_to_py=_identity)
PoolStringArray = _generate_pool_array('PoolStringArray', str, 'pool_string_array', py_to_gd=_string_py_to_gd, gd_to_py=_string_gd_to_py)
| {
"repo_name": "razvanc-r/godot-python",
"path": "pythonscript/cffi_bindings/builtin_pool_arrays.inc.py",
"copies": "1",
"size": "6098",
"license": "mit",
"hash": -5106850801166408000,
"line_mean": 32.1413043478,
"line_max": 137,
"alpha_frac": 0.5878976714,
"autogenerated": false,
"ratio": 3.264453961456103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4352351632856103,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import Collection, Mapping
import asyncio
import aiofiles
import os
from peewee import Func
from peewee import Field
class csv_writer:
    """Minimal async CSV writer over an (optionally async) file handle.

    Rows may be sequences (written in order) or mappings (written in
    `fieldnames` order). `fh.write` may be a coroutine (aiofiles) or a plain
    synchronous write: awaiting a sync write's return value raises TypeError
    *after* the data has already been written, so that TypeError is
    tolerated.

    Fix: `writerow` used to test `Collection` before `Mapping`, but every
    Mapping is a Collection, so dict rows hit the sequence branch and got
    their *keys* joined; mapping values were also joined without `str()`.
    """

    def __init__(self, fh, fieldnames=None, delimiter=','):
        self.fh = fh
        self.delimiter = delimiter
        self.fieldnames = list(fieldnames) if fieldnames else None

    async def _write_line(self, line):
        # Tolerate synchronous file handles (see class docstring).
        try:
            await self.fh.write(line + os.linesep)
        except TypeError:
            pass

    async def writerow(self, row):
        """Write one row; raises AttributeError for mappings without a
        header or for unsupported row types."""
        # Mapping must be checked first: dicts are also Collections.
        if isinstance(row, Mapping):
            if not self.fieldnames:
                raise AttributeError("do not have header")
            cells = [str(row.get(name, "")) for name in self.fieldnames]
        elif isinstance(row, Collection):
            cells = [str(item) for item in row]
        else:
            raise AttributeError("unsupport row type")
        await self._write_line(self.delimiter.join(cells))

    async def writeheader(self, fieldnames=None):
        """Write the header row; `fieldnames`, when given, replaces the
        stored field names. A writer without field names writes nothing."""
        if fieldnames:
            self.fieldnames = list(fieldnames)
        if self.fieldnames:
            await self._write_line(self.delimiter.join(self.fieldnames))
async def aiodump_csv(query, file_or_name, loop= None,include_header=True, close_file=False,
                      append=True, csv_writer=csv_writer):
    """Dump a peewee query's tuples as CSV to a file or async file handle.

    file_or_name: a path (opened via aiofiles, always closed) or an open
        handle (seeked to the end when `append` is true).
    include_header: derive column names from the query's select nodes.

    NOTE(review): for a path, the file is opened with mode 'w' even though
    `append` suggests 'a' (the 'a' variant is commented out) — confirm
    whether append-to-file was meant to be supported. `loop` is unused.
    """
    if isinstance(file_or_name, str):
        close_file=True
        #fh = await aiofiles.open(file_or_name, append and 'a' or 'w')
        fh = await aiofiles.open(file_or_name, 'w')
        writer = csv_writer(fh)
    else:
        fh = file_or_name
        if append:
            # Seek to EOF; sync handles make the await raise TypeError after
            # the seek already happened, so that case is tolerated.
            try:
                await fh.seek(0, 2)
            except TypeError as te:
                fh.seek(0, 2)
            except:
                raise
        writer = csv_writer(fh)
    if include_header:
        # Column name preference: explicit alias, then field/function name,
        # then a positional placeholder.
        header = []
        for idx, node in enumerate(query._select):
            if node._alias:
                header.append(node._alias)
            elif isinstance(node, (Field, Func)):
                header.append(node.name)
            else:
                header.append('col_%s' % idx)
        await writer.writeheader(header)
    for row in (await query.tuples()):
        await writer.writerow(row)
    if close_file:
        # Same sync/async tolerance as above for close().
        try:
            await fh.close()
        except TypeError as te:
            fh.close()
        except:
            raise
    return fh
| {
"repo_name": "Python-Tools/aioorm",
"path": "aioorm/utils/csv_utils/csv_dump.py",
"copies": "1",
"size": "2707",
"license": "mpl-2.0",
"hash": 8959602636732554000,
"line_mean": 28.4239130435,
"line_max": 92,
"alpha_frac": 0.5397118581,
"autogenerated": false,
"ratio": 4.126524390243903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5166236248343903,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import Counter
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.db.models import TextField
from django.db import router
from django.contrib.admin.utils import NestedObjects
from django.contrib.gis.db.models import ManyToManyField
from django.core.exceptions import ValidationError
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
from ckeditor_uploader.widgets import CKEditorUploadingWidget
from djgeojson.fields import GeoJSONFormField
from leaflet.admin import LeafletGeoAdmin
from nested_admin.nested import NestedModelAdminMixin, NestedStackedInline
from parler.admin import TranslatableAdmin, TranslatableStackedInline
from parler.forms import TranslatableModelForm, TranslatableBaseInlineFormSet
from democracy import models
from democracy.admin.widgets import Select2SelectMultiple, ShortTextAreaWidget
from democracy.enums import InitialSectionType
from democracy.models.utils import copy_hearing
from democracy.plugins import get_implementation
class FixedModelForm(TranslatableModelForm):
    # Taken from https://github.com/asyncee/django-easy-select2/blob/master/easy_select2/forms.py
    """
    ModelForm variant that strips the enforced 'Hold down "Control" ...'
    hint from multi-select fields' help texts.

    See https://github.com/asyncee/django-easy-select2/issues/2
    and https://code.djangoproject.com/ticket/9321

    Also clears the help_texts of GeoJSONFormFields, as those render maps.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        unwanted_hint = force_text(_('Hold down "Control", or "Command" on a Mac, to select more than one.'))
        for field in self.fields.values():
            if isinstance(field, GeoJSONFormField):
                field.help_text = ''
            else:
                field.help_text = field.help_text.replace(unwanted_hint, '')
# Inlines
class SectionImageInline(TranslatableStackedInline, NestedStackedInline):
    # Inline editor for the images attached to a section; image titles are hidden.
    model = models.SectionImage
    extra = 0
    exclude = ("title",)
    formfield_overrides = {
        TextField: {'widget': ShortTextAreaWidget}
    }
class SectionInlineFormSet(TranslatableBaseInlineFormSet):
    def clean(self):
        """Validate that there is exactly one main section and at most one closure info section."""
        super().clean()
        main_count = 0
        closure_info_count = 0
        for form in self.forms:
            cleaned = getattr(form, 'cleaned_data', None)
            if not cleaned or cleaned.get('DELETE'):
                continue
            section_type = cleaned.get('type')
            if not section_type:
                continue
            identifier = section_type.identifier
            if identifier == InitialSectionType.MAIN:
                main_count += 1
            elif identifier == InitialSectionType.CLOSURE_INFO:
                closure_info_count += 1
        if main_count != 1:
            raise ValidationError(_('There must be exactly one main section.'))
        if closure_info_count > 1:
            raise ValidationError(_('There cannot be more than one closure info section.'))
class SectionInline(NestedStackedInline, TranslatableStackedInline):
    # Inline editor for the sections of a hearing; images nest inside each section.
    model = models.Section
    extra = 1
    inlines = [SectionImageInline]
    exclude = ("published",)
    formfield_overrides = {
        TextField: {'widget': ShortTextAreaWidget}
    }
    formset = SectionInlineFormSet
    def formfield_for_dbfield(self, db_field, **kwargs):
        """Customize widgets and initial values; ``obj`` (the hearing) is injected via get_formset."""
        obj = kwargs.pop("obj", None)
        if db_field.name == "content":
            kwargs["widget"] = CKEditorUploadingWidget
            # Some initial value is needed for every section to workaround a bug in nested inlines
            # that causes an integrity error to be raised when a section image is added but the parent
            # section isn't saved.
            kwargs["initial"] = _("Enter text here.")
        if not getattr(obj, "pk", None):
            # New hearing: prime sensible defaults for the first (main) section.
            if db_field.name == "type":
                kwargs["initial"] = models.SectionType.objects.get(identifier=InitialSectionType.MAIN)
            elif db_field.name == "content":
                kwargs["initial"] = _("Enter the introduction text for the hearing here.")
        field = super().formfield_for_dbfield(db_field, **kwargs)
        if db_field.name == "plugin_identifier":
            widget = self._get_plugin_selection_widget(hearing=obj)
            field.label = _("Plugin")
            field.widget = widget
        if db_field.name == "id" and not (obj and obj.pk):
            field.widget = forms.HiddenInput()
        return field
    def _get_plugin_selection_widget(self, hearing):
        """Build a select widget listing configured plugins plus any identifiers still in use."""
        choices = [("", "------")]
        plugins = getattr(settings, "DEMOCRACY_PLUGINS")
        if hearing and hearing.pk:
            current_plugin_identifiers = set(hearing.sections.values_list("plugin_identifier", flat=True))
        else:
            current_plugin_identifiers = set()
        for plugin_identifier in sorted(current_plugin_identifiers):
            if plugin_identifier and plugin_identifier not in plugins:
                # The plugin has been unregistered or something?
                choices.append((plugin_identifier, plugin_identifier))
        for idfr, classpath in sorted(plugins.items()):
            choices.append((idfr, get_implementation(idfr).display_name or idfr))
        widget = forms.Select(choices=choices)
        return widget
    def get_formset(self, request, obj=None, **kwargs):
        # Pass the hearing down to formfield_for_dbfield; existing hearings get no extra blank form.
        kwargs["formfield_callback"] = partial(self.formfield_for_dbfield, request=request, obj=obj)
        if getattr(obj, "pk", None):
            kwargs['extra'] = 0
        return super().get_formset(request, obj, **kwargs)
# Admins
class HearingGeoAdmin(LeafletGeoAdmin):
    # Leaflet map defaults used when editing the hearing's geographic area.
    settings_overrides = {
        'DEFAULT_CENTER': settings.DEFAULT_MAP_COORDINATES,
        'DEFAULT_ZOOM': settings.DEFAULT_MAP_ZOOM,
    }
class HearingAdmin(NestedModelAdminMixin, HearingGeoAdmin, TranslatableAdmin):
    """Admin UI for hearings: nested section inlines, map area editing and soft deletion."""
    class Media:
        js = ("admin/ckeditor-nested-inline-fix.js",)
    inlines = [SectionInline]
    list_display = ("slug", "published", "title", "open_at", "close_at", "force_closed")
    list_filter = ("published",)
    search_fields = ("slug", "translations__title")
    readonly_fields = ("preview_url",)
    raw_id_fields = ("project_phase",)
    fieldsets = (
        (None, {
            "fields": ("title", "labels", "slug", "preview_url", "organization")
        }),
        (_("Project"), {
            "fields": ("project_phase",)
        }),
        (_("Availability"), {
            "fields": ("published", "open_at", "close_at", "force_closed")
        }),
        (_("Area"), {
            "fields": ("geojson",)
        }),
        (_("Contact info"), {
            "fields": ("contact_persons",)
        })
    )
    formfield_overrides = {
        TextField: {'widget': ShortTextAreaWidget}
    }
    form = FixedModelForm
    actions = ["copy_as_draft"]  # delete_selected is built_in, should not be added
    ordering = ("slug",)
    def copy_as_draft(self, request, queryset):
        """Admin action: duplicate each selected hearing as an unpublished draft."""
        for hearing in queryset:
            copy_hearing(hearing, published=False)
            # Interpolate AFTER translation so the msgid stays a constant,
            # translatable string (the original interpolated inside _()).
            self.message_user(request, _('Copied Hearing "%s" as a draft.') % hearing.title)
    def preview_url(self, obj):
        """Render the hearing's preview URL as a clickable, escaped link."""
        if not obj.preview_url:
            return ''
        # Let format_html escape the URL; %-interpolating it into the markup
        # beforehand bypassed escaping entirely.
        return format_html('<a href="{0}">{0}</a>', obj.preview_url)
    preview_url.short_description = _('Preview URL')
    def formfield_for_manytomany(self, db_field, request=None, **kwargs):
        # Labels can be numerous, so use a searchable multi-select widget.
        if db_field.name == "labels":
            kwargs["widget"] = Select2SelectMultiple
        return super().formfield_for_manytomany(db_field, request, **kwargs)
    def get_deleted_objects(self, objs, request):
        # we override here to allow soft_delete, modified from
        # https://github.com/django/django/blob/master/django/contrib/admin/utils.py
        """
        Find all objects related to ``objs`` that should also be deleted. ``objs``
        must be a homogeneous iterable of objects (e.g. a QuerySet).
        Return a nested list of strings suitable for display in the
        template with the ``unordered_list`` filter.
        """
        try:
            obj = objs[0]
        except IndexError:
            return [], {}, set(), []
        else:
            using = router.db_for_write(obj._meta.model)
        collector = NestedObjects(using=using)
        collector.collect(objs)
        def format_callback(obj):
            return '%s: %s' % (capfirst(obj._meta.verbose_name), obj)
        to_delete = collector.nested(format_callback)
        model_count = {model._meta.verbose_name_plural: len(objs) for model, objs in collector.model_objs.items()}
        # we need to display count by model of the protected items too
        protected = [format_callback(obj) for obj in collector.protected]
        protected_model = {obj._meta.verbose_name_plural for obj in collector.protected}
        protected_model_count = dict(Counter(protected_model))
        # since we are only performing soft delete, we may soft delete the protected objects later
        return to_delete + protected, {**model_count, **protected_model_count}, set(), []
    def _soft_delete_collected(self, collector):
        """Soft delete every collected related object (protected ones included) that supports it."""
        to_delete = []
        for item, value in collector.model_objs.items():
            to_delete += value
        to_delete += collector.protected
        # since we are only performing soft delete, we must soft_delete related objects too, if possible
        for obj in to_delete:
            if hasattr(obj, 'soft_delete'):
                obj.soft_delete()
    def delete_queryset(self, request, queryset):
        # this method is called by delete_selected and can be overridden
        try:
            obj = queryset[0]
        except IndexError:
            return
        else:
            using = router.db_for_write(obj._meta.model)
        collector = NestedObjects(using=using)
        collector.collect(queryset)
        self._soft_delete_collected(collector)
    def delete_model(self, request, obj):
        """Soft delete a single hearing and all of its related objects."""
        using = router.db_for_write(obj._meta.model)
        collector = NestedObjects(using=using)
        collector.collect([obj])
        self._soft_delete_collected(collector)
    def save_formset(self, request, form, formset, change):
        """Soft delete inline objects the user removed instead of hard-deleting them."""
        objects = formset.save(commit=False)
        for obj in formset.deleted_objects:
            obj.soft_delete()
        for obj in objects:
            obj.save()
        formset.save_m2m()
class LabelAdmin(TranslatableAdmin, admin.ModelAdmin):
    # Hide the 'published' flag from the edit form.
    exclude = ("published",)
class SectionTypeAdmin(admin.ModelAdmin):
    fields = ("name_singular", "name_plural")
    def get_queryset(self, request):
        # Keep the initial (pre-defined) section types out of the admin listing.
        return super().get_queryset(request).exclude_initial()
class OrganizationAdmin(admin.ModelAdmin):
    formfield_overrides = {
        # Finnish verbose name: "ylläpitäjät" = administrators.
        ManyToManyField: {'widget': FilteredSelectMultiple("ylläpitäjät", is_stacked=False)},
    }
    exclude = ('published', )
class ContactPersonAdmin(TranslatableAdmin, admin.ModelAdmin):
    # Contact people shown on hearings; the 'published' flag is hidden from the form.
    list_display = ('name', 'title', 'organization', 'phone', 'email')
    exclude = ('published',)
class CommentAdmin(admin.ModelAdmin):
    """Read-mostly admin for section comments; deletions are soft."""
    list_display = ('id', 'section', 'author_name', 'content')
    list_filter = ('section__hearing__slug',)
    search_fields = ('section__id', 'author_name', 'title', 'content')
    fields = ('title', 'content', 'reply_to', 'author_name', 'organization', 'geojson', 'map_comment_text',
              'plugin_identifier', 'plugin_data', 'pinned', 'label', 'language_code', 'voters', 'section',
              'created_by_user')
    readonly_fields = ('reply_to', 'author_name', 'organization', 'geojson',
                       'plugin_identifier', 'plugin_data', 'label', 'language_code', 'voters', 'section',
                       'created_by_user')
    def created_by_user(self, obj):
        """Return a link to the admin page of the user that created the comment, or None."""
        if not obj.created_by_id:
            return None
        # Single lookup; the previous implementation queried the user twice and
        # raised DoesNotExist for dangling references instead of degrading.
        user = get_user_model().objects.filter(id=obj.created_by_id).first()
        if user is None:
            return None
        user_url = reverse("admin:app_list", args=['kerrokantasi'])
        user_url += "user/{}/change/".format(obj.created_by_id)
        user_info = "{} - {}".format(obj.created_by.get_display_name(), user.email)
        return format_html(
            '<a href="{}">{}</a>', user_url, user_info
        )
    def delete_queryset(self, request, queryset):
        # this method is called by delete_selected and can be overridden
        for comment in queryset:
            comment.soft_delete()
    def delete_model(self, request, obj):
        # this method is called by the admin form and can be overridden
        obj.soft_delete()
class ProjectPhaseInline(TranslatableStackedInline, NestedStackedInline):
    # Inline editor for the phases belonging to a project.
    model = models.ProjectPhase
    extra = 1
class ProjectAdmin(TranslatableAdmin, admin.ModelAdmin):
    list_display = ('title_localized', 'identifier')
    search_fields = ('title', 'identifier')
    inlines = (ProjectPhaseInline,)
    def title_localized(self, obj):
        # Fall back to any available translation when the current language has none.
        return get_any_language(obj, 'title')
    title_localized.short_description = 'Title'
class ProjectPhaseAdmin(TranslatableAdmin, admin.ModelAdmin):
    list_display = ('title_localized', 'project')
    list_filter = ('project',)
    search_fields = ('title', 'project__title')
    def title_localized(self, obj):
        # Fall back to any available translation when the current language has none.
        return get_any_language(obj, 'title')
    title_localized.short_description = 'Title'
def get_any_language(obj, attr_name):
    """Return ``attr_name`` in the current language, falling back to the first
    configured language that has a non-empty translation.
    """
    value = obj.safe_translation_getter(attr_name)
    if value:
        return value
    for language in settings.PARLER_LANGUAGES[None]:
        value = obj.safe_translation_getter(attr_name, language_code=language['code'])
        if value:
            return value
    return value
# Wire it up! Register every model with its admin class.
admin.site.register(models.Label, LabelAdmin)
admin.site.register(models.Hearing, HearingAdmin)
admin.site.register(models.SectionType, SectionTypeAdmin)
admin.site.register(models.Organization, OrganizationAdmin)
admin.site.register(models.ContactPerson, ContactPersonAdmin)
admin.site.register(models.Project, ProjectAdmin)
admin.site.register(models.ProjectPhase, ProjectPhaseAdmin)
admin.site.register(models.SectionComment, CommentAdmin)
| {
"repo_name": "City-of-Helsinki/kerrokantasi",
"path": "democracy/admin/__init__.py",
"copies": "1",
"size": "15122",
"license": "mit",
"hash": -410380793452284900,
"line_mean": 37.7666666667,
"line_max": 114,
"alpha_frac": 0.6449500628,
"autogenerated": false,
"ratio": 4.102849389416553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5247799452216553,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import Counter
from ...external.qt.QtGui import (QWidget, QSlider, QLabel, QComboBox,
QHBoxLayout, QVBoxLayout)
from ...external.qt.QtCore import Qt, Signal
from ..widget_properties import (TextProperty,
ValueProperty,
CurrentComboProperty)
class SliceWidget(QWidget):
    """One row of UI for choosing how a single data dimension is displayed:
    as the horizontal axis ('x'), vertical axis ('y'), or a sliced-through
    position ('slice', with a slider to pick the slice center).
    """
    label = TextProperty('_ui_label')
    slice_center = ValueProperty('_ui_slider')
    mode = CurrentComboProperty('_ui_mode')
    # NOTE(review): slice_changed is declared Signal(int) but both signals are
    # emitted with self.mode (a string) — confirm the intended payload.
    slice_changed = Signal(int)
    mode_changed = Signal(str)
    def __init__(self, label='', pix2world=None, lo=0, hi=10,
                 parent=None, aggregation=None):
        super(SliceWidget, self).__init__(parent)
        if aggregation is not None:
            # NotImplementedError is the raisable exception type; the
            # NotImplemented constant is not callable.
            raise NotImplementedError("Aggregation option not implemented")
        if pix2world is not None:
            raise NotImplementedError("Pix2world option not implemented")
        layout = QVBoxLayout()
        layout.setContentsMargins(3, 1, 3, 1)
        top = QHBoxLayout()
        top.setContentsMargins(3, 3, 3, 3)
        label = QLabel(label)
        top.addWidget(label)
        mode = QComboBox()
        mode.addItem('x', 'x')
        mode.addItem('y', 'y')
        mode.addItem('slice', 'slice')
        mode.currentIndexChanged.connect(lambda x:
                                         self.mode_changed.emit(self.mode))
        mode.currentIndexChanged.connect(self._update_mode)
        top.addWidget(mode)
        layout.addLayout(top)
        slider = QSlider(Qt.Horizontal)
        slider.setMinimum(lo)
        slider_lbl = QLabel()
        slider.setMaximum(hi)
        # Integer division: QSlider values are ints (plain / broke under py3).
        slider.setValue((lo + hi) // 2)
        slider.valueChanged.connect(lambda x:
                                    self.slice_changed.emit(self.mode))
        slider.valueChanged.connect(lambda x: slider_lbl.setText(str(x)))
        layout.addWidget(slider_lbl)
        layout.addWidget(slider)
        self.setLayout(layout)
        self._ui_label = label
        self._ui_slider = slider
        self._slider_lbl = slider_lbl
        self._ui_mode = mode
        self._update_mode()
        self._frozen = False
    def _update_mode(self, *args):
        # The slider only makes sense while this dimension is being sliced.
        if self.mode != 'slice':
            self._ui_slider.hide()
            self._slider_lbl.hide()
        else:
            self._ui_slider.show()
            self._slider_lbl.show()
    def freeze(self):
        """Lock this dimension to 'slice' mode (used for length-1 axes)."""
        self.mode = 'slice'
        self._ui_mode.setEnabled(False)
        self._ui_slider.hide()
        self._frozen = True
    @property
    def frozen(self):
        return self._frozen
class DataSlice(QWidget):
    """
    A DataSlice widget provides an interface for selecting
    slices through an N-dimensional dataset

    Signals
    -------
    slice_changed : triggered when the slice through the data changes
    """
    slice_changed = Signal()
    def __init__(self, data=None, parent=None):
        """
        :param data: :class:`~glue.core.Data` instance, or None
        """
        super(DataSlice, self).__init__(parent)
        self._slices = []
        layout = QVBoxLayout()
        layout.setSpacing(4)
        layout.setContentsMargins(0, 3, 0, 3)
        self.layout = layout
        self.setLayout(layout)
        self.set_data(data)
    def _clear(self):
        # Detach every layout item, close the slice widgets, and forget them.
        for _ in range(self.layout.count()):
            self.layout.takeAt(0)
        for s in self._slices:
            s.close()
        self._slices = []
    def set_data(self, data):
        """
        Change datasets

        :param data: :class:`~glue.core.Data` instance
        """
        self._clear()
        if data is None:
            self.ndim = 0
            return
        self.ndim = len(data.shape)
        # 1D/2D data needs no slicing UI at all.
        if self.ndim < 3:
            return
        for i, s in enumerate(data.shape):
            slider = SliceWidget(data.get_world_component_id(i).label,
                                 hi=s - 1)
            # Default orientation: last axis horizontal, second-to-last vertical.
            if i == self.ndim - 1:
                slider.mode = 'x'
            elif i == self.ndim - 2:
                slider.mode = 'y'
            else:
                slider.mode = 'slice'
            self._slices.append(slider)
            # save ref to prevent PySide segfault
            self.__on_slice = partial(self._on_slice, i)
            self.__on_mode = partial(self._on_mode, i)
            slider.slice_changed.connect(self.__on_slice)
            slider.mode_changed.connect(self.__on_mode)
            # Length-1 axes cannot be oriented, so lock them to 'slice'.
            if s == 1:
                slider.freeze()
        for s in self._slices[::-1]:
            self.layout.addWidget(s)
        self.layout.addStretch(5)
    def _on_slice(self, index, slice_val):
        self.slice_changed.emit()
    def _on_mode(self, index, mode_index):
        # A mode change may leave the overall description without exactly one
        # 'x' and one 'y'; flip another unfrozen dimension to restore validity.
        s = self.slice
        def isok(ss):
            # valid slice description: 'x' and 'y' both appear
            c = Counter(ss)
            return c['x'] == 1 and c['y'] == 1
        if isok(s):
            self.slice_changed.emit()
            return
        for i in range(len(s)):
            if i == index:
                continue
            if self._slices[i].frozen:
                continue
            for mode in 'x', 'y', 'slice':
                if self._slices[i].mode == mode:
                    continue
                ss = list(s)
                ss[i] = mode
                if isok(ss):
                    self._slices[i].mode = mode
                    return
        else:
            # No reassignment could make the description valid again.
            raise RuntimeError("Corrupted Data Slice")
    @property
    def slice(self):
        """
        A description of the slice through the dataset

        A tuple of length equal to the dimensionality of the data

        Each element is an integer, 'x', or 'y'
        'x' and 'y' indicate the horizontal and vertical orientation
        of the slice
        """
        if self.ndim < 3:
            return {0: tuple(), 1: ('x',), 2: ('y', 'x')}[self.ndim]
        return tuple(s.mode if s.mode != 'slice' else s.slice_center
                     for s in self._slices)
    @slice.setter
    def slice(self, value):
        for v, s in zip(value, self._slices):
            if v in ['x', 'y']:
                s.mode = v
            else:
                s.mode = 'slice'
                s.slice_center = v
| {
"repo_name": "bsipocz/glue",
"path": "glue/qt/widgets/data_slice_widget.py",
"copies": "1",
"size": "6320",
"license": "bsd-3-clause",
"hash": 775078417938795400,
"line_mean": 27.9908256881,
"line_max": 75,
"alpha_frac": 0.5224683544,
"autogenerated": false,
"ratio": 4.1172638436482085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5139732198048208,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import defaultdict, namedtuple
import os
import threading
import sublime_plugin
import sublime
# A project pairs a display name with a path prefix ('match') used for indexing.
p = namedtuple('Project', ['name', 'match'])
# How many files do we want to keep in our history
number_of_recent_files = 500
# Put this in front of directories to make it easy to filter for directories
directory_prefix = '`'
# Define the paths to where we store our collections.
# You can add more collections; just remember to update your keymap.
collections = {
    'favorites': os.path.join(os.getcwd(), 'collections', 'favorites_%s.txt'),
    'index': os.path.join(os.getcwd(), 'collections', 'index_%s.txt'),
    'recent': os.path.join(os.getcwd(), 'collections', 'recent_%s.txt'),
    'projects': os.path.join(os.getcwd(), 'collections', 'projects.txt')
}
# How much of the filepath do you want to show
file_path_depth = {
    'default': 3,
    # define per collection
    'favorites': 4,
    'recent': 0,
    'index': 4
}
# Last browsed directory; stored here for persistence's sake.
path = ''
def get_change_dir_string(path):
    '''Return a formatted CD string

    Given a path this function returns a string of the form
        /path/to/(parent)/current
    '''
    parent, leaf = os.path.split(path)
    if not (parent and leaf):
        return 'Home Directory (%s)' % parent
    grandparent, parent_name = os.path.split(parent)
    if parent_name:
        return os.path.join(grandparent, "(%s)" % parent_name, leaf)
    return os.path.join("(%s)" % parent.strip('\\/'), leaf)
class SublimeException(Exception):
    # NOTE(review): only surfaces the message in the Sublime status bar; it never
    # calls Exception.__init__, and some call sites instantiate it without raising.
    def __init__(self, message):
        sublime.status_message(message)
# Don't list files with these extensions (except in collections).
excluded_extensions = [
    '.pyc',
    '.png',
    '.jpg'
]
def get_list_from_file(file_name, count = -1):
    """Read up to ``count`` stripped lines from ``file_name`` into a list.

    If the file does not exist it is created (empty) and [] is returned.
    A negative ``count`` (the default) reads every line.
    """
    items = []
    remaining = count
    try:
        with open(file_name) as collection:
            for line in collection:
                items.append(line.strip())
                remaining -= 1
                if remaining == 0:
                    break
    except IOError:
        # Create the file so later reads/writes succeed.
        with open(file_name, 'w') as f:
            pass
    return items
def write_list_to_file(input, file_path):
    """Write each element of ``input`` to ``file_path``, one per line."""
    contents = '\n'.join(input)
    with open(file_path, 'w') as output:
        output.write(contents)
def valid_file(path):
    """Return False for paths ending in one of excluded_extensions, else True."""
    return not any(path.endswith(extension) for extension in excluded_extensions)
def create_index(path, name):
    """Walk ``path`` and write every file path found into the index collection for ``name``.

    The previous version also opened the index file for writing (truncating it)
    without ever writing through that handle; write_list_to_file already
    creates/overwrites the file, so the redundant open is gone.
    """
    output = []
    for folder, subs, files in os.walk(path, followlinks = True):
        for filename in files:
            output.append(os.path.join(folder, filename))
    write_list_to_file(output, collections['index'] % name)
class IndexBuilder(threading.Thread):
    """Background worker that (re)builds the file index for one or more projects."""
    def __init__(self, projects):
        # Thread.__init__ must run before .start() can be used.
        super(IndexBuilder, self).__init__()
        # allow us to pass in a single project or many projects
        self.projects = projects if isinstance(projects, list) else [projects]
    def run(self):
        for project in self.projects:
            # Index by position: projects may be Project namedtuples or plain
            # [name, match] lists (see load_all_indexes).
            name, match = project[0], project[1]
            sublime.status_message("Creating index for {project}...".format(project = name))
            try:
                # create_index takes (path, name); the old *project splat passed
                # them swapped (name, match).
                create_index(match, name)
            except:
                SublimeException("Error creating index for {project} in {path}".format(
                    project = name,
                    path = match
                ))
            sublime.status_message("Finished building index for %s" % name)
        sublime.status_message("Finished building indexes")
def create_project(input):
    """Parse a 'name[:match]' string into a Project, indexing it when a match path is given."""
    data = [x.strip() for x in input.split(':')]
    name = data.pop(0).strip()
    match = data.pop() if data else ''
    if match: create_index(match, name)
    return p(name, match)
def init_projects():
    """Load saved projects (creating 'default' if none exist) and index the current one."""
    projects = [create_project(x) for x in get_list_from_file(collections['projects'])]
    current_project = projects[0] if projects else create_project('default')
    IndexBuilder(current_project).run()
    return projects, current_project
def load_all_indexes():
    """Rebuild the index for every project listed in the projects collection."""
    projects = []
    with open(collections['projects']) as fh:
        for line in fh.readlines():
            # NOTE(review): relies on Python 2 ``map`` returning a list; under
            # Python 3 this would append a lazy map object instead.
            project = map(lambda x: x.strip(), line.split(":"))
            projects.append(project)
    IndexBuilder(projects).run()
# Restore the last project that we had open.
# This must be stored as a global so that when we close/open/favorite a file
# it is added to the correct per-project 'recent' collection.
projects, current_project = init_projects();
class FindCommand(sublime_plugin.TextCommand):
    """Quick-panel file navigator: browse, create, delete and open files/collections."""
    def run(self, edit, command = 'open', collection = False):
        self.window = sublime.active_window()
        self.file_name = self.view.file_name()
        # if no filename then we use the path from the previous file
        self.path = os.path.dirname(self.file_name or path)
        return {
            'new_file': self.file_prompt,
            'new_directory': self.directory_prompt,
            'delete_file': self.delete_file,
            'show_collection': partial(self.show_collection, collection),
            'project': self.show_projects,
            'open': partial(self.change_directory, self.path)
        }[command]()
    # List all files/directories in the current directory.
    def list_files(self):
        '''
        Show all non excluded files located in the directory self.path
        We don't just show the raw file name we show:
            1. Special Commands (cd, new file, etc)
            2. For Search we display
                File:
                    File Name
                    Shortened Path
                Directory:
                    <directory_prefix>Directory Name
                    Shortened Path
        '''
        common_commands = [
            ['Change Directory', get_change_dir_string(self.path)],
            ['Other Options', 'New File/Folder | Recent/Favorites | Switch Project']
        ]
        file_names = [f for f in os.listdir(self.path) if valid_file(f)]
        self.files = [os.path.join(self.path, f) for f in file_names]
        display_names = create_display_names(self.files, file_path_depth['default'])
        self.window.show_quick_panel(common_commands + display_names, self.handle_input)
    # Call back function after the user selects what file/command to open.
    def handle_input(self, command):
        if command == -1:
            return  # nothing selected; do nothing
        elif command < 2:
            return [
                self.go_back_directory,
                self.custom_commands
            ][command]()  # call the appropriate command based on index
        # they selected a file so we grab its path (offset by the 2 commands above)
        path = self.files[command - 2]
        self.open_path(path)
    def custom_commands(self, command = None):
        """Show the secondary command palette; acts as its own selection callback."""
        if command is None:
            return self.window.show_quick_panel([
                ['Back To Files', 'View Files in Current Directory'],
                ['New File', 'Create a new file'],
                ['New Folder', 'Create a new folder'],
                ['Recent Files', 'View recent files'],
                ['Favorite Files/Folders', 'View favorite files'],
                ['Switch Project', 'Switch Between Projects']
            ], self.custom_commands)
        if command == -1:
            return  # nothing selected; do nothing
        return [
            self.list_files,
            self.file_prompt,
            self.directory_prompt,
            partial(self.show_collection, 'recent'),
            partial(self.show_collection, 'favorites'),
            self.show_projects
        ][command]()
    def show_projects(self):
        self.window.show_quick_panel(
            [['New Project', 'Create a New Project']] + [[p.name, p.match or 'Match Everything'] for p in projects],
            self.change_project
        )
    def change_project(self, project_number):
        """Switch to the selected project (index 0 creates a new one)."""
        global current_project
        if project_number < 0:
            return
        if project_number == 0:
            self.create_project()
        else:
            # Move the chosen project to the front so it becomes the default.
            current_project = projects[project_number - 1]
            projects.remove(current_project)
            projects.insert(0, current_project)
            self.update_projects()
            IndexBuilder(current_project).run()
            self.show_collection('index')
    def create_project(self, response=None):
        """Prompt for 'name[:match]' and register the new project."""
        global current_project
        if response == None:
            self.prompt("Create a New Project", self.create_project, '')
            return
        project = create_project(response)
        if project.name in [x.name for x in projects]:
            sublime.status_message(project.name + " is already a project")
        else:
            projects.insert(0, project)
            self.update_projects()
            current_project = project
            sublime.status_message("Project '%s' created!" % project.name)
            self.show_collection('index')
    def update_projects(self):
        # Persist the project list as 'name:match' lines.
        write_list_to_file(
            ["%s:%s" % (p.name, p.match) for p in projects],
            collections['projects']
        )
    def show_collection(self, collection):
        '''
        Show all files in one of our collections, favorites, recent, etc.
        File paths are shortened as much as possible without losing the
        ability to search them effectively:
            1) If a file name is unique we show it in the form:
                Filename
                Shortened Path
            2) If there are multiple files with the same name we find the shortest
               unique path for each file and display that in the form:
                Filename [short unique path]
                Shortened Path
        This allows you to refine your search in reverse order, instead of having
        to back track.
        '''
        # (leftover debug print of the collection path removed)
        self.files = get_list_from_file(collections[collection] % current_project.name)
        search_names = [format_path_for_search(p) for p in get_unique_suffixes(self.files)]
        short_paths = [shorten_path(p, file_path_depth[collection]) for p in self.files]
        self.window.show_quick_panel(
            map(list, zip(search_names, short_paths)),
            self.open_collection
        )
    def open_collection(self, file_number):
        if file_number < 0:
            return
        path = self.files[file_number]
        self.open_path(path)
    def file_prompt(self):
        self.prompt("Create a New File", self.open_path)
    def open_path(self, path):
        """Open a file in the editor, or browse into it if it is a directory."""
        if not path:
            return
        if os.path.isdir(path):
            self.change_directory(path)
        else:
            self.window.open_file(path)
    def delete_file(self, confirm = -1):
        """Prompt for confirmation, then delete the current file on 'yes'."""
        if confirm == -1:
            self.prompt(
                "Delete File: %s [blank/\"no\" to cancel]" % self.file_name,
                self.delete_file,
                ''
            )
        elif confirm.lower() == 'yes':
            if os.path.exists(self.file_name):
                os.remove(self.file_name)
            else:
                raise SublimeException("Path %s is not a file" % self.file_name)
    def directory_prompt(self):
        self.prompt("Create a New Directory", self.create_directory)
    def create_directory(self, path):
        if os.path.exists(path):
            sublime.status_message(path + " already exists...")
        else:
            os.makedirs(path)
            sublime.status_message(path + " succesfully created...")
            self.change_directory(path)
    def prompt(self, title, follow, path = "default"):
        """Show an input panel pre-filled with the current directory by default."""
        if path == "default":
            path = os.path.join(self.path, '')
        self.window.show_input_panel(
            title,
            path,
            follow,
            None,
            None
        )
    def go_back_directory(self):
        parent_path = os.path.split(self.path)[0]
        self.change_directory(parent_path)
    def change_directory(self, new_path):
        # Remember the directory globally so the next invocation starts here.
        global path
        self.path = new_path
        path = self.path
        self.list_files()
# Add the file path to the per-project 'recent' collection every time it is opened/closed.
class RecentCommand(sublime_plugin.EventListener):
    def on_close(self, view):
        self.update_recent(view.file_name())
    def on_load(self, view):
        self.update_recent(view.file_name())
    def update_recent(self, file_name):
        """Move ``file_name`` to the front of its project's recent-files collection."""
        # Unsaved buffers have no file name; bail out BEFORE get_collection,
        # which would crash doing ``project.match in None``.
        if not file_name:
            return
        collection_name = self.get_collection(file_name)
        paths = get_list_from_file(collection_name, number_of_recent_files)
        recent = [path for path in paths if path.lower() != file_name.lower()]
        recent.insert(0, file_name)
        write_list_to_file(recent, collection_name)
    def get_collection(self, file_name):
        """Return the recent-collection path of the first project whose match prefix hits."""
        for project in projects:
            if project.match in file_name:
                project_name = project.name
                break
        else:
            project_name = 'default'
        return collections['recent'] % project_name
# Add the current file, or its parent folder, to Favorites.
class FavoriteCommand(sublime_plugin.TextCommand):
    def run(self, edit, command = False):
        file_name = self.view.file_name()
        if not file_name:
            return
        if command == "parent_folder":
            file_name = os.path.dirname(file_name)
        self.add_to_favorites(file_name)
    def add_to_favorites(self, file_name):
        """Prepend ``file_name`` to the current project's favorites collection."""
        # The filename pattern takes the project *name*; interpolating the whole
        # Project tuple (as before) raised TypeError at format time.
        collection_name = collections['favorites'] % current_project.name
        favorites = get_list_from_file(collection_name)
        if file_name in favorites:
            sublime.status_message(file_name + " already in favorites...")
        else:
            favorites.insert(0, file_name)
            write_list_to_file(favorites, collection_name)
            sublime.status_message(file_name + " added to favorites...")
def shorten_path(path, depth = 2):
    '''
    Shorten a file path to only show the lowest ``depth`` number of folders,
    prefixing the result with '..' when the path was truncated, e.g.
        shorten_path('/a/b/c/d/file.py', 2) => '../c/d/file.py'
    A depth of 0 returns the path untouched.
    '''
    if not depth:
        return path
    remainder, leaf = os.path.split(path)
    parts = [leaf]
    truncated = True
    for _ in range(depth):
        remainder, segment = os.path.split(remainder)
        if segment:
            parts.append(segment)
        elif remainder:
            # Hit the filesystem root: keep it and stop (join resets on it anyway).
            parts.append(remainder)
            truncated = False
            break
        else:
            # Ran out of components before reaching the requested depth.
            truncated = False
            break
    if truncated:
        parts.append('..')
    return os.path.join(*reversed(parts))
def create_display_names(paths, depth = 0):
    """Build [display name, shortened path] pairs for the quick panel.

    Directories get directory_prefix so they are easy to filter for while
    walking the tree. (os.path.isdir per entry could be slow in huge
    directories; checking for a file extension would be quicker but imperfect.)
    """
    display_names = []
    for full_path in paths:
        marker = directory_prefix if os.path.isdir(full_path) else ""
        parent, leaf = os.path.split(full_path)
        title = marker + (leaf or 'Home (%s)' % full_path)
        display_names.append([title, shorten_path(parent, depth)])
    return display_names
def get_unique_suffixes(paths):
    '''
    Find the shortest unique path for every file in a list
    Given a list of file paths this returns a list of the shortest
    unique suffix to represent each file, e.g.
        Input:  a/b/c/foo.py, x/y/z/foo.py, a/b/z/foo.py
        Output: c/foo.py, y/z/foo.py, b/z/foo.py
    '''
    # Seed the worker with {'suffix': remaining-prefix, 'path': full-path} pairs.
    p = [{'suffix': i, 'path': i} for i in set(paths)]
    suffixes = _get_unique_suffixes(p, '')
    # Map each full path back to its computed unique suffix, preserving input order.
    path_map = dict((i[1], i[0]) for i in suffixes)
    return [path_map[path] or path for path in paths]
def _get_unique_suffixes(paths, end):
    """Recursive worker for get_unique_suffixes.

    ``paths`` holds {'suffix': unconsumed-prefix, 'path': full-path} dicts;
    ``end`` is the suffix accumulated so far. Returns [suffix, full_path] pairs.
    """
    output, suffixes = [], defaultdict(list)
    # Group entries by their last (rightmost unconsumed) path component.
    for path in paths:
        tail, head = os.path.split(path['suffix'])
        suffixes[head].append({
            'suffix': tail,
            'path': path['path']
        })
    for suffix, values in suffixes.items():
        unique_values = []
        for value in values:
            if value['path'] == value['suffix']:
                # Suffix consumed nothing yet equals the full path: emit as-is.
                # NOTE(review): dict.values() order is arbitrary on Python 2, so
                # this may emit [path, suffix] instead of [suffix, path] — verify.
                output.append(value.values())
            else:
                unique_values.append(value)
        if len(unique_values) == 1:
            # Unique within this group: the accumulated suffix is enough.
            short_path = os.path.join(suffix, end) if end else suffix
            output.append([short_path, unique_values.pop()['path']])
        else:
            # Still ambiguous: recurse with one more component consumed.
            new_end = os.path.join(suffix, end) if end else suffix
            output += _get_unique_suffixes(unique_values, new_end)
    return output
def format_path_for_search(path):
    '''
    Format a string for search

    To make it easier to search a string we format it:
        folder_a/folder_b/filename -->
        filename [../folder_a/folder_b/filename]
    or if there are no folders we just return the full path.
    Searching happens left to right, so putting the filename first lets you
    refine by name and then disambiguate with the path without backtracking.
    '''
    parent, leaf = os.path.split(path)
    # Entries without a '.' are treated as directories and get the prefix.
    prefix = "" if "." in path else directory_prefix
    if parent and leaf:
        return "%s%s [%s]" % (prefix, leaf, os.path.join('..', path))
    if parent:
        return 'Home (%s)' % path
    return prefix + path
"repo_name": "toddsifleet/sublime_open",
"path": "sublime_open.py",
"copies": "1",
"size": "18027",
"license": "mit",
"hash": 3598291995291298300,
"line_mean": 33.0150943396,
"line_max": 116,
"alpha_frac": 0.5887834914,
"autogenerated": false,
"ratio": 4.008672448298866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5097455939698866,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import defaultdict
from time import time, sleep
from itertools import count as itercount
try:
    from itertools import imap
except ImportError:
    # In python3, map is now lazy.
    imap = map
try:
    from itertools import ifilter
except ImportError:
    # In python3, filter is now lazy.
    ifilter = filter
try:
    # Python2: native str is bytes, unicode is text.
    rawtype = str
    safetype = unicode
    raw2str = lambda r: r.decode('utf-8')
    str2raw = lambda s: s.encode('utf-8')
except:
    # Python3 (the ``unicode`` lookup above raises NameError): str is text, bytes is raw.
    rawtype = bytes
    safetype = str
    raw2str = lambda r: r.decode('utf-8')
    str2raw = lambda s: s.encode('utf-8')
def ingest(d):
    """Coerce *d* to safetype (unicode on py2, str on py3).

    Raw byte strings are decoded as UTF-8; text passes through untouched.
    Anything else raises TypeError.
    """
    if isinstance(d, safetype):
        return d
    if isinstance(d, rawtype):
        return raw2str(d)
    raise TypeError("Can't ingest data of type %s" % type(d))
def egest(s):
    """Coerce *s* to rawtype (str on py2, bytes on py3).

    Text is encoded as UTF-8; already-raw data passes through untouched.
    Anything else raises TypeError.
    """
    if isinstance(s, safetype):
        return str2raw(s)
    if isinstance(s, rawtype):  # On python 2 str is bytes.
        return s
    raise TypeError("Can't egest data of type %s" % type(s))
def empty_default(xs, default):
    """Materialize *xs* as a list; if it is empty, return list(default) instead."""
    materialized = list(xs)
    return materialized if materialized else list(default)
def compose(f, g):
    """Function composition: compose(f, g)(x) == f(g(x))."""
    def composed(*args, **kwargs):
        return f(g(*args, **kwargs))
    return composed
def composeFunctor(f, g):
    """Single-argument composition: composeFunctor(f, g)(x) == f(g(x))."""
    def composed(x):
        return f(g(x))
    return composed
def concat(l):
    """Lazily flatten one level of nesting: [[a]] -> a, a, ..."""
    for inner in l:
        for element in inner:
            yield element
def concatMap(func):
    """Return a function that maps *func* over a collection, then flattens once."""
    mapper = partial(imap, func)
    return compose(concat, mapper)
def fmap(func):
    """Curried lazy map: fmap(f)(xs) iterates f applied to each element of xs."""
    def apply_to_all(collection):
        return imap(func, collection)
    return apply_to_all
def ffilter(func):
    """Curried lazy filter: ffilter(p)(xs) iterates the elements where p holds."""
    def keep_matching(collection):
        return ifilter(func, collection)
    return keep_matching
def identity(x):
    """The identity function: return the argument untouched."""
    return x
def groupby(func, l):
    """Partition *l* by key function *func*; returns a list of (key, [items])."""
    buckets = defaultdict(list)
    for item in l:
        buckets[func(item)].append(item)
    return list(buckets.items())
def take(count):
    """Curried prefix: take(n)(xs) lazily yields at most n items of xs."""
    def taker(collection):
        iterator = iter(collection)
        for _ in range(count):
            try:
                yield next(iterator)
            except StopIteration:
                return
    return taker
def consume(collection):
    """Exhaust an iterable for its side effects, discarding every item."""
    iterator = iter(collection)
    while True:
        try:
            next(iterator)
        except StopIteration:
            return
def uniq(l):
    """Lazily drop duplicates, keeping the first occurrence order."""
    seen = set()
    for item in l:
        if item not in seen:
            seen.add(item)
            yield item
def irange(start, increment):
    """Endless arithmetic progression: start, start+increment, ..."""
    value = start
    while True:
        yield value
        value += increment
def invert(v):
    """Logical negation of *v*'s truthiness."""
    return not v
def count(iterator):
    """Return the number of items in *iterator*, consuming it.

    ``len()`` is not used because arbitrary iterators and generators have
    no length; counting them requires exhausting them (this answers the
    old ``#TODO why not len?``).
    """
    return sum(1 for _ in iterator)
def uncurry(func):
    """Wraps func so that the first arg is expanded into list args."""
    return lambda list_args, **kwargs: func(*list_args, **kwargs)
def curry(func):
    """Wraps func so that a series of args are collected into a single tuple arg."""
    return lambda *args, **kwargs: func(args, **kwargs)
def identify(func):
    """Wrap func so that it is called for its side effect and the input is returned."""
    def passthrough(arg):
        func(arg)
        return arg
    return passthrough
def pipeline(*funcs):
    """Left-to-right composition: pipeline(f, g)(x) == g(f(x)).

    With a single function, that function is returned unchanged; with no
    functions at all, a lazy identity map over collections is returned.
    """
    if not funcs:  # no funcs at all.
        return fmap(identity)
    if len(funcs) == 1:  # single func is the final function.
        return funcs[0]
    def piped(*args, **kwargs):
        result = funcs[0](*args, **kwargs)
        for stage in funcs[1:]:
            result = stage(result)
        return result
    return piped
def zipFrom(a, bs):
    """Pair a constant with every element: a, [b] -> (a, b) tuples (lazy)."""
    for item in bs:
        yield (a, item)
def dot(fn):
    """Reverses the dot syntax (object.attr): dot('attr')(obj) == obj.attr."""
    return lambda obj: getattr(obj, fn)
def nth(n):
    """Build an accessor: nth(i)(seq) == seq[i]."""
    return lambda seq: seq[n]
# Common accessors for the first two elements.
first = nth(0)
second = nth(1)
def maybe(default, v):
    """Return *v* when truthy, otherwise *default*."""
    return v or default
def every(predicate, coll):
    """Return True when *predicate* holds for every item of *coll*.

    Vacuously True for an empty collection; short-circuits on the first
    failing item, matching the original manual loop.  Uses the builtin
    ``all`` over a lazy generator instead of hand-rolling the loop.
    """
    return all(predicate(x) for x in coll)
def repeater(callback, period=0, max_tries=None, max_time=None, predicate = identity, catch_predicate = lambda e: False):
    """Build a retrying wrapper around *callback*.

    The wrapper re-invokes ``callback(*args, **kwargs)`` until
    ``predicate(result)`` is truthy (returns True), or until ``max_tries``
    attempts or ``max_time`` seconds have elapsed (returns False).
    Exceptions for which ``catch_predicate(e)`` is truthy count as a
    failed attempt; any other exception propagates.  Attempts are spaced
    at least ``period`` seconds apart.

    Note: the wrapper reports success as the boolean True, not the
    callback's return value.
    """
    def repeat_worker(*args, **kwargs):
        if max_time is not None:
            deadline = time() + max_time
        else:
            deadline = None
        if max_tries is None:
            r = itercount()  # unbounded attempt counter
        else:
            r = range(0, max_tries)
        for i in r:
            start_time = time()
            threw = False
            try:
                ret = callback(*args, **kwargs)
            except Exception as e:
                # An exception was caught, so we failed.
                if catch_predicate(e):
                    # This exception was expected. So we failed, but might need retry.
                    threw = True
                else:
                    # This exception was unexpected, lets re-throw.
                    raise
            if not threw and predicate(ret):
                # We didn't throw, and got a success! Exit.
                return True
            # Deadline is checked only between attempts; a long callback can
            # overrun max_time by the length of one attempt.
            if deadline is not None and time() > deadline:
                return False
            end_time = time()
            # Sleep whatever remains of `period` after this attempt's runtime.
            sleep_time = max(0.0, period - (end_time - start_time))
            sleep(sleep_time)
        # We fell through to here, fail.
        return False
    return repeat_worker
| {
"repo_name": "andrewguy9/farmfs",
"path": "farmfs/util.py",
"copies": "1",
"size": "5276",
"license": "mit",
"hash": 1065105555760289500,
"line_mean": 22.1403508772,
"line_max": 121,
"alpha_frac": 0.6231993935,
"autogenerated": false,
"ratio": 3.5103127079174983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9502517887398135,
"avg_score": 0.026198842803872673,
"num_lines": 228
} |
from functools import partial
from collections import defaultdict
import json
import warnings
from distutils.version import LooseVersion
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from ....utils import getargspec
from ..utils import _get_pyarrow_dtypes, _meta_from_dtypes
from ...utils import clear_known_categories
from ....core import flatten
from dask import delayed
from .utils import (
_parse_pandas_metadata,
_normalize_index_columns,
Engine,
_analyze_paths,
)
# Feature gates keyed on the installed pyarrow version (both require
# pyarrow >= 0.15.0): preserving the pandas index through
# Table.from_pandas, and per-field schema overrides.
preserve_ind_supported = pa.__version__ >= LooseVersion("0.15.0")
schema_field_supported = pa.__version__ >= LooseVersion("0.15.0")
#
# Private Helper Functions
#
def _append_row_groups(metadata, md):
    """Append the row groups of ``md`` onto the ``metadata`` object in place.

    Translates pyarrow's "requires equal schemas" RuntimeError into an
    actionable message pointing at the ``schema`` argument of
    ``to_parquet``; any other RuntimeError is re-raised unchanged.
    """
    try:
        metadata.append_row_groups(md)
    except RuntimeError as err:
        if "requires equal schemas" in str(err):
            raise RuntimeError(
                "Schemas are inconsistent, try using "
                '`to_parquet(..., schema="infer")`, or pass an explicit '
                "pyarrow schema. Such as "
                '`to_parquet(..., schema={"column1": pa.string()})`'
            ) from err
        else:
            raise err
def _write_partitioned(
    table, root_path, filename, partition_cols, fs, index_cols=(), **kwargs
):
    """Write table to a partitioned dataset with pyarrow.

    One ``<root_path>/<col>=<val>/.../<filename>`` file is written per
    unique combination of ``partition_cols`` values; the collected
    row-group metadata objects (with relative file paths set) are
    returned as a list.

    Logic copied from pyarrow.parquet.
    (arrow/python/pyarrow/parquet.py::write_to_dataset)
    TODO: Remove this in favor of pyarrow's `write_to_dataset`
    once ARROW-8244 is addressed.
    """
    fs.mkdirs(root_path, exist_ok=True)
    df = table.to_pandas(ignore_metadata=True)
    index_cols = list(index_cols) if index_cols else []
    preserve_index = False
    if index_cols and preserve_ind_supported:
        # Restore the index so it survives the pandas->arrow round trip.
        df.set_index(index_cols, inplace=True)
        preserve_index = True
    partition_keys = [df[col] for col in partition_cols]
    data_df = df.drop(partition_cols, axis="columns")
    data_cols = df.columns.drop(partition_cols)
    if len(data_cols) == 0 and not index_cols:
        raise ValueError("No data left to save outside partition columns")
    # Drop the partition columns from the schema used for the sub-tables.
    subschema = table.schema
    for col in table.schema.names:
        if col in partition_cols:
            subschema = subschema.remove(subschema.get_field_index(col))
    md_list = []
    for keys, subgroup in data_df.groupby(partition_keys):
        if not isinstance(keys, tuple):
            keys = (keys,)
        # Hive-style directory name, e.g. "year=2020/month=1".
        subdir = fs.sep.join(
            [
                "{colname}={value}".format(colname=name, value=val)
                for name, val in zip(partition_cols, keys)
            ]
        )
        subtable = pa.Table.from_pandas(
            subgroup, preserve_index=preserve_index, schema=subschema, safe=False
        )
        prefix = fs.sep.join([root_path, subdir])
        fs.mkdirs(prefix, exist_ok=True)
        full_path = fs.sep.join([prefix, filename])
        with fs.open(full_path, "wb") as f:
            pq.write_table(subtable, f, metadata_collector=md_list, **kwargs)
        # Record the path relative to root_path in the collected metadata.
        md_list[-1].set_file_path(fs.sep.join([subdir, filename]))
    return md_list
def _index_in_schema(index, schema):
if index and schema is not None:
# Make sure all index columns are in user-defined schema
return len(set(index).intersection(schema.names)) == len(index)
elif index:
return True # Schema is not user-specified, all good
else:
return False # No index to check
def _get_dataset_object(paths, fs, filters, dataset_kwargs):
    """Generate a ParquetDataset object.

    Returns a ``(dataset, base, fns)`` tuple, where ``base`` is the common
    root directory and ``fns`` the file names relative to it (``[None]``
    for a single-file dataset).
    """
    kwargs = dataset_kwargs.copy()
    if "validate_schema" not in kwargs:
        kwargs["validate_schema"] = False
    if len(paths) > 1:
        # This is a list of files
        base, fns = _analyze_paths(paths, fs)
        proxy_metadata = None
        if "_metadata" in fns:
            # We have a _metadata file. PyArrow cannot handle
            # "_metadata" when `paths` is a list. So, we should
            # open "_metadata" separately.
            paths.remove(fs.sep.join([base, "_metadata"]))
            fns.remove("_metadata")
            with fs.open(fs.sep.join([base, "_metadata"]), mode="rb") as fil:
                proxy_metadata = pq.ParquetFile(fil).metadata
        # Create our dataset from the list of data files.
        # Note #1: that this will not parse all the files (yet)
        # Note #2: Cannot pass filters for legacy pyarrow API (see issue#6512).
        # We can handle partitions + filtering for list input after
        # adopting new pyarrow.dataset API.
        dataset = pq.ParquetDataset(paths, filesystem=fs, **kwargs)
        if proxy_metadata:
            dataset.metadata = proxy_metadata
    elif fs.isdir(paths[0]):
        # This is a directory. We can let pyarrow do its thing.
        # Note: In the future, it may be best to avoid listing the
        # directory if we can get away with checking for the
        # existence of _metadata. Listing may be much more
        # expensive in storage systems like S3.
        allpaths = fs.glob(paths[0] + fs.sep + "*")
        base, fns = _analyze_paths(allpaths, fs)
        dataset = pq.ParquetDataset(paths[0], filesystem=fs, filters=filters, **kwargs)
    else:
        # This is a single file. No danger in gathering statistics
        # and/or splitting row-groups without a "_metadata" file
        base = paths[0]
        fns = [None]
        dataset = pq.ParquetDataset(paths[0], filesystem=fs, **kwargs)
    return dataset, base, fns
def _gather_metadata(
    paths, fs, split_row_groups, gather_statistics, filters, dataset_kwargs
):
    """Gather parquet metadata into a single data structure.

    Use _metadata or aggregate footer metadata into a single
    object. Also, collect other information necessary for
    parquet-to-ddf mapping (e.g. schema, partition_info).

    Returns a 6-tuple: ``(schema, metadata, base_path, partition_info,
    split_row_groups, gather_statistics)``.  ``metadata`` may be a real
    FileMetaData object, or just a list of file paths when neither
    statistics nor row-group splitting is needed.  The ``None`` defaults
    of ``split_row_groups`` / ``gather_statistics`` are resolved here
    based on _metadata availability.
    """
    # Step 1: Create a ParquetDataset object
    dataset, base, fns = _get_dataset_object(paths, fs, filters, dataset_kwargs)
    if fns == [None]:
        # This is a single file. No danger in gathering statistics
        # and/or splitting row-groups without a "_metadata" file
        if gather_statistics is None:
            gather_statistics = True
        if split_row_groups is None:
            split_row_groups = True
    # Step 2: Construct necessary (parquet) partitioning information
    partition_info = {"partitions": None, "partition_keys": {}, "partition_names": []}
    fn_partitioned = False
    if dataset.partitions is not None:
        fn_partitioned = True
        partition_info["partition_names"] = [
            n for n in dataset.partitions.partition_names if n is not None
        ]
        partition_info["partitions"] = dataset.partitions
        for piece in dataset.pieces:
            partition_info["partition_keys"][piece.path] = piece.partition_keys
    # Step 3: Construct a single `metadata` object. We can
    # directly use dataset.metadata if it is available.
    # Otherwise, if `gather_statistics` or `split_row_groups`,
    # we need to gather the footer metadata manually
    metadata = None
    if dataset.metadata:
        # We have a _metadata file.
        # PyArrow already did the work for us
        schema = dataset.metadata.schema.to_arrow_schema()
        if gather_statistics is None:
            gather_statistics = True
        if split_row_groups is None:
            split_row_groups = True
        return (
            schema,
            dataset.metadata,
            base,
            partition_info,
            split_row_groups,
            gather_statistics,
        )
    else:
        # No _metadata file.
        # May need to collect footer metadata manually
        if dataset.schema is not None:
            schema = dataset.schema.to_arrow_schema()
        else:
            schema = None
        if gather_statistics is None:
            gather_statistics = False
        if split_row_groups is None:
            split_row_groups = False
        metadata = None
        if not (split_row_groups or gather_statistics):
            # Don't need to construct real metadata if
            # we are not gathering statistics or splitting
            # by row-group
            metadata = [p.path for p in dataset.pieces]
            if schema is None:
                schema = dataset.pieces[0].get_metadata().schema.to_arrow_schema()
            return (
                schema,
                metadata,
                base,
                partition_info,
                split_row_groups,
                gather_statistics,
            )
        # We have not detected a _metadata file, and the user has specified
        # that they want to split by row-group and/or gather statistics.
        # This is the only case where we MUST scan all files to collect
        # metadata.
        for piece, fn in zip(dataset.pieces, fns):
            md = piece.get_metadata()
            if schema is None:
                schema = md.schema.to_arrow_schema()
            if fn_partitioned:
                md.set_file_path(piece.path.replace(base + fs.sep, ""))
            elif fn:
                md.set_file_path(fn)
            if metadata:
                _append_row_groups(metadata, md)
            else:
                metadata = md
        return (
            schema,
            metadata,
            base,
            partition_info,
            split_row_groups,
            gather_statistics,
        )
def _generate_dd_meta(schema, index, categories, partition_info):
    """Build the empty pandas "meta" DataFrame for the dask collection.

    Resolves column/index names from the arrow schema (honoring embedded
    pandas metadata when present) and returns
    ``(meta, index_cols, categories, index)``.
    """
    partition_obj = partition_info["partitions"]
    partitions = partition_info["partition_names"]
    columns = None
    has_pandas_metadata = schema.metadata is not None and b"pandas" in schema.metadata
    if has_pandas_metadata:
        pandas_metadata = json.loads(schema.metadata[b"pandas"].decode("utf8"))
        (
            index_names,
            column_names,
            storage_name_mapping,
            column_index_names,
        ) = _parse_pandas_metadata(pandas_metadata)
        if categories is None:
            # Infer categorical columns from the pandas metadata.
            categories = []
            for col in pandas_metadata["columns"]:
                if (col["pandas_type"] == "categorical") and (
                    col["name"] not in categories
                ):
                    categories.append(col["name"])
    else:
        # No pandas metadata implies no index, unless selected by the user
        index_names = []
        column_names = schema.names
        storage_name_mapping = {k: k for k in column_names}
        column_index_names = [None]
    if index is None and index_names:
        index = index_names
    if set(column_names).intersection(partitions):
        raise ValueError(
            "partition(s) should not exist in columns.\n"
            "categories: {} | partitions: {}".format(column_names, partitions)
        )
    column_names, index_names = _normalize_index_columns(
        columns, column_names + partitions, index, index_names
    )
    all_columns = index_names + column_names
    # Check that categories are included in columns
    if categories and not set(categories).intersection(all_columns):
        raise ValueError(
            "categories not in available columns.\n"
            "categories: {} | columns: {}".format(categories, list(all_columns))
        )
    dtypes = _get_pyarrow_dtypes(schema, categories)
    dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}
    index_cols = index or ()
    meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names)
    if categories:
        # Make sure all categories are set to "unknown".
        # Cannot include index names in the `cols` argument.
        meta = clear_known_categories(
            meta, cols=[c for c in categories if c not in meta.index.names]
        )
    if partition_obj:
        # Hive-partitioned columns become categoricals keyed on the
        # directory values.
        for partition in partition_obj:
            if isinstance(index, list) and partition.name == index[0]:
                # Index from directory structure
                meta.index = pd.CategoricalIndex(
                    categories=partition.keys, name=index[0]
                )
            elif partition.name == meta.index.name:
                # Index created from a categorical column
                meta.index = pd.CategoricalIndex(
                    categories=partition.keys, name=meta.index.name
                )
            elif partition.name in meta.columns:
                meta[partition.name] = pd.Series(
                    pd.Categorical(categories=partition.keys, values=[]),
                    index=meta.index,
                )
    return meta, index_cols, categories, index
def _aggregate_stats(
    file_path, file_row_group_stats, file_row_group_column_stats, stat_col_indices
):
    """Utility to aggregate the statistics for N row-groups
    into a single dictionary.

    ``file_row_group_column_stats`` holds flat (min, max, null_count)
    triples per tracked column, in ``stat_col_indices`` order.
    """
    if len(file_row_group_stats) < 1:
        # Empty statistics
        return {}
    elif len(file_row_group_column_stats) == 0:
        assert len(file_row_group_stats) == 1
        return file_row_group_stats[0]
    else:
        # Note: It would be better to avoid df_rgs and df_cols
        # construction altogether. It makes it fast to aggregate
        # the statistics for many row groups, but isn't
        # worthwhile for a small number of row groups.
        if len(file_row_group_stats) > 1:
            df_rgs = pd.DataFrame(file_row_group_stats)
            s = {
                "file_path_0": file_path,
                "num-rows": df_rgs["num-rows"].sum(),
                "total_byte_size": df_rgs["total_byte_size"].sum(),
                "columns": [],
            }
        else:
            s = {
                "file_path_0": file_path,
                "num-rows": file_row_group_stats[0]["num-rows"],
                "total_byte_size": file_row_group_stats[0]["total_byte_size"],
                "columns": [],
            }
        df_cols = None
        if len(file_row_group_column_stats) > 1:
            df_cols = pd.DataFrame(file_row_group_column_stats)
        for ind, name in enumerate(stat_col_indices):
            i = ind * 3  # stride over the flat (min, max, null_count) triples
            if df_cols is None:
                s["columns"].append(
                    {
                        "name": name,
                        "min": file_row_group_column_stats[0][i],
                        "max": file_row_group_column_stats[0][i + 1],
                        "null_count": file_row_group_column_stats[0][i + 2],
                    }
                )
            else:
                s["columns"].append(
                    {
                        "name": name,
                        "min": df_cols.iloc[:, i].min(),
                        "max": df_cols.iloc[:, i + 1].max(),
                        "null_count": df_cols.iloc[:, i + 2].sum(),
                    }
                )
        return s
def _process_metadata(
    metadata, single_rg_parts, gather_statistics, stat_col_indices, no_filters
):
    """Convert a FileMetaData object into simple per-file dictionaries.

    Returns ``(file_row_groups, file_row_group_stats,
    file_row_group_column_stats, gather_statistics)``.  Statistics
    collection may be abandoned mid-scan (dicts reset to empty and the
    returned flag set False) when they are only wanted for divisions and
    a column turns out to be unsorted or all-null.
    """
    # Get the number of row groups per file
    file_row_groups = defaultdict(list)
    file_row_group_stats = defaultdict(list)
    file_row_group_column_stats = defaultdict(list)
    cmax_last = {}
    for rg in range(metadata.num_row_groups):
        row_group = metadata.row_group(rg)
        fpath = row_group.column(0).file_path
        if fpath is None:
            raise ValueError(
                "Global metadata structure is missing a file_path string. "
                "If the dataset includes a _metadata file, that file may "
                "have one or more missing file_path fields."
            )
        if file_row_groups[fpath]:
            file_row_groups[fpath].append(file_row_groups[fpath][-1] + 1)
        else:
            file_row_groups[fpath].append(0)
        if gather_statistics:
            if single_rg_parts:
                s = {
                    "file_path_0": fpath,
                    "num-rows": row_group.num_rows,
                    "total_byte_size": row_group.total_byte_size,
                    "columns": [],
                }
            else:
                s = {
                    "num-rows": row_group.num_rows,
                    "total_byte_size": row_group.total_byte_size,
                }
            cstats = []
            for name, i in stat_col_indices.items():
                column = row_group.column(i)
                if column.statistics:
                    cmin = column.statistics.min
                    cmax = column.statistics.max
                    cnull = column.statistics.null_count
                    last = cmax_last.get(name, None)
                    if no_filters:
                        # Only think about bailing if we don't need
                        # stats for filtering
                        if cmin is None or (last and cmin < last):
                            # We are collecting statistics for divisions
                            # only (no filters) - Column isn't sorted, or
                            # we have an all-null partition, so lets bail.
                            #
                            # Note: This assumes ascending order.
                            #
                            gather_statistics = False
                            file_row_group_stats = {}
                            file_row_group_column_stats = {}
                            break
                    if single_rg_parts:
                        to_ts = column.statistics.logical_type.type == "TIMESTAMP"
                        s["columns"].append(
                            {
                                "name": name,
                                "min": cmin if not to_ts else pd.Timestamp(cmin),
                                "max": cmax if not to_ts else pd.Timestamp(cmax),
                                "null_count": cnull,
                            }
                        )
                    else:
                        cstats += [cmin, cmax, cnull]
                    cmax_last[name] = cmax
                else:
                    if no_filters and column.num_values > 0:
                        # We are collecting statistics for divisions
                        # only (no filters) - Lets bail.
                        gather_statistics = False
                        file_row_group_stats = {}
                        file_row_group_column_stats = {}
                        break
                    if single_rg_parts:
                        s["columns"].append({"name": name})
                    else:
                        cstats += [None, None, None]
            if gather_statistics:
                file_row_group_stats[fpath].append(s)
                if not single_rg_parts:
                    file_row_group_column_stats[fpath].append(tuple(cstats))
    return (
        file_row_groups,
        file_row_group_stats,
        file_row_group_column_stats,
        gather_statistics,
    )
def _construct_parts(
    fs,
    metadata,
    schema,
    filters,
    index_cols,
    data_path,
    partition_info,
    categories,
    split_row_groups,
    gather_statistics,
):
    """Construct ``parts`` for ddf construction

    Use metadata (along with other data) to define a tuple
    for each ddf partition. Also gather statistics if
    ``gather_statistics=True``, and other criteria is met.

    Returns ``(parts, stats)`` where each part is a dict with a
    ``"piece"`` (path, row-group list, partition keys) and the reader
    ``"kwargs"``.
    """
    parts = []
    stats = []
    partition_keys = partition_info["partition_keys"]
    partition_obj = partition_info["partitions"]
    # Check if `metadata` is just a list of paths
    # (not splitting by row-group or collecting statistics)
    if isinstance(metadata, list) and isinstance(metadata[0], str):
        for full_path in metadata:
            part = {
                "piece": (full_path, None, partition_keys.get(full_path, None)),
                "kwargs": {"partitions": partition_obj, "categories": categories},
            }
            parts.append(part)
        return parts, stats
    # Determine which columns need statistics
    flat_filters = (
        set(flatten(tuple(flatten(filters, container=list)), container=tuple))
        if filters
        else []
    )
    stat_col_indices = {}
    for i, name in enumerate(schema.names):
        if name in index_cols or name in flat_filters:
            stat_col_indices[name] = i
    stat_cols = list(stat_col_indices.keys())
    gather_statistics = gather_statistics and len(stat_cols) > 0
    # Convert metadata into simple dictionary structures
    (
        file_row_groups,
        file_row_group_stats,
        file_row_group_column_stats,
        gather_statistics,
    ) = _process_metadata(
        metadata,
        int(split_row_groups) == 1,
        gather_statistics,
        stat_col_indices,
        flat_filters == [],
    )
    if split_row_groups:
        # Create parts from each file,
        # limiting the number of row_groups in each piece
        split_row_groups = int(split_row_groups)
        for filename, row_groups in file_row_groups.items():
            row_group_count = len(row_groups)
            for i in range(0, row_group_count, split_row_groups):
                i_end = i + split_row_groups
                rg_list = row_groups[i:i_end]
                full_path = (
                    fs.sep.join([data_path, filename])
                    if filename != ""
                    else data_path  # This is a single file
                )
                pkeys = partition_keys.get(full_path, None)
                if partition_obj and pkeys is None:
                    continue  # This partition was filtered
                part = {
                    "piece": (full_path, rg_list, pkeys),
                    "kwargs": {
                        "partitions": partition_obj,
                        "categories": categories,
                        "filters": filters,
                        "schema": schema,
                    },
                }
                parts.append(part)
                if gather_statistics:
                    stat = _aggregate_stats(
                        filename,
                        file_row_group_stats[filename][i:i_end],
                        file_row_group_column_stats[filename][i:i_end],
                        stat_col_indices,
                    )
                    stats.append(stat)
    else:
        # One part per file (all of its row groups together).
        for filename, row_groups in file_row_groups.items():
            full_path = (
                fs.sep.join([data_path, filename])
                if filename != ""
                else data_path  # This is a single file
            )
            pkeys = partition_keys.get(full_path, None)
            if partition_obj and pkeys is None:
                continue  # This partition was filtered
            rgs = None
            part = {
                "piece": (full_path, rgs, pkeys),
                "kwargs": {
                    "partitions": partition_obj,
                    "categories": categories,
                    "filters": filters,
                    "schema": schema,
                },
            }
            parts.append(part)
            if gather_statistics:
                stat = _aggregate_stats(
                    filename,
                    file_row_group_stats[filename],
                    file_row_group_column_stats[filename],
                    stat_col_indices,
                )
                stats.append(stat)
    return parts, stats
class ArrowEngine(Engine):
@classmethod
def read_metadata(
cls,
fs,
paths,
categories=None,
index=None,
gather_statistics=None,
filters=None,
split_row_groups=None,
**kwargs,
):
# Check if we are using pyarrow.dataset API
dataset_kwargs = kwargs.get("dataset", {})
# Gather necessary metadata information. This includes
# the schema and (parquet) partitioning information.
# This may also set split_row_groups and gather_statistics,
# depending on _metadata availability.
(
schema,
metadata,
base_path,
partition_info,
split_row_groups,
gather_statistics,
) = _gather_metadata(
paths, fs, split_row_groups, gather_statistics, filters, dataset_kwargs
)
# Process metadata to define `meta` and `index_cols`
meta, index_cols, categories, index = _generate_dd_meta(
schema, index, categories, partition_info
)
# Cannot gather_statistics if our `metadata` is a list
# of paths, or if we are building a multiindex (for now).
# We also don't "need" to gather statistics if we don't
# want to apply any filters or calculate divisions
if (isinstance(metadata, list) and isinstance(metadata[0], str)) or len(
index_cols
) > 1:
gather_statistics = False
elif filters is None and len(index_cols) == 0:
gather_statistics = False
# Make sure gather_statistics allows filtering
# (if filters are desired)
if filters:
# Filters may require us to gather statistics
if gather_statistics is False and partition_info["partition_names"]:
warnings.warn(
"Filtering with gather_statistics=False. "
"Only partition columns will be filtered correctly."
)
elif gather_statistics is False:
raise ValueError("Cannot apply filters with gather_statistics=False")
elif not gather_statistics:
gather_statistics = True
# Finally, construct our list of `parts`
# (and a corresponding list of statistics)
parts, stats = _construct_parts(
fs,
metadata,
schema,
filters,
index_cols,
base_path,
partition_info,
categories,
split_row_groups,
gather_statistics,
)
return (meta, stats, parts, index)
@classmethod
def read_partition(
cls,
fs,
piece,
columns,
index,
categories=(),
partitions=(),
filters=None,
schema=None,
**kwargs,
):
if isinstance(index, list):
for level in index:
# unclear if we can use set ops here. I think the order matters.
# Need the membership test to avoid duplicating index when
# we slice with `columns` later on.
if level not in columns:
columns.append(level)
# Ensure `columns` and `partitions` do not overlap
columns_and_parts = columns.copy()
if columns_and_parts and partitions:
for part_name in partitions.partition_names:
if part_name in columns:
columns.remove(part_name)
else:
columns_and_parts.append(part_name)
columns = columns or None
if isinstance(piece, str):
# `piece` is a file-path string
path = piece
row_group = None
partition_keys = None
else:
# `piece` contains (path, row_group, partition_keys)
(path, row_group, partition_keys) = piece
if not isinstance(row_group, list):
row_group = [row_group]
dfs = []
for rg in row_group:
piece = pq.ParquetDatasetPiece(
path,
row_group=rg,
partition_keys=partition_keys,
open_file_func=partial(fs.open, mode="rb"),
)
arrow_table = cls._parquet_piece_as_arrow(
piece, columns, partitions, **kwargs
)
df = cls._arrow_table_to_pandas(arrow_table, categories, **kwargs)
if len(row_group) > 1:
dfs.append(df)
if len(row_group) > 1:
df = pd.concat(dfs)
# Note that `to_pandas(ignore_metadata=False)` means
# pyarrow will use the pandas metadata to set the index.
index_in_columns_and_parts = set(df.index.names).issubset(
set(columns_and_parts)
)
if not index:
if index_in_columns_and_parts:
# User does not want to set index and a desired
# column/partition has been set to the index
df.reset_index(drop=False, inplace=True)
else:
# User does not want to set index and an
# "unwanted" column has been set to the index
df.reset_index(drop=True, inplace=True)
else:
if set(df.index.names) != set(index) and index_in_columns_and_parts:
# The wrong index has been set and it contains
# one or more desired columns/partitions
df.reset_index(drop=False, inplace=True)
elif index_in_columns_and_parts:
# The correct index has already been set
index = False
columns_and_parts = list(
set(columns_and_parts).difference(set(df.index.names))
)
df = df[list(columns_and_parts)]
if index:
df = df.set_index(index)
return df
@classmethod
def _arrow_table_to_pandas(
cls, arrow_table: pa.Table, categories, **kwargs
) -> pd.DataFrame:
_kwargs = kwargs.get("arrow_to_pandas", {})
_kwargs.update({"use_threads": False, "ignore_metadata": False})
return arrow_table.to_pandas(categories=categories, **_kwargs)
@classmethod
def _parquet_piece_as_arrow(
cls, piece: pq.ParquetDatasetPiece, columns, partitions, **kwargs
) -> pa.Table:
arrow_table = piece.read(
columns=columns,
partitions=partitions,
use_pandas_metadata=True,
use_threads=False,
**kwargs.get("read", {}),
)
return arrow_table
@staticmethod
def initialize_write(
df,
fs,
path,
append=False,
partition_on=None,
ignore_divisions=False,
division_info=None,
schema=None,
index_cols=None,
**kwargs,
):
# Infer schema if "infer"
# (also start with inferred schema if user passes a dict)
if schema == "infer" or isinstance(schema, dict):
# Start with schema from _meta_nonempty
_schema = pa.Schema.from_pandas(
df._meta_nonempty.set_index(index_cols)
if index_cols
else df._meta_nonempty
)
# Use dict to update our inferred schema
if isinstance(schema, dict):
schema = pa.schema(schema)
for name in schema.names:
i = _schema.get_field_index(name)
j = schema.get_field_index(name)
_schema = _schema.set(i, schema.field(j))
# If we have object columns, we need to sample partitions
# until we find non-null data for each column in `sample`
sample = [col for col in df.columns if df[col].dtype == "object"]
if schema_field_supported and sample and schema == "infer":
delayed_schema_from_pandas = delayed(pa.Schema.from_pandas)
for i in range(df.npartitions):
# Keep data on worker
_s = delayed_schema_from_pandas(
df[sample].to_delayed()[i]
).compute()
for name, typ in zip(_s.names, _s.types):
if typ != "null":
i = _schema.get_field_index(name)
j = _s.get_field_index(name)
_schema = _schema.set(i, _s.field(j))
sample.remove(name)
if not sample:
break
# Final (inferred) schema
schema = _schema
dataset = fmd = None
i_offset = 0
if append and division_info is None:
ignore_divisions = True
fs.mkdirs(path, exist_ok=True)
if append:
try:
# Allow append if the dataset exists.
# Also need dataset.metadata object if
# ignore_divisions is False (to check divisions)
dataset = pq.ParquetDataset(path, filesystem=fs)
if not dataset.metadata and not ignore_divisions:
# TODO: Be more flexible about existing metadata.
raise NotImplementedError(
"_metadata file needed to `append` "
"with `engine='pyarrow'` "
"unless `ignore_divisions` is `True`"
)
fmd = dataset.metadata
except (IOError, ValueError, IndexError):
# Original dataset does not exist - cannot append
append = False
if append:
names = dataset.metadata.schema.names
has_pandas_metadata = (
dataset.schema.to_arrow_schema().metadata is not None
and b"pandas" in dataset.schema.to_arrow_schema().metadata
)
if has_pandas_metadata:
pandas_metadata = json.loads(
dataset.schema.to_arrow_schema().metadata[b"pandas"].decode("utf8")
)
categories = [
c["name"]
for c in pandas_metadata["columns"]
if c["pandas_type"] == "categorical"
]
else:
categories = None
dtypes = _get_pyarrow_dtypes(dataset.schema.to_arrow_schema(), categories)
if set(names) != set(df.columns) - set(partition_on):
raise ValueError(
"Appended columns not the same.\n"
"Previous: {} | New: {}".format(names, list(df.columns))
)
elif (pd.Series(dtypes).loc[names] != df[names].dtypes).any():
# TODO Coerce values for compatible but different dtypes
raise ValueError(
"Appended dtypes differ.\n{}".format(
set(dtypes.items()) ^ set(df.dtypes.iteritems())
)
)
i_offset = len(dataset.pieces)
if division_info["name"] not in names:
ignore_divisions = True
if not ignore_divisions:
old_end = None
row_groups = [
dataset.metadata.row_group(i)
for i in range(dataset.metadata.num_row_groups)
]
for row_group in row_groups:
for i, name in enumerate(names):
if name != division_info["name"]:
continue
column = row_group.column(i)
if column.statistics:
if not old_end:
old_end = column.statistics.max
else:
old_end = max(old_end, column.statistics.max)
break
divisions = division_info["divisions"]
if divisions[0] < old_end:
raise ValueError(
"Appended divisions overlapping with the previous ones"
" (set ignore_divisions=True to append anyway).\n"
"Previous: {} | New: {}".format(old_end, divisions[0])
)
return fmd, schema, i_offset
@classmethod
def _pandas_to_arrow_table(
cls, df: pd.DataFrame, preserve_index=False, schema=None
) -> pa.Table:
table = pa.Table.from_pandas(df, preserve_index=preserve_index, schema=schema)
return table
@classmethod
def write_partition(
cls,
df,
path,
fs,
filename,
partition_on,
return_metadata,
fmd=None,
compression=None,
index_cols=None,
schema=None,
head=False,
**kwargs,
):
_meta = None
preserve_index = False
if _index_in_schema(index_cols, schema):
df.set_index(index_cols, inplace=True)
preserve_index = True
else:
index_cols = []
t = cls._pandas_to_arrow_table(df, preserve_index=preserve_index, schema=schema)
if partition_on:
md_list = _write_partitioned(
t,
path,
filename,
partition_on,
fs,
index_cols=index_cols,
compression=compression,
**kwargs,
)
if md_list:
_meta = md_list[0]
for i in range(1, len(md_list)):
_append_row_groups(_meta, md_list[i])
else:
md_list = []
with fs.open(fs.sep.join([path, filename]), "wb") as fil:
pq.write_table(
t,
fil,
compression=compression,
metadata_collector=md_list,
**kwargs,
)
if md_list:
_meta = md_list[0]
_meta.set_file_path(filename)
# Return the schema needed to write the metadata
if return_metadata:
d = {"meta": _meta}
if head:
# Only return schema if this is the "head" partition
d["schema"] = t.schema
return [d]
else:
return []
@staticmethod
def write_metadata(parts, fmd, fs, path, append=False, **kwargs):
schema = parts[0][0].get("schema", None)
parts = [p for p in parts if p[0]["meta"] is not None]
if parts:
if not append:
# Get only arguments specified in the function
common_metadata_path = fs.sep.join([path, "_common_metadata"])
keywords = getargspec(pq.write_metadata).args
kwargs_meta = {k: v for k, v in kwargs.items() if k in keywords}
with fs.open(common_metadata_path, "wb") as fil:
pq.write_metadata(schema, fil, **kwargs_meta)
# Aggregate metadata and write to _metadata file
metadata_path = fs.sep.join([path, "_metadata"])
if append and fmd is not None:
_meta = fmd
i_start = 0
else:
_meta = parts[0][0]["meta"]
i_start = 1
for i in range(i_start, len(parts)):
_append_row_groups(_meta, parts[i][0]["meta"])
with fs.open(metadata_path, "wb") as fil:
_meta.write_metadata_file(fil)
    @classmethod
    def collect_file_metadata(cls, path, fs, file_path):
        """Read the parquet footer metadata of ``path``.

        If ``file_path`` is truthy it is recorded on the metadata object so
        a later aggregated ``_metadata`` file can locate this piece.
        """
        with fs.open(path, "rb") as f:
            meta = pq.ParquetFile(f).metadata
        if file_path:
            meta.set_file_path(file_path)
        return meta
@classmethod
def aggregate_metadata(cls, meta_list, fs, out_path):
meta = None
for _meta in meta_list:
if meta:
_append_row_groups(meta, _meta)
else:
meta = _meta
if out_path:
metadata_path = fs.sep.join([out_path, "_metadata"])
with fs.open(metadata_path, "wb") as fil:
if not meta:
raise ValueError("Cannot write empty metdata!")
meta.write_metadata_file(fil)
return None
else:
return meta
| {
"repo_name": "blaze/dask",
"path": "dask/dataframe/io/parquet/arrow.py",
"copies": "1",
"size": "40090",
"license": "bsd-3-clause",
"hash": -730241780850876700,
"line_mean": 35.9493087558,
"line_max": 88,
"alpha_frac": 0.5235719631,
"autogenerated": false,
"ratio": 4.407431838170624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5431003801270624,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import defaultdict
import re
from adder import problem, logic
from adder.logic import (Braces, LogicOperator, DefiniteClause,
standardize_variables, StandartizationReplacer,
skolemize, unify, unify_substitutions, substitute,
propagate_substitutions,
find_variables_symbol, find_variables_expression,
is_subsumed_by)
from adder.utils import ParsingError
from adder.cnfparser import (parse_fo_sentence as parse_cnf,
is_fo_disjunction_tautology, print_cnf)
def backward_chaining(kb, query):
    """Prove ``query`` against a definite-clause KB with backward chaining.

    Returns a mapping from the query's original variable names to their
    bound values on success, or ``problem.FAILURE`` if no proof exists.
    """
    original_vars = find_variables_symbol(query)
    renamed_query, renaming = standardize_variables(query)
    bindings = __backward_chaining_or(kb, renamed_query, {})
    if bindings is problem.FAILURE:
        return bindings
    propagate_substitutions(bindings)
    return {var: bindings[renaming[var]] for var in original_vars}
def __backward_chaining_or(kb, query, theta):
    """OR node: try each implication whose conclusion can match ``query``;
    return the first substitution proving it, else FAILURE."""
    for implication in __fetch_implications(kb, query):
        candidate = __backward_chaining_and(
            kb,
            implication.premises,
            unify(query, implication.conclusion, theta),
        )
        if candidate is not problem.FAILURE:
            return candidate
    return problem.FAILURE
def __fetch_implications(kb, query):
    """Return the KB implications whose conclusion unifies with ``query``,
    standardized apart so their variables cannot collide with the goal's.

    Consistency fix: FAILURE is a sentinel compared by identity (``is``)
    throughout this module; use ``is not`` here too instead of ``!=``.
    """
    implications = []
    for implication in kb:
        subst = unify(query, implication.conclusion)
        if subst is not problem.FAILURE:
            # NOTE(review): standardize() appears to rename the stored
            # clause in place rather than on a copy — confirm intended.
            implication.standardize()
            implications.append(implication)
    return implications
def __backward_chaining_and(kb, goals, theta):
    """AND node: prove every goal in sequence, threading the substitution
    through each sub-proof.  An empty goal list succeeds immediately."""
    if theta is problem.FAILURE:
        return problem.FAILURE
    for goal in goals:
        theta = __backward_chaining_or(kb, substitute(goal, theta), theta)
    return theta
# Definite-clause knowledge base bound to the backward-chaining prover.
DefiniteKnowledgeBase = partial(logic.DefiniteKnowledgeBase, backward_chaining)
def is_subsumed_in(clause, clause_set):
    """True when some disjunction in ``clause_set`` subsumes ``clause``:
    i.e. every disjunct of that disjunction subsumes at least one literal
    of ``clause``."""
    for disjunction in clause_set:
        if all(any(is_subsumed_by(literal, disjunct) for literal in clause)
               for disjunct in disjunction):
            return True
    return False
class ClauseBindingMapper:
    """Records, per clause, the variable bindings under which the clause was
    derived, and memoizes pairwise unification of those bindings."""

    def __init__(self):
        # clause -> list of binding dicts recorded for that clause
        self.container = defaultdict(list)
        # (c1, c2, i, j) -> unified bindings of container[c1][i] / [c2][j]
        self.memo = {}

    def get_from_key(self, c1, c2, i, j):
        """Memoized unification of the i-th / j-th bindings of c1 / c2."""
        key = (c1, c2, i, j)
        cached = self.memo.get(key)
        if cached is None:
            cached = unify_substitutions(self.container[c1][i],
                                         self.container[c2][j])
            self.memo[key] = cached
        return cached

    def get_unified_bindings(self, c1, c2):
        """Lazily yield every pairwise unification of the bindings recorded
        for ``c1`` and ``c2``, seeding each with an empty binding first."""
        for clause in (c1, c2):
            if not self.container[clause]:
                self.container[clause].append({})
        return (self.get_from_key(c1, c2, i, j)
                for i, _ in enumerate(self.container[c1])
                for j, _ in enumerate(self.container[c2]))
def resolution_prover(knowledge_base, query, max_clause_len, is_complete):
    """Prove ``query`` from ``knowledge_base`` by first-order resolution.

    Runs a refutation search over KB + NOT(query); when ``is_complete`` is
    set it interleaves a second search over KB + query so unprovable
    queries can terminate with FAILURE instead of diverging.

    Returns a substitution for the query's variables on success, or
    ``problem.FAILURE``.  NOTE(review): with ``is_complete`` false this can
    loop forever when neither search concludes — confirm callers bound the
    search via ``max_clause_len``.

    Fix: dropped the unused locals ``empty_set``, ``support`` and
    ``support2`` (the latter re-parsed the query for no effect).
    """
    negated_query = "{0}({1})".format(LogicOperator.Negation, query)
    not_query_cnf = parse_cnf(negated_query)
    clauses = not_query_cnf + knowledge_base
    new_inferrences = set()
    already_resolved = set()
    clause_theta_mapping = ClauseBindingMapper()

    # Mirror state for the complementary search over KB + query.
    clauses2 = parse_cnf(query) + knowledge_base
    new_inferrences2 = set()
    already_resolved2 = set()
    clause_theta_mapping2 = ClauseBindingMapper()

    while True:
        result = __resolution_step(new_inferrences, already_resolved,
                                   clauses, max_clause_len,
                                   clause_theta_mapping)
        if result is not None:
            if result[0]:
                # Keep only bindings for variables occurring in the query.
                return {var: value for var, value in result[1].items()
                        if var in find_variables_expression(query)}
            return problem.FAILURE
        if is_complete:
            result = __resolution_step(new_inferrences2, already_resolved2,
                                       clauses2, max_clause_len,
                                       clause_theta_mapping2)
            if result is not None:
                if result[0]:
                    return {var: value for var, value in result[1].items()
                            if var in find_variables_expression(query)}
                return problem.FAILURE
def __resolution_step(new_inferrences, already_resolved,
                      clauses, max_clause_len,
                      clause_mapping, empty_set=frozenset()):
    """Perform one saturation round of the resolution search.

    Returns ``(True, bindings)`` once the empty clause is derived
    (contradiction found), ``(False, {})`` when no new clause can be
    inferred (saturation), or ``None`` when the search must continue.
    """
    new_inferrences.clear()
    # Unordered clause pairs not yet resolved against each other.
    pairs = ((clauses[i], clauses[j])
             for i in range(len(clauses))
             for j in range(i + 1, len(clauses))
             if (clauses[i], clauses[j]) not in already_resolved)
    for c1, c2 in pairs:
        resolvents = __resolve(c1, c2, max_clause_len, clause_mapping)
        if empty_set in resolvents:
            # Empty clause derived: its recorded bindings are the answer
            # substitution.
            bindings = clause_mapping.container[empty_set][0]
            bindings = propagate_substitutions(bindings)
            return True, bindings
        new_inferrences.update(resolvents)
        already_resolved.add((c1, c2))
    if new_inferrences.issubset(clauses):
        # Fixed point: nothing new can be derived.
        return False, {}
    # NOTE: extend() consumes the generator while appending, so later
    # candidates are subsumption-checked against newly appended clauses too.
    clauses.extend(clause for clause in new_inferrences
                   if not is_subsumed_in(clause, clauses))
    return None
def __resolve(c1, c2, max_len, clause_mapping):
    """All resolvents of ``c1``/``c2`` (both directions), dropping
    tautologies and clauses with ``max_len`` or more literals."""
    candidates = []
    __resolve_single_sided(c1, c2, candidates, clause_mapping)
    __resolve_single_sided(c2, c1, candidates, clause_mapping)
    return [clause for clause in candidates
            if len(clause) < max_len
            and not is_fo_disjunction_tautology(clause)]
def __resolve_single_sided(c1, c2, resolvents, clause_mapping):
    """Resolve negated literals of ``c1`` against literals of ``c2`` under
    every binding combination recorded for the two clauses; append each
    resolvent and remember the bindings that produced it."""
    for literal in c1:
        negated = LogicOperator.Negation + literal
        for other in c2:
            for bindings in clause_mapping.get_unified_bindings(c1, c2):
                theta = unify(negated, other, bindings)
                if theta is problem.FAILURE:
                    continue
                merged = c1.union(c2).difference({literal, other})
                resolvent = __standardize_resolvent(merged, theta)
                resolvents.append(resolvent)
                clause_mapping.container[resolvent].append(theta)
def __standardize_resolvent(resolvent, theta):
    """Apply ``theta`` to every disjunct and freeze the result.

    A fresh replacer is created only to reset the class-level variable
    counter afterwards; per the original note, re-standardization of the
    disjuncts themselves is not needed.
    """
    replacer = StandartizationReplacer("x")
    substituted = {substitute(disjunct, theta) for disjunct in resolvent}
    StandartizationReplacer.GlobalIndex = replacer.index
    return frozenset(substituted)
def __parser(sentence):
    """Standardize the sentence's variables apart, then parse it to CNF."""
    standardized, _ = standardize_variables(sentence)
    return parse_cnf(standardized)
# General first-order KB: CNF parsing front-end bound to the resolution prover.
KnowledgeBase = partial(logic.KnowledgeBase, __parser, resolution_prover)
| {
"repo_name": "NikolaDimitroff/Adder",
"path": "adder/fologic.py",
"copies": "1",
"size": "7026",
"license": "mit",
"hash": 781592235168366800,
"line_mean": 34.4848484848,
"line_max": 86,
"alpha_frac": 0.61058924,
"autogenerated": false,
"ratio": 3.8414434117003826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9951401142448184,
"avg_score": 0.00012630185043978147,
"num_lines": 198
} |
from functools import partial
from collections import defaultdict
from .models import UNTITLED_ARTIST, UNTITLED_ALBUM
# Positional layout of each serialized track tuple built by
# compress_tracks_to_json: index i of the tuple holds FIELDS_ORDER[i].
FIELDS_ORDER = [
    'id',
    'source',
    'source_id',
    'title',
    'number',
    'mbid',
    'artist_mbid',
    'album_mbid'
]
def compress_tracks_to_json(tracks):
    """Group tracks into a nested mapping ``artist -> album -> [tuples]``.

    Each track is reduced to a tuple whose positions follow ``FIELDS_ORDER``
    (id, source, source_id, title, number, mbid, artist_mbid, album_mbid).
    Tracks missing an artist or album fall back to the "untitled"
    placeholders.
    """
    grouped = defaultdict(partial(defaultdict, list))
    for track in tracks:
        # Single-letter source code ("[dy]"), e.g. 'y' for YouTube.
        source_code = track['source'][0]
        source_id = track['youtube']['id'] if source_code == 'y' else None
        artist = track.get('artist') or UNTITLED_ARTIST
        album = track.get('album') or UNTITLED_ALBUM
        grouped[artist][album].append((
            str(track['_id']),
            source_code,
            source_id,
            track['title'],
            track.get('number') or 0,
            track.get('mbid'),
            track.get('artist_mbid'),
            track.get('album_mbid')
        ))
    return grouped
| {
"repo_name": "seem-sky/cloudtunes",
"path": "cloudtunes-server/cloudtunes/library/utils.py",
"copies": "14",
"size": "1042",
"license": "bsd-3-clause",
"hash": 7332052926431564000,
"line_mean": 22.1555555556,
"line_max": 72,
"alpha_frac": 0.5537428023,
"autogenerated": false,
"ratio": 3.7482014388489207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import defaultdict
from pygoap.actions import Action
from pygoap.goals import *
from pygoap.precepts import *
from storymaker.english import make_english
def get_known_agents(agent):
    """Yield every entity other than ``agent`` itself found in the agent's
    memory, taken from precepts that carry an ``entity`` attribute;
    precepts without one are skipped."""
    for precept in agent.memory:
        try:
            candidate = precept.entity
        except AttributeError:
            continue
        if candidate is not agent:
            yield candidate
def opposite_sex(agent, other):
    """True when the two agents have differing ``sex`` attributes."""
    return agent.sex != other.sex
class GiveBirthAbility(Action):
    """
    simulate birth: available only once gestation has completed
    """

    def get_actions(self, parent, memory=None):
        # Prerequisite: "ready to birth" flag set (by GestationAction).
        # Effects: parent gains "has baby" and the ready flag is cleared.
        effects = [PreceptGoal(DatumPrecept(parent, "has baby", True)),
                   PreceptGoal(DatumPrecept(parent, "ready to birth", False))]
        prereqs = [PreceptGoal(DatumPrecept(parent, "ready to birth", True))]
        yield GiveBirthAction(parent, prereqs, effects)
class GiveBirthAction(Action):
    def update(self, dt):
        """Announce the birth and emit the observable birth action."""
        yield SpeechPrecept(self.parent, "my baby is here!")
        yield ActionPrecept(self.parent, "birth", None)
class GestationAbility(Action):
    """
    simulate child gestation: after sex, the parent becomes ready to birth
    """

    def get_actions(self, parent, memory=None):
        effects = [PreceptGoal(DatumPrecept(parent, "ready to birth", True))]
        prereqs = [PreceptGoal(DatumPrecept(parent, "had sex", True))]
        yield GestationAction(parent, prereqs, effects)
class GestationAction(Action):
    # Gestation takes 5 time units before its effects apply.
    default_duration = 5
class CopulateAbility(Action):
    """
    simulate sex: one possible action per known opposite-sex agent
    """

    def get_actions(self, parent, memory=None):
        # Only agents of the opposite sex that the parent knows about.
        f = partial(opposite_sex, parent)
        for other in filter(f, get_known_agents(parent)):
            effects = [PreceptGoal(ActionPrecept(parent, "sex", other)),
                       PreceptGoal(DatumPrecept(parent, "had sex", True))]
            yield CopulateAction(parent, None, effects, other=other)
class CopulateAction(Action):
    def __init__(self, *args, **kwargs):
        """Requires an ``other`` keyword argument naming the partner."""
        super().__init__(*args, **kwargs)
        self.other = kwargs.get('other', None)
        # NOTE(review): assert vanishes under ``python -O``; an explicit
        # raise would be sturdier — confirm before changing behavior.
        assert (self.other is not None)

    def update(self, dt):
        # Emit the observable action precept each tick.
        yield ActionPrecept(self.parent, "sex", self.other)
class SpeakAbility(Action):
    """
    examine parent's memory and create some things to say
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-speaker record of precepts already spoken, so the same thing
        # is not said twice.
        self.perception_map = defaultdict(list)

    def get_actions(self, parent, memory=None):
        """Yield a SpeakAction for one randomly chosen unspoken memory.

        BUG FIX: the original ended the generator with ``raise
        StopIteration``, which PEP 479 (Python 3.7+) turns into a
        RuntimeError; a plain ``return`` is the correct way to finish.
        """
        import random

        if memory is None or len(memory) == 0:
            return
        p = random.choice(list(memory))
        if p not in self.perception_map[parent]:
            # assume when speaking all actors will receive the message
            self.perception_map[parent].append(p)
            effects = [PreceptGoal(DatumPrecept(parent, "chatter", True))]
            yield SpeakAction(parent, None, effects, precept=p)
class SpeakAction(Action):
    def __init__(self, *args, **kwargs):
        """Requires a ``precept`` keyword argument: the memory to speak."""
        super().__init__(*args, **kwargs)
        self.p = kwargs.get('precept', None)
        # NOTE(review): assert vanishes under ``python -O``.
        assert (self.p is not None)

    def update(self, dt):
        """Yield the spoken sentence, then the underlying precept."""
        msg = SpeechPrecept(self.parent, make_english(self.parent, self.p))
        yield msg  # return a speech precept
        # return the original precept (simulates passing of info through speech)
        yield self.p
# filters should modify the agent's traits or mood
def copulate_filter(agent, p):
    """Mood filter: sex actions performed by ``agent`` raise contentment.

    Non-matching precepts pass through unchanged.  Matching ones collapse
    all of the agent's stored 'content' mood precepts into one refreshed
    precept appended after the original.

    BUG FIX: the original used ``assert`` + caught AssertionError as the
    type/ownership guard; asserts are stripped under ``python -O``, so the
    guard is now an explicit check with identical semantics.
    """
    if not isinstance(p, ActionPrecept) or p.entity is not agent:
        return [p]

    r = [p]
    value = 0
    stale = []
    # Sum and remove the agent's accumulated 'content' mood precepts.
    for mp in agent.memory.of_class(MoodPrecept):
        if mp.entity is agent and mp.name == 'content':
            value += mp.value
            stale.append(mp)
    for mp in stale:
        agent.memory.remove(mp)

    if p.action == "sex":
        value += .01

    r.append(MoodPrecept(agent, 'content', value))
    return r
def conversation_filter(agent, p):
    """Mood filter: hearing speech slowly erodes contentment (floor 0.0).

    Non-speech precepts are passed through untouched.

    BUG FIXES: (1) the original ended the non-speech branch with ``raise
    StopIteration`` inside a generator, which PEP 479 (Python 3.7+) turns
    into a RuntimeError — use ``return``.  (2) the ``assert`` guard was
    stripped under ``python -O``; it is now an explicit check.
    """
    if not isinstance(p, SpeechPrecept):
        yield p
        return

    agent.moods.content.value -= .1
    if agent.moods.content.value < 0:
        agent.moods.content.value = 0.0
    yield p
| {
"repo_name": "bitcraft/storymaker",
"path": "storymaker/actions.py",
"copies": "1",
"size": "4398",
"license": "mit",
"hash": 160214806564324320,
"line_mean": 26.835443038,
"line_max": 80,
"alpha_frac": 0.6170986812,
"autogenerated": false,
"ratio": 3.7946505608283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9911358555983605,
"avg_score": 0.0000781372089388967,
"num_lines": 158
} |
from functools import partial
from collections import defaultdict
import sqlalchemy
from ..types import String, Integer
from ..graph import Nothing, Maybe, One, Many
from ..engine import pass_context
def _translate_type(column):
    """Map a SQLAlchemy column type onto the matching hiku scalar type,
    or None when the type is not supported."""
    column_type = column.type
    if isinstance(column_type, sqlalchemy.Integer):
        return Integer
    if isinstance(column_type, sqlalchemy.Unicode):
        return String
    return None
def _table_repr(table):
return 'Table({})'.format(', '.join(
[repr(table.name), repr(table.metadata), '...',
'schema={!r}'.format(table.schema)]
))
@pass_context
class FieldsQuery:
    """Loads scalar fields of a graph node from one SQLAlchemy selectable,
    batched by primary key."""

    def __init__(self, engine_key, from_clause, *, primary_key=None):
        # engine_key: key of the SQLAlchemy engine inside the query context.
        self.engine_key = engine_key
        self.from_clause = from_clause
        if primary_key is not None:
            self.primary_key = primary_key
        else:
            # currently only one column supported
            self.primary_key, = from_clause.primary_key

    def __repr__(self):
        # Tables get an abbreviated repr; other selectables use their own.
        if isinstance(self.from_clause, sqlalchemy.Table):
            from_clause_repr = _table_repr(self.from_clause)
        else:
            from_clause_repr = repr(self.from_clause)
        return ('<{}.{}: engine_key={!r}, from_clause={}, primary_key={!r}>'
                .format(self.__class__.__module__, self.__class__.__name__,
                        self.engine_key, from_clause_repr, self.primary_key))

    def __postprocess__(self, field):
        """Infer the field's hiku type from its column when not given."""
        if field.type is None:
            column = self.from_clause.c[field.name]
            field.type = _translate_type(column)

    def in_impl(self, column, values):
        # Hook point for dialects that need a different IN(...) construct.
        return column.in_(values)

    def select_expr(self, fields_, ids):
        """Build the SELECT plus a post-processor that reorders fetched
        rows to match ``ids`` (missing ids become all-None rows)."""
        columns = [self.from_clause.c[f.name] for f in fields_]
        expr = (
            sqlalchemy.select([self.primary_key] + columns)
            .select_from(self.from_clause)
            .where(self.in_impl(self.primary_key, ids))
        )

        def result_proc(rows):
            rows_map = {row[self.primary_key]: [row[c] for c in columns]
                        for row in rows}
            nulls = [None for _ in fields_]
            return [rows_map.get(id_, nulls) for id_ in ids]

        return expr, result_proc

    def __call__(self, ctx, fields_, ids):
        """Fetch the requested fields for every id, preserving order."""
        if not ids:
            return []
        expr, result_proc = self.select_expr(fields_, ids)
        sa_engine = ctx[self.engine_key]
        with sa_engine.connect() as connection:
            rows = connection.execute(expr).fetchall()
        return result_proc(rows)
def _to_maybe_mapper(pairs, values):
    """Map each value through the pairs, substituting Nothing for misses."""
    lookup = dict(pairs)
    return [lookup.get(value, Nothing) for value in values]
def _to_one_mapper(pairs, values):
mapping = dict(pairs)
return [mapping[value] for value in values]
def _to_many_mapper(pairs, values):
mapping = defaultdict(list)
for from_value, to_value in pairs:
mapping[from_value].append(to_value)
return [mapping[value] for value in values]
class LinkQuery:
    """Loads a graph link's edges from a SQLAlchemy table.

    ``from_column`` and ``to_column`` must belong to the same table; rows
    are fetched by from-column values and folded into Maybe/One/Many
    results according to the link's cardinality.
    """

    def __init__(self, engine_key, *, from_column, to_column):
        if from_column.table is not to_column.table:
            raise ValueError('from_column and to_column should belong to '
                             'one table')
        self.engine_key = engine_key
        self.from_column = from_column
        self.to_column = to_column

    def __repr__(self):
        cls = self.__class__
        return ('<{}.{}: engine_key={!r}, from_column={!r}, to_column={!r}>'
                .format(cls.__module__, cls.__name__,
                        self.engine_key, self.from_column, self.to_column))

    def __postprocess__(self, link):
        """Attach a context-aware loader matching the link's cardinality."""
        if link.type_enum is One:
            mapper = _to_one_mapper
        elif link.type_enum is Maybe:
            mapper = _to_maybe_mapper
        elif link.type_enum is Many:
            mapper = _to_many_mapper
        else:
            raise TypeError(repr(link.type_enum))
        link.func = pass_context(partial(self, mapper))

    def in_impl(self, column, values):
        # Hook point for dialects that need a different IN(...) construct.
        return column.in_(values)

    def select_expr(self, ids):
        """Build the SELECT for the given ids, or None when nothing real
        remains after dropping duplicates and Nones."""
        # TODO: make this optional, but enabled by default
        wanted = [i for i in set(ids) if i is not None]
        if not wanted:
            return None
        return (
            sqlalchemy.select([self.from_column.label('from_column'),
                               self.to_column.label('to_column')])
            .where(self.in_impl(self.from_column, wanted))
        )

    def __call__(self, result_proc, ctx, ids):
        expr = self.select_expr(ids)
        if expr is None:
            rows = []
        else:
            sa_engine = ctx[self.engine_key]
            with sa_engine.connect() as connection:
                rows = connection.execute(expr).fetchall()
        return result_proc(rows, ids)
| {
"repo_name": "vmagamedov/hiku",
"path": "hiku/sources/sqlalchemy.py",
"copies": "1",
"size": "4903",
"license": "bsd-3-clause",
"hash": 5835504299556532,
"line_mean": 31.045751634,
"line_max": 77,
"alpha_frac": 0.5737303692,
"autogenerated": false,
"ratio": 3.8667192429022084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9940449612102209,
"avg_score": 0,
"num_lines": 153
} |
from functools import partial
from collections import defaultdict
class FSM(object):
    """
    A simple finite state machine that defines a set of states and the
    transitions between those states. The constructor takes the name of the
    initial state, a list of transitions, and a dictionary of callbacks.

    `transitions` is a list of dictionaries with 'source', 'destination', and
    'name' keys. Transitions of the same name may have different sources or
    map to different destinations if their sources differ.

    Callbacks have keys that correspond to transition names and are prefixed
    with 'on_' or 'on_before_'. For example, a transition named 'enter'
    could have the callbacks 'on_enter' and 'on_before_enter'. The
    'on_before' callbacks return True if the transition should occur or False
    to short circuit the impending transition. Return values of the 'on'
    callbacks are passed to the caller of the transition (e.g. result in the
    expression `result = fsm.enter()` would be set to the return of `on_enter`).

    Transitions may take any positional or keyword arguments. These are
    passed to the associated callback(s) for the transition.
    """

    class IllegalNameException(Exception):
        pass

    class IllegalCallbackException(Exception):
        pass

    class IllegalTransitionException(Exception):
        pass

    def __init__(self, initial, transitions=None, callbacks=None):
        callbacks = callbacks or {}
        transitions = transitions or []
        # Define the initial state and store the transitions and callbacks
        # if provided.
        self.state = initial
        # Tracks the set of possible states or "nodes" the machine may enter
        self.possible_states = {initial}
        # Maps each callback's name to the callback function
        self.callbacks = {}
        # Maps a (source, transition name) pair to a destination
        self.transitions = {}
        # Maps each source state to the set of transition names leaving it
        self._source_to_names = defaultdict(set)
        # BUG FIX: the original used map() for these registrations; map() is
        # lazy in Python 3, so no transition or callback was ever added.
        # Iterate explicitly so the side effects actually run.
        for transition in transitions:
            self.add_transition(transition)
        for name, func in callbacks.items():
            self.add_callback(name, func)

    def add_transition(self, transition):
        """
        Given a transition dictionary that defines a `source`, `name`, and
        `destination`, add the transition function with the `name` that moves
        the state from the `source` to the `destination`.

        An IllegalNameException is thrown if the transition name would
        override another method, or if the (source, name) pair is already
        mapped to a destination.
        """
        source, name = transition['source'], transition['name']
        destination = transition['destination']
        # Assure transition names won't override existing methods.  Names
        # already registered as transitions are exempt (they were added by
        # a previous call with a different source).
        transition_names = [
            t_name for t_source, t_name in self.transitions.keys()
        ]
        reserved_methods = set(dir(self)).difference(transition_names)
        if name in reserved_methods:
            err_msg = u'The transition name `{0}` shadows an existing method'
            raise self.IllegalNameException(err_msg.format(name))
        # Assure transition won't override an existing one. (Transitions are
        # unique by the pairing of a source and transition name).
        if (source, name) in self.transitions:
            err_msg = u'The transition name `{0}` already maps {1} to {2}'
            existing_destination = self.transitions[(source, name)]
            raise self.IllegalNameException(
                err_msg.format(name, source, existing_destination)
            )
        # Update transitions and the reachable-state bookkeeping.
        self.transitions.update({
            (source, name): destination
        })
        self._source_to_names[source].add(name)
        self.possible_states.add(source)
        self.possible_states.add(destination)
        if not hasattr(self, name):
            # Create the bound transition method once per name; dispatch
            # against the current state happens inside the factory.
            func = partial(self._transition_function_factory, source, name)
            setattr(self, name, func)

    def add_callback(self, name, func):
        """
        Given a `name` and `func`, registers the function `func` as a
        callback for the transition associated with `name`.

        An IllegalCallbackException is thrown if the callback name does not
        correspond to an existing transition, to guard against registering
        callbacks with typo'd names that would never be called.
        """
        # Strip the 'on_' (and optional 'before_') prefix to find the
        # transition this callback belongs to.
        transition_name = name[3:]
        if transition_name.startswith('before_'):
            transition_name = transition_name[7:]
        transition_names = [
            t_name for t_source, t_name in self.transitions.keys()
        ]
        if transition_name not in transition_names:
            err_msg = u'Callback {0} can not be registered because {1} is not a transition name'
            raise self.IllegalCallbackException(
                err_msg.format(name, transition_name)
            )
        self.callbacks.update({
            name: func
        })

    def _transition_function_factory(self, source, name, *args, **kwargs):
        """
        Implements the transition method bound as `name`: validates the
        transition is legal from the current state, fires any registered
        callbacks, and moves `self.state` to the destination.

        BUG FIX: the destination is resolved from the *current* state, not
        the `source` captured when the method was first registered, so a
        transition name shared by several sources dispatches to the correct
        destination for whichever state the machine is actually in.
        """
        destination = self.transitions.get((self.state, name), None)
        if destination is not None and self.can(name):
            if self.callbacks:
                # A 'before' callback may veto the transition by returning
                # a falsy value.
                resume = self._call_callback(name, 'before', *args, **kwargs)
                if resume:
                    self.state = destination
                    return self._call_callback(name, '', *args, **kwargs)
            else:
                self.state = destination
        else:
            err_msg = '{0} called when current state was {1}'
            raise self.IllegalTransitionException(
                err_msg.format(name, self.state)
            )

    def _call_callback(self, transition_name, prefix, *args, **kwargs):
        """Calls the callback on behalf of the transition function.

        Returns the callback's return value, or True when no callback of
        that name is registered (so 'before' checks default to proceed).
        """
        if prefix:
            name_parts = ('on', prefix, transition_name)
        else:
            name_parts = ('on', transition_name)
        callback_name = '_'.join(name_parts)
        callback = self.callbacks.get(callback_name, None)
        if callback:
            return callback(*args, **kwargs)
        return True

    def is_state(self, check_state):
        """Checks if the current state is `check_state`"""
        return self.state == check_state

    def can(self, name):
        """
        Checks if the given `name` is a possible transition from the current
        state
        """
        return name in self._source_to_names[self.state]

    def __repr__(self):
        return u'State machine: ({0}) '.format(self.state) + u' '.join([
            state
            for state in self.possible_states
            if state != self.state
        ])

    def callbacks_display(self):
        """Names of all registered callbacks."""
        return self.callbacks.keys()

    def transitions_display(self):
        """Sorted human-readable 'name: source -> destination' lines."""
        return sorted([
            '{0}: {1} -> {2}'.format(name, source, destination)
            for (source, name), destination in self.transitions.items()
        ])
| {
"repo_name": "calebsmith/yape",
"path": "yape/fsm.py",
"copies": "1",
"size": "7625",
"license": "bsd-2-clause",
"hash": 6797321831755299000,
"line_mean": 41.8370786517,
"line_max": 96,
"alpha_frac": 0.630557377,
"autogenerated": false,
"ratio": 4.565868263473054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003319853597549691,
"num_lines": 178
} |
from functools import partial
from collections import deque
from llvmlite import ir
from numba.core.datamodel.registry import register_default
from numba.core import types, cgutils
from numba.np import numpy_support
class DataModel(object):
    """
    DataModel describe how a FE type is represented in the LLVM IR at
    different contexts.

    Contexts are:

    - value: representation inside function body. Maybe stored in stack.
      The representation here are flexible.

    - data: representation used when storing into containers (e.g. arrays).

    - argument: representation used for function argument. All composite
      types are unflattened into multiple primitive types.

    - return: representation used for return argument.

    Throughput the compiler pipeline, a LLVM value is usually passed around
    in the "value" representation. All "as_" prefix function converts from
    "value" representation. All "from_" prefix function converts to the
    "value" representation.
    """

    def __init__(self, dmm, fe_type):
        # dmm: the owning DataModelManager, used to look up models for
        # contained types; fe_type: the frontend (numba) type being modeled.
        self._dmm = dmm
        self._fe_type = fe_type

    @property
    def fe_type(self):
        """The frontend type this model represents."""
        return self._fe_type

    def get_value_type(self):
        """LLVM type for the "value" representation; must be overridden."""
        raise NotImplementedError(self)

    def get_data_type(self):
        # Default: data representation equals the value representation.
        return self.get_value_type()

    def get_argument_type(self):
        """Return a LLVM type or nested tuple of LLVM type
        """
        return self.get_value_type()

    def get_return_type(self):
        # Default: return representation equals the value representation.
        return self.get_value_type()

    def as_data(self, builder, value):
        """Convert from "value" to "data" representation."""
        raise NotImplementedError(self)

    def as_argument(self, builder, value):
        """
        Takes one LLVM value
        Return a LLVM value or nested tuple of LLVM value
        """
        raise NotImplementedError(self)

    def as_return(self, builder, value):
        """Convert from "value" to "return" representation."""
        raise NotImplementedError(self)

    def from_data(self, builder, value):
        """Convert from "data" back to "value" representation."""
        raise NotImplementedError(self)

    def from_argument(self, builder, value):
        """
        Takes a LLVM value or nested tuple of LLVM value
        Returns one LLVM value
        """
        raise NotImplementedError(self)

    def from_return(self, builder, value):
        """Convert from "return" back to "value" representation."""
        raise NotImplementedError(self)

    def load_from_data_pointer(self, builder, ptr, align=None):
        """
        Load value from a pointer to data.
        This is the default implementation, sufficient for most purposes.
        """
        return self.from_data(builder, builder.load(ptr, align=align))

    def traverse(self, builder):
        """
        Traverse contained members.
        Returns a iterable of contained (types, getters).
        Each getter is a one-argument function accepting a LLVM value.
        """
        return []

    def traverse_models(self):
        """
        Recursively list all models involved in this model.
        """
        return [self._dmm[t] for t in self.traverse_types()]

    def traverse_types(self):
        """
        Recursively list all frontend types involved in this model.
        """
        # Breadth-first walk over inner_models(), deduplicating by type.
        types = [self._fe_type]
        queue = deque([self])
        while len(queue) > 0:
            dm = queue.popleft()
            for i_dm in dm.inner_models():
                if i_dm._fe_type not in types:
                    queue.append(i_dm)
                    types.append(i_dm._fe_type)
        return types

    def inner_models(self):
        """
        List all *inner* models.
        """
        return []

    def get_nrt_meminfo(self, builder, value):
        """
        Returns the MemInfo object or None if it is not tracked.
        It is only defined for types.meminfo_pointer
        """
        return None

    def has_nrt_meminfo(self):
        # True only for models that directly carry an NRT meminfo pointer.
        return False

    def contains_nrt_meminfo(self):
        """
        Recursively check all contained types for need for NRT meminfo.
        """
        return any(model.has_nrt_meminfo() for model in self.traverse_models())

    def _compared_fields(self):
        # Identity of a model is its concrete class plus the frontend type.
        return (type(self), self._fe_type)

    def __hash__(self):
        return hash(tuple(self._compared_fields()))

    def __eq__(self, other):
        if type(self) is type(other):
            return self._compared_fields() == other._compared_fields()
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)
@register_default(types.Omitted)
class OmittedArgDataModel(DataModel):
    """
    A data model for omitted arguments. Only the "argument" representation
    is defined, other representations raise a NotImplementedError.
    """
    # Omitted arguments don't produce any LLVM function argument.
    def get_argument_type(self):
        return ()

    def as_argument(self, builder, val):
        # Nothing is passed through the ABI for an omitted argument.
        return ()

    def from_argument(self, builder, val):
        assert val == (), val
        return None
@register_default(types.Boolean)
@register_default(types.BooleanLiteral)
class BooleanModel(DataModel):
    """Booleans: i1 as a value, widened to i8 for data/argument/return
    (i1 is not a storable/ABI-friendly memory type)."""
    _bit_type = ir.IntType(1)
    _byte_type = ir.IntType(8)

    def get_value_type(self):
        return self._bit_type

    def get_data_type(self):
        return self._byte_type

    def get_return_type(self):
        return self.get_data_type()

    def get_argument_type(self):
        return self.get_data_type()

    def as_data(self, builder, value):
        # Zero-extend i1 -> i8 for storage.
        return builder.zext(value, self.get_data_type())

    def as_argument(self, builder, value):
        return self.as_data(builder, value)

    def as_return(self, builder, value):
        return self.as_data(builder, value)

    def from_data(self, builder, value):
        # Normalize any non-zero byte to i1 1 (robust against stray bits
        # in the stored byte).
        ty = self.get_value_type()
        resalloca = cgutils.alloca_once(builder, ty)
        cond = builder.icmp_unsigned('==', value, value.type(0))
        with builder.if_else(cond) as (then, otherwise):
            with then:
                builder.store(ty(0), resalloca)
            with otherwise:
                builder.store(ty(1), resalloca)
        return builder.load(resalloca)

    def from_argument(self, builder, value):
        return self.from_data(builder, value)

    def from_return(self, builder, value):
        return self.from_data(builder, value)
class PrimitiveModel(DataModel):
    """A primitive type can be represented natively in the target in all
    usage contexts.
    """

    def __init__(self, dmm, fe_type, be_type):
        # be_type: the single LLVM type used for every representation.
        super(PrimitiveModel, self).__init__(dmm, fe_type)
        self.be_type = be_type

    def get_value_type(self):
        return self.be_type

    # All four representations are identical, so every conversion is a
    # no-op pass-through.
    def as_data(self, builder, value):
        return value

    def as_argument(self, builder, value):
        return value

    def as_return(self, builder, value):
        return value

    def from_data(self, builder, value):
        return value

    def from_argument(self, builder, value):
        return value

    def from_return(self, builder, value):
        return value
class ProxyModel(DataModel):
    """
    Helper class for models which delegate to another model.

    Subclasses must set ``self._proxied_model`` before use.
    """

    def get_value_type(self):
        return self._proxied_model.get_value_type()

    def get_data_type(self):
        return self._proxied_model.get_data_type()

    def get_return_type(self):
        return self._proxied_model.get_return_type()

    def get_argument_type(self):
        return self._proxied_model.get_argument_type()

    def as_data(self, builder, value):
        return self._proxied_model.as_data(builder, value)

    def as_argument(self, builder, value):
        return self._proxied_model.as_argument(builder, value)

    def as_return(self, builder, value):
        return self._proxied_model.as_return(builder, value)

    def from_data(self, builder, value):
        return self._proxied_model.from_data(builder, value)

    def from_argument(self, builder, value):
        return self._proxied_model.from_argument(builder, value)

    def from_return(self, builder, value):
        return self._proxied_model.from_return(builder, value)
@register_default(types.EnumMember)
@register_default(types.IntEnumMember)
class EnumModel(ProxyModel):
    """
    Enum members are represented exactly like their values.
    """

    def __init__(self, dmm, fe_type):
        super(EnumModel, self).__init__(dmm, fe_type)
        # Delegate every representation to the member's value type model.
        self._proxied_model = dmm.lookup(fe_type.dtype)
@register_default(types.Opaque)
@register_default(types.PyObject)
@register_default(types.RawPointer)
@register_default(types.NoneType)
@register_default(types.StringLiteral)
@register_default(types.EllipsisType)
@register_default(types.Function)
@register_default(types.Type)
@register_default(types.Object)
@register_default(types.Module)
@register_default(types.Phantom)
@register_default(types.ContextManager)
@register_default(types.Dispatcher)
@register_default(types.ObjModeDispatcher)
@register_default(types.ExceptionClass)
@register_default(types.Dummy)
@register_default(types.ExceptionInstance)
@register_default(types.ExternalFunction)
@register_default(types.EnumClass)
@register_default(types.IntEnumClass)
@register_default(types.NumberClass)
@register_default(types.TypeRef)
@register_default(types.NamedTupleClass)
@register_default(types.DType)
@register_default(types.RecursiveCall)
@register_default(types.MakeFunctionLiteral)
@register_default(types.Poison)
class OpaqueModel(PrimitiveModel):
    """
    Passed as opaque pointers
    """
    # i8*: a generic untyped pointer used for every representation.
    _ptr_type = ir.IntType(8).as_pointer()

    def __init__(self, dmm, fe_type):
        be_type = self._ptr_type
        super(OpaqueModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.MemInfoPointer)
class MemInfoModel(OpaqueModel):
    """NRT meminfo pointer: opaque pointer carrying reference-counted
    memory ownership."""

    def inner_models(self):
        # The pointee type participates in recursive traversal.
        return [self._dmm.lookup(self._fe_type.dtype)]

    def has_nrt_meminfo(self):
        return True

    def get_nrt_meminfo(self, builder, value):
        # The value itself *is* the meminfo pointer.
        return value
@register_default(types.Integer)
@register_default(types.IntegerLiteral)
class IntegerModel(PrimitiveModel):
    """Integers map to a native LLVM integer of the frontend bitwidth."""

    def __init__(self, dmm, fe_type):
        be_type = ir.IntType(fe_type.bitwidth)
        super(IntegerModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.Float)
class FloatModel(PrimitiveModel):
    """Floats map to the matching native LLVM float type; only float32 and
    float64 are supported here."""

    def __init__(self, dmm, fe_type):
        if fe_type == types.float32:
            be_type = ir.FloatType()
        elif fe_type == types.float64:
            be_type = ir.DoubleType()
        else:
            raise NotImplementedError(fe_type)
        super(FloatModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.CPointer)
class PointerModel(PrimitiveModel):
    """Model a C pointer as a pointer to the pointee's *data* type."""
    def __init__(self, dmm, fe_type):
        self._pointee_model = dmm.lookup(fe_type.dtype)
        self._pointee_be_type = self._pointee_model.get_data_type()
        be_type = self._pointee_be_type.as_pointer()
        super(PointerModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.EphemeralPointer)
class EphemeralPointerModel(PointerModel):
    """Pointer whose stored ("data") form is the pointee itself."""
    def get_data_type(self):
        return self._pointee_be_type
    def as_data(self, builder, value):
        # Dereference, then convert the pointee to its data representation.
        value = builder.load(value)
        return self._pointee_model.as_data(builder, value)
    def from_data(self, builder, value):
        raise NotImplementedError("use load_from_data_pointer() instead")
    def load_from_data_pointer(self, builder, ptr, align=None):
        return builder.bitcast(ptr, self.get_value_type())
@register_default(types.EphemeralArray)
class EphemeralArrayModel(PointerModel):
    """Pointer to a fixed-count array; its data form is the inlined array."""
    def __init__(self, dmm, fe_type):
        super(EphemeralArrayModel, self).__init__(dmm, fe_type)
        self._data_type = ir.ArrayType(self._pointee_be_type,
                                       self._fe_type.count)
    def get_data_type(self):
        return self._data_type
    def as_data(self, builder, value):
        # Load each element through the pointer and pack into an LLVM array.
        values = [builder.load(cgutils.gep_inbounds(builder, value, i))
                  for i in range(self._fe_type.count)]
        return cgutils.pack_array(builder, values)
    def from_data(self, builder, value):
        raise NotImplementedError("use load_from_data_pointer() instead")
    def load_from_data_pointer(self, builder, ptr, align=None):
        return builder.bitcast(ptr, self.get_value_type())
@register_default(types.ExternalFunctionPointer)
class ExternalFuncPointerModel(PrimitiveModel):
    """Model an external (non-Numba) function pointer."""
    def __init__(self, dmm, fe_type):
        sig = fe_type.sig
        # Since the function is non-Numba, there is no adaptation
        # of arguments and return value, hence get_value_type().
        retty = dmm.lookup(sig.return_type).get_value_type()
        args = [dmm.lookup(t).get_value_type() for t in sig.args]
        be_type = ir.PointerType(ir.FunctionType(retty, args))
        super(ExternalFuncPointerModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.UniTuple)
@register_default(types.NamedUniTuple)
@register_default(types.StarArgUniTuple)
class UniTupleModel(DataModel):
    """
    Model a homogeneous tuple as an LLVM array of the element model's type.
    As an argument, the tuple is flattened into one argument per element.
    """
    def __init__(self, dmm, fe_type):
        super(UniTupleModel, self).__init__(dmm, fe_type)
        self._elem_model = dmm.lookup(fe_type.dtype)
        self._count = len(fe_type)
        self._value_type = ir.ArrayType(self._elem_model.get_value_type(),
                                        self._count)
        self._data_type = ir.ArrayType(self._elem_model.get_data_type(),
                                       self._count)
    def get_value_type(self):
        return self._value_type
    def get_data_type(self):
        return self._data_type
    def get_return_type(self):
        return self.get_value_type()
    def get_argument_type(self):
        # One argument slot per element.
        return (self._elem_model.get_argument_type(),) * self._count
    def as_argument(self, builder, value):
        out = []
        for i in range(self._count):
            v = builder.extract_value(value, [i])
            v = self._elem_model.as_argument(builder, v)
            out.append(v)
        return out
    def from_argument(self, builder, value):
        # Rebuild the array value from the flattened argument list.
        out = ir.Constant(self.get_value_type(), ir.Undefined)
        for i, v in enumerate(value):
            v = self._elem_model.from_argument(builder, v)
            out = builder.insert_value(out, v, [i])
        return out
    def as_data(self, builder, value):
        out = ir.Constant(self.get_data_type(), ir.Undefined)
        for i in range(self._count):
            val = builder.extract_value(value, [i])
            dval = self._elem_model.as_data(builder, val)
            out = builder.insert_value(out, dval, [i])
        return out
    def from_data(self, builder, value):
        out = ir.Constant(self.get_value_type(), ir.Undefined)
        for i in range(self._count):
            val = builder.extract_value(value, [i])
            dval = self._elem_model.from_data(builder, val)
            out = builder.insert_value(out, dval, [i])
        return out
    def as_return(self, builder, value):
        return value
    def from_return(self, builder, value):
        return value
    def traverse(self, builder):
        def getter(i, value):
            return builder.extract_value(value, i)
        return [(self._fe_type.dtype, partial(getter, i))
                for i in range(self._count)]
    def inner_models(self):
        return [self._elem_model]
class CompositeModel(DataModel):
    """Any model that is composed of multiple other models should subclass from
    this.
    """
    pass
class StructModel(CompositeModel):
    """
    Model composed of named member models.  The value and data
    representations are LLVM literal structs whose field order matches the
    *members* sequence given to the constructor.
    """
    # Lazily built in get_value_type() / get_data_type().
    _value_type = None
    _data_type = None
    def __init__(self, dmm, fe_type, members):
        super(StructModel, self).__init__(dmm, fe_type)
        if members:
            self._fields, self._members = zip(*members)
        else:
            self._fields = self._members = ()
        self._models = tuple([self._dmm.lookup(t) for t in self._members])
    def get_member_fe_type(self, name):
        """
        StructModel-specific: get the Numba type of the field named *name*.
        """
        pos = self.get_field_position(name)
        return self._members[pos]
    def get_value_type(self):
        if self._value_type is None:
            self._value_type = ir.LiteralStructType([t.get_value_type()
                                                     for t in self._models])
        return self._value_type
    def get_data_type(self):
        if self._data_type is None:
            self._data_type = ir.LiteralStructType([t.get_data_type()
                                                    for t in self._models])
        return self._data_type
    def get_argument_type(self):
        return tuple([t.get_argument_type() for t in self._models])
    def get_return_type(self):
        return self.get_data_type()
    def _as(self, methname, builder, value):
        # Apply *methname* member-wise, returning a tuple of converted fields.
        extracted = []
        for i, dm in enumerate(self._models):
            extracted.append(getattr(dm, methname)(builder,
                                                   self.get(builder, value, i)))
        return tuple(extracted)
    def _from(self, methname, builder, value):
        # Inverse of _as(): rebuild a struct value from per-field values.
        struct = ir.Constant(self.get_value_type(), ir.Undefined)
        for i, (dm, val) in enumerate(zip(self._models, value)):
            v = getattr(dm, methname)(builder, val)
            struct = self.set(builder, struct, v, i)
        return struct
    def as_data(self, builder, value):
        """
        Converts the LLVM struct in `value` into a representation suited for
        storing into arrays.
        Note
        ----
        Current implementation rarely changes how types are represented for
        "value" and "data". This is usually a pointless rebuild of the
        immutable LLVM struct value. Luckily, LLVM optimization removes all
        redundancy.
        Sample usecase: Structures nested with pointers to other structures
        that can be serialized into a flat representation when storing into
        array.
        """
        elems = self._as("as_data", builder, value)
        struct = ir.Constant(self.get_data_type(), ir.Undefined)
        for i, el in enumerate(elems):
            struct = builder.insert_value(struct, el, [i])
        return struct
    def from_data(self, builder, value):
        """
        Convert from "data" representation back into "value" representation.
        Usually invoked when loading from array.
        See notes in `as_data()`
        """
        vals = [builder.extract_value(value, [i])
                for i in range(len(self._members))]
        return self._from("from_data", builder, vals)
    def load_from_data_pointer(self, builder, ptr, align=None):
        # Load each member through its own data pointer, then assemble.
        values = []
        for i, model in enumerate(self._models):
            elem_ptr = cgutils.gep_inbounds(builder, ptr, 0, i)
            val = model.load_from_data_pointer(builder, elem_ptr, align)
            values.append(val)
        struct = ir.Constant(self.get_value_type(), ir.Undefined)
        for i, val in enumerate(values):
            struct = self.set(builder, struct, val, i)
        return struct
    def as_argument(self, builder, value):
        return self._as("as_argument", builder, value)
    def from_argument(self, builder, value):
        return self._from("from_argument", builder, value)
    def as_return(self, builder, value):
        # Returned structs use the "data" representation (see get_return_type).
        elems = self._as("as_data", builder, value)
        struct = ir.Constant(self.get_data_type(), ir.Undefined)
        for i, el in enumerate(elems):
            struct = builder.insert_value(struct, el, [i])
        return struct
    def from_return(self, builder, value):
        vals = [builder.extract_value(value, [i])
                for i in range(len(self._members))]
        return self._from("from_data", builder, vals)
    def get(self, builder, val, pos):
        """Get a field at the given position or the fieldname
        Args
        ----
        builder:
            LLVM IRBuilder
        val:
            value to be inserted
        pos: int or str
            field index or field name
        Returns
        -------
        Extracted value
        """
        if isinstance(pos, str):
            pos = self.get_field_position(pos)
        return builder.extract_value(val, [pos],
                                     name="extracted." + self._fields[pos])
    def set(self, builder, stval, val, pos):
        """Set a field at the given position or the fieldname
        Args
        ----
        builder:
            LLVM IRBuilder
        stval:
            LLVM struct value
        val:
            value to be inserted
        pos: int or str
            field index or field name
        Returns
        -------
        A new LLVM struct with the value inserted
        """
        if isinstance(pos, str):
            pos = self.get_field_position(pos)
        return builder.insert_value(stval, val, [pos],
                                    name="inserted." + self._fields[pos])
    def get_field_position(self, field):
        try:
            return self._fields.index(field)
        except ValueError:
            raise KeyError("%s does not have a field named %r"
                           % (self.__class__.__name__, field))
    @property
    def field_count(self):
        return len(self._fields)
    def get_type(self, pos):
        """Get the frontend type (numba type) of a field given the position
        or the fieldname
        Args
        ----
        pos: int or str
            field index or field name
        """
        if isinstance(pos, str):
            pos = self.get_field_position(pos)
        return self._members[pos]
    def get_model(self, pos):
        """
        Get the datamodel of a field given the position or the fieldname.
        Args
        ----
        pos: int or str
            field index or field name
        """
        return self._models[pos]
    def traverse(self, builder):
        def getter(k, value):
            if value.type != self.get_value_type():
                args = self.get_value_type(), value.type
                raise TypeError("expecting {0} but got {1}".format(*args))
            return self.get(builder, value, k)
        return [(self.get_type(k), partial(getter, k)) for k in self._fields]
    def inner_models(self):
        return self._models
@register_default(types.Complex)
class ComplexModel(StructModel):
    """Complex numbers as a (real, imag) struct of the underlying float."""
    _element_type = NotImplemented
    def __init__(self, dmm, fe_type):
        members = [
            ('real', fe_type.underlying_float),
            ('imag', fe_type.underlying_float),
        ]
        super(ComplexModel, self).__init__(dmm, fe_type, members)
@register_default(types.LiteralList)
@register_default(types.LiteralStrKeyDict)
@register_default(types.Tuple)
@register_default(types.NamedTuple)
@register_default(types.StarArgTuple)
class TupleModel(StructModel):
    """Heterogeneous tuple as a struct with fields 'f0', 'f1', ..."""
    def __init__(self, dmm, fe_type):
        members = [('f' + str(i), t) for i, t in enumerate(fe_type)]
        super(TupleModel, self).__init__(dmm, fe_type, members)
@register_default(types.UnionType)
class UnionModel(StructModel):
    """Tagged union: an integer tag plus a tuple holding every variant."""
    def __init__(self, dmm, fe_type):
        members = [
            ('tag', types.uintp),
            # XXX: it should really be a MemInfoPointer(types.voidptr)
            ('payload', types.Tuple.from_types(fe_type.types)),
        ]
        super(UnionModel, self).__init__(dmm, fe_type, members)
@register_default(types.Pair)
class PairModel(StructModel):
    """Simple two-field (first, second) struct."""
    def __init__(self, dmm, fe_type):
        members = [('first', fe_type.first_type),
                   ('second', fe_type.second_type)]
        super(PairModel, self).__init__(dmm, fe_type, members)
@register_default(types.ListPayload)
class ListPayloadModel(StructModel):
    """Heap payload shared by a list and its iterators."""
    def __init__(self, dmm, fe_type):
        # The fields are mutable but the payload is always manipulated
        # by reference. This scheme allows mutations of an array to
        # be seen by its iterators.
        members = [
            ('size', types.intp),
            ('allocated', types.intp),
            # This member is only used for reflected lists
            ('dirty', types.boolean),
            # Actually an inlined var-sized array
            ('data', fe_type.container.dtype),
        ]
        super(ListPayloadModel, self).__init__(dmm, fe_type, members)
@register_default(types.List)
class ListModel(StructModel):
    """A list value: a meminfo reference to its payload plus a parent."""
    def __init__(self, dmm, fe_type):
        payload_type = types.ListPayload(fe_type)
        members = [
            # The meminfo data points to a ListPayload
            ('meminfo', types.MemInfoPointer(payload_type)),
            # This member is only used for reflected lists
            ('parent', types.pyobject),
        ]
        super(ListModel, self).__init__(dmm, fe_type, members)
@register_default(types.ListIter)
class ListIterModel(StructModel):
    """List iterator: shares the list payload and keeps a mutable index."""
    def __init__(self, dmm, fe_type):
        payload_type = types.ListPayload(fe_type.container)
        members = [
            # The meminfo data points to a ListPayload (shared with the
            # original list object)
            ('meminfo', types.MemInfoPointer(payload_type)),
            ('index', types.EphemeralPointer(types.intp)),
        ]
        super(ListIterModel, self).__init__(dmm, fe_type, members)
@register_default(types.SetEntry)
class SetEntryModel(StructModel):
    """One hash-table slot of a set: (hash, key)."""
    def __init__(self, dmm, fe_type):
        dtype = fe_type.set_type.dtype
        members = [
            # -1 = empty, -2 = deleted
            ('hash', types.intp),
            ('key', dtype),
        ]
        super(SetEntryModel, self).__init__(dmm, fe_type, members)
@register_default(types.SetPayload)
class SetPayloadModel(StructModel):
    """Heap payload of a set: open-addressing hash table bookkeeping."""
    def __init__(self, dmm, fe_type):
        entry_type = types.SetEntry(fe_type.container)
        members = [
            # Number of active + deleted entries
            ('fill', types.intp),
            # Number of active entries
            ('used', types.intp),
            # Allocated size - 1 (size being a power of 2)
            ('mask', types.intp),
            # Search finger
            ('finger', types.intp),
            # This member is only used for reflected sets
            ('dirty', types.boolean),
            # Actually an inlined var-sized array
            ('entries', entry_type),
        ]
        super(SetPayloadModel, self).__init__(dmm, fe_type, members)
@register_default(types.Set)
class SetModel(StructModel):
    """A set value: a meminfo reference to its payload plus a parent."""
    def __init__(self, dmm, fe_type):
        payload_type = types.SetPayload(fe_type)
        members = [
            # The meminfo data points to a SetPayload
            ('meminfo', types.MemInfoPointer(payload_type)),
            # This member is only used for reflected sets
            ('parent', types.pyobject),
        ]
        super(SetModel, self).__init__(dmm, fe_type, members)
@register_default(types.SetIter)
class SetIterModel(StructModel):
    """Set iterator: shares the set payload and keeps a mutable table index."""
    def __init__(self, dmm, fe_type):
        payload_type = types.SetPayload(fe_type.container)
        members = [
            # The meminfo data points to a SetPayload (shared with the
            # original set object)
            ('meminfo', types.MemInfoPointer(payload_type)),
            # The index into the entries table
            ('index', types.EphemeralPointer(types.intp)),
        ]
        super(SetIterModel, self).__init__(dmm, fe_type, members)
@register_default(types.Array)
@register_default(types.Buffer)
@register_default(types.ByteArray)
@register_default(types.Bytes)
@register_default(types.MemoryView)
@register_default(types.PyArray)
class ArrayModel(StructModel):
    """Array/buffer struct: meminfo, parent object, sizes, data pointer,
    and per-dimension shape/strides tuples."""
    def __init__(self, dmm, fe_type):
        ndim = fe_type.ndim
        members = [
            ('meminfo', types.MemInfoPointer(fe_type.dtype)),
            ('parent', types.pyobject),
            ('nitems', types.intp),
            ('itemsize', types.intp),
            ('data', types.CPointer(fe_type.dtype)),
            ('shape', types.UniTuple(types.intp, ndim)),
            ('strides', types.UniTuple(types.intp, ndim)),
        ]
        super(ArrayModel, self).__init__(dmm, fe_type, members)
@register_default(types.ArrayFlags)
class ArrayFlagsModel(StructModel):
    """Flags object holding only a reference to its parent array."""
    def __init__(self, dmm, fe_type):
        members = [
            ('parent', fe_type.array_type),
        ]
        super(ArrayFlagsModel, self).__init__(dmm, fe_type, members)
@register_default(types.NestedArray)
class NestedArrayModel(ArrayModel):
    """Array nested in a record: data form is an inlined fixed-size array."""
    def __init__(self, dmm, fe_type):
        self._be_type = dmm.lookup(fe_type.dtype).get_data_type()
        super(NestedArrayModel, self).__init__(dmm, fe_type)
    def get_data_type(self):
        ret = ir.ArrayType(self._be_type, self._fe_type.nitems)
        return ret
@register_default(types.Optional)
class OptionalModel(StructModel):
    """Optional value: the wrapped data plus a 'valid' flag.

    Returns use the wrapped value model directly; as_return is not
    supported by this model.
    """
    def __init__(self, dmm, fe_type):
        members = [
            ('data', fe_type.type),
            ('valid', types.boolean),
        ]
        self._value_model = dmm.lookup(fe_type.type)
        super(OptionalModel, self).__init__(dmm, fe_type, members)
    def get_return_type(self):
        return self._value_model.get_return_type()
    def as_return(self, builder, value):
        raise NotImplementedError
    def from_return(self, builder, value):
        return self._value_model.from_return(builder, value)
    def traverse(self, builder):
        def get_data(value):
            # Yield a null-like constant when the optional is invalid.
            valid = get_valid(value)
            data = self.get(builder, value, "data")
            return builder.select(valid, data, ir.Constant(data.type, None))
        def get_valid(value):
            return self.get(builder, value, "valid")
        return [(self.get_type("data"), get_data),
                (self.get_type("valid"), get_valid)]
@register_default(types.Record)
class RecordModel(CompositeModel):
    """Record (structured scalar): a byte array passed around by pointer."""
    def __init__(self, dmm, fe_type):
        super(RecordModel, self).__init__(dmm, fe_type)
        self._models = [self._dmm.lookup(t) for _, t in fe_type.members]
        # Raw storage: [size x i8], addressed through a pointer.
        self._be_type = ir.ArrayType(ir.IntType(8), fe_type.size)
        self._be_ptr_type = self._be_type.as_pointer()
    def get_value_type(self):
        """Passed around as reference to underlying data
        """
        return self._be_ptr_type
    def get_argument_type(self):
        return self._be_ptr_type
    def get_return_type(self):
        return self._be_ptr_type
    def get_data_type(self):
        return self._be_type
    def as_data(self, builder, value):
        return builder.load(value)
    def from_data(self, builder, value):
        raise NotImplementedError("use load_from_data_pointer() instead")
    def as_argument(self, builder, value):
        return value
    def from_argument(self, builder, value):
        return value
    def as_return(self, builder, value):
        return value
    def from_return(self, builder, value):
        return value
    def load_from_data_pointer(self, builder, ptr, align=None):
        return builder.bitcast(ptr, self.get_value_type())
@register_default(types.UnicodeCharSeq)
class UnicodeCharSeq(DataModel):
    """Fixed-length unicode sequence as an array of unicode-char-sized ints.

    All representations (value/data/argument/return) are identical.
    """
    def __init__(self, dmm, fe_type):
        super(UnicodeCharSeq, self).__init__(dmm, fe_type)
        charty = ir.IntType(numpy_support.sizeof_unicode_char * 8)
        self._be_type = ir.ArrayType(charty, fe_type.count)
    def get_value_type(self):
        return self._be_type
    def get_data_type(self):
        return self._be_type
    def as_data(self, builder, value):
        return value
    def from_data(self, builder, value):
        return value
    def as_return(self, builder, value):
        return value
    def from_return(self, builder, value):
        return value
    def as_argument(self, builder, value):
        return value
    def from_argument(self, builder, value):
        return value
@register_default(types.CharSeq)
class CharSeq(DataModel):
    """Fixed-length byte sequence as an [count x i8] array.

    All representations (value/data/argument/return) are identical.
    """
    def __init__(self, dmm, fe_type):
        super(CharSeq, self).__init__(dmm, fe_type)
        charty = ir.IntType(8)
        self._be_type = ir.ArrayType(charty, fe_type.count)
    def get_value_type(self):
        return self._be_type
    def get_data_type(self):
        return self._be_type
    def as_data(self, builder, value):
        return value
    def from_data(self, builder, value):
        return value
    def as_return(self, builder, value):
        return value
    def from_return(self, builder, value):
        return value
    def as_argument(self, builder, value):
        return value
    def from_argument(self, builder, value):
        return value
class CContiguousFlatIter(StructModel):
    """Flat iterator over a C-contiguous array: a single running index."""
    def __init__(self, dmm, fe_type, need_indices):
        assert fe_type.array_type.layout == 'C'
        array_type = fe_type.array_type
        dtype = array_type.dtype
        ndim = array_type.ndim
        members = [('array', array_type),
                   ('stride', types.intp),
                   ('index', types.EphemeralPointer(types.intp)),
                   ]
        if need_indices:
            # For ndenumerate()
            members.append(('indices', types.EphemeralArray(types.intp, ndim)))
        super(CContiguousFlatIter, self).__init__(dmm, fe_type, members)
class FlatIter(StructModel):
    """Flat iterator for non-C-contiguous arrays: per-dimension pointers
    and indices plus an exhaustion flag."""
    def __init__(self, dmm, fe_type):
        array_type = fe_type.array_type
        dtype = array_type.dtype
        ndim = array_type.ndim
        members = [('array', array_type),
                   ('pointers', types.EphemeralArray(types.CPointer(dtype), ndim)),
                   ('indices', types.EphemeralArray(types.intp, ndim)),
                   ('exhausted', types.EphemeralPointer(types.boolean)),
                   ]
        super(FlatIter, self).__init__(dmm, fe_type, members)
@register_default(types.UniTupleIter)
class UniTupleIter(StructModel):
    """Iterator over a homogeneous tuple: mutable index plus the tuple."""
    def __init__(self, dmm, fe_type):
        members = [('index', types.EphemeralPointer(types.intp)),
                   ('tuple', fe_type.container,)]
        super(UniTupleIter, self).__init__(dmm, fe_type, members)
@register_default(types.SliceType)
class SliceModel(StructModel):
    """Slice as a (start, stop, step) struct of intp."""
    def __init__(self, dmm, fe_type):
        members = [('start', types.intp),
                   ('stop', types.intp),
                   ('step', types.intp),
                   ]
        super(SliceModel, self).__init__(dmm, fe_type, members)
@register_default(types.NPDatetime)
@register_default(types.NPTimedelta)
class NPDatetimeModel(PrimitiveModel):
    """NumPy datetime64/timedelta64 as a 64-bit integer."""
    def __init__(self, dmm, fe_type):
        be_type = ir.IntType(64)
        super(NPDatetimeModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.ArrayIterator)
class ArrayIterator(StructModel):
    """Array iterator: mutable index plus the iterated array."""
    def __init__(self, dmm, fe_type):
        # We use an unsigned index to avoid the cost of negative index tests.
        members = [('index', types.EphemeralPointer(types.uintp)),
                   ('array', fe_type.array_type)]
        super(ArrayIterator, self).__init__(dmm, fe_type, members)
@register_default(types.EnumerateType)
class EnumerateType(StructModel):
    """enumerate() state: mutable counter plus the wrapped iterator."""
    def __init__(self, dmm, fe_type):
        members = [('count', types.EphemeralPointer(types.intp)),
                   ('iter', fe_type.source_type)]
        super(EnumerateType, self).__init__(dmm, fe_type, members)
@register_default(types.ZipType)
class ZipType(StructModel):
    """zip() state: one iterator field per zipped source."""
    def __init__(self, dmm, fe_type):
        members = [('iter%d' % i, source_type.iterator_type)
                   for i, source_type in enumerate(fe_type.source_types)]
        super(ZipType, self).__init__(dmm, fe_type, members)
@register_default(types.RangeIteratorType)
class RangeIteratorType(StructModel):
    """range iterator: mutable current value/count plus stop and step."""
    def __init__(self, dmm, fe_type):
        int_type = fe_type.yield_type
        members = [('iter', types.EphemeralPointer(int_type)),
                   ('stop', int_type),
                   ('step', int_type),
                   ('count', types.EphemeralPointer(int_type))]
        super(RangeIteratorType, self).__init__(dmm, fe_type, members)
@register_default(types.Generator)
class GeneratorModel(CompositeModel):
    """Generator closure: {resume index, packed args, packed state},
    passed around by pointer but returned by value."""
    def __init__(self, dmm, fe_type):
        super(GeneratorModel, self).__init__(dmm, fe_type)
        # XXX Fold this in DataPacker?
        self._arg_models = [self._dmm.lookup(t) for t in fe_type.arg_types
                            if not isinstance(t, types.Omitted)]
        self._state_models = [self._dmm.lookup(t) for t in fe_type.state_types]
        self._args_be_type = ir.LiteralStructType(
            [t.get_data_type() for t in self._arg_models])
        self._state_be_type = ir.LiteralStructType(
            [t.get_data_type() for t in self._state_models])
        # The whole generator closure
        self._be_type = ir.LiteralStructType(
            [self._dmm.lookup(types.int32).get_value_type(),
             self._args_be_type, self._state_be_type])
        self._be_ptr_type = self._be_type.as_pointer()
    def get_value_type(self):
        """
        The generator closure is passed around as a reference.
        """
        return self._be_ptr_type
    def get_argument_type(self):
        return self._be_ptr_type
    def get_return_type(self):
        return self._be_type
    def get_data_type(self):
        return self._be_type
    def as_argument(self, builder, value):
        return value
    def from_argument(self, builder, value):
        return value
    def as_return(self, builder, value):
        return self.as_data(builder, value)
    def from_return(self, builder, value):
        return self.from_data(builder, value)
    def as_data(self, builder, value):
        return builder.load(value)
    def from_data(self, builder, value):
        # Spill the struct to a stack slot so callers get a reference.
        stack = cgutils.alloca_once(builder, value.type)
        builder.store(value, stack)
        return stack
@register_default(types.ArrayCTypes)
class ArrayCTypesModel(StructModel):
    """array.ctypes view: the data pointer plus its owning meminfo."""
    def __init__(self, dmm, fe_type):
        # ndim = fe_type.ndim
        members = [('data', types.CPointer(fe_type.dtype)),
                   ('meminfo', types.MemInfoPointer(fe_type.dtype))]
        super(ArrayCTypesModel, self).__init__(dmm, fe_type, members)
@register_default(types.RangeType)
class RangeModel(StructModel):
    """range object as a (start, stop, step) struct of its yield type."""
    def __init__(self, dmm, fe_type):
        int_type = fe_type.iterator_type.yield_type
        members = [('start', int_type),
                   ('stop', int_type),
                   ('step', int_type)]
        super(RangeModel, self).__init__(dmm, fe_type, members)
# =============================================================================
@register_default(types.NumpyNdIndexType)
class NdIndexModel(StructModel):
    """np.ndindex state: the shape, mutable indices, exhaustion flag."""
    def __init__(self, dmm, fe_type):
        ndim = fe_type.ndim
        members = [('shape', types.UniTuple(types.intp, ndim)),
                   ('indices', types.EphemeralArray(types.intp, ndim)),
                   ('exhausted', types.EphemeralPointer(types.boolean)),
                   ]
        super(NdIndexModel, self).__init__(dmm, fe_type, members)
@register_default(types.NumpyFlatType)
def handle_numpy_flat_type(dmm, ty):
    """Pick the flat-iterator model matching the array's layout."""
    # C-contiguous arrays can be walked with a single flat index.
    if ty.array_type.layout == 'C':
        return CContiguousFlatIter(dmm, ty, need_indices=False)
    return FlatIter(dmm, ty)
@register_default(types.NumpyNdEnumerateType)
def handle_numpy_ndenumerate_type(dmm, ty):
    """Pick the ndenumerate iterator model matching the array's layout."""
    # ndenumerate also needs the running multi-dimensional indices.
    if ty.array_type.layout == 'C':
        return CContiguousFlatIter(dmm, ty, need_indices=True)
    return FlatIter(dmm, ty)
@register_default(types.BoundFunction)
def handle_bound_function(dmm, ty):
    # The same as the underlying type
    return dmm[ty.this]
@register_default(types.NumpyNdIterType)
class NdIter(StructModel):
    """np.nditer state: the arrays, main shape/indices, per-sub-iterator
    indexing state, and slots for scalar (non-array) operands."""
    def __init__(self, dmm, fe_type):
        array_types = fe_type.arrays
        ndim = fe_type.ndim
        shape_len = ndim if fe_type.need_shaped_indexing else 1
        members = [('exhausted', types.EphemeralPointer(types.boolean)),
                   ('arrays', types.Tuple(array_types)),
                   # The iterator's main shape and indices
                   ('shape', types.UniTuple(types.intp, shape_len)),
                   ('indices', types.EphemeralArray(types.intp, shape_len)),
                   ]
        # Indexing state for the various sub-iterators
        # XXX use a tuple instead?
        for i, sub in enumerate(fe_type.indexers):
            kind, start_dim, end_dim, _ = sub
            member_name = 'index%d' % i
            if kind == 'flat':
                # A single index into the flattened array
                members.append((member_name, types.EphemeralPointer(types.intp)))
            elif kind in ('scalar', 'indexed', '0d'):
                # Nothing required
                pass
            else:
                assert 0
        # Slots holding values of the scalar args
        # XXX use a tuple instead?
        for i, ty in enumerate(fe_type.arrays):
            if not isinstance(ty, types.Array):
                member_name = 'scalar%d' % i
                members.append((member_name, types.EphemeralPointer(ty)))
        super(NdIter, self).__init__(dmm, fe_type, members)
@register_default(types.DeferredType)
class DeferredStructModel(CompositeModel):
    """Model for deferred (recursively defined) types.

    Uses LLVM identified (named) struct types so the body can be filled in
    later, once the actual type's model is known.
    """
    def __init__(self, dmm, fe_type):
        super(DeferredStructModel, self).__init__(dmm, fe_type)
        # Unique name per deferred type instance.
        self.typename = "deferred.{0}".format(id(fe_type))
        self.actual_fe_type = fe_type.get()
    def get_value_type(self):
        return ir.global_context.get_identified_type(self.typename + '.value')
    def get_data_type(self):
        return ir.global_context.get_identified_type(self.typename + '.data')
    def get_argument_type(self):
        return self._actual_model.get_argument_type()
    def as_argument(self, builder, value):
        inner = self.get(builder, value)
        return self._actual_model.as_argument(builder, inner)
    def from_argument(self, builder, value):
        res = self._actual_model.from_argument(builder, value)
        return self.set(builder, self.make_uninitialized(), res)
    def from_data(self, builder, value):
        self._define()
        elem = self.get(builder, value)
        value = self._actual_model.from_data(builder, elem)
        out = self.make_uninitialized()
        return self.set(builder, out, value)
    def as_data(self, builder, value):
        self._define()
        elem = self.get(builder, value)
        value = self._actual_model.as_data(builder, elem)
        out = self.make_uninitialized(kind='data')
        return self.set(builder, out, value)
    def from_return(self, builder, value):
        return value
    def as_return(self, builder, value):
        return value
    def get(self, builder, value):
        # The wrapped value is the single field of the identified struct.
        return builder.extract_value(value, [0])
    def set(self, builder, value, content):
        return builder.insert_value(value, content, [0])
    def make_uninitialized(self, kind='value'):
        self._define()
        if kind == 'value':
            ty = self.get_value_type()
        else:
            ty = self.get_data_type()
        return ir.Constant(ty, ir.Undefined)
    def _define(self):
        # Fill in the identified struct bodies from the actual model.
        valty = self.get_value_type()
        self._define_value_type(valty)
        datty = self.get_data_type()
        self._define_data_type(datty)
    def _define_value_type(self, value_type):
        if value_type.is_opaque:
            value_type.set_body(self._actual_model.get_value_type())
    def _define_data_type(self, data_type):
        if data_type.is_opaque:
            data_type.set_body(self._actual_model.get_data_type())
    @property
    def _actual_model(self):
        return self._dmm.lookup(self.actual_fe_type)
    def traverse(self, builder):
        return [(self.actual_fe_type,
                 lambda value: builder.extract_value(value, [0]))]
@register_default(types.StructRefPayload)
class StructPayloadModel(StructModel):
    """Model for the payload of a mutable struct
    """
    def __init__(self, dmm, fe_typ):
        members = tuple(fe_typ.field_dict.items())
        super().__init__(dmm, fe_typ, members)
class StructRefModel(StructModel):
    """Model for a mutable struct.
    A reference to the payload
    """
    def __init__(self, dmm, fe_typ):
        dtype = fe_typ.get_data_type()
        members = [
            ("meminfo", types.MemInfoPointer(dtype)),
        ]
        super().__init__(dmm, fe_typ, members)
| {
"repo_name": "stonebig/numba",
"path": "numba/core/datamodel/models.py",
"copies": "3",
"size": "44192",
"license": "bsd-2-clause",
"hash": -264198707642144770,
"line_mean": 31.0929557008,
"line_max": 83,
"alpha_frac": 0.6056299783,
"autogenerated": false,
"ratio": 3.7751580386126773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5880788016912677,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import deque
from tornado.ioloop import IOLoop
from ..commands import command, define_command
from ..entity import entity_method
from .. import actions, events
@entity_method
def teleport(self, dest_path):
    """
    Moves the entity directly to the specified destination.
    """
    # A falsy dest_path skips the lookup, leaving dest falsy as well.
    dest = dest_path and self.db.get_object(dest_path)
    # NOTE(review): uses self._parent here while move() uses self.parent —
    # confirm whether that difference is intentional.
    actions.do(self, 'travel', observers=[],
        origin=self._parent, dest=dest, exit_portal=None, entry_portal=None)
@entity_method
def move(self, exit_portal):
    """
    Moves the entity through an exit with the given label.
    """
    dest_path, entry_direction = exit_portal.portal.link
    dest = self.db.get_object(dest_path)
    if not dest:
        # The exit's link points at a missing object; block the move.
        self.show_text('A strange force is blocking your way.')
        return
    # Find the matching entry portal on the destination side, if any.
    entry_portal = entry_direction and dest.location.exits.get(entry_direction)
    actions.do(self, 'travel', observers=[],
        origin=self.parent, dest=dest, exit_portal=exit_portal, entry_portal=entry_portal)
@entity_method
def move_direction(self, exit_label):
    """Move through the exit labeled *exit_label*, if this location has one."""
    portal = self.parent.location.exits.get(exit_label)
    if not portal:
        self.show_text('You cannot go that direction.')
        return
    self.move(portal)
@events.subscriber
def _travel(event):
    """
    Publishes 'exit' and 'enter' events to the two locations involved in a
    movement action.
    """
    # Origin or dest may be absent (e.g. teleport into/out of nowhere).
    if event.origin:
        events.publish(events.make_event_name(event.phase, 'exit'),
            actor=event.actor, location=event.origin, portal=event.exit_portal,
            observers=actions.default_observers(event.origin))
    if event.dest:
        events.publish(events.make_event_name(event.phase, 'enter'),
            actor=event.actor, location=event.dest, portal=event.entry_portal,
            observers=actions.default_observers(event.dest))
@events.subscriber
def will_exit(event):
    """Announce the imminent departure to every player in the room."""
    direction = event.portal and event.portal.label
    for observer in event.actor.parent.get_contents('player'):
        observer.session.send_message('didExit', event.actor.path, direction)
@events.subscriber
def do_exit(event):
    """Detach the actor from its old location's contents."""
    contents = event.location.container.contents
    contents.pop(event.actor.path)
    event.actor._parent = None
@events.subscriber
def do_enter(event):
    """Attach the actor to its new location's contents."""
    destination = event.location
    destination.container.contents[event.actor.path] = event.actor
    event.actor._parent = destination
def did_enter(event):
    """After a completed enter: refresh the arriving player's view and
    notify the other players in the room."""
    # Describe the new location to the player.
    actor = event.actor
    if hasattr(actor, 'player'):
        player = actor.player
        player.location = actor.parent.path
        actor.describe_room()
        actor.update_map()
        neighbors = [actor.get_neighbor_properties(entity)
                     for entity in actor.parent.get_contents() if entity is not actor]
        actor.session.send_message('setNeighbors', neighbors)
    # Notify other players.
    direction = event.portal and event.portal.label
    for player in event.actor.parent.get_contents('player'):
        if event.actor is not player:
            properties = player.get_neighbor_properties(event.actor)
            player.session.send_message('didEnter', properties, direction)
# Subscribed manually rather than via @events.subscriber; the third
# argument is presumably a priority/order value — confirm in events module.
events.subscribe(did_enter, 'did_enter', 1)
@entity_method
def get_nearby_locations(self, max_distance=1):
    """
    Generates a sequence of all rooms reachable within `max_distance` steps from
    this entity, including the entity itself. Each item in the output is a tuple
    `(room, moves)` where `moves` is a list of move directions that can be used
    to reach the room.
    """
    # Breadth-first search over exit links, starting at this entity.
    seen = set([self])
    frontier = deque([(self, [])])
    while frontier:
        room, path = frontier.popleft()
        yield room, path
        if len(path) >= max_distance:
            continue
        for direction, exit in room.location.exits.items():
            dest = self.db.get_object(exit.portal.link[0])
            if dest in seen:
                continue
            seen.add(dest)
            frontier.append((dest, path + [direction]))
@command('go portal=(location,?portal)')
def go_command(actor, portal):
    """
    Move to another location.
    With the `go` command you can specify a direction, such as `go north`, or
    the name of an object you want to go into, such as `go cabin`.
    To move in a direction you can also just type the name of the direction,
    such as `northwest` or `down`, or its abbreviated form, such as `nw` or `d`.
    """
    # Guard clauses: no target, or an ambiguous (multi-match) target.
    if not portal:
        actor.show_text('Where do you want to go?')
        return
    if len(portal) > 1:
        actor.show_text('You can only go one place at a time.')
        return
    actor.move(portal[0])
def direction_command(direction, actor):
    """Move `actor` one step toward `direction` (backs the bare-direction
    shortcut commands such as `north` or `nw`)."""
    move = actor.move_direction
    move(direction)
# Register one command per compass/vertical direction, each with its short
# alias (e.g. "north,n"), all sharing go_command's help text and delegating
# to direction_command with the direction pre-bound.
for direction, shortcut in (
        ('north', 'n'),
        ('northeast', 'ne'),
        ('east', 'e'),
        ('southeast', 'se'),
        ('south', 's'),
        ('southwest', 'sw'),
        ('west', 'w'),
        ('northwest', 'nw'),
        ('up', 'u'),
        ('down', 'd')):
    define_command('{},{}'.format(direction, shortcut), go_command.__doc__,
                   partial(direction_command, direction))
| {
"repo_name": "wirefish/amber",
"path": "amber/systems/movement.py",
"copies": "1",
"size": "5201",
"license": "bsd-3-clause",
"hash": -6185169918418863000,
"line_mean": 34.6232876712,
"line_max": 97,
"alpha_frac": 0.6402614882,
"autogenerated": false,
"ratio": 3.8270787343635027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9942635486172693,
"avg_score": 0.004940947278162079,
"num_lines": 146
} |
from functools import partial
from collections import namedtuple, defaultdict
import struct
from zorro.util import cached_property
try:
from .shm import ShmPixbuf
except ImportError:
import warnings
warnings.warn('Shm is not available, expect poor performance.')
try:
from .pixbuf import Pixbuf
except ImportError:
import warnings
warnings.warn('Cairo is not available, no drawing would work')
# Bytes per item for each X11 property "format" (format is bits per item).
fmtlen = {
    0: 0,
    8: 1,
    16: 2,
    32: 4,
}
# struct format character used to unpack property values of each width.
fmtchar = {
    8: 'B',
    16: 'H',
    32: 'L',
}
class Rectangle(namedtuple('_Rectangle', 'x y width height')):
    """Immutable geometry value (x, y, width, height)."""
    # __slots__ keeps instances dict-free, like the underlying namedtuple.
    __slots__ = ()
class Const(int):
def __new__(cls, val, name):
return super().__new__(cls, val)
def __init__(self, val, name):
self.name = name
def __repr__(self):
return '<{} {}:{}>'.format(self.__class__.__name__, self.name, self)
class Atom(Const):
    """An X11 atom: an integer id that remembers its interned name."""
    pass
class AtomWrapper(object):
    """Cache of X11 atoms exposed as attributes.

    Predefined atoms come from the protocol's 'Atom' enum.  Unknown names
    are interned on first attribute access; unknown ids are resolved with
    GetAtomName on first item access.  Both are cached afterwards.
    """

    def __init__(self, connection, proto):
        self._conn = connection
        self.proto = proto
        self._atoms = {}  # id -> Atom
        for k, v in self.proto.enums['Atom'].items():
            atom = Atom(v, k)
            self._atoms[v] = atom
            setattr(self, k, atom)

    def __getattr__(self, name):
        # Only reached for atoms not cached yet; intern on the server.
        assert name.isidentifier()
        props = self._conn.do_request(self.proto.requests['InternAtom'],
            only_if_exists=False,
            name=name,
            )
        atom = Atom(props['atom'], name)
        self._atoms[props['atom']] = atom
        # Cache as a real attribute so __getattr__ is not hit again.
        setattr(self, name, atom)
        return atom

    def __getitem__(self, value):
        # Map an integer atom id back to its Atom object.
        try:
            return self._atoms[value]
        except KeyError:
            props = self._conn.do_request(self.proto.requests['GetAtomName'],
                atom=value)
            atom = Atom(value, props['name'])
            self._atoms[value] = atom
            setattr(self, props['name'], atom)
            return atom
class EnumWrapper(object):
    """Expose the members of an enum mapping as attributes whose values
    are named Const integers."""

    def __init__(self, enums):
        for member_name, member_value in enums.items():
            setattr(self, member_name, Const(member_value, member_name))
class RawWrapper(object):
    """Attribute-access proxy over a protocol's requests.

    Looking up ``wrapper.SomeRequest`` yields a callable that forwards to
    the connection's ``do_request`` with that request pre-bound (and the
    extension opcode, when one was given).
    """

    def __init__(self, conn, proto, opcode=None):
        self._conn = conn
        self._proto = proto
        self._opcode = opcode

    def __getattr__(self, name):
        # Resolve the request lazily and hand back a pre-bound callable.
        request = self._proto.requests[name]
        return partial(self._conn.do_request, request, _opcode=self._opcode)
class Core(object):
    """Facade over an X11 connection.

    Wraps the core protocol plus any present extensions, exposing their
    requests (``raw``), enums and atoms as attributes, and provides helpers
    for windows, events, properties, keymaps and pixel buffers.
    """

    def __init__(self, connection):
        self._conn = connection
        self._conn.connection()
        self.proto = connection.proto.subprotos['xproto']
        self.atom = AtomWrapper(connection, self.proto)
        self.raw = RawWrapper(connection, self.proto)
        # Core-protocol enums become attributes (e.g. self.ModMask).
        for k, lst in self.proto.enums.items():
            setattr(self, k, EnumWrapper(lst))
        # Probe each extension; attach wrappers only for present ones.
        for k, v in connection.proto.subprotos.items():
            if not v.extension:
                continue
            ext = connection.query_extension(k)
            if not ext['present']:
                continue
            rw = RawWrapper(self._conn, v, ext['major_opcode'])
            setattr(self, k, rw)
            for ename, lst in v.enums.items():
                setattr(rw, ename, EnumWrapper(lst))
        # Only the first screen is used.
        self.root = self._conn.init_data['roots'][0]
        self.root_window = self.root['root']
        pad = self._conn.init_data['bitmap_format_scanline_pad']
        assert pad % 8 == 0
        self.bitmap_stride = pad//8
        self.current_event = None
        self.last_event = None
        self.last_time = 0
        self._event_iterator = self._events()

    def init_keymap(self):
        """Build keycode<->keysym lookup tables from the server's mapping."""
        self.keycode_to_keysym = {}
        self.shift_keycode_to_keysym = {}
        self.keysym_to_keycode = defaultdict(list)
        idata = self._conn.init_data
        mapping = self.raw.GetKeyboardMapping(
            first_keycode=idata['min_keycode'],
            count=idata['max_keycode'] - idata['min_keycode'],
            )
        mapiter = iter(mapping['keysyms'])
        # zip pulls keysyms_per_keycode entries from the SAME iterator per
        # keycode, so each row is (keycode, keysym, shifted-keysym, ...).
        for row in zip(range(idata['min_keycode'], idata['max_keycode']),
                *(mapiter for i in range(mapping['keysyms_per_keycode']))):
            self.keycode_to_keysym[row[0]] = row[1]
            self.shift_keycode_to_keysym[row[0]] = row[2]
            self.keysym_to_keycode[row[1]].append(row[0])
        caps = self.ModMask.Lock  # caps lock
        num = getattr(self.ModMask, '2')  # mod2 is usually numlock
        mode = getattr(self.ModMask, '5')  # mod5 is usually mode_switch
        # Mask used to ignore lock-style modifiers when matching bindings.
        self.modifiers_mask = ~(caps|num|mode)

    def create_toplevel(self, bounds, border=0, klass=None, params={}):
        """Create a window parented to the screen root."""
        # NOTE(review): mutable default `params={}` is risky as an idiom,
        # but it is only read here, never mutated.
        return self.create_window(bounds,
            border=border,
            klass=klass,
            parent=self.root_window,
            params=params)

    def create_window(self, bounds, border=0, klass=None, parent=0, params={}):
        """Issue CreateWindow for `bounds` and return the new window id."""
        wid = self._conn.new_xid()
        root = self.root
        # depth/visual 0 mean "copy from parent" -- TODO confirm against
        # the xproto spec.
        self.raw.CreateWindow(**{
            'wid': wid,
            'root': root['root'],
            'depth': 0,
            'parent': parent or root['root'],
            'visual': 0,
            'x': bounds.x,
            'y': bounds.y,
            'width': bounds.width,
            'height': bounds.height,
            'border_width': border,
            'class': klass,
            'params': params,
            })
        return wid

    def send_event(self, event_type, event_mask, dest, **kw):
        """Serialize an event of `event_type` and send it to `dest`."""
        etype = self.proto.events[event_type]
        buf = bytearray([etype.number])
        etype.write_to(buf, kw)
        # Insert a two-byte placeholder (sequence number slot) after the
        # opcode, then pad to the fixed 32-byte event size.
        buf[2:2] = b'\x00\x00'
        buf += b'\x00'*(32 - len(buf))
        self.raw.SendEvent(
            propagate=False,
            destination=dest,
            event_mask=event_mask,
            event=buf,
            )

    def get_property(self, win, name):
        """Fetch property `name` of window `win`.

        Returns a (type-atom, value) pair where value is a decoded string
        for STRING/UTF8_STRING, a tuple of integers otherwise, or None
        when the property does not exist (format 0).
        """
        result = self.raw.GetProperty(
            delete=False,
            window=win,
            property=name,
            type=self.atom.Any,
            long_offset=0,
            long_length=65536)
        typ = self.atom[result['type']]
        if result['format'] == 0:
            return typ, None
        elif typ in (self.atom.STRING, self.atom.UTF8_STRING):
            return typ, result['value'].decode('utf-8', 'replace')
        return typ, struct.unpack('<{}{}'.format(
            len(result['value']) // fmtlen[result['format']],
            fmtchar[result['format']]),
            result['value'])

    def _events(self):
        # Generator that tracks the event currently being handled plus the
        # most recent event and its timestamp.
        for i in self._conn.get_events():
            try:
                self.current_event = i
                self.last_event = i
                if hasattr(i, 'time'):
                    self.last_time = i.time
                yield i
            finally:
                # Cleared once the consumer resumes/closes the generator.
                self.current_event = None

    def get_events(self):
        """Return the shared event iterator (a single generator instance)."""
        return self._event_iterator

    def pixbuf(self, width, height):
        """Pick a pixel-buffer implementation for the given size.

        Small buffers use plain Pixbuf; larger ones prefer shared memory;
        otherwise Pixbuf is used only while the request fits without the
        big-requests extension.  May return None if nothing fits.
        """
        if width*height < 1024:
            return Pixbuf(width, height, self)
        elif hasattr(self, 'shm') and ShmPixbuf:
            return ShmPixbuf(width, height, self)
        elif hasattr(self, 'bigreq') or width*height*4 < 260000:
            return Pixbuf(width, height, self)

    @cached_property
    def pixbuf_gc(self):
        # Lazily created graphics context shared by pixbuf drawing.
        res = self._conn.new_xid()
        self.raw.CreateGC(
            cid=res,
            drawable=self.root_window,
            params={},
            )
        return res
| {
"repo_name": "tailhook/tilenol",
"path": "tilenol/xcb/core.py",
"copies": "1",
"size": "7370",
"license": "mit",
"hash": 3988127496228673500,
"line_mean": 28.8380566802,
"line_max": 79,
"alpha_frac": 0.5362279512,
"autogenerated": false,
"ratio": 3.8048528652555498,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9811298494453686,
"avg_score": 0.005956464400372746,
"num_lines": 247
} |
from functools import partial
from collections import namedtuple
from copy import copy
from http.cookies import SimpleCookie
from urllib.parse import urlparse, urljoin
from pulsar.api import PulsarException
from pulsar.apps.ws import WebSocketProtocol, WS
from pulsar.utils.httpurl import (
REDIRECT_CODES, requote_uri, get_hostport, host_no_default_port,
tls_schemes
)
from pulsar.utils.websocket import SUPPORTED_VERSIONS, websocket_key
# Raw tuple behind RequestKey: the fields identifying a poolable connection.
requestKey = namedtuple('requestKey', 'scheme host port tunnel verify cert')
class RequestKey(requestKey):
    """Connection-pool key for a request.

    Scheme, address, tunnel and TLS parameters together determine whether
    two requests can share a connection.
    """

    @classmethod
    def create(cls, request):
        # Pool on the proxy URL when one is configured.
        url = urlparse(request.proxy or request.url)
        host, port = get_hostport(url.scheme, url.netloc)
        return cls(
            url.scheme, host, port,
            request.tunnel,
            request.verify, request.cert
        )

    @property
    def address(self):
        return self.host, self.port

    @property
    def netloc(self):
        # Drop the port when it is the default for the scheme.
        return host_no_default_port(self.scheme, '%s:%s' % self.address)

    @property
    def tunnel_address(self):
        # None when no tunnel is configured.
        if self.tunnel:
            url = urlparse(self.tunnel)
            return get_hostport(url.scheme, url.netloc)

    def ssl(self, client):
        """Build an SSL context for TLS schemes; None otherwise."""
        if self.scheme in tls_schemes:
            # cert may be a (certfile, keyfile) pair or a single certfile.
            if isinstance(self.cert, tuple):
                certfile, keyfile = self.cert
            else:
                certfile, keyfile = self.cert, None
            return client.ssl_context(
                verify=self.verify,
                certfile=certfile,
                keyfile=keyfile
            )
def keep_alive(version, headers):
    """Check if to keep alive an HTTP connection.

    HTTP/1.1 connections are persistent by default: keep alive unless the
    ``connection`` header is ``close``.  Older versions default to closing:
    keep alive only when ``connection`` is ``keep-alive``.
    """
    connection = headers.get('connection')
    if version == '1.1':
        return connection != 'close'
    return connection == 'keep-alive'
async def start_request(request, conn):
    """Start *request* on connection *conn* and await the response.

    Waits only for the headers when streaming, otherwise for the full
    response, then follows any scheduled ``request_again`` continuation.
    """
    response = conn.current_consumer()
    # bind request-specific events
    response.bind_events(request.inp_params)
    if request.auth:
        response.event('pre_request').bind(request.auth)
    response.start(request)
    if request.stream:
        await response.event('on_headers').waiter()
    else:
        await response.event('post_request').waiter()
    if hasattr(response.request_again, '__call__'):
        response = response.request_again(response)
        try:
            response = await response
        except TypeError:
            # The continuation returned a plain value, not an awaitable.
            pass
    return response
class request_again(namedtuple('request_again', 'method url params')):
    """Sentinel triple scheduling a follow-up request.

    Presents just enough of the response interface (status_code, headers)
    to flow through response handling code.
    """

    @property
    def status_code(self):
        # Not a real HTTP response; -1 flags "no status available".
        return -1

    @property
    def headers(self):
        # No headers exist before the follow-up request is issued.
        return ()
class TooManyRedirects(PulsarException):
    """Raised when a redirect chain exceeds the request's max_redirects.

    Carries the offending response for inspection by the caller.
    """
    def __init__(self, response):
        self.response = response
class WebSocketClient(WebSocketProtocol):
    """Client-side websocket protocol presenting a response-like surface."""

    status_code = 101  # switching protocols

    @property
    def request(self):
        return self.handshake.request

    @property
    def headers(self):
        return self.handshake.headers

    def raise_for_status(self):
        # A completed upgrade never represents an HTTP error.
        pass

    def __getattr__(self, name):
        # Proxy unknown non-dunder attributes to the handshake response.
        if not name.startswith('__'):
            return getattr(self.handshake, name)
        else:
            raise AttributeError("'%s' object has no attribute '%s'" %
                                 (self.__class__.__name__, name))
class Expect:
    """Handle ``Expect: 100-continue``.

    On a 100 response to a request that sent the expect header, schedule
    a re-send of the request with an empty body encoder (the headers were
    already transmitted).
    """

    def __call__(self, response, exc=None):
        if response.status_code == 100:
            expect = response.request.headers.get('expect')
            if expect and expect.lower() == '100-continue':
                response.request_again = self._response

    def _response(self, response):
        request = response.request
        # Nothing further to encode on the wire the second time around.
        request.encode = self.empty
        return start_request(request, response.connection)

    def empty(self):
        return b''
class Redirect:
    """Schedule a follow-up request when the response is a redirect."""

    def __call__(self, response, exc=None):
        if (response.status_code in REDIRECT_CODES and
                'location' in response.headers and
                response.request.allow_redirects):
            response.request_again = self._do_redirect

    def _do_redirect(self, response):
        """Compute the redirect target and record it as a request_again.

        Raises TooManyRedirects when the redirect history is too long.
        """
        request = response.request
        # done with current response
        url = response.headers.get('location')
        # Handle redirection without scheme (see: RFC 1808 Section 4)
        if url.startswith('//'):
            parsed_rurl = urlparse(request.url)
            url = '%s:%s' % (parsed_rurl.scheme, url)
        # Facilitate non-RFC2616-compliant 'location' headers
        # (e.g. '/path/to/resource' instead of
        # 'http://domain.tld/path/to/resource')
        if not urlparse(url).netloc:
            url = urljoin(request.url,
                          # Compliant with RFC3986, we percent
                          # encode the url.
                          requote_uri(url))
        history = request.history
        if history and len(history) >= request.max_redirects:
            raise TooManyRedirects(response)
        params = request.inp_params.copy()
        params['history'] = copy(history) if history else []
        params['history'].append(response)
        if response.status_code == 303:
            # 303 See Other: repeat as GET without a body.
            method = 'GET'
            params.pop('data', None)
            params.pop('files', None)
        else:
            method = request.method
        response.request_again = request_again(method, url, params)
        return response
def handle_cookies(response, exc=None):
    '''Handle response cookies.

    Parse any Set-Cookie headers into ``response._cookies`` and, when the
    client stores cookies, extract them into its cookie jar.
    '''
    if exc:
        # Nothing to parse when the request errored out.
        return
    headers = response.headers
    request = response.request
    client = request.client
    jar = SimpleCookie()
    response._cookies = jar
    if 'set-cookie' in headers or 'set-cookie2' in headers:
        # Process both the legacy and the standard cookie headers.
        for raw in (headers.get('set-cookie2'), headers.get('set-cookie')):
            if raw:
                jar.load(raw)
    if client.store_cookies:
        client.cookies.extract_cookies(response, request)
class WebSocket:
    """Client plugin performing the websocket opening handshake and the
    protocol upgrade once the server answers 101."""

    @property
    def websocket_key(self):
        # Generated once and reused for subsequent handshakes.
        if not hasattr(self, '_websocket_key'):
            self._websocket_key = websocket_key()
        return self._websocket_key

    def __call__(self, response, exc=None):
        # pre-request hook: inject the upgrade headers for ws/wss URLs.
        request = response.request
        if (not exc and request and
                urlparse(request.url).scheme in ('ws', 'wss')):
            headers = request.headers
            headers['connection'] = 'Upgrade'
            headers['upgrade'] = 'websocket'
            if 'Sec-WebSocket-Version' not in headers:
                headers['Sec-WebSocket-Version'] = str(max(SUPPORTED_VERSIONS))
            if 'Sec-WebSocket-Key' not in headers:
                headers['Sec-WebSocket-Key'] = self.websocket_key
            response.event('on_headers').bind(self.on_headers)

    def on_headers(self, response, exc=None):
        '''Websocket upgrade as ``on_headers`` event.'''
        if response.status_code == 101:
            connection = response.connection
            request = response.request
            handler = request.websocket_handler
            if not handler:
                handler = WS()
            parser = request.client.frame_parser(kind=1)
            consumer = partial(WebSocketClient.create,
                               response, handler, parser)
            connection.upgrade(consumer)
            # Close out the HTTP exchange and expose the websocket as the
            # "response" of any further request_again processing.
            response.event('post_request').fire()
            websocket = connection.current_consumer()
            response.request_again = lambda r: websocket
class InfoHeaders:
    """Adapter exposing a ``get_all`` view over multi-valued headers."""

    __slots__ = ('headers',)

    def __init__(self, headers):
        self.headers = headers

    def get_all(self, key, default=None):
        # Delegate to the wrapped headers' multi-valued lookup.
        return self.headers.getall(key, default)
| {
"repo_name": "quantmind/pulsar",
"path": "pulsar/apps/http/plugins.py",
"copies": "1",
"size": "7783",
"license": "bsd-3-clause",
"hash": -209100073886835870,
"line_mean": 29.5215686275,
"line_max": 79,
"alpha_frac": 0.5979699345,
"autogenerated": false,
"ratio": 4.345616973757677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 255
} |
from functools import partial
from collections import namedtuple
from copy import copy
from urllib.parse import urlparse, urljoin
from pulsar import OneTime, Future, task
from pulsar.apps.ws import WebSocketProtocol, WS
from pulsar.utils.internet import is_tls
from pulsar.utils.httpurl import REDIRECT_CODES, requote_uri, SimpleCookie
from pulsar import PulsarException
def noerror(callback):
    '''Decorator to run a callback of a :class:`.EventHandler`
    only when no errors occur
    '''
    def wrapper(*args, **kw):
        # Skip the callback when an 'exc' keyword was supplied or the
        # last positional argument is falsy.
        if not args[-1] or kw.get('exc'):
            return None
        return callback(*args)
    return wrapper
class request_again(namedtuple('request_again', 'method url params')):
    """Sentinel triple scheduling a follow-up request, mimicking just
    enough of the response interface to flow through handlers."""

    @property
    def status_code(self):
        # Not a real HTTP response; -1 flags "no status available".
        return -1

    @property
    def headers(self):
        # No headers exist before the follow-up request is issued.
        return ()
class TooManyRedirects(PulsarException):
    """Signals that a redirect chain exceeded the request's max_redirects;
    carries the offending response."""
    def __init__(self, response):
        self.response = response
class WebSocketClient(WebSocketProtocol):
    """Client-side websocket protocol presenting a response-like surface."""

    status_code = 101  # switching protocols

    @property
    def _request(self):
        return self.handshake._request

    @property
    def headers(self):
        return self.handshake.headers

    def __getattr__(self, name):
        # Proxy unknown non-dunder attributes to the handshake response.
        if not name.startswith('__'):
            return getattr(self.handshake, name)
        else:
            raise AttributeError("'%s' object has no attribute '%s'" %
                                 (self.__class__.__name__, name))
@noerror
def handle_redirect(response, exc=None):
    # When the response is a redirect we may follow, queue _do_redirect to
    # run after the current exchange completes.
    if (response.status_code in REDIRECT_CODES and
            'location' in response.headers and
            response._request.allow_redirects):
        # put at the end of the pile
        response.bind_event('post_request', _do_redirect)
@noerror
def _do_redirect(response, exc=None):
    """Compute the redirect target and record the follow-up.

    Sets ``response.request_again`` to a request_again triple, or to a
    TooManyRedirects instance when the history limit is exceeded.
    """
    request = response.request
    # done with current response
    url = response.headers.get('location')
    # Handle redirection without scheme (see: RFC 1808 Section 4)
    if url.startswith('//'):
        parsed_rurl = urlparse(request.full_url)
        url = '%s:%s' % (parsed_rurl.scheme, url)
    # Facilitate non-RFC2616-compliant 'location' headers
    # (e.g. '/path/to/resource' instead of
    # 'http://domain.tld/path/to/resource')
    if not urlparse(url).netloc:
        url = urljoin(request.full_url,
                      # Compliant with RFC3986, we percent
                      # encode the url.
                      requote_uri(url))
    history = request.history
    if history and len(history) >= request.max_redirects:
        response.request_again = TooManyRedirects(response)
    else:
        params = request.inp_params.copy()
        params['history'] = copy(history) if history else []
        params['history'].append(response)
        if response.status_code == 303:
            # 303 See Other: repeat as GET without a body.
            method = 'GET'
            params.pop('data', None)
            params.pop('files', None)
        else:
            method = request.method
        response.request_again = request_again(method, url, params)
@noerror
def handle_cookies(response, exc=None):
    '''Handle response cookies.
    '''
    # Parse Set-Cookie headers into response._cookies and optionally store
    # them on the client's cookie jar (error cases are skipped by @noerror).
    headers = response.headers
    request = response.request
    client = request.client
    response._cookies = c = SimpleCookie()
    if 'set-cookie' in headers or 'set-cookie2' in headers:
        # Process both the legacy and the standard cookie headers.
        for cookie in (headers.get('set-cookie2'),
                       headers.get('set-cookie')):
            if cookie:
                c.load(cookie)
    if client.store_cookies:
        client.cookies.extract_cookies(response, request)
@noerror
def handle_100(response, exc=None):
    '''Handle Expect: 100-continue.

    This is a ``on_header`` hook which checks if the request headers
    have the ``Expect: 100-continue`` value. If so add a ``on_headers``
    callback to handle the response from the server.
    '''
    request = response.request
    if (request.headers.has('expect', '100-continue') and
            response.status_code == 100):
        response.bind_event('on_headers', _write_body)
@noerror
def _write_body(response, exc=None):
    # The server agreed (100) to receive the body: reset the parser for the
    # real response and send the pending data.
    if response.status_code == 100:
        response.request.new_parser()
        if response.request.data:
            response.write(response.request.data)
@noerror
def handle_101(response, exc=None):
    '''Websocket upgrade as ``on_headers`` event.'''
    if response.status_code == 101:
        connection = response.connection
        request = response._request
        handler = request.websocket_handler
        if not handler:
            handler = WS()
        parser = request.client.frame_parser(kind=1)
        # Any body bytes already received belong to the websocket stream.
        body = response.recv_body()
        connection.upgrade(partial(WebSocketClient, response, handler, parser))
        response.finished()
        consumer = connection.current_consumer()
        consumer.data_received(body)
        # Expose the websocket consumer as the follow-up "response".
        response.request_again = consumer
class Tunneling:
    '''A pre request callback for handling proxy tunneling.

    If Tunnelling is required, it writes the CONNECT headers and abort
    the writing of the actual request until headers from the proxy server
    are received.
    '''
    def __init__(self, loop):
        assert loop
        self._loop = loop

    @noerror
    def __call__(self, response, exc=None):
        # the pre_request handler
        request = response._request
        if request:
            tunnel = request._tunnel
            if tunnel:
                if getattr(request, '_apply_tunnel', False):
                    # if transport is not SSL already
                    if not is_tls(response.transport.get_extra_info('socket')):
                        # Swap in the CONNECT request and wait for its reply.
                        response._request = tunnel
                        response.bind_event('on_headers', self.on_headers)
                else:
                    # Append self again as pre_request
                    request._apply_tunnel = True
                    response.bind_event('pre_request', self)

    @noerror
    def on_headers(self, response, exc=None):
        '''Called back once the headers have arrived.'''
        if response.status_code == 200:
            # CONNECT succeeded; switch to TLS after this exchange ends.
            response.bind_event('post_request', self._tunnel_consumer)
            response.finished()

    @noerror
    def _tunnel_consumer(self, response, exc=None):
        response.transport.pause_reading()
        # Return a coroutine which wraps the socket
        # at the next iteration loop. Important!
        return self.switch_to_ssl(response)

    @task
    def switch_to_ssl(self, prev_response):
        '''Wrap the transport for SSL communication.'''
        request = prev_response._request.request
        connection = prev_response._connection
        loop = connection._loop
        sock = connection._transport._sock
        # set a new connection_made event
        connection.events['connection_made'] = OneTime(loop=loop)
        # The CONNECT exchange must not count as a processed request.
        connection._processed -= 1
        connection.producer._requests_processed -= 1
        waiter = Future(loop=loop)
        loop._make_ssl_transport(sock, connection, request._ssl,
                                 waiter, server_side=False,
                                 server_hostname=request._netloc)
        yield from waiter
        # Re-issue the original request over the new TLS transport.
        response = connection.current_consumer()
        response.start(request)
        yield from response.on_finished
        if response.request_again:
            response = response.request_again
        prev_response.request_again = response
| {
"repo_name": "tempbottle/pulsar",
"path": "pulsar/apps/http/plugins.py",
"copies": "5",
"size": "7367",
"license": "bsd-3-clause",
"hash": 477573883161826500,
"line_mean": 31.8883928571,
"line_max": 79,
"alpha_frac": 0.6170761504,
"autogenerated": false,
"ratio": 4.300642148277875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7417718298677876,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import namedtuple
import xml.dom
from prang.simplification import PrangException
class PrangNode():
    """Immutable document node: an attribute mapping plus a tuple of
    children (freezing the children guards against later mutation)."""

    def __init__(self, attrs, children):
        self._children = tuple(children)
        self._attrs = attrs

    @property
    def attrs(self):
        return self._attrs

    @property
    def children(self):
        return self._children
class SchemaElement():
    """Base class for schema patterns.

    An instance is either a concrete pattern or, when ``ref_name`` is set,
    a lazy reference that resolves against ``defs`` on first access to
    ``atts``/``children``.
    """

    def __init__(self, atts, *children):
        self.defs = None        # definition table for lazy references
        self.ref_name = None    # set only on reference shells
        self.element = None     # resolved target, filled by _resolve()
        self._atts = atts
        # Check that all children are either strings or SchemaElements
        for c in children:
            if not isinstance(c, (str, SchemaElement)):
                raise Exception("The children must be strings or Elements", c)
        self._children = children

    def _resolve(self):
        # Bind self.element either to self or to the referenced pattern.
        if self.element is None:
            if self.ref_name is None:
                self.element = self
            else:
                self.element = self.defs[self.ref_name]

    @property
    def atts(self):
        self._resolve()
        return self.element._atts

    @property
    def children(self):
        self._resolve()
        return self.element._children

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # References print as Ref(name) without forcing resolution.
        if self.ref_name is None:
            args = [k + '=' + repr(v) for k, v in self._atts.items()]
            args += [repr(a) for a in self._children]
            return self.__class__.__name__ + "(" + ', '.join(args) + ")"
        else:
            return "Ref(" + self.ref_name + ")"
# Concrete pattern node types.  Each is a thin SchemaElement subclass whose
# constructor fixes the attribute dict and/or arity expected for the
# corresponding schema construct.

class Element(SchemaElement):
    def __init__(self, *children):
        SchemaElement.__init__(self, {}, *children)

class Choice(SchemaElement):
    # Binary choice between two patterns.
    def __init__(self, p1, p2):
        SchemaElement.__init__(self, {}, p1, p2)

class Start(SchemaElement):
    def __init__(self, p):
        SchemaElement.__init__(self, {}, p)

class Name(SchemaElement):
    def __init__(self, atts, nc):
        SchemaElement.__init__(self, atts, nc)

class Data(SchemaElement):
    def __init__(self, atts, *children):
        SchemaElement.__init__(self, atts, *children)

class NsName(SchemaElement):
    def __init__(self, atts, *children):
        SchemaElement.__init__(self, atts, *children)

class Except(SchemaElement):
    def __init__(self, *children):
        SchemaElement.__init__(self, {}, *children)

class Empty(SchemaElement):
    def __init__(self):
        SchemaElement.__init__(self, {})

class List(SchemaElement):
    def __init__(self, p):
        SchemaElement.__init__(self, {}, p)

class AnyName(SchemaElement):
    def __init__(self, *children):
        SchemaElement.__init__(self, {}, *children)

class Group(SchemaElement):
    # Ordered sequence of two patterns.
    def __init__(self, p1, p2):
        SchemaElement.__init__(self, {}, p1, p2)

class Text(SchemaElement):
    def __init__(self):
        SchemaElement.__init__(self, {})

class Value(SchemaElement):
    def __init__(self, atts, *children):
        SchemaElement.__init__(self, atts, *children)

class Attribute(SchemaElement):
    def __init__(self, *children):
        SchemaElement.__init__(self, {}, *children)

class OneOrMore(SchemaElement):
    def __init__(self, *children):
        SchemaElement.__init__(self, {}, *children)

class Interleave(SchemaElement):
    # Unordered combination of two patterns.
    def __init__(self, p1, p2):
        SchemaElement.__init__(self, {}, p1, p2)

class Define(SchemaElement):
    def __init__(self, atts, p):
        SchemaElement.__init__(self, atts, p)

class Grammar(SchemaElement):
    def __init__(self, *children):
        SchemaElement.__init__(self, {}, *children)

class Param(SchemaElement):
    def __init__(self, atts, *children):
        SchemaElement.__init__(self, atts, *children)

class NotAllowed(SchemaElement):
    # Failure pattern; optionally records what failed, for error reporting.
    def __init__(self, error_schema=None, error_doc=None, error_message=None):
        SchemaElement.__init__(self, {})
        self.error_schema = error_schema
        self.error_doc = error_doc
        self.error_message = error_message

class After(SchemaElement):
    # Internal derivative pattern: p1 now, then p2 after the element ends.
    def __init__(self, p1, p2):
        SchemaElement.__init__(self, {}, p1, p2)
class Ref():
    """Lazy named reference resolved against a definition table on first
    property access."""

    def __init__(self, defs, ref_name):
        self.defs = defs
        self.ref_name = ref_name
        self.node = None  # cached resolution target

    def _resolve(self):
        if self.node is None:
            self.node = self.defs[self.ref_name]

    @property
    def name(self):
        self._resolve()
        return self.node.name

    @property
    def atts(self):
        self._resolve()
        return self.node.atts

    @property
    def children(self):
        self._resolve()
        return self.node.children

    def __str__(self):
        return "Ref(" + self.ref_name + ")"

    def __repr__(self):
        return "Ref(" + self.ref_name + ")"
# Shared singletons: Empty and Text patterns carry no state, so one
# instance of each is reused throughout.
EMPTY = Empty()
TEXT = Text()
def typify(grammar_el):
    """Convert a simplified element tree into the typed pattern tree.

    Returns the frozen grammar.  Named defines are collected into *defs*
    after freezing, so reference shells created during the walk resolve
    lazily once the table is complete.
    """
    defs = {}

    def freeze(el):
        # Recursively map generic elements onto their pattern classes.
        if isinstance(el, str):
            return el
        else:
            children = tuple(freeze(c) for c in el.iter_children())
            if el.name == 'ref':
                # A reference: an Element shell resolving via defs.
                elem = Element()
                elem.defs = defs
                elem.ref_name = el.attrs['name']
                return elem
            elif el.name == 'element':
                return Element(*children)
            elif el.name == 'choice':
                return Choice(*children)
            elif el.name == 'start':
                return Start(*children)
            elif el.name == 'name':
                return Name(el.attrs, *children)
            elif el.name == 'empty':
                return EMPTY
            elif el.name == 'text':
                return TEXT
            elif el.name == 'value':
                return Value(el.attrs, *children)
            elif el.name == 'data':
                return Data(el.attrs, *children)
            elif el.name == 'nsName':
                return NsName(el.attrs, *children)
            elif el.name == 'attribute':
                return Attribute(*children)
            elif el.name == 'group':
                return Group(*children)
            elif el.name == 'except':
                return Except(*children)
            elif el.name == 'anyName':
                return AnyName(*children)
            elif el.name == 'oneOrMore':
                return OneOrMore(*children)
            elif el.name == 'interleave':
                return Interleave(*children)
            elif el.name == 'define':
                return Define(el.attrs, *children)
            elif el.name == 'grammar':
                return Grammar(*children)
            elif el.name == 'list':
                return List(*children)
            elif el.name == 'param':
                return Param(el.attrs, *children)
            elif el.name == 'notAllowed':
                return NotAllowed()
            else:
                raise Exception("element name not recognized " + el.name)

    grammar_el = freeze(grammar_el)
    # First child is the start pattern; the rest are named defines.
    for el in grammar_el.children[1:]:
        defs[el.atts['name']] = el.children[0]
    return grammar_el
def contains(nc, n):
    """Test whether name-class pattern *nc* matches qualified name *n*.

    *n* compares equal to an (ns, local-name) tuple.
    """
    if isinstance(nc, AnyName):
        if len(nc.children) == 0:
            return True
        else:
            # anyName with an except-clause matches everything outside it.
            return not contains(nc.children[0].children[0], n)
    elif isinstance(nc, NsName):
        if nc.atts['ns'] == n.ns:
            if len(nc.children) == 0:
                return True
            else:
                # nsName with an except-clause: in-namespace minus excluded.
                return not contains(nc.children[0].children[0], n)
        else:
            return False
    elif isinstance(nc, Name):
        return (nc.atts['ns'], nc.children[0]) == n
    elif isinstance(nc, Choice):
        return any(contains(nc, n) for nc in nc.children)
    return False
def nullable(p):
    """True when pattern *p* can match the empty sequence."""
    if isinstance(p, (Group, Interleave)):
        return all(nullable(c) for c in p.children)
    elif isinstance(p, Choice):
        return any(nullable(c) for c in p.children)
    elif isinstance(p, OneOrMore):
        return nullable(p.children[0])
    elif isinstance(p, (Empty, Text)):
        return True
    else:
        return False
def child_deriv(p, s):
    """Derivative of pattern *p* with respect to one child node *s*,
    which is either a text string or an element node."""
    if isinstance(s, str):
        return text_deriv(p, s)
    else:
        # Element: derive through open tag, attributes, tag close,
        # children, then the end tag, in that order.
        p1 = start_tag_open_deriv(p, s.qn)
        p2 = atts_deriv(p1, s.atts)
        p3 = start_tag_close_deriv(p2)
        p4 = children_deriv(p3, s.children)
        p5 = end_tag_deriv(p4)
        return p5
def choice(p1, p2):
    # Smart Choice constructor: NotAllowed is the identity element, so it
    # is dropped instead of building a useless Choice node.
    if isinstance(p2, NotAllowed):
        return p1
    elif isinstance(p1, NotAllowed):
        return p2
    else:
        return Choice(p1, p2)
class NotAllowedException(PrangException):
    """Validation failure signalled as an exception."""
    pass
def start_tag_open_deriv(p, qn):
    """Derivative of *p* on seeing the open tag of an element named *qn*."""
    if isinstance(p, Choice):
        p1, p2 = p.children
        res = choice(
            start_tag_open_deriv(p1, qn),
            start_tag_open_deriv(p2, qn))
        return res
    elif isinstance(p, Element):
        nc, top = p.children
        if contains(nc, qn):
            # Validate the content next; Empty remains after the element.
            return after(top, EMPTY)
        else:
            return NotAllowed(p, qn)
    elif isinstance(p, Interleave):
        p1, p2 = p.children
        # Either side may consume the element; the other is deferred.
        return choice(
            apply_after(
                partial(flip(interleave), p2), start_tag_open_deriv(p1, qn)),
            apply_after(partial(interleave, p1), start_tag_open_deriv(p2, qn)))
    elif isinstance(p, OneOrMore):
        p1 = p.children[0]
        # After one match, zero-or-more repetitions may follow.
        return apply_after(
            partial(flip(group), choice(OneOrMore(p1), EMPTY)),
            start_tag_open_deriv(p1, qn))
    elif isinstance(p, Group):
        p1, p2 = p.children
        x = apply_after(
            partial(flip(group), p2), start_tag_open_deriv(p1, qn))
        if nullable(p1):
            # The first part may match nothing, letting p2 start instead.
            return choice(x, start_tag_open_deriv(p2, qn))
        else:
            return x
    elif isinstance(p, After):
        p1, p2 = p.children
        return apply_after(
            partial(flip(after), p2), start_tag_open_deriv(p1, qn))
    else:
        return NotAllowed(p, qn)
def text_deriv(p, s):
    """Derivative of pattern *p* with respect to text content *s*."""
    if isinstance(p, Choice):
        p1, p2 = p.children
        return choice(text_deriv(p1, s), text_deriv(p2, s))
    elif isinstance(p, Interleave):
        p1, p2 = p.children
        return choice(
            interleave(text_deriv(p1, s), p2),
            interleave(p1, text_deriv(p2, s)))
    elif isinstance(p, Group):
        p1, p2 = p.children
        pg = group(text_deriv(p1, s), p2)
        if nullable(p1):
            # p1 may match nothing, letting the text go to p2.
            return choice(pg, text_deriv(p2, s))
        else:
            return pg
    elif isinstance(p, After):
        p1, p2 = p.children
        return after(text_deriv(p1, s), p2)
    elif isinstance(p, OneOrMore):
        return group(text_deriv(p.children[0], s), choice(p, EMPTY))
    elif isinstance(p, Text):
        # Text matches any amount of text and keeps matching.
        return TEXT
    elif isinstance(p, Value):
        if datatypeEqual(p, s):
            return EMPTY
        else:
            return NotAllowed(p, s)
    elif isinstance(p, Data):
        params = [c for c in p.children if isinstance(c, Param)]
        if datatypeAllows(p, params, s):
            # A trailing Except child, when present, must NOT match s.
            if len(p.children) == 0:
                nc = None
            else:
                last_child = p.children[-1]
                if isinstance(last_child, Except):
                    nc = last_child.children[0]
                else:
                    nc = None
            if nc is None:
                return EMPTY
            else:
                if nullable(text_deriv(nc, s)):
                    return NotAllowed(p, s)
                else:
                    return EMPTY
        else:
            return NotAllowed(p, s)
    elif isinstance(p, List):
        # Whitespace-separated tokens are matched one by one.
        if nullable(list_deriv(p.children[0], s.split())):
            return EMPTY
        else:
            return NotAllowed(p, s)
    else:
        return NotAllowed(p, s)
def list_deriv(p, string_list):
    # Fold text_deriv over each token of a list value, in order.
    if len(string_list) == 0:
        return p
    else:
        return list_deriv(text_deriv(p, string_list[0]), string_list[1:])
def group(p1, p2):
    # Smart Group constructor: NotAllowed annihilates, Empty is identity.
    if isinstance(p1, NotAllowed):
        return NotAllowed()
    elif isinstance(p2, NotAllowed):
        return NotAllowed()
    elif isinstance(p1, Empty):
        return p2
    elif isinstance(p2, Empty):
        return p1
    else:
        return Group(p1, p2)
def interleave(p1, p2):
    # Smart Interleave constructor: NotAllowed annihilates, Empty is identity.
    if isinstance(p1, NotAllowed):
        return NotAllowed()
    elif isinstance(p2, NotAllowed):
        return NotAllowed()
    elif isinstance(p1, Empty):
        return p2
    elif isinstance(p2, Empty):
        return p1
    else:
        return Interleave(p1, p2)
def after(p1, p2):
    # Smart After constructor: failure on either side fails the whole.
    if any(isinstance(p, NotAllowed) for p in (p1, p2)):
        return NotAllowed((p1, p2), None)
    else:
        return After(p1, p2)
def datatypeAllows(p, params, s):
    """Return True when string *s* is acceptable for Data pattern *p*
    given its facet *params*.

    The built-in library accepts only 'string' and 'token'; the XML Schema
    datatypes library enforces just the minLength facet here.
    """
    library = p.atts['datatypeLibrary']
    if library == '':
        return p.atts['type'] in ('string', 'token')
    if library == 'http://www.w3.org/2001/XMLSchema-datatypes':
        for param in params:
            if param.atts['name'] == 'minLength':
                if len(s) < int(param.children[0]):
                    return False
        return True
    return False
def normalize_whitespace(s):
    """Collapse runs of whitespace to single spaces and trim both ends."""
    tokens = s.split()
    return ' '.join(tokens)
def datatypeEqual(p, s):
    """True when string *s* equals Value pattern *p*'s literal under the
    pattern's datatype comparison rules."""
    library = p.atts['datatypeLibrary']
    # The literal is the first child, or '' for a childless Value.
    child = '' if len(p.children) == 0 else p.children[0]
    if library == '':
        if p.atts['type'] == 'string':
            return child == s
        elif p.atts['type'] == 'token':
            # Token comparison ignores leading/trailing/repeated whitespace.
            return normalize_whitespace(child) == normalize_whitespace(s)
        else:
            return False
    elif library == 'http://www.w3.org/2001/XMLSchema-datatypes':
        # Only string/token comparisons are implemented for this library.
        if p.atts['type'] == 'string':
            return child == s
        elif p.atts['type'] == 'token':
            return normalize_whitespace(child) == normalize_whitespace(s)
        else:
            return False
    else:
        return False
def apply_after(f, p):
    """Map *f* over the continuation part of every After inside *p*,
    distributing over Choice.

    Returns None for other pattern kinds -- presumably never reached with
    well-formed derivatives (TODO confirm).
    """
    if isinstance(p, After):
        p1, p2 = p.children
        return After(p1, f(p2))
    elif isinstance(p, Choice):
        p1, p2 = p.children
        return choice(apply_after(f, p1), apply_after(f, p2))
    elif isinstance(p, NotAllowed):
        return NotAllowed(p)
def flip(f):
    """Return a binary function calling *f* with its two arguments swapped."""
    def flipped(a, b):
        return f(b, a)
    return flipped
def att_deriv(p, att_node):
    """Derivative of pattern *p* with respect to one attribute node."""
    if isinstance(p, After):
        p1, p2 = p.children
        return after(att_deriv(p1, att_node), p2)
    elif isinstance(p, Choice):
        p1, p2 = p.children
        return choice(att_deriv(p1, att_node), att_deriv(p2, att_node))
    elif isinstance(p, Group):
        p1, p2 = p.children
        # Attributes are unordered, so either side may consume one.
        return choice(
            group(att_deriv(p1, att_node), p2),
            group(p1, att_deriv(p2, att_node)))
    elif isinstance(p, Interleave):
        p1, p2 = p.children
        return choice(
            interleave(att_deriv(p1, att_node), p2),
            interleave(p1, att_deriv(p2, att_node)))
    elif isinstance(p, OneOrMore):
        p1 = p.children[0]
        return group(att_deriv(p1, att_node), choice(OneOrMore(p1), EMPTY))
    elif isinstance(p, Attribute):
        nc, p1 = p.children
        # Both the attribute name and its value must match.
        if contains(nc, att_node.qn) and value_match(p1, att_node.s):
            return EMPTY
        else:
            return NotAllowed(p, att_node)
    else:
        return NotAllowed(p, att_node)
def atts_deriv(p, att_nodes):
    """Fold att_deriv over *att_nodes*, deriving *p* by each attribute in turn."""
    for node in att_nodes:
        p = att_deriv(p, node)
    return p
def value_match(p, s):
    """True if attribute value *s* matches content pattern *p*.

    A nullable pattern accepts whitespace-only values; otherwise the text
    derivative of *p* by *s* must be nullable.
    """
    if nullable(p) and whitespace(s):
        return True
    return nullable(text_deriv(p, s))
def start_tag_close_deriv(p):
    """Derivative for the close of a start-tag.

    After all attributes of an element have been consumed, any Attribute
    pattern still reachable means a required attribute was missing, so it
    becomes NotAllowed; all other patterns are rebuilt unchanged.
    """
    if isinstance(p, After):
        p1, p2 = p.children
        return after(start_tag_close_deriv(p1), p2)
    elif isinstance(p, Choice):
        p1, p2 = p.children
        return choice(start_tag_close_deriv(p1), start_tag_close_deriv(p2))
    elif isinstance(p, Group):
        p1, p2 = p.children
        return group(start_tag_close_deriv(p1), start_tag_close_deriv(p2))
    elif isinstance(p, Interleave):
        p1, p2 = p.children
        return interleave(
            start_tag_close_deriv(p1), start_tag_close_deriv(p2))
    elif isinstance(p, OneOrMore):
        return one_or_more(start_tag_close_deriv(p.children[0]))
    elif isinstance(p, Attribute):
        # A leftover Attribute pattern means the attribute never appeared.
        return NotAllowed(p)
    else:
        return p
def one_or_more(p):
    """Smart OneOrMore constructor: repetition of notAllowed stays notAllowed."""
    return NotAllowed(p) if isinstance(p, NotAllowed) else OneOrMore(p)
def children_deriv(p, child_nodes):
    """Derivative of *p* with respect to an element's list of child nodes.

    An empty child list is treated as a single empty text node.  A lone
    text child may be significant or ignorable whitespace, so both
    interpretations are kept via choice; mixed content is delegated to
    strip_children_deriv, which drops whitespace-only text nodes.
    """
    len_child_nodes = len(child_nodes)
    if len_child_nodes == 0:
        return children_deriv(p, [''])
    elif len_child_nodes == 1 and isinstance(child_nodes[0], str):
        s = child_nodes[0]
        p1 = child_deriv(p, s)
        if whitespace(s):
            # Whitespace-only text may also be ignored entirely.
            return choice(p, p1)
        else:
            return p1
    else:
        return strip_children_deriv(p, child_nodes)
def strip_children_deriv(p, child_nodes):
    """Fold child_deriv over *child_nodes*, skipping ignorable whitespace text."""
    for node in child_nodes:
        if not strip(node):
            p = child_deriv(p, node)
    return p
def strip(child_node):
    """True if *child_node* is an ignorable (whitespace-only) text node."""
    return whitespace(child_node) if isinstance(child_node, str) else False
def whitespace(s):
    """True if *s* is empty or consists entirely of whitespace."""
    return not s.strip()
def end_tag_deriv(p):
    """Derivative for an element's end-tag.

    An After pattern whose left side is nullable (element content fully
    matched) continues with its right side; anything else is not allowed.
    """
    if isinstance(p, Choice):
        p1, p2 = p.children
        return choice(end_tag_deriv(p1), end_tag_deriv(p2))
    elif isinstance(p, After):
        p1, p2 = p.children
        if nullable(p1):
            return p2
        else:
            return NotAllowed(p)
    else:
        return NotAllowed(p)
# Lightweight document-model records built by to_doc_elem and consumed by
# the derivative functions above.
QName = namedtuple('QName', ['ns', 'lname'])  # namespace URI + local name
Att = namedtuple('Att', ['qn', 's'])  # attribute: qualified name + string value
ElementNode = namedtuple('ElementNode', ['qn', 'atts', 'children'])  # element: name, attributes, child nodes
def to_doc_elem(elem_dom):
    """Convert a DOM element into an immutable ElementNode tree.

    Processing instructions are removed, adjacent text nodes are merged
    (via ``normalize()``), and only element and text children are kept.
    Namespace declarations (xmlns attributes) are not part of the data
    model and are skipped.
    """
    for c in list(elem_dom.childNodes):
        node_type = c.nodeType
        if node_type == xml.dom.Node.PROCESSING_INSTRUCTION_NODE:
            elem_dom.removeChild(c)
    # Merge adjacent text nodes left behind by the removals above.
    elem_dom.normalize()
    children = []
    for child in elem_dom.childNodes:
        node_type = child.nodeType
        if node_type == xml.dom.Node.ELEMENT_NODE:
            children.append(to_doc_elem(child))
        elif node_type == xml.dom.Node.TEXT_NODE:
            children.append(child.data)
    atts = []
    attrs_dom = elem_dom.attributes
    if attrs_dom is not None:
        for i in range(attrs_dom.length):
            attr_dom = attrs_dom.item(i)
            # Skip namespace declarations; they are not real attributes here.
            if attr_dom.prefix == 'xmlns' or attr_dom.name == 'xmlns':
                continue
            ns = '' if attr_dom.namespaceURI is None else attr_dom.namespaceURI
            atts.append(Att(QName(ns, attr_dom.localName), attr_dom.nodeValue))
    ns = '' if elem_dom.namespaceURI is None else elem_dom.namespaceURI
    qn = QName(ns, elem_dom.localName)
    return ElementNode(qn, tuple(atts), tuple(children))
def validate(schema_el, doc_str):
    """Validate XML document string *doc_str* against the simplified
    schema rooted at *schema_el*.

    Raises NotAllowedException (built from the failing derivative's
    children) when the document does not match; returns None on success.
    """
    # schema_el -> start element -> the top-level pattern to match against.
    start_el = schema_el.children[0]
    top_el = start_el.children[0]
    doc = xml.dom.minidom.parseString(doc_str)
    doc_root = to_doc_elem(doc.documentElement)
    deriv = child_deriv(top_el, doc_root)
    if not nullable(deriv):
        raise NotAllowedException(*deriv.children)
| {
"repo_name": "tlocke/prang",
"path": "prang/validation.py",
"copies": "1",
"size": "19766",
"license": "mit",
"hash": -2662663556738130400,
"line_mean": 27.3180515759,
"line_max": 79,
"alpha_frac": 0.5563088131,
"autogenerated": false,
"ratio": 3.61815852095918,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46744673340591797,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import namedtuple
import numpy as np
import theano
import theano.tensor as T
from neupy import algorithms
from neupy.utils import asfloat
import neupy.algorithms.gd.conjgrad as cg
from data import simple_input_train, simple_target_train
from utils import compare_networks
from base import BaseTestCase
class ConjugateGradientTestCase(BaseTestCase):
    """Tests for the conjugate-gradient optimizer and its beta-update rules."""

    def setUp(self):
        super(ConjugateGradientTestCase, self).setUp()
        # Layer sizes (input, hidden, output) shared by the network tests.
        self.connection = (3, 5, 2)

    def test_functions(self):
        """Each beta-update rule reproduces its precomputed coefficient for
        fixed input vectors (checked to 3 decimal places)."""
        Case = namedtuple("Case", "func input_data answer")
        testcases = [
            Case(
                func=cg.fletcher_reeves,
                input_data=(
                    np.array([1.35, 0.3]),
                    np.array([0.11, -0.5]),
                    np.array([0, 0]),
                ),
                answer=0.137
            ),
            Case(
                func=cg.polak_ribiere,
                input_data=(
                    np.array([1., -0.5]),
                    np.array([1.2, -0.45]),
                    np.array([0, 0]),
                ),
                answer=0.174
            ),
            Case(
                func=cg.hentenes_stiefel,
                input_data=(
                    np.array([1., -0.5]),
                    np.array([1.2, -0.45]),
                    np.array([0.2, 0.05]),
                ),
                answer=5.118
            ),
            Case(
                func=cg.conjugate_descent,
                input_data=(
                    np.array([1., -0.5]),
                    np.array([1.2, -0.45]),
                    np.array([0.2, 0.05]),
                ),
                answer=-7.323
            ),
            Case(
                func=cg.liu_storey,
                input_data=(
                    np.array([1., -0.5]),
                    np.array([1.2, -0.45]),
                    np.array([0.2, 0.05]),
                ),
                answer=1.243
            ),
            Case(
                func=cg.dai_yuan,
                input_data=(
                    np.array([1., -0.5]),
                    np.array([1.2, -0.45]),
                    np.array([0.2, 0.05]),
                ),
                answer=38.647
            ),
        ]
        for testcase in testcases:
            input_data = asfloat(np.array(testcase.input_data))
            variables = T.vectors(3)
            # Some update rules ignore one or more of their inputs, and
            # Theano rejects functions with unused input variables.  Adding
            # a zero-valued term that touches the last variable keeps the
            # graph valid without changing the result.
            hack = asfloat(0) * variables[-1][0]
            output_func = theano.function(
                variables,
                testcase.func(*variables) + hack
            )
            result = output_func(*input_data)
            self.assertAlmostEqual(result, testcase.answer, places=3)

    def test_conjgrad(self):
        """Training on the simple dataset converges to a small error norm."""
        nw = algorithms.ConjugateGradient(
            self.connection,
            step=1,
            error='mse',
            shuffle_data=True,
            verbose=False,
            update_function='fletcher_reeves'
        )
        nw.train(simple_input_train, simple_target_train, epochs=300)
        result = nw.predict(simple_input_train)
        norm = np.linalg.norm(result - simple_target_train)
        self.assertAlmostEqual(0.01, norm, places=3)

    def test_compare_bp_and_cg(self):
        """Conjugate gradient is compared against plain gradient descent
        on the same data and network configuration."""
        compare_networks(
            # Test classes
            algorithms.GradientDescent,
            partial(
                algorithms.ConjugateGradient,
                update_function='fletcher_reeves'
            ),
            # Test data
            (simple_input_train, simple_target_train),
            # Network configurations
            connection=self.connection,
            step=1,
            error='categorical_crossentropy',
            shuffle_data=True,
            # Test configurations
            epochs=50,
            show_comparison_plot=False
        )
| {
"repo_name": "stczhc/neupy",
"path": "tests/algorithms/gd/test_conjgrad.py",
"copies": "1",
"size": "4244",
"license": "mit",
"hash": 3931742541242882000,
"line_mean": 31.3969465649,
"line_max": 76,
"alpha_frac": 0.472667295,
"autogenerated": false,
"ratio": 4.317395727365208,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5290063022365209,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import OrderedDict
from werkzeug.local import LocalProxy
from werkzeug.datastructures import MultiDict
from flask import request, current_app, get_template_attribute
from .utils import get_url
# Lazy proxies, resolved at access time inside an app/request context.
_anyform = LocalProxy(lambda: current_app.extensions['anyform'])
# Candidate endpoint tags for the current request (see endpoints_list).
_endpoints = LocalProxy(lambda: endpoints_list(request))
# Forms applicable to the current request, keyed by tag.
current_forms = LocalProxy(lambda: current_app.extensions['anyform'].get_current_forms())
def endpoints_list(request):
    """Candidate endpoint tags for *request*: the name after any blueprint
    prefix (':'), the bare view name after the last '.', and the catch-all
    'all'."""
    endpoint = request.endpoint
    candidates = (endpoint.rsplit(':')[-1], endpoint.rsplit('.')[-1], u'all')
    return [str(c) for c in candidates]
class AForm(object):
    """Descriptor for a single managed form.

    Holds the form's tag, its WTForms class, the template/macro used to
    render it, and the endpoints ("points") at which it should appear.
    """

    def __init__(self, **kwargs):
        self.af_tag = kwargs.get('af_tag')
        self.af_form = kwargs.get('af_form')
        self.af_template = kwargs.get('af_template')
        self.af_view_template = kwargs.get('af_view_template')
        self.af_macro = kwargs.get('af_macro')
        self.af_points = self.set_points(kwargs.get('af_points'))

    def set_points(self, points):
        """Return *points*, defaulting to the catch-all ['all'] when falsy."""
        if points:
            return points
        else:
            return ['all']

    def update(self, **kwargs):
        """Set each keyword argument as an attribute on this instance."""
        # Plain loop: previously a list comprehension used purely for its
        # side effects, which builds a throwaway list of Nones.
        for k, v in kwargs.items():
            setattr(self, k, v)

    @property
    def _renderable(self):
        """The template macro that renders this form."""
        return get_template_attribute(self.af_template, self.af_macro)

    def render(self):
        """Render this form with its configured template macro."""
        return self._renderable(self)

    def form_is(self):
        """Instantiate the form class, binding posted form data or JSON."""
        if request.form:
            return self.af_form(request.form)
        elif request.json:
            return self.af_form(MultiDict(request.json))
        else:
            return self.af_form()

    @property
    def form(self):
        """A validated form instance with its 'next' field populated."""
        f = self.form_is()
        f.validate()
        self.set_form_next(f)
        return f

    def set_form_next(self, form):
        """Fill the form's optional 'next' field from request args or form data."""
        if getattr(form, 'next', None):
            form.next.data = get_url(request.args.get('next')) \
                or get_url(request.form.get('next')) or ''
class AnyForm(object):
    """
    The Flask-Anyform extension

    Registers a set of AForm instances with a Flask application and exposes
    a ``<tag>_form`` render callable for each one as a Jinja global.

    :param app: The application.
    :param forms: A list of AForm instances or dicts
    """
    def __init__(self, app=None, forms=None, form_container=AForm, **kwargs):
        self.app = app
        self.forms = forms
        self.form_container = form_container
        self._ctxs = {}     # tag (or None for global) -> list of context callables
        self._ctx_prc = {}  # Jinja global name -> renderer / proxy
        if app is not None and forms is not None:
            self.init_app(app)

    def init_app(self, app):
        """
        Initializes the Flask-Anyform extension for the specified Flask application.

        :param app: The application.
        """
        self.app = app
        self.init_provides(self.forms)
        self.register_context_processors(app, self.init_context_processors())
        app.extensions['anyform'] = self

    def init_provides(self, forms):
        """Build the ordered tag -> AForm mapping from the configured forms."""
        self.provides = OrderedDict([self.init_provide(f) for f in forms])

    def init_provide(self, f):
        """Normalize a form spec (AForm instance or dict) to a (tag, AForm) pair."""
        if isinstance(f, dict):
            f = self.form_container(**f)
        return f.af_tag, f

    def register_context_processors(self, app, context_processors):
        """Expose *context_processors* as Jinja globals on *app*."""
        app.jinja_env.globals.update(context_processors)

    def init_context_processors(self):
        """Create one '<tag>_form' callable per form plus the extension proxies."""
        for aform in self.provides.values():
            self._ctx_prc.update(self.get_processor_for(aform))
        self._ctx_prc.update({'anyform': _anyform, 'current_forms': current_forms})
        return self._ctx_prc

    def get_processor_for(self, aform):
        """Mapping entry exposing *aform*'s renderer under '<tag>_form'."""
        return {"{}_form".format(aform.af_tag): self.aform_ctx_function(aform)}

    def aform_ctx_function(self, aform):
        """Build the zero-argument callable that refreshes and renders *aform*."""
        run_ctx = partial(self._run_aform_ctx, aform.af_tag)
        run_update = partial(aform.update)
        return partial(self._on_aform_ctx, aform, run_ctx, run_update)

    def _on_aform_ctx(self, aform, run_ctx, run_update):
        # Apply any registered context providers, then render the form.
        run_update(**run_ctx())
        return aform.render()

    def _add_aform_ctx(self, tag, fn):
        """Register *fn* as a context provider for *tag*, ignoring duplicates."""
        group = self._ctxs.setdefault(tag, [])
        if fn not in group:
            group.append(fn)

    def _run_aform_ctx(self, tag):
        """Collect context from global (None) providers, then tag-specific ones."""
        rv = {}  # (removed unused 'fns' accumulator)
        for g in [None, tag]:
            for fn in self._ctxs.setdefault(g, []):
                rv.update(fn())
        return rv

    def init_fn_name(self, name):
        """Derive the target tag from a provider function's name.

        Functions prefixed 'anyform_' are global providers (tag None);
        otherwise everything before the final underscore is the tag.
        """
        if name.partition('_')[0] == 'anyform':
            return None
        else:
            return name.rpartition('_')[0]

    def aform_ctx(self, fn):
        """add a function to inject ctx into aform at render

        To add context to all aforms, prefix function with 'anyform':

            @anyform.aform_ctx
            def anyform_ctx():
                do stuff

        to add a function to a specific aform prefix the function with the tag:

            @anyform.aform_ctx
            def myform_ctx():
                do stuff
        """
        self._add_aform_ctx(self.init_fn_name(fn.__name__), fn)

    def get_current_forms(self):
        """Forms whose points match an endpoint tag of the current request."""
        return {k: v for k, v in self.provides.items() if self.form_in_endpoint(v.af_points)}

    def form_in_endpoint(self, af_points):
        """True if any entry of *af_points* is among the request's endpoint tags."""
        return any(x in _endpoints for x in af_points)
| {
"repo_name": "fc-thrisp-hurrata-dlm-graveyard/flask-anyform",
"path": "flask_anyform/anyform.py",
"copies": "1",
"size": "5176",
"license": "mit",
"hash": 3572110241266112500,
"line_mean": 30.5609756098,
"line_max": 93,
"alpha_frac": 0.595633694,
"autogenerated": false,
"ratio": 3.683985765124555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4779619459124555,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import OrderedDict
import json
import warnings
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from pyarrow.compat import guid
from ....utils import natural_sort_key, getargspec
from ..utils import _get_pyarrow_dtypes, _meta_from_dtypes
from ...utils import clear_known_categories
from .utils import (
_parse_pandas_metadata,
_normalize_index_columns,
Engine,
_analyze_paths,
)
def _get_md_row_groups(pieces):
""" Read file-footer metadata from each individual piece.
Since this operation can be painfully slow in some cases, abort
if any metadata or statistics are missing
"""
row_groups = []
row_groups_per_piece = []
for piece in pieces:
num_row_groups = piece.get_metadata().num_row_groups
for rg in range(num_row_groups):
row_group = piece.get_metadata().row_group(rg)
for c in range(row_group.num_columns):
if not row_group.column(c).statistics:
return (None, None)
row_groups.append(row_group)
row_groups_per_piece.append(num_row_groups)
if len(row_groups) == len(pieces):
row_groups_per_piece = None
# TODO: Skip row_groups_per_piece after ARROW-2801
return row_groups, row_groups_per_piece
def _get_row_groups_per_piece(pieces, metadata, path, fs):
""" Determine number of row groups in each dataset piece.
This function requires access to ParquetDataset.metadata
"""
# TODO: Remove this function after ARROW-2801
if metadata.num_row_groups == len(pieces):
return None # pieces already map to row-groups
result = OrderedDict()
for piece in pieces:
result[piece.path] = 0
for rg in range(metadata.num_row_groups):
filename = metadata.row_group(rg).column(0).file_path
if filename:
result[fs.sep.join([path, filename])] += 1
else:
return None # File path is missing, abort
return tuple(result.values())
def _merge_statistics(stats, s):
""" Update `stats` with vaules in `s`
"""
stats[-1]["total_byte_size"] += s["total_byte_size"]
stats[-1]["num-rows"] += s["num-rows"]
ncols = len(stats[-1]["columns"])
ncols_n = len(s["columns"])
if ncols != ncols_n:
raise ValueError(f"Column count not equal ({ncols} vs {ncols_n})")
for i in range(ncols):
name = stats[-1]["columns"][i]["name"]
j = i
for ii in range(ncols):
if name == s["columns"][j]["name"]:
break
if ii == ncols - 1:
raise KeyError(f"Column statistics missing for {name}")
j = (j + 1) % ncols
min_n = s["columns"][j]["min"]
max_n = s["columns"][j]["max"]
null_count_n = s["columns"][j]["null_count"]
min_i = stats[-1]["columns"][i]["min"]
max_i = stats[-1]["columns"][i]["max"]
stats[-1]["columns"][i]["min"] = min(min_i, min_n)
stats[-1]["columns"][i]["max"] = max(max_i, max_n)
stats[-1]["columns"][i]["null_count"] += null_count_n
return True
class SimplePiece:
    """ SimplePiece

    Minimal stand-in for PyArrow's ParquetDatasetPiece.  Only used for
    flat datasets (not partitioned) where a "_metadata" file is available,
    so just the file path needs to be tracked.
    """

    def __init__(self, path):
        self.path = path
        self.partition_keys = self.row_group = None
def _determine_dataset_parts(fs, paths, gather_statistics, filters, dataset_kwargs):
    """ Determine how to access metadata and break read into ``parts``

    This logic is mostly to handle `gather_statistics=False` cases,
    because this also means we should avoid scanning every file in the
    dataset.

    Returns ``(parts, dataset)``; ``parts`` is a list of file paths when
    each part is a whole file, or empty when the caller should derive
    parts from ``dataset.pieces`` / row groups.
    """
    parts = []
    if len(paths) > 1:
        # Explicit list of files.
        base, fns = _analyze_paths(paths, fs)
        if "_metadata" in fns:
            # We have a _metadata file
            # PyArrow cannot handle "_metadata"
            # when `paths` is a list.
            paths.remove(base + fs.sep + "_metadata")
            fns.remove("_metadata")
            if gather_statistics is not False:
                # If we are allowed to gather statistics,
                # lets use "_metadata" instead of opening
                # every file. Note that we don't need to check if
                # the dataset is flat here, because PyArrow cannot
                # properly handle partitioning in this case anyway.
                dataset = pq.ParquetDataset(
                    base + fs.sep + "_metadata",
                    filesystem=fs,
                    filters=filters,
                    **dataset_kwargs,
                )
                dataset.metadata = dataset.pieces[0].get_metadata()
                dataset.pieces = [SimplePiece(path) for path in paths]
                dataset.partitions = None
                return parts, dataset
        if gather_statistics is not False:
            # This scans all the files
            dataset = pq.ParquetDataset(
                paths, filesystem=fs, filters=filters, **dataset_kwargs
            )
            if dataset.schema is None:
                # The dataset may have inconsistent schemas between files.
                # If so, we should try to use a "_common_metadata" file
                proxy_path = (
                    base + fs.sep + "_common_metadata"
                    if "_common_metadata" in fns
                    else paths[0]
                )
                dataset.schema = pq.ParquetDataset(proxy_path, filesystem=fs).schema
        else:
            # Rely on schema for 0th file.
            # Will need to pass a list of paths to read_partition
            dataset = pq.ParquetDataset(paths[0], filesystem=fs, **dataset_kwargs)
            parts = [base + fs.sep + fn for fn in fns]
    elif fs.isdir(paths[0]):
        # This is a directory, check for _metadata, then _common_metadata
        allpaths = fs.glob(paths[0] + fs.sep + "*")
        base, fns = _analyze_paths(allpaths, fs)
        # Check if dataset is "not flat" (partitioned into directories).
        # If so, we will need to let pyarrow generate the `dataset` object.
        not_flat = any([fs.isdir(p) for p in fs.glob(fs.sep.join([base, "*"]))])
        if "_metadata" in fns and "validate_schema" not in dataset_kwargs:
            dataset_kwargs["validate_schema"] = False
        if not_flat or "_metadata" in fns or gather_statistics is not False:
            # Let arrow do its thing (use _metadata or scan files)
            dataset = pq.ParquetDataset(
                paths, filesystem=fs, filters=filters, **dataset_kwargs
            )
            if dataset.schema is None:
                # The dataset may have inconsistent schemas between files.
                # If so, we should try to use a "_common_metadata" file
                proxy_path = (
                    base + fs.sep + "_common_metadata"
                    if "_common_metadata" in fns
                    else allpaths[0]
                )
                dataset.schema = pq.ParquetDataset(proxy_path, filesystem=fs).schema
        else:
            # Use _common_metadata file if it is available.
            # Otherwise, just use 0th file
            if "_common_metadata" in fns:
                dataset = pq.ParquetDataset(
                    base + fs.sep + "_common_metadata", filesystem=fs, **dataset_kwargs
                )
            else:
                dataset = pq.ParquetDataset(
                    allpaths[0], filesystem=fs, **dataset_kwargs
                )
            parts = [base + fs.sep + fn for fn in fns if fn != "_common_metadata"]
    else:
        # There is only one file to read
        dataset = pq.ParquetDataset(paths, filesystem=fs, **dataset_kwargs)
    return parts, dataset
def _write_partitioned(
    table, root_path, partition_cols, fs, preserve_index=True, **kwargs
):
    """ Write table to a partitioned dataset with pyarrow.

    Logic copied from pyarrow.parquet.
    (arrow/python/pyarrow/parquet.py::write_to_dataset)

    Returns the list of per-file metadata objects collected while writing.

    TODO: Remove this in favor of pyarrow's `write_to_dataset`
          once ARROW-8244 is addressed.
    """
    fs.mkdirs(root_path, exist_ok=True)
    df = table.to_pandas(ignore_metadata=True)
    partition_keys = [df[col] for col in partition_cols]
    data_df = df.drop(partition_cols, axis="columns")
    data_cols = df.columns.drop(partition_cols)
    if len(data_cols) == 0 and not preserve_index:
        raise ValueError("No data left to save outside partition columns")
    # Data files carry a schema without the partition columns; those are
    # encoded in the directory names instead.
    subschema = table.schema
    for col in table.schema.names:
        if col in partition_cols:
            subschema = subschema.remove(subschema.get_field_index(col))
    md_list = []
    for keys, subgroup in data_df.groupby(partition_keys):
        if not isinstance(keys, tuple):
            keys = (keys,)
        subdir = fs.sep.join(
            [
                "{colname}={value}".format(colname=name, value=val)
                for name, val in zip(partition_cols, keys)
            ]
        )
        subtable = pa.Table.from_pandas(
            subgroup, preserve_index=False, schema=subschema, safe=False
        )
        prefix = fs.sep.join([root_path, subdir])
        # FIX: was `fs.mkdir(prefix, exists_ok=True)` -- the keyword is
        # spelled `exist_ok` (as in the root-path call above); the typo is
        # rejected or silently swallowed depending on the filesystem.
        fs.mkdirs(prefix, exist_ok=True)
        outfile = guid() + ".parquet"
        full_path = fs.sep.join([prefix, outfile])
        with fs.open(full_path, "wb") as f:
            pq.write_table(subtable, f, metadata_collector=md_list, **kwargs)
        # Record the file path relative to the dataset root for _metadata.
        md_list[-1].set_file_path(fs.sep.join([subdir, outfile]))
    return md_list
class ArrowEngine(Engine):
    @classmethod
    def read_metadata(
        cls,
        fs,
        paths,
        categories=None,
        index=None,
        gather_statistics=None,
        filters=None,
        split_row_groups=True,
        **kwargs,
    ):
        """Collect dataset metadata and plan the read.

        Returns ``(meta, stats, parts)``: an empty pandas DataFrame with
        the output schema, optional per-part statistics, and a list of
        part descriptors consumed by ``read_partition``.
        """
        # Define the dataset object to use for metadata,
        # Also, initialize `parts`. If `parts` is populated here,
        # then each part will correspond to a file. Otherwise, each part will
        # correspond to a row group (populated below)
        parts, dataset = _determine_dataset_parts(
            fs, paths, gather_statistics, filters, kwargs.get("dataset", {})
        )
        # Check if the column-chunk file_path's are set in "_metadata".
        # If available, we can use the path to sort the row-groups
        col_chunk_paths = False
        if dataset.metadata:
            col_chunk_paths = all(
                dataset.metadata.row_group(i).column(0).file_path is not None
                for i in range(dataset.metadata.num_row_groups)
            )
        # TODO: Call to `_determine_dataset_parts` uses `pq.ParquetDataset`
        # to define the `dataset` object. `split_row_groups` should be passed
        # to that constructor once it is supported (see ARROW-2801).
        if dataset.partitions is not None:
            partitions = [
                n for n in dataset.partitions.partition_names if n is not None
            ]
            if partitions and dataset.metadata:
                # Dont use dataset.metadata for partitioned datasets, unless
                # the column-chunk metadata includes the `"file_path"`.
                # The order of dataset.metadata.row_group items is often
                # different than the order of `dataset.pieces`.
                if not col_chunk_paths or (
                    len(dataset.pieces) != dataset.metadata.num_row_groups
                ):
                    dataset.schema = dataset.metadata.schema
                    dataset.metadata = None
        else:
            partitions = []
        # Statistics are currently collected at the row-group level only.
        # Therefore, we cannot perform filtering with split_row_groups=False.
        # For "partitioned" datasets, each file (usually) corresponds to a
        # row-group anyway.
        # TODO: Map row-group statistics onto file pieces for filtering.
        # This shouldn't be difficult if `col_chunk_paths==True`
        if not split_row_groups and not col_chunk_paths:
            if gather_statistics is None and not partitions:
                gather_statistics = False
            if filters:
                raise ValueError(
                    "Filters not supported with split_row_groups=False "
                    "(unless proper _metadata is available)."
                )
            if gather_statistics and not partitions:
                raise ValueError(
                    "Statistics not supported with split_row_groups=False."
                    "(unless proper _metadata is available)."
                )
        if dataset.metadata:
            schema = dataset.metadata.schema.to_arrow_schema()
        else:
            schema = dataset.schema.to_arrow_schema()
        columns = None
        # Recover pandas-specific metadata (index names, categoricals)
        # stored by the writer, when present.
        has_pandas_metadata = (
            schema.metadata is not None and b"pandas" in schema.metadata
        )
        if has_pandas_metadata:
            pandas_metadata = json.loads(schema.metadata[b"pandas"].decode("utf8"))
            (
                index_names,
                column_names,
                storage_name_mapping,
                column_index_names,
            ) = _parse_pandas_metadata(pandas_metadata)
            if categories is None:
                categories = []
                for col in pandas_metadata["columns"]:
                    if (col["pandas_type"] == "categorical") and (
                        col["name"] not in categories
                    ):
                        categories.append(col["name"])
        else:
            index_names = []
            column_names = schema.names
            storage_name_mapping = {k: k for k in column_names}
            column_index_names = [None]
        if index is None and index_names:
            index = index_names
        if set(column_names).intersection(partitions):
            raise ValueError(
                "partition(s) should not exist in columns.\n"
                "categories: {} | partitions: {}".format(column_names, partitions)
            )
        column_names, index_names = _normalize_index_columns(
            columns, column_names + partitions, index, index_names
        )
        all_columns = index_names + column_names
        # Deterministic piece order so partition boundaries are stable.
        pieces = sorted(dataset.pieces, key=lambda piece: natural_sort_key(piece.path))
        # Check that categories are included in columns
        if categories and not set(categories).intersection(all_columns):
            raise ValueError(
                "categories not in available columns.\n"
                "categories: {} | columns: {}".format(categories, list(all_columns))
            )
        dtypes = _get_pyarrow_dtypes(schema, categories)
        dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}
        index_cols = index or ()
        meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names)
        meta = clear_known_categories(meta, cols=categories)
        # Decide whether statistics collection is worthwhile / required.
        if (
            gather_statistics is None
            and dataset.metadata
            and dataset.metadata.num_row_groups >= len(pieces)
        ):
            gather_statistics = True
        if not pieces:
            gather_statistics = False
        if filters:
            # Filters may require us to gather statistics
            if gather_statistics is False and partitions:
                warnings.warn(
                    "Filtering with gather_statistics=False. "
                    "Only partition columns will be filtered correctly."
                )
            elif gather_statistics is False:
                raise ValueError("Cannot apply filters with gather_statistics=False")
            elif not gather_statistics:
                gather_statistics = True
        row_groups_per_piece = None
        if gather_statistics:
            # Read from _metadata file
            if dataset.metadata and dataset.metadata.num_row_groups >= len(pieces):
                row_groups = [
                    dataset.metadata.row_group(i)
                    for i in range(dataset.metadata.num_row_groups)
                ]
                # Re-order row-groups by path name if known
                if col_chunk_paths:
                    row_groups = sorted(
                        row_groups,
                        key=lambda row_group: natural_sort_key(
                            row_group.column(0).file_path
                        ),
                    )
                if split_row_groups and len(dataset.paths) == 1:
                    row_groups_per_piece = _get_row_groups_per_piece(
                        pieces, dataset.metadata, dataset.paths[0], fs
                    )
                names = dataset.metadata.schema.names
            else:
                # Read from each individual piece (quite possibly slow).
                row_groups, row_groups_per_piece = _get_md_row_groups(pieces)
                if row_groups:
                    piece = pieces[0]
                    md = piece.get_metadata()
                    names = md.schema.names
                else:
                    gather_statistics = False
        if gather_statistics:
            # Build one stats dict per row group (or per file when
            # split_row_groups=False and file paths are known).
            stats = []
            skip_cols = set()  # Columns with min/max = None detected
            path_last = None
            for ri, row_group in enumerate(row_groups):
                s = {"num-rows": row_group.num_rows, "columns": []}
                for i, name in enumerate(names):
                    if name not in skip_cols:
                        column = row_group.column(i)
                        d = {"name": name}
                        if column.statistics:
                            cs_min = column.statistics.min
                            cs_max = column.statistics.max
                            if not column.statistics.has_min_max:
                                cs_min, cs_max = None, None
                            if None in [cs_min, cs_max] and ri == 0:
                                skip_cols.add(name)
                                continue
                            cs_vals = pd.Series([cs_min, cs_max])
                            d.update(
                                {
                                    "min": cs_vals[0],
                                    "max": cs_vals[1],
                                    "null_count": column.statistics.null_count,
                                }
                            )
                        s["columns"].append(d)
                s["total_byte_size"] = row_group.total_byte_size
                if col_chunk_paths:
                    s["file_path_0"] = row_group.column(0).file_path
                    if not split_row_groups and (s["file_path_0"] == path_last):
                        # Rather than appending a new "row-group", just merge
                        # new `s` statistics into last element of `stats`.
                        # Note that each stats element will now correspond to an
                        # entire file (rather than actual "row-groups")
                        _merge_statistics(stats, s)
                        continue
                    else:
                        path_last = s["file_path_0"]
                stats.append(s)
        else:
            stats = None
        if dataset.partitions:
            # Partition columns surface as (empty) categoricals in `meta`.
            for partition in dataset.partitions:
                if isinstance(index, list) and partition.name == index[0]:
                    meta.index = pd.CategoricalIndex(
                        categories=partition.keys, name=index[0]
                    )
                elif partition.name == meta.index.name:
                    meta.index = pd.CategoricalIndex(
                        categories=partition.keys, name=meta.index.name
                    )
                elif partition.name in meta.columns:
                    meta[partition.name] = pd.Categorical(
                        categories=partition.keys, values=[]
                    )
        # Create `parts`
        # This is a list of row-group-descriptor dicts, or file-paths
        # if we have a list of files and gather_statistics=False
        if not parts:
            if split_row_groups and row_groups_per_piece:
                # TODO: This block can be removed after ARROW-2801
                parts = []
                rg_tot = 0
                for i, piece in enumerate(pieces):
                    num_row_groups = row_groups_per_piece[i]
                    for rg in range(num_row_groups):
                        parts.append((piece.path, rg, piece.partition_keys))
                        # Setting file_path here, because it may be
                        # missing from the row-group/column-chunk stats
                        if "file_path_0" not in stats[rg_tot]:
                            stats[rg_tot]["file_path_0"] = piece.path
                        rg_tot += 1
            else:
                parts = [
                    (piece.path, piece.row_group, piece.partition_keys)
                    for piece in pieces
                ]
        parts = [
            {
                "piece": piece,
                "kwargs": {"partitions": dataset.partitions, "categories": categories},
            }
            for piece in parts
        ]
        return (meta, stats, parts)
    @classmethod
    def read_partition(
        cls, fs, piece, columns, index, categories=(), partitions=(), **kwargs
    ):
        """Read one part (a file path, or a (path, row_group, partition_keys)
        tuple) into a pandas DataFrame with the requested columns/index.

        NOTE(review): index levels are appended to the caller-supplied
        ``columns`` list in place -- callers appear to pass per-call lists,
        but confirm before reusing a list across calls.
        """
        if isinstance(index, list):
            for level in index:
                # unclear if we can use set ops here. I think the order matters.
                # Need the membership test to avoid duplicating index when
                # we slice with `columns` later on.
                if level not in columns:
                    columns.append(level)
        if isinstance(piece, str):
            # `piece` is a file-path string
            piece = pq.ParquetDatasetPiece(
                piece, open_file_func=partial(fs.open, mode="rb")
            )
        else:
            # `piece` contains (path, row_group, partition_keys)
            (path, row_group, partition_keys) = piece
            piece = pq.ParquetDatasetPiece(
                path,
                row_group=row_group,
                partition_keys=partition_keys,
                open_file_func=partial(fs.open, mode="rb"),
            )
        # Ensure `columns` and `partitions` do not overlap
        columns_and_parts = columns.copy()
        if columns_and_parts and partitions:
            for part_name in partitions.partition_names:
                if part_name in columns:
                    columns.remove(part_name)
                else:
                    columns_and_parts.append(part_name)
            columns = columns or None
        arrow_table = cls._parquet_piece_as_arrow(piece, columns, partitions, **kwargs)
        df = cls._arrow_table_to_pandas(arrow_table, categories, **kwargs)
        # Note that `to_pandas(ignore_metadata=False)` means
        # pyarrow will use the pandas metadata to set the index.
        index_in_columns_and_parts = set(df.index.names).issubset(
            set(columns_and_parts)
        )
        if not index:
            if index_in_columns_and_parts:
                # User does not want to set index and a desired
                # column/partition has been set to the index
                df.reset_index(drop=False, inplace=True)
            else:
                # User does not want to set index and an
                # "unwanted" column has been set to the index
                df.reset_index(drop=True, inplace=True)
        else:
            if set(df.index.names) != set(index) and index_in_columns_and_parts:
                # The wrong index has been set and it contains
                # one or more desired columns/partitions
                df.reset_index(drop=False, inplace=True)
            elif index_in_columns_and_parts:
                # The correct index has already been set
                index = False
                columns_and_parts = list(
                    set(columns_and_parts).difference(set(df.index.names))
                )
        df = df[list(columns_and_parts)]
        if index:
            df = df.set_index(index)
        return df
@classmethod
def _arrow_table_to_pandas(
cls, arrow_table: pa.Table, categories, **kwargs
) -> pd.DataFrame:
_kwargs = kwargs.get("arrow_to_pandas", {})
_kwargs.update({"use_threads": False, "ignore_metadata": False})
return arrow_table.to_pandas(categories=categories, **_kwargs)
    @classmethod
    def _parquet_piece_as_arrow(
        cls, piece: pq.ParquetDatasetPiece, columns, partitions, **kwargs
    ) -> pa.Table:
        """Read one dataset piece into an Arrow table.

        Reads single-threaded (dask provides the parallelism) and uses the
        stored pandas metadata; extra reader options come from kwargs["read"].
        """
        arrow_table = piece.read(
            columns=columns,
            partitions=partitions,
            use_pandas_metadata=True,
            use_threads=False,
            **kwargs.get("read", {}),
        )
        return arrow_table
@staticmethod
def initialize_write(
df,
fs,
path,
append=False,
partition_on=None,
ignore_divisions=False,
division_info=None,
**kwargs,
):
dataset = fmd = None
i_offset = 0
if append and division_info is None:
ignore_divisions = True
fs.mkdirs(path, exist_ok=True)
if append:
try:
# Allow append if the dataset exists.
# Also need dataset.metadata object if
# ignore_divisions is False (to check divisions)
dataset = pq.ParquetDataset(path, filesystem=fs)
if not dataset.metadata and not ignore_divisions:
# TODO: Be more flexible about existing metadata.
raise NotImplementedError(
"_metadata file needed to `append` "
"with `engine='pyarrow'` "
"unless `ignore_divisions` is `True`"
)
fmd = dataset.metadata
except (IOError, ValueError, IndexError):
# Original dataset does not exist - cannot append
append = False
if append:
names = dataset.metadata.schema.names
has_pandas_metadata = (
dataset.schema.to_arrow_schema().metadata is not None
and b"pandas" in dataset.schema.to_arrow_schema().metadata
)
if has_pandas_metadata:
pandas_metadata = json.loads(
dataset.schema.to_arrow_schema().metadata[b"pandas"].decode("utf8")
)
categories = [
c["name"]
for c in pandas_metadata["columns"]
if c["pandas_type"] == "categorical"
]
else:
categories = None
dtypes = _get_pyarrow_dtypes(dataset.schema.to_arrow_schema(), categories)
if set(names) != set(df.columns) - set(partition_on):
raise ValueError(
"Appended columns not the same.\n"
"Previous: {} | New: {}".format(names, list(df.columns))
)
elif (pd.Series(dtypes).loc[names] != df[names].dtypes).any():
# TODO Coerce values for compatible but different dtypes
raise ValueError(
"Appended dtypes differ.\n{}".format(
set(dtypes.items()) ^ set(df.dtypes.iteritems())
)
)
i_offset = len(dataset.pieces)
if division_info["name"] not in names:
ignore_divisions = True
if not ignore_divisions:
old_end = None
row_groups = [
dataset.metadata.row_group(i)
for i in range(dataset.metadata.num_row_groups)
]
for row_group in row_groups:
for i, name in enumerate(names):
if name != division_info["name"]:
continue
column = row_group.column(i)
if column.statistics:
if not old_end:
old_end = column.statistics.max
else:
old_end = max(old_end, column.statistics.max)
break
divisions = division_info["divisions"]
if divisions[0] < old_end:
raise ValueError(
"Appended divisions overlapping with the previous ones"
" (set ignore_divisions=True to append anyway).\n"
"Previous: {} | New: {}".format(old_end, divisions[0])
)
return fmd, i_offset
    @staticmethod
    def write_partition(
        df,
        path,
        fs,
        filename,
        partition_on,
        return_metadata,
        fmd=None,
        compression=None,
        index_cols=None,
        schema=None,
        **kwargs,
    ):
        """Write a single pandas partition to parquet files under ``path``.

        The partition is converted to a ``pyarrow.Table`` and written either
        as one file per partition value (when ``partition_on`` is given) or
        as a single file named ``filename``.  When ``return_metadata`` is
        true the collected row-group metadata is returned so the caller can
        later assemble the dataset-level ``_metadata`` file.

        :param df: pandas DataFrame holding this partition's data
        :param path: root output directory (on ``fs``)
        :param fs: fsspec-style filesystem object
        :param filename: file name used when not hive-partitioning
        :param partition_on: list of column names to hive-partition by
        :param return_metadata: whether to return schema/metadata for
            aggregation into ``_metadata``
        :param fmd: unused here; accepted for engine-interface symmetry
        :param index_cols: columns to move into the index before writing
        :param schema: optional explicit pyarrow schema
        :returns: ``[{"schema": ..., "meta": ...}]`` or ``[]``
        """
        _meta = None
        preserve_index = False
        if index_cols:
            # Move the requested columns into the index so pyarrow records
            # them as index columns in the pandas metadata.
            df = df.set_index(index_cols)
            preserve_index = True
        t = pa.Table.from_pandas(df, preserve_index=preserve_index, schema=schema)
        if partition_on:
            md_list = _write_partitioned(
                t, path, partition_on, fs, preserve_index=preserve_index, **kwargs
            )
            if md_list:
                # Collapse the per-file metadata into one object holding all
                # row groups of this partition.
                _meta = md_list[0]
                for i in range(1, len(md_list)):
                    _meta.append_row_groups(md_list[i])
        else:
            md_list = []
            with fs.open(fs.sep.join([path, filename]), "wb") as fil:
                pq.write_table(
                    t,
                    fil,
                    compression=compression,
                    metadata_collector=md_list,
                    **kwargs,
                )
            if md_list:
                _meta = md_list[0]
                # Record the relative file name so the aggregated _metadata
                # file can locate this piece later.
                _meta.set_file_path(filename)
        # Return the schema needed to write the metadata
        if return_metadata:
            return [{"schema": t.schema, "meta": _meta}]
        else:
            return []
    @staticmethod
    def write_metadata(parts, fmd, fs, path, append=False, **kwargs):
        """Write the dataset-level ``_common_metadata`` and ``_metadata`` files.

        :param parts: per-partition results from ``write_partition`` — each a
            one-element list holding a dict with ``"schema"`` and ``"meta"``
        :param fmd: pre-existing footer metadata to extend when appending
        :param fs: fsspec-style filesystem object
        :param path: root dataset directory
        :param append: when True, row groups are appended to ``fmd`` and
            ``_common_metadata`` is left untouched
        """
        if parts:
            if not append:
                # Get only arguments specified in the function
                common_metadata_path = fs.sep.join([path, "_common_metadata"])
                keywords = getargspec(pq.write_metadata).args
                kwargs_meta = {k: v for k, v in kwargs.items() if k in keywords}
                with fs.open(common_metadata_path, "wb") as fil:
                    pq.write_metadata(parts[0][0]["schema"], fil, **kwargs_meta)
            # Aggregate metadata and write to _metadata file
            metadata_path = fs.sep.join([path, "_metadata"])
            if append and fmd is not None:
                # Start from the existing footer; every new part is appended.
                _meta = fmd
                i_start = 0
            else:
                # Seed from the first part and append the rest.
                _meta = parts[0][0]["meta"]
                i_start = 1
            for i in range(i_start, len(parts)):
                _meta.append_row_groups(parts[i][0]["meta"])
            with fs.open(metadata_path, "wb") as fil:
                _meta.write_metadata_file(fil)
| {
"repo_name": "ContinuumIO/dask",
"path": "dask/dataframe/io/parquet/arrow.py",
"copies": "1",
"size": "31580",
"license": "bsd-3-clause",
"hash": -7615407034314589000,
"line_mean": 39.3320561941,
"line_max": 87,
"alpha_frac": 0.5242241925,
"autogenerated": false,
"ratio": 4.4560462819246505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.548027047442465,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from collections import OrderedDict
try:
import shiboken
except ImportError:
from PySide import shiboken
from PySide import QtGui, QtCore
from jukeboxcore.gui.main import JB_MainWindow
class WidgetToolTip(QtGui.QWidget):
    """A ToolTip that can be installed on a widget via :meth:`WidgetToolTip.install_tooltip`

    The tooltip is a selector for added widgets. The widgets are rendered to icons
    that are placed onto buttons. If a button is clicked the widget receives focus.

    .. Warning:: Setting the layout after initialisation has no effect at the moment.

    There are a few properties and setters that can be changed. The effect might
    only take place after calling :meth:`WidgetToolTip.show`.

    Properties and setters:

      :meth:`WidgetToolTip.alignment`: property for the alignment relative to the mouse
      :meth:`WidgetToolTip.offset`: property for the offset relative to the alignment
      :meth:`WidgetToolTip.triggerevent`: property for the event that triggers the tooltip
      :meth:`WidgetToolTip.setup_size`: setter for the size of one cell/button
      :meth:`WidgetToolTip.setup_cyatimer`: setter for the time the widget waits before closing

    To use this tooltip for any widget that triggers a ToolTip event:

      1. Create the WidgetToolTip widget
      2. Install it on a widget
      3. Add Widgets to the WidgetToolTip

    Example::

      mainwidget = QtGui.QWidget()
      widget1 = QtGui.QWidget()
      widget2 = QtGui.QWidget()
      # Step 1 with the default parameters
      tooltip = WidgetToolTip(orientation=QtCore.Qt.Horizontal,
                              alignment=QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter,
                              offset=20,
                              parent=None,
                              flags=QtCore.Qt.CustomizeWindowHint)
      # Step 2
      tooltip.install_tooltip(mainwidget)
      # Step 3
      tooltip.add_widget(widget1)
      tooltip.add_widget(widget2)
    """

    def __init__(self, orientation=QtCore.Qt.Horizontal,
                 alignment=None,
                 offset=20,
                 interval=1000,
                 size=QtCore.QSize(160, 90),
                 triggerevent=QtCore.QEvent.ToolTip,
                 parent=None,
                 flags=QtCore.Qt.CustomizeWindowHint):  # hides title bar but gives a frame
        """Initialize the ToolTip in the given orientation with an optional parent and windowflags.

        :param orientation: the orientation of the tooltip. horizontal or vertical
        :type orientation: QtCore.Qt.Orientation
        :param alignment: affects the position of the popup relative to the mouse.
                          If None, align left and vcenter is chosen.
                          Use left, right, hcenter, top, bottom and vcenter only.
                          Everything else will be ignored.
        :type alignment: QtCore.Qt.Alignment | None
        :param offset: The offset to the alignment in pixels
        :type offset: int
        :param interval: The time to wait for the tooltip to close in milliseconds
        :type interval: int
        :param size: The size of one cell/button
        :type size: QtCore.QSize
        :param triggerevent: The event that triggers the tooltip
        :type triggerevent: QtCore.QEvent.Type
        :param parent: the parent of the widget
        :type parent: QtGui.QWidget
        :param flags: the windowflags
        :type flags: QtCore.QtWindowFlags
        :raises: TypeError
        """
        super(WidgetToolTip, self).__init__(parent, flags)
        self._buttons = OrderedDict()
        self._triggerevent = triggerevent
        self._alignment = alignment
        # Bug fix: honour the ``offset`` argument; it was hard-coded to 20,
        # silently ignoring the parameter.
        self._offset = offset
        if alignment is None:
            self._alignment = QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
        self.setup_layout(orientation)
        self.setup_size(size.width(), size.height())
        self.setup_cyatimer(interval)

    def setup_layout(self, orientation=None):
        """Setup the layout for the tooltip in the given orientation

        :param orientation: the orientation of the layout; None means horizontal
        :type orientation: QtCore.Qt.Orientation | None
        :returns: None
        :rtype: None
        :raises: TypeError
        """
        if orientation == QtCore.Qt.Horizontal or orientation is None:
            layout = QtGui.QHBoxLayout()
        elif orientation == QtCore.Qt.Vertical:
            layout = QtGui.QVBoxLayout()
        else:
            raise TypeError('Orientation is of wrong type! Allowed is QtCore.Qt.Horizontal and QtCore.Qt.Vertical. Given: %s' % orientation)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        self.setLayout(layout)

    def setup_size(self, width, height):
        """Set the width and height for one cell in the tooltip

        This is indirectly accomplished by setting the icon sizes for the buttons.

        :param width: the width of one cell, min. is 7 -> icon width = 0
        :type width: int
        :param height: the height of one cell, min. is 6 -> icon height = 0
        :type height: int
        :returns: None
        :rtype: None
        :raises: None
        """
        # The button chrome adds roughly 7x6 pixels around the icon.
        self._iconw = max(0, width - 7)
        self._iconh = max(0, height - 6)
        self.update_all_buttons()

    def setup_cyatimer(self, interval):
        """Setup the timer that will close the widget after the mouse left the widget for the time of interval

        :param interval: the time that the tooltip waits before it disappears in milliseconds
        :type interval: int
        :returns: None
        :rtype: None
        :raises: None
        """
        self.cyatimer = QtCore.QTimer(self)
        self.cyatimer.setSingleShot(True)
        self.cyatimer.timeout.connect(self.hide)
        self._interval = interval

    def event(self, event):
        """Reimplementation of QWidget.event

        The widget is closed when the window is deactivated.
        The widget is closed after the set interval if the mouse leaves the widget.
        The timer is stopped when the mouse enters the widget before the interval ends.
        On show, the close timer is reset (the buttons themselves are refreshed
        in :meth:`WidgetToolTip.show`).
        """
        if event.type() == QtCore.QEvent.WindowDeactivate:  # hide the tooltip
            self.cyatimer.stop()
            self.hide()
            return True
        if event.type() == QtCore.QEvent.Leave:  # start timer
            self.cyatimer.start(self._interval)
            return True
        if event.type() == QtCore.QEvent.Enter:  # reset/stop timer
            self.cyatimer.stop()
            return True
        if event.type() == QtCore.QEvent.Show:  # make sure a stale timer cannot hide us
            self.cyatimer.stop()
            return True
        return super(WidgetToolTip, self).event(event)

    def create_button(self, widget):
        """Create a button that has the given widget rendered as an icon

        :param widget: the widget to render as icon
        :type widget: QtGui.QWidget
        :returns: the created button
        :rtype: QtGui.QAbstractButton
        :raises: None
        """
        btn = QtGui.QToolButton(self)
        btn.setIconSize(QtCore.QSize(self._iconw, self._iconh))
        self.update_button(btn, widget)
        return btn

    def update_button(self, button, widget):
        """Update the icon of the button with the given widget

        If the widget is invalid (its C++ object was deleted), it is removed
        from the tooltip automatically.

        :param button: the button to update
        :type button: QtGui.QAbstractButton
        :param widget: the widget to render as icon
        :type widget: QtGui.QWidget
        :returns: None
        :rtype: None
        :raises: None
        """
        if not shiboken.isValid(widget):
            self.remove_widget(widget)
            return
        button.setIconSize(QtCore.QSize(self._iconw, self._iconh))
        pix = QtGui.QPixmap(widget.size())
        widget.render(pix)
        icon = QtGui.QIcon(pix)
        button.setIcon(icon)

    def update_all_buttons(self, ):
        """Update all buttons

        :returns: None
        :rtype: None
        :raises: None
        """
        for widget, button in self._buttons.items():
            self.update_button(button, widget)
        self.adjustSize()

    def focus_widget(self, checked=None, w=None):
        """Focus the given widget. Checked is ignored and only used as a slot for QAbstractButton.clicked.

        :param checked: The checked state of the button that was clicked
        :type checked: bool
        :param w: the widget to focus
        :type w: QtGui.QWidget
        :returns: None
        :raises: None
        """
        if w is None:
            return
        if w.isMinimized():
            w.showNormal()
        else:
            w.show()
        w.activateWindow()
        w.setFocus()

    def add_widget(self, widget):
        """Add the given widget to the tooltip

        :param widget: the widget to add
        :type widget: QtGui.QWidget
        :returns: None
        :rtype: None
        :raises: None
        """
        if self._buttons.get(widget):
            return
        btn = self.create_button(widget)
        # partial keeps a reference so the slot is not garbage collected
        cb = partial(self.focus_widget, w=widget)
        btn.clicked.connect(cb)
        self.layout().addWidget(btn)
        self._buttons[widget] = btn

    def remove_widget(self, widget):
        """Remove the given widget from the tooltip

        :param widget: the widget to remove
        :type widget: QtGui.QWidget
        :returns: None
        :rtype: None
        :raises: KeyError
        """
        button = self._buttons.pop(widget)
        self.layout().removeWidget(button)
        button.deleteLater()

    def eventFilter(self, watched, event):
        """Filter ToolTip events and display this tooltip widget, if watched requests a tooltip.

        :param watched: The watched object
        :type watched: QtCore.QObject
        :param event: The event sent by watched
        :type event: QtCore.QEvent
        :returns: True if the event was processed. False if the event should be passed on.
        :rtype: bool
        :raises: None
        """
        if event.type() == self._triggerevent:
            self.show()
            return True
        else:
            return False

    def get_position(self, ):
        """Return a recommended position for this widget to appear

        The position is derived from the current mouse position, the
        configured alignment and the configured offset.

        :returns: the position
        :rtype: QPoint
        :raises: None
        """
        pos = QtGui.QCursor.pos()
        if self._alignment & QtCore.Qt.AlignLeft == QtCore.Qt.AlignLeft:
            pos.setX(pos.x() - self._offset)
        elif self._alignment & QtCore.Qt.AlignRight == QtCore.Qt.AlignRight:
            pos.setX(pos.x() - self.frameGeometry().width() + self._offset)
        elif self._alignment & QtCore.Qt.AlignHCenter == QtCore.Qt.AlignHCenter:
            pos.setX(pos.x() - self.frameGeometry().width()/2)
        if self._alignment & QtCore.Qt.AlignTop == QtCore.Qt.AlignTop:
            pos.setY(pos.y() - self._offset)
        elif self._alignment & QtCore.Qt.AlignBottom == QtCore.Qt.AlignBottom:
            pos.setY(pos.y() - self.frameGeometry().height() + self._offset)
        elif self._alignment & QtCore.Qt.AlignVCenter == QtCore.Qt.AlignVCenter:
            pos.setY(pos.y() - self.frameGeometry().height()/2)
        return pos

    def install_tooltip(self, parent):
        """Install the tooltip on the parent so that it is shown when parent requests a tooltip

        :param parent: the parent object
        :type parent: QObject
        :returns: None
        :rtype: None
        :raises: None
        """
        parent.installEventFilter(self)

    @property
    def alignment(self):
        """Get the alignment of the tooltip relative to the mouse

        :returns: alignment
        :rtype: QtCore.Qt.Alignment
        :raises: None
        """
        return self._alignment

    @alignment.setter
    def alignment(self, alignment):
        """Set the alignment of the tooltip relative to the mouse

        :param alignment: The value for alignment
        :type alignment: QtCore.Qt.Alignment
        :raises: None
        """
        self._alignment = alignment

    @property
    def offset(self):
        """Return offset to the alignment in pixels

        :returns: offset
        :rtype: int
        :raises: None
        """
        return self._offset

    @offset.setter
    def offset(self, offset):
        """Set offset to the alignment in pixels

        :param offset: The value for offset
        :type offset: int
        :raises: None
        """
        self._offset = offset

    @property
    def triggerevent(self):
        """Return triggerevent

        :returns: triggerevent
        :rtype: QtCore.QEvent.Type
        :raises: None
        """
        return self._triggerevent

    @triggerevent.setter
    def triggerevent(self, eventtype):
        """Set triggerevent

        :param eventtype: The value for triggerevent
        :type eventtype: QtCore.QEvent.Type
        :raises: None
        """
        self._triggerevent = eventtype

    def get_widgets(self, ):
        """Return all registered Widgets

        :returns: list of widgets
        :rtype: list
        :raises: None
        """
        return self._buttons.keys()

    def show(self, ):
        """Reimplementation that moves the tooltip and updates the buttons

        :returns: None
        :rtype: None
        :raises: None
        """
        self.update_all_buttons()
        pos = self.get_position()
        self.move(pos)
        super(WidgetToolTip, self).show()
class JB_WindowToolTip(WidgetToolTip):
    """Tooltip that always offers every live :class:`JB_MainWindow`."""

    def show(self):
        """Register any main windows not yet in the tooltip, then show it.

        :returns: None
        :rtype: None
        :raises: None
        """
        known = set(self.get_widgets())
        for window in set(JB_MainWindow.instances()) - known:
            self.add_widget(window)
        super(JB_WindowToolTip, self).show()
| {
"repo_name": "JukeboxPipeline/jukebox-core",
"path": "src/jukeboxcore/gui/widgets/tooltip.py",
"copies": "1",
"size": "14267",
"license": "bsd-3-clause",
"hash": -1364760232753933600,
"line_mean": 33.5447941889,
"line_max": 140,
"alpha_frac": 0.6111305811,
"autogenerated": false,
"ratio": 4.328580097087379,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007954191989697231,
"num_lines": 413
} |
from functools import partial
from collections import Sequence
from flask import Blueprint, g, abort, Response, jsonify, request, \
render_template, escape
from werkzeug.utils import cached_property
from flask.ext.introspect import Tree, TreeRootView, NOTEXIST
def setup_tree(
        objects,
        root_class,
        roots=(),
        leaf_classes=(),
        template_base=None,
        xhr_template_base=None,
        tree_class=Tree,
        ):
    """Per-request setup: build the introspection tree and stash it on ``g``.

    :param objects: the root objects (or a zero-argument callable producing
        them); must be an instance of ``root_class.__type__``
    :param root_class: the tree root view class
    :param roots: root names passed to the tree; when empty no tree is built
    :param leaf_classes: view classes for leaf nodes
    :param template_base: optional base template name stored on ``g``
    :param xhr_template_base: optional base template name for XHR requests
    :param tree_class: tree implementation to instantiate
    """
    if callable(objects):
        objects = objects()
    if not isinstance(objects, root_class.__type__):
        # Bug fix: the message previously read "does not matches with
        # %s.__type___" (bad grammar and a three-underscore attribute name).
        abort(500,
              'Root objects type (%s) does not match %s.__type__ (%s)'
              % (escape(type(objects)), root_class.__name__,
                 escape(root_class.__type__)))
    if roots:
        g.tree = tree_class(objects, roots, root_class=root_class,
                            leaf_classes=leaf_classes)
    if template_base:
        g.template_base = template_base
    if xhr_template_base:
        g.xhr_template_base = xhr_template_base
def path(path=None):
    """View function: resolve ``path`` in the request tree and dispatch to it.

    With no path the tree root is used.  The resolved item's view is asked to
    handle the request; the result may be a Response/string (returned as-is),
    a JSON-able value, or a ``(template, context)`` pair rendered through an
    optional base template stored on ``g``.
    """
    if path is None:
        # No path given: dispatch to the tree root, if it is viewable.
        if not hasattr(g.tree.root, 'view'):
            abort(404)
        g.item = g.tree.root
    else:
        (treeitem, tail) = g.tree.get(path)
        # NOTEXIST is the tree's sentinel for an unresolvable path.
        if treeitem is NOTEXIST:
            abort(404)
        if not hasattr(treeitem, 'view'):
            abort(404)
        g.item = treeitem
    result = g.item.view().dispatch_request(g.item)
    if result is None:
        return ''
    if isinstance(result, (Response, basestring)):
        return result
    # Anything that is not a (template, context) 2-tuple is serialized to JSON.
    if not (isinstance(result, Sequence) and len(result) == 2):
        if isinstance(result, Sequence):
            return jsonify(*result)
        else:
            return jsonify(result)
    (template, context) = result
    # view methods can override this g attributes to change base template
    if request.is_xhr:
        base = getattr(g, 'xhr_template_base', None)
    else:
        base = getattr(g, 'template_base', None)
    if base:
        return render_template(base, template=template, **context)
    else:
        return render_template(template, **context)
def blueprint(
        name,
        import_name,
        objects,
        setup=setup_tree,
        path=path,
        methods=['get', 'post', 'put', 'patch', 'delete', 'head'],
        super_root_class=TreeRootView,
        **kwargs
        ):
    """Create a Blueprint that routes all requests through an introspection tree.

    :param name: blueprint name
    :param import_name: blueprint import name
    :param objects: root objects (or callable) passed to ``setup``
    :param setup: per-request setup callable (default :func:`setup_tree`)
    :param path: view function handling tree paths (default :func:`path`)
    :param methods: HTTP methods routed to ``path``
    :param super_root_class: root view class for the tree
    :param kwargs: extra keyword arguments forwarded to ``setup``
    :returns: the configured :class:`flask.Blueprint`
    """
    bp = Blueprint(name, import_name)
    # Bug fix: honour the ``setup`` argument. Previously the module-level
    # ``setup_tree`` was always used, so a custom setup callable was ignored.
    bp.before_request(partial(setup, objects, super_root_class,
                              **kwargs))
    if hasattr(super_root_class, 'view'):
        bp.route('/', methods=methods)(path)
    bp.route('/<path:path>', methods=methods)(path)
    return bp
| {
"repo_name": "denz/flask_introspect",
"path": "flask_introspect/rest.py",
"copies": "1",
"size": "2616",
"license": "bsd-3-clause",
"hash": -4090849128459230700,
"line_mean": 24.3980582524,
"line_max": 78,
"alpha_frac": 0.5967125382,
"autogenerated": false,
"ratio": 3.8584070796460175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4955119617846017,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from ...compat.collections import Counter
from ...external.qt.QtGui import (QWidget, QSlider, QLabel, QComboBox,
QHBoxLayout, QVBoxLayout)
from ...external.qt.QtCore import Qt, Signal
from ..widget_properties import (TextProperty,
ValueProperty,
CurrentComboProperty)
class SliceWidget(QWidget):
    """A single-dimension control: a mode selector (x / y / slice) plus a
    slider shown only in 'slice' mode to pick the slice position.

    Signals
    -------
    slice_changed : emitted when the slider value changes
    mode_changed : emitted when the mode combo box changes
    """
    label = TextProperty('_ui_label')
    slice_center = ValueProperty('_ui_slider')
    mode = CurrentComboProperty('_ui_mode')
    slice_changed = Signal(int)
    mode_changed = Signal(str)

    def __init__(self, label='', pix2world=None, lo=0, hi=10,
                 parent=None, aggregation=None):
        """
        :param label: text shown next to the mode selector
        :param pix2world: unsupported; must be None
        :param lo: minimum slider value
        :param hi: maximum slider value
        :param aggregation: unsupported; must be None
        :raises NotImplementedError: if aggregation or pix2world is given
        """
        super(SliceWidget, self).__init__(parent)
        # Bug fix: ``raise NotImplemented(...)`` raised a TypeError because
        # NotImplemented is a constant, not an exception class.
        if aggregation is not None:
            raise NotImplementedError("Aggregation option not implemented")
        if pix2world is not None:
            raise NotImplementedError("Pix2world option not implemented")
        layout = QVBoxLayout()
        layout.setContentsMargins(3, 1, 3, 1)
        top = QHBoxLayout()
        top.setContentsMargins(3, 3, 3, 3)
        label = QLabel(label)
        top.addWidget(label)
        mode = QComboBox()
        mode.addItem('x', 'x')
        mode.addItem('y', 'y')
        mode.addItem('slice', 'slice')
        mode.currentIndexChanged.connect(lambda x:
                                         self.mode_changed.emit(self.mode))
        mode.currentIndexChanged.connect(self._update_mode)
        top.addWidget(mode)
        layout.addLayout(top)
        slider = QSlider(Qt.Horizontal)
        slider.setMinimum(lo)
        slider_lbl = QLabel()
        slider.setMaximum(hi)
        # start in the middle of the range
        slider.setValue((lo + hi) / 2)
        slider.valueChanged.connect(lambda x:
                                    self.slice_changed.emit(self.mode))
        slider.valueChanged.connect(lambda x: slider_lbl.setText(str(x)))
        layout.addWidget(slider_lbl)
        layout.addWidget(slider)
        self.setLayout(layout)
        self._ui_label = label
        self._ui_slider = slider
        self._slider_lbl = slider_lbl
        self._ui_mode = mode
        self._update_mode()
        self._frozen = False

    def _update_mode(self, *args):
        # The slider is only meaningful in 'slice' mode.
        if self.mode != 'slice':
            self._ui_slider.hide()
            self._slider_lbl.hide()
        else:
            self._ui_slider.show()
            self._slider_lbl.show()

    def freeze(self):
        """Lock this dimension into 'slice' mode (used for length-1 axes)."""
        self.mode = 'slice'
        self._ui_mode.setEnabled(False)
        self._ui_slider.hide()
        self._frozen = True

    @property
    def frozen(self):
        return self._frozen
class DataSlice(QWidget):
    """
    A DataSlice widget provides an interface for selecting
    slices through an N-dimensional dataset
    Signals
    -------
    slice_changed : triggered when the slice through the data changes
    """
    slice_changed = Signal()
    def __init__(self, data=None, parent=None):
        """
        :param data: :class:`~glue.core.data.Data` instance, or None
        """
        super(DataSlice, self).__init__(parent)
        # one SliceWidget per data dimension (empty for <3D data)
        self._slices = []
        self._data = None
        layout = QVBoxLayout()
        layout.setSpacing(4)
        layout.setContentsMargins(0, 3, 0, 3)
        self.layout = layout
        self.setLayout(layout)
        self.set_data(data)
    @property
    def ndim(self):
        # dimensionality of the current data (0 when no data is set)
        return len(self.shape)
    @property
    def shape(self):
        return tuple() if self._data is None else self._data.shape
    def _clear(self):
        # Remove all layout items and dispose of the slice widgets.
        for _ in range(self.layout.count()):
            self.layout.takeAt(0)
        for s in self._slices:
            s.close()
        self._slices = []
    def set_data(self, data):
        """
        Change datasets
        :parm data: :class:`~glue.core.data.Data` instance
        """
        # remove old widgets
        self._clear()
        self._data = data
        # slicing controls are only needed for 3D+ data
        if data is None or data.ndim < 3:
            return
        # create slider widget for each dimension...
        for i, s in enumerate(data.shape):
            slider = SliceWidget(data.get_world_component_id(i).label,
                                 hi=s - 1)
            # last two dimensions default to the display axes
            if i == self.ndim - 1:
                slider.mode = 'x'
            elif i == self.ndim - 2:
                slider.mode = 'y'
            else:
                slider.mode = 'slice'
            self._slices.append(slider)
            # save ref to prevent PySide segfault
            self.__on_slice = partial(self._on_slice, i)
            self.__on_mode = partial(self._on_mode, i)
            slider.slice_changed.connect(self.__on_slice)
            slider.mode_changed.connect(self.__on_mode)
            # a length-1 axis can only ever be a fixed slice
            if s == 1:
                slider.freeze()
        # ... and add to the layout
        for s in self._slices[::-1]:
            self.layout.addWidget(s)
            s.show()  # this somehow fixes #342
        self.layout.addStretch(5)
    def _on_slice(self, index, slice_val):
        self.slice_changed.emit()
    def _on_mode(self, index, mode_index):
        # A mode change may leave the slice description without exactly one
        # 'x' and one 'y'; if so, flip another (non-frozen) dimension's mode
        # until the description is valid again.
        s = self.slice
        def isok(ss):
            # valid slice description: 'x' and 'y' both appear
            c = Counter(ss)
            return c['x'] == 1 and c['y'] == 1
        if isok(s):
            self.slice_changed.emit()
            return
        for i in range(len(s)):
            if i == index:
                continue
            if self._slices[i].frozen:
                continue
            for mode in 'x', 'y', 'slice':
                if self._slices[i].mode == mode:
                    continue
                ss = list(s)
                ss[i] = mode
                if isok(ss):
                    self._slices[i].mode = mode
                    return
        else:
            # loop completed without finding a repair -> inconsistent state
            raise RuntimeError("Corrupted Data Slice")
    @property
    def slice(self):
        """
        A description of the slice through the dataset
        A tuple of length equal to the dimensionality of the data
        Each element is an integer, 'x', or 'y'
        'x' and 'y' indicate the horizontal and vertical orientation
        of the slice
        """
        if self.ndim < 3:
            return {0: tuple(), 1: ('x',), 2: ('y', 'x')}[self.ndim]
        return tuple(s.mode if s.mode != 'slice' else s.slice_center
                     for s in self._slices)
    @slice.setter
    def slice(self, value):
        for v, s in zip(value, self._slices):
            if v in ['x', 'y']:
                s.mode = v
            else:
                s.mode = 'slice'
                s.slice_center = v
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/widgets/data_slice_widget.py",
"copies": "1",
"size": "6642",
"license": "bsd-3-clause",
"hash": -7086894498370508000,
"line_mean": 27.5064377682,
"line_max": 75,
"alpha_frac": 0.5264980428,
"autogenerated": false,
"ratio": 4.128029832193909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5154527874993909,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from .compatibility import wraps
__all__ = ('unwrap', 'kwargs_decorator')
def unwrap(func, stop=None):
    """Backport of Python 3.4's ``inspect.unwrap``.

    Follow the chain of ``__wrapped__`` attributes starting at *func* and
    return the innermost (originally wrapped) callable.

    :param func: the (possibly wrapped) callable to unwrap
    :param stop: optional predicate receiving the *current* function; when it
        returns True, unwrapping stops early at that function
    :raises ValueError: if the ``__wrapped__`` chain contains a cycle
    """
    if stop is None:
        def keep_unwrapping(f):
            return hasattr(f, '__wrapped__')
    else:
        def keep_unwrapping(f):
            return hasattr(f, '__wrapped__') and not stop(f)
    # remember the starting function for the error message
    original = func
    # track visited objects by id() so non-hashable callables work too
    seen = {id(func)}
    while keep_unwrapping(func):
        func = func.__wrapped__
        if id(func) in seen:
            raise ValueError("wrapper loop when unwrapping {!r}".format(original))
        seen.add(id(func))
    return func
def kwargs_decorator(deco):
    """Turn *deco* into a decorator that accepts optional keyword arguments.

    The returned callable checks whether it has been handed the function to
    decorate yet.  If not, it returns a :func:`functools.partial` of itself
    carrying the collected keywords; once the function arrives, *deco* is
    invoked with the function and all keywords.

    Usage, with explicit keywords::

        @kwargs_decorator
        def my_deco(f, pre, post):
            @wraps(f)
            def wrapper(*a, **k):
                print(pre)
                res = f(*a, **k)
                print(post)
                return res
            return wrapper

        @my_deco(pre='Hello!', post='Goodbye!')
        def say_name(name):
            print(name)

    If *deco* supplies defaults for all keywords, the result can also be
    applied bare (``@my_deco``) like a plain decorator.  The created
    decorator is not reentrant, so two applications of the same decorator
    cannot be stacked to merge their keyword sets.  Class decorators are
    supported as well (Python 2.6+).
    """
    @wraps(deco)
    def dispatcher(func=None, **kwargs):
        if func is not None:
            return deco(func, **kwargs)
        # keywords only so far: wait for the function
        return partial(dispatcher, **kwargs)
    return dispatcher
| {
"repo_name": "justanr/toolshed",
"path": "toolshed/funcs.py",
"copies": "1",
"size": "3718",
"license": "bsd-3-clause",
"hash": 3194004190168260600,
"line_mean": 30.5084745763,
"line_max": 77,
"alpha_frac": 0.5790747714,
"autogenerated": false,
"ratio": 3.840909090909091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9917219555044492,
"avg_score": 0.0005528614529198982,
"num_lines": 118
} |
from functools import partial
from compat import izip
from generics import GenericType, is_generic, ImpT
def split_implicits(types):
    """Partition *types* into concrete types and implicit markers.

    :param types: iterable of entries; instances of ``type`` are concrete,
        everything else is treated as implicit
    :returns: ``(concrete_types, implicits)`` as two lists
    """
    concrete = []
    implicit = []
    for entry in types:
        if isinstance(entry, type):
            concrete.append(entry)
        else:
            implicit.append(entry)
    return concrete, implicit
def resolve_generics(types, ret):
    """Collect the distinct generic type variables used in *types* and *ret*.

    ImpT entries contribute the generics they wrap; plain generic entries are
    collected directly.  Returns them as a list (order unspecified).
    """
    found = set()
    for t in tuple(types) + (ret,):
        if not is_generic(t):
            continue
        if isinstance(t, ImpT):
            found.update(t.get_generics())
        else:
            found.add(t)
    return list(found)
class Function(object):
    """A runtime type-checked function wrapper supporting implicits,
    generics, currying and composition (Python 2 codebase: uses
    ``func_code`` and ``izip``).
    """
    def __init__(self, types, ret, f, allow_splats=False):
        # types: tuple of concrete ``type``s and ImpT implicit markers
        # ret: the declared return type
        # f: the wrapped callable
        # allow_splats: skip the arity check (for *args wrappers)
        assert isinstance(types, tuple)
        assert all(isinstance(t, (ImpT, type)) for t in types)
        assert isinstance(ret, type)
        if not allow_splats:
            assert f.func_code.co_argcount == len(types)
        # Total types
        self.types = types
        # Concrete, passed in types
        self.cTypes, self.implicits = split_implicits(types)
        self.generics = resolve_generics(types, ret)
        self.polymorphic = len(self.generics) > 0
        self.cardinality = len(types)
        self.ret = ret
        self.f = f
    def _resolveArgs(self, args):
        # Interleave explicitly passed args with values pulled from the
        # implicit markers, in declaration order.
        new_args = []
        it = iter(args)
        for t in self.types:
            new_args.append(t.get() if isinstance(t, ImpT) else next(it))
        return tuple(new_args)
    def __call__(self, *args):
        # Callers supply only the concrete (non-implicit) arguments.
        if __debug__:
            assert len(args) == self.cardinality - len(self.implicits)
        # Generic? Gotta build a fully formed function
        if self.polymorphic:
            return self._callGeneric(args)
        if __debug__:
            # NOTE(review): this pairs the concrete args against self.types,
            # which may still contain ImpT entries when implicits are present;
            # looks like it should use self.cTypes — confirm.
            for p, t in izip(args, self.types):
                assert isinstance(p, t), \
                    "%s: Expected %s, got %s" % (
                        self.f.__name__, t.__name__, type(p).__name__
                    )
        r_args = self._resolveArgs(args)
        ret = self.f(*r_args)
        if __debug__:
            # check the declared return type as well
            assert isinstance(ret, self.ret), \
                "%s: Expected %s, got %s" % (
                    self.f.__name__, self.ret.__name__, type(ret).__name__
                )
        return ret
    def __get__(self, inst, cls):
        # Descriptor protocol: binding to an instance yields a new Function
        # with the first parameter pre-applied (like a bound method).
        if inst is None: return self
        _self = self
        f = partial(self, inst)
        f.__name__ = self.f.__name__
        return Function(tuple(self.types[1:]), self.ret, f, allow_splats=True)
    def _callGeneric(self, args):
        # Infer concrete types for the generic parameters from the actual
        # argument values, then call the specialized function.
        gs = [type(t) for T, t in izip(self.cTypes, args)
              if is_generic(T)]
        new_f = self[tuple(gs)]
        # Return type _must_ be known at call time
        assert not is_generic(new_f.ret)
        return new_f(*args)
    def __getitem__(self, ts):
        """
        Take generics, make them less generic!
        """
        ts = ts if isinstance(ts, tuple) else (ts,)
        assert len(self.generics) == len(ts)
        # map each generic variable to its supplied concrete type
        gen_map = {g: t for g, t in izip(self.generics, ts)}
        new_types = []
        for t in self.types:
            new_t = t.resolve(gen_map) \
                if isinstance(t, ImpT) else gen_map.get(t, t)
            new_types.append(new_t)
        # Resolve return type, if we can
        ret = gen_map[self.ret] if is_generic(self.ret) else self.ret
        return Function(tuple(new_types), ret, self.f)
    def andThen(self, func):
        # Composition: self first, then func (func must be unary and accept
        # self's return type).
        assert isinstance(func, Function)
        assert func.cardinality == 1
        assert issubclass(self.ret, func.types[0])
        def _f(*args, **kwargs):
            return func(*self(*args, **kwargs))
        return Function(self.types, func.ret, _f)
    def compose(self, func):
        # func first, then self (reverse of andThen)
        return func.andThen(self)
    def _curry(self, leftVs, rightTs, ret, f):
        # Recursively peel one parameter at a time; each step returns a new
        # unary Function until all arguments are collected.
        if len(rightTs) == 0:
            return f(*leftVs)
        newRet = ret if len(rightTs) == 1 else Function
        @Func(rightTs[0], newRet)
        def _f(rightT):
            return self._curry(leftVs + (rightT,), rightTs[1:], ret, f)
        return _f
    @property
    def curry(self):
        # Curried view of this function; unary functions curry to themselves.
        if self.cardinality > 1:
            return self._curry((), self.types, self.ret, self.f)
        return self
    def __str__(self):
        # e.g. "name[A, B](int, str): bool" for a polymorphic function
        if self.polymorphic:
            gtypes = '[%s]' % ", ".join(t.__name__ for t in self.generics)
        else:
            gtypes = ''
        return "%s%s(%s): %s" % (self.f.__name__,
                                 gtypes,
                                 ", ".join(t.__name__ for t in self.types),
                                 self.ret.__name__)
    __repr__ = __str__
    def tEquals(self, f):
        # Structural type equality: same arity, same parameter types, same
        # generic set and same return type.
        if not isinstance(f, Function): return False
        if self.cardinality != f.cardinality: return False
        if not all(t1==t2 for t1, t2 in izip(self.types, f.types)): return False
        if len(self.generics) != len(f.generics): return False
        if len(set(self.generics) ^ set(f.generics)) > 0: return False
        if self.ret != f.ret: return False
        return True
def F0(ret):
    """Factory for a zero-argument :class:`Function` returning *ret*."""
    return partial(Function, tuple(), ret)
def FUnit(*types):
    """Factory for a side-effecting :class:`Function`: takes *types*,
    returns ``None``.

    Bug fix: the return type was spelled ``NoneType``, a name that is never
    imported in this module (``types.NoneType`` in Python 2), so calling
    FUnit raised ``NameError``.  ``type(None)`` is the portable spelling.
    """
    return partial(Function, types, type(None))
def Func(*types):
    """Factory for a :class:`Function`; the last entry of *types* is the
    return type, the preceding entries are the parameter types."""
    assert len(types) > 0
    arg_types, ret_type = types[:-1], types[-1]
    return partial(Function, tuple(arg_types), ret_type)
def is_polymorphic(v, on_types=None):
    """
    Tests if a function is polymorphic. If `on_types` is provided, checks
    to see if a function is generic on the set of provided types.
    """
    if not (isinstance(v, Function) and v.polymorphic):
        return False
    if on_types is None:
        return True
    # generic on at least one of the requested type variables?
    return bool(set(on_types) & set(v.generics))
| {
"repo_name": "Refefer/ptype",
"path": "ptype/function.py",
"copies": "1",
"size": "5727",
"license": "apache-2.0",
"hash": -8863907628352269000,
"line_mean": 28.5206185567,
"line_max": 80,
"alpha_frac": 0.5351842151,
"autogenerated": false,
"ratio": 3.6664532650448143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9673678545887575,
"avg_score": 0.005591786851447856,
"num_lines": 194
} |
from functools import partial
from .compat import wraps
__all__ = ('optional_kwargs', 'annotate', 'method_optional_kwargs')
def optional_kwargs(deco):
    """A decorator-decorator (a metadecorator, if you will) that allows
    creating decorators that accept keyword arguments.

    The trick: the returned ``wrapper`` stands in for the actual decorator
    and checks whether it has received the ``func`` parameter. If not, a
    partial of the wrapper carrying the collected keywords is returned
    instead; this continues until ``func`` is filled in.

    .. code-block:: python

        @optional_kwargs
        def kwarg_deco(f, pre, post):
            def wrapper(*a, **k):
                print(pre)
                r = f(*a, **k)
                print(post)
                return r
            return wrapper

    Using the created decorator is the same as any other decorator::

        @kwarg_deco(pre="Hello!", post="Goodbye!")
        def greet(name):
            print(name)

        >>> greet('Alec')
        Hello!
        Alec
        Goodbye!

    The created decorator is not reentrant, meaning it cannot be stacked::

        @kwarg_deco(pre="Hello!")
        @kwarg_deco(post='Goodbye!')
        def greet(name):
            print(name)

        TypeError: kwarg_deco() missing 1 required positional argument: 'pre'

    By providing default arguments it can be made to *appear* reentrant::

        @optional_kwargs
        def kwarg_deco(f, pre='', post=''):
            # as before

        @kwarg_deco(pre='Hello!')
        @kwarg_deco(post='Goodbye!')
        def greet(name):
            print(name)

    Note the extra white space that results from calling this version::

        Hello!
        Alec
        Goodbye!

    Because the created decorator is not truly reentrant, it actually runs
    twice here.

    This decorator can be used on classes as well (requires Python 2.6+)::

        @optional_kwargs
        class pre_post_print(object):
            def __init__(self, f, pre='', post=''):
                self.f = f
                self.pre = pre
                self.post = post

            def __call__(self, *a, **k):
                print(self.pre)
                r = self.f(*a, **k)
                print(self.post)
                return r
    """
    @wraps(deco)
    def wrapper(func=None, **kwargs):
        if func is None:
            # only keywords so far: wait for the function
            return partial(wrapper, **kwargs)
        return deco(func, **kwargs)
    return wrapper
@optional_kwargs
def annotate(func, type):
    """Decorator for adding Haskell style type annotations to a function's
    docstring on the fly.

    The annotation is prepended as ``name :: type``; any existing docstring
    follows after a blank line.  ``type`` may be any object with a useful
    str() form.  Note: the parameter name shadows the ``type`` builtin, but
    it is part of the public keyword interface and must stay.
    """
    # Normalize a missing docstring to the empty string.
    old_doc = getattr(func, '__doc__') or ''
    if old_doc:
        # Separate the annotation line from the original docstring.
        old_doc = '\n\n{}'.format(old_doc)
    func.__doc__ = "{!s} :: {!s}{!s}".format(func.__name__,
                                             type,
                                             old_doc)
    return func
class method_optional_kwargs(object):
    """Descriptor based take on optional_kwargs used for method decorators.

    Can be used to wrap regular instance methods, classmethods and
    staticmethods. Currently does not function on other sorts of
    descriptors.
    """
    def __init__(self, method):
        self.method = method
        # Lazily-built decorator returned by __get__.
        # NOTE(review): this cache lives on the descriptor, which is shared
        # by every instance of the owning class -- the decorator built for
        # the first accessing instance is returned for all later ones.
        # Left as-is to preserve behavior; confirm whether per-instance
        # caching was intended.
        self._deco = None

    def __get__(self, instance, cls):
        if self._deco:
            return self._deco
        if hasattr(self.method, '__func__'):  # classmethod or staticmethod
            method = self.method.__func__
            if isinstance(self.method, classmethod):
                inst_or_cls = cls
            else:
                inst_or_cls = None
        else:  # regular method TODO: what about other descriptor types?
            inst_or_cls = instance
            method = self.method

        @wraps(method)
        @optional_kwargs
        def deco(func, **kwargs):
            # BUG FIX: was ``if inst_or_cls:`` -- an instance that evaluates
            # falsy (custom __bool__/__len__) would wrongly be dropped and
            # the method called unbound.  Test identity against None instead.
            if inst_or_cls is not None:
                return method(inst_or_cls, func, **kwargs)
            else:
                return method(func, **kwargs)
        self._deco = deco
        return deco
| {
"repo_name": "justanr/pynads",
"path": "pynads/utils/decorators.py",
"copies": "1",
"size": "4578",
"license": "mit",
"hash": 3109199107360857000,
"line_mean": 27.6125,
"line_max": 78,
"alpha_frac": 0.5548274356,
"autogenerated": false,
"ratio": 4.286516853932584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5341344289532584,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from compony.core import create_element
def generate_element_function(tag_name, self_closing=False, multiline=True):
    """
    (tag_name: str, self_closing: bool, multiline: bool) -> Callable

    Return an element factory: a partial of ``create_element`` with the tag
    name and rendering flags baked in; callers supply children/attributes.
    """
    return partial(create_element, tag_name, self_closing, multiline)
# Short alias: every binding below is a partially-applied create_element.
# NOTE(review): several names intentionally shadow Python builtins
# (map, object, input, i, s, a, sub, ...) because they mirror HTML tag
# names; import this module under a namespace rather than via ``*``.
e = generate_element_function
# basic elements
html = e('html')
# document metadata
base = e('base', True)
head = e('head')
link = e('link', True)
meta = e('meta', True)
style = e('style')
title = e('title', False, False)
# content sectioning
address = e('address')
article = e('article')
body = e('body')
footer = e('footer')
header = e('header')
h1 = e('h1')
h2 = e('h2')
h3 = e('h3')
h4 = e('h4')
h5 = e('h5')
h6 = e('h6')
hgroup = e('hgroup')
nav = e('nav')
section = e('section')
# text content
dd = e('dd')
div = e('div')
dl = e('dl')
dt = e('dt')
figcaption = e('figcaption')
figure = e('figure')
hr = e('hr', True)
li = e('li')
main = e('main')
ol = e('ol')
p = e('p')
pre = e('pre')
ul = e('ul')
# inline text semantics (rendered on a single line: multiline=False)
a = e('a', False, False)
abbr = e('abbr', False, False)
b = e('b', False, False)
bdi = e('bdi', False, False)
bdo = e('bdo', False, False)
br = e('br', False, False)
cite = e('cite', False, False)
code = e('code', False, False)
data = e('data', False, False)
dfn = e('dfn', False, False)
em = e('em', False, False)
i = e('i', False, False)
kbd = e('kbd', False, False)
mark = e('mark', False, False)
q = e('q', False, False)
rp = e('rp', False, False)
rt = e('rt', False, False)
rtc = e('rtc', False, False)
ruby = e('ruby', False, False)
s = e('s', False, False)
samp = e('samp', False, False)
small = e('small', False, False)
span = e('span', False, False)
strong = e('strong', False, False)
sub = e('sub', False, False)
sup = e('sup', False, False)
time = e('time', False, False)
u = e('u', False, False)
var = e('var', False, False)
wbr = e('wbr', False, False)
# image & multimedia
# NOTE(review): per the HTML spec 'area' is a void element while 'audio'
# and 'video' are not; the flags below look swapped relative to the spec,
# but the effect depends on create_element's semantics -- confirm before
# changing.
area = e('area')
audio = e('audio', True)
img = e('img', True)
map = e('map')
track = e('track', True)
video = e('video', True)
# embedded content
embed = e('embed', True)
iframe = e('iframe')
object = e('object')
param = e('param', True)
source = e('source', True)
# scripting
canvas = e('canvas')
noscript = e('noscript')
script = e('script')
# edits
e_del = e('del')  # `del` is a Python reserved word, hence the e_ prefix :)
ins = e('ins')
# table content
caption = e('caption')
col = e('col')
colgroup = e('colgroup')
table = e('table')
tbody = e('tbody')
td = e('td')
tfoot = e('tfoot')
th = e('th')
thead = e('thead')
tr = e('tr')
# forms
button = e('button', False, False)
datalist = e('datalist')
fieldset = e('fieldset')
form = e('form')
input = e('input', False, False)
keygen = e('keygen', True)
label = e('label')
legend = e('legend')
meter = e('meter')
optgroup = e('optgroup')
option = e('option', False, False)
output = e('output')
progress = e('progress')
select = e('select')
textarea = e('textarea')
# interactive elements
details = e('details')
dialog = e('dialog')
menu = e('menu')
menuitem = e('menuitem')
summary = e('summary')
# web components
content = e('content')
element = e('element')
shadow = e('shadow')
template = e('template')
| {
"repo_name": "mbylstra/compony",
"path": "compony/elements.py",
"copies": "1",
"size": "3162",
"license": "mit",
"hash": 8886071996029545000,
"line_mean": 19.6666666667,
"line_max": 76,
"alpha_frac": 0.6084756483,
"autogenerated": false,
"ratio": 2.7118353344768438,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38203109827768433,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from context import Scope, Symbol
from type_objects import Bool, Num, Str, List, Tuple, Set, BaseTuple, \
Dict, Function, Instance, Unknown, NoneType, Class, Union, Maybe
from evaluate import static_evaluate, UnknownValue
from util import unify_types, type_intersection, type_subset
from assign import assign
from function import construct_function_type
from inference import maybe_inferences
def get_token(node):
    """Name of the AST node's class (e.g. 'BinOp'), used as a dispatch key."""
    node_class = node.__class__
    return node_class.__name__
def assign_generators(generators, context, warnings):
    """Bind each comprehension generator's target to its iterable in context."""
    for gen in generators:
        assign(gen.target, gen.iter, context, warnings, generator=True)
def comprehension_type(element, generators, expected_element_type,
                       context, warnings):
    """Type a comprehension's element expression inside a temporary scope.

    Opens a fresh scope, binds the generator targets, types ``element``
    against ``expected_element_type``, and returns the element's type.
    """
    context.begin_scope()
    try:
        assign_generators(generators, context, warnings)
        element_type = visit_expression(element, expected_element_type,
                                        context, warnings)
    finally:
        # ROBUSTNESS FIX: always pop the scope, even if typing raises,
        # so a failure inside one comprehension cannot corrupt the
        # context's scope stack for everything typed afterwards.
        context.end_scope()
    return element_type
class NullWarnings(object):
    """Warning sink that silently discards every report (null object)."""

    def warn(self, node, category, details=None):
        """Accept and ignore a warning; always returns None."""
        return None
# Note: "True" and "False" evaluate to Bool because they are symbol
# names that have their types builtin to the default context. Similarly,
# "None" has type NoneType.
# If type cannot be positively determined, then this will return Unknown.
# Note that this does not mean that errors will always return Unknown, for
# example, 2 / 'a' will still return Num because the division operator
# must always return Num. Similarly, "[1,2,3] + Unknown" will return List(Num)
# NOTE: the default ``warnings=NullWarnings()`` instance is created once at
# import time and shared across calls; NullWarnings is stateless, so the
# shared mutable-default is harmless here.
def visit_expression(node, expected_type, context, warnings=NullWarnings()):
    """Compute the type of expression ``node`` and check it against
    ``expected_type``, emitting a 'type-error' warning on mismatch.

    Returns the computed type object (possibly Unknown()).
    """
    result_type = _visit_expression(node, expected_type, context, warnings)
    # Unknown results are never reported -- we cannot prove a mismatch.
    if (not type_subset(result_type, expected_type)
            and not isinstance(result_type, Unknown)):
        details = '{0} vs {1}'.format(result_type, expected_type)
        warnings.warn(node, 'type-error', details)
    return result_type
# Example: len(2*2) we can either have an error that len does not accept
# a numeric argument, or that the first parameter of the asterisk should
# have been a string. The former seems more intuitive, so we should check
# for the expected type implications only after doing constructive checks.
def _visit_expression(node, expected_type, context, warnings):
    """Dispatch on the AST node's class name and compute its type.

    ``recur`` types a subexpression (and records constraints/warnings),
    ``probe`` types one without warnings, ``comp`` types a comprehension
    element in its own scope.  Raises if the node kind is unrecognized.
    """
    recur = partial(visit_expression, context=context, warnings=warnings)
    probe = partial(expression_type, context=context)
    comp = partial(comprehension_type, context=context, warnings=warnings)
    token = get_token(node)
    if token == 'BoolOp':
        for expr in node.values:
            recur(expr, Bool())
        return Bool()   # more restrictive than Python
    if token == 'BinOp':
        operator = get_token(node.op)
        if operator == 'Add':
            left_probe = probe(node.left)
            right_probe = probe(node.right)
            # Tuple + Tuple concatenates item types element-wise.
            if isinstance(left_probe, Tuple) or isinstance(right_probe, Tuple):
                left = recur(node.left, BaseTuple())
                right = recur(node.right, BaseTuple())
                if isinstance(left, Tuple) and isinstance(right, Tuple):
                    return Tuple(left.item_types + right.item_types)
                else:
                    return Unknown()
            # Otherwise '+' means numeric add, string concat or list concat;
            # narrow both sides by the best available intersection.
            union_type = Union(Num(), Str(), List(Unknown()))
            left_intersect = type_intersection(left_probe, union_type)
            right_intersect = type_intersection(right_probe, union_type)
            sub_intersect = type_intersection(left_intersect, right_intersect)
            full_intersect = type_intersection(expected_type, sub_intersect)
            intersect = (full_intersect or sub_intersect or left_intersect
                         or right_intersect or union_type)
            recur(node.left, intersect)
            recur(node.right, intersect)
            return intersect
        elif operator == 'Mult':
            # num * num -> num; str * num -> str.
            union_type = Union(Num(), Str())
            expected_intersect = type_intersection(expected_type, union_type)
            left_intersect = type_intersection(probe(node.left), union_type)
            recur(node.right, Num())   # right operand is always numeric
            if isinstance(left_intersect, Num):
                recur(node.left, Num())
                return Num()
            elif isinstance(left_intersect, Str):
                recur(node.left, Str())
                return Str()
            elif isinstance(expected_intersect, Num):
                recur(node.left, Num())
                return Num()
            elif isinstance(expected_intersect, Str):
                recur(node.left, Str())
                return Str()
            else:
                recur(node.left, union_type)
                return union_type
        elif operator == 'Mod':
            # num % num OR str % unknown
            union_type = Union(Num(), Str())
            expected_intersect = type_intersection(expected_type, union_type)
            left_intersect = type_intersection(probe(node.left), union_type)
            if isinstance(left_intersect, Num):
                recur(node.left, Num())
                recur(node.right, Num())
                return Num()
            elif isinstance(left_intersect, Str):
                recur(node.left, Str())
                recur(node.right, Unknown())
                return Str()
            elif isinstance(expected_intersect, Num):
                recur(node.left, Num())
                recur(node.right, Num())
                return Num()
            elif isinstance(expected_intersect, Str):
                recur(node.left, Str())
                recur(node.right, Unknown())
                return Str()
            else:
                recur(node.left, union_type)
                recur(node.right, Unknown())
                return union_type
        else:
            # All remaining binary operators are numeric-only.
            recur(node.left, Num())
            recur(node.right, Num())
            return Num()
    if token == 'UnaryOp':
        if get_token(node.op) == 'Not':
            recur(node.operand, Bool())
            return Bool()
        else:
            recur(node.operand, Num())
            return Num()
    if token == 'Lambda':
        return construct_function_type(node, LambdaVisitor(context))
    if token == 'IfExp':
        recur(node.test, Bool())
        # Type each arm under the inferences implied by the test.
        if_inferences, else_inferences = maybe_inferences(node.test, context)
        context.begin_scope(Scope(if_inferences))
        body_type = recur(node.body, expected_type)
        context.end_scope()
        context.begin_scope(Scope(else_inferences))
        else_type = recur(node.orelse, expected_type)
        context.end_scope()
        return unify_types([body_type, else_type])
    if token == 'Dict':
        key_type = unify_types([recur(key, Unknown()) for key in node.keys])
        value_type = unify_types([recur(value, Unknown())
                                  for value in node.values])
        return Dict(key_type, value_type)
    if token == 'Set':
        subtype = (expected_type.item_type if isinstance(expected_type, Set)
                   else Unknown())
        # BUG FIX: ``subtype`` was computed but Unknown() was passed instead,
        # unlike the 'List' branch below; propagate the expected element type.
        return Set(unify_types([recur(elt, subtype) for elt in node.elts]))
    if token == 'ListComp':
        subtype = (expected_type.item_type if isinstance(expected_type, List)
                   else Unknown())
        return List(comp(node.elt, node.generators, subtype))
    if token == 'SetComp':
        subtype = (expected_type.item_type if isinstance(expected_type, Set)
                   else Unknown())
        return Set(comp(node.elt, node.generators, subtype))
    if token == 'DictComp':
        expected_key_type = (expected_type.key_type
                             if isinstance(expected_type, Dict)
                             else Unknown())
        expected_value_type = (expected_type.value_type
                               if isinstance(expected_type, Dict)
                               else Unknown())
        key_type = comp(node.key, node.generators, expected_key_type)
        value_type = comp(node.value, node.generators, expected_value_type)
        return Dict(key_type, value_type)
    if token == 'GeneratorExp':
        # Generators are modeled as lists in this type system.
        subtype = (expected_type.item_type if isinstance(expected_type, List)
                   else Unknown())
        return List(comp(node.elt, node.generators, subtype))
    if token == 'Yield':
        return List(recur(node.value, Unknown()))
    if token == 'Compare':
        operator = get_token(node.ops[0])
        if len(node.ops) > 1 or len(node.comparators) > 1:
            warnings.warn(node, 'comparison-operator-chaining')
        if operator in ['Eq', 'NotEq', 'Lt', 'LtE', 'Gt', 'GtE']:
            # all operands are constrained to have the same type
            # as their intersection
            left_probe = probe(node.left)
            right_probe = probe(node.comparators[0])
            intersection = type_intersection(left_probe, right_probe)
            if intersection is None:
                recur(node.left, right_probe)
                recur(node.comparators[0], left_probe)
            else:
                recur(node.left, intersection)
                recur(node.comparators[0], intersection)
        if operator in ['Is', 'IsNot']:
            recur(node.left, Maybe(Unknown()))
            recur(node.comparators[0], NoneType())
        if operator in ['In', 'NotIn']:
            # constrain right to list/set of left, and left to inst. of right
            left_probe = probe(node.left)
            right_probe = probe(node.comparators[0])
            union_type = Union(List(left_probe), Set(left_probe),
                               Dict(left_probe, Unknown()), Str())
            recur(node.comparators[0], union_type)
            if isinstance(right_probe, (List, Set)):
                recur(node.left, right_probe.item_type)
            elif isinstance(right_probe, Dict):
                recur(node.left, right_probe.key_type)
            else:
                recur(node.left, Unknown())
        return Bool()
    if token == 'Call':
        function_type = recur(node.func, Unknown())
        if not isinstance(function_type, (Class, Function)):
            if not isinstance(function_type, Unknown):
                warnings.warn(node, 'not-a-function')
            return Unknown()
        signature = function_type.signature
        instance = (function_type.instance
                    if isinstance(function_type, Function) else None)
        # Bound methods and constructors consume the first signature slot.
        offset = 1 if (instance is not None
                       or isinstance(function_type, Class)) else 0
        argument_scope = Scope()
        if instance is not None:
            self_symbol = Symbol(signature.names[0], instance)
            argument_scope.add(self_symbol)
        # make sure all required arguments are specified
        if node.starargs is None and node.kwargs is None:
            start = offset + len(node.args)
            required = signature.names[start:signature.min_count]
            kwarg_names = [keyword.arg for keyword in node.keywords]
            missing = [name for name in required if name not in kwarg_names]
            for missing_argument in missing:
                warnings.warn(node, 'missing-argument', missing_argument)
        # check for too many arguments
        if signature.vararg_name is None:
            if len(node.args) + len(node.keywords) > len(signature.types):
                warnings.warn(node, 'too-many-arguments')
        # load positional arguments
        for i, arg in enumerate(node.args):
            if i + offset >= len(signature):
                break
            arg_type = recur(arg, signature.types[i + offset])
            value = static_evaluate(arg, context)
            argument_scope.add(Symbol(signature.names[i + offset],
                                      arg_type, value))
        # load keyword arguments
        for kwarg in node.keywords:
            # TODO: make sure there is no overlap with positional args
            # FIX: local renamed from ``expected_type`` -- it shadowed this
            # function's parameter of the same name.
            declared_type = signature.get_dict().get(kwarg.arg)
            if declared_type is None:
                warnings.warn(node, 'extra-keyword', kwarg.arg)
            else:
                arg_type = recur(kwarg.value, declared_type)
                value = static_evaluate(kwarg.value, context)
                argument_scope.add(Symbol(kwarg.arg, arg_type, value))
        if node.starargs is not None:
            recur(node.starargs, List(Unknown()))
        if node.kwargs is not None:
            recur(node.kwargs, Dict(Unknown(), Unknown()))
        return_type, _ = function_type.evaluator.evaluate(argument_scope)
        return return_type
    if token == 'Repr':
        return Str()
    if token == 'Num':
        return Num()
    if token == 'Str':
        return Str()
    if token == 'Attribute':
        value_type = recur(node.value, Unknown())
        if isinstance(value_type, Unknown):
            return Unknown()
        if not isinstance(value_type, Instance):
            warnings.warn(node, 'not-an-instance')
            return Unknown()
        attr_type = value_type.attributes.get_type(node.attr)
        if attr_type is None:
            warnings.warn(node, 'not-a-member')
            return Unknown()
        return attr_type
    if token == 'Subscript':
        union_type = Union(List(Unknown()), Dict(Unknown(), Unknown()),
                           BaseTuple())
        value_type = recur(node.value, union_type)
        if get_token(node.slice) == 'Index':
            if isinstance(value_type, Tuple):
                # Tuple indexing needs a statically-known in-range int index.
                index = static_evaluate(node.slice.value, context)
                if isinstance(index, UnknownValue):
                    return Unknown()
                if not isinstance(index, int):
                    return Unknown()
                if not 0 <= index < len(value_type.item_types):
                    return Unknown()
                return value_type.item_types[index]
            elif isinstance(value_type, List):
                return value_type.item_type
            elif isinstance(value_type, Dict):
                return value_type.value_type
            else:
                return Unknown()
        elif get_token(node.slice) == 'Slice':
            if node.slice.lower is not None:
                recur(node.slice.lower, Num())
            if node.slice.upper is not None:
                recur(node.slice.upper, Num())
            if node.slice.step is not None:
                recur(node.slice.step, Num())
            return value_type
        else:
            return value_type
    if token == 'Name':
        defined_type = context.get_type(node.id)
        if defined_type is None:
            warnings.warn(node, 'undefined', node.id)
        context.add_constraint(node.id, expected_type)
        return defined_type or Unknown()
    if token == 'List':
        subtype = (expected_type.item_type if isinstance(expected_type, List)
                   else Unknown())
        return List(unify_types([recur(elt, subtype) for elt in node.elts]))
    if token == 'Tuple':
        if (isinstance(expected_type, Tuple)
                and len(node.elts) == len(expected_type.item_types)):
            return Tuple([recur(element, type_) for element, type_ in
                          zip(node.elts, expected_type.item_types)])
        return Tuple([recur(element, Unknown()) for element in node.elts])
    raise Exception('visit_expression does not recognize ' + token)
class LambdaVisitor(object):
    """Minimal visitor handed to construct_function_type for Lambda nodes:
    types the lambda's body expression and records its type as the return
    symbol of the current context."""

    def __init__(self, context):
        self._context = context

    def clone(self):
        # Lambdas contain no nested definitions to isolate; reuse self.
        return self

    def context(self):
        return self._context

    def visit(self, expression):
        # Type the lambda body and register it as the (anonymous) return.
        result_type = expression_type(expression, self._context)
        symbol = Symbol('', result_type)
        self._context.set_return(symbol)

    def begin_scope(self):
        self._context.begin_scope()

    def end_scope(self):
        return self._context.end_scope()

    def merge_scope(self, scope):
        self._context.merge_scope(scope)
def expression_type(expr, context):
    """Type an expression with no expectation and all warnings suppressed."""
    return visit_expression(expr, Unknown(), context, NullWarnings())
| {
"repo_name": "clark800/pystarch",
"path": "backend/expr.py",
"copies": "1",
"size": "16129",
"license": "mit",
"hash": 6330930879317508000,
"line_mean": 42.1256684492,
"line_max": 79,
"alpha_frac": 0.5828631657,
"autogenerated": false,
"ratio": 4.30221392371299,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.538507708941299,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from Cookie import SimpleCookie
from intranet3.asyncfetchers.trac import TracFetcher
from intranet3.asyncfetchers.base import FetchException, cached_bug_fetcher
from intranet3.log import INFO_LOG, DEBUG_LOG
# Module-level loggers produced by intranet3's logger factories.
LOG = INFO_LOG(__name__)
DEBUG = DEBUG_LOG(__name__)
class CookieTracFetcher(TracFetcher):
    """TracFetcher variant that authenticates via a ``trac_auth`` cookie.

    Each public fetch_* entry point first requests an auth token from the
    tracker's ``/login`` endpoint and, once obtained, delegates to the
    corresponding parent TracFetcher method with the cookie attached to
    every subsequent request's headers.
    """

    def __init__(self, *args, **kwargs):
        super(CookieTracFetcher, self).__init__(*args, **kwargs)
        # Value of the trac_auth cookie; set by on_auth_token_responded.
        self.auth_token = None

    def get_headers(self):
        """Parent headers plus the trac_auth cookie once it is known."""
        headers = super(CookieTracFetcher, self).get_headers()
        if self.auth_token:
            headers['Cookie'] = ['trac_auth=%s' % (self.auth_token, )]
        return headers

    @cached_bug_fetcher(lambda: u'user')
    def fetch_user_tickets(self):
        """ Start fetching tickets for current user """
        self.fetch_auth_token(partial(TracFetcher.fetch_user_tickets, self))

    @cached_bug_fetcher(lambda: u'all')
    def fetch_all_tickets(self):
        """ Start fetching tickets for all users in mapping """
        self.fetch_auth_token(partial(TracFetcher.fetch_all_tickets, self))

    @cached_bug_fetcher(lambda: u'user-resolved')
    def fetch_user_resolved_tickets(self):
        self.fetch_auth_token(partial(TracFetcher.fetch_user_resolved_tickets, self))

    @cached_bug_fetcher(lambda: u'all-resolved')
    def fetch_all_resolved_tickets(self):
        self.fetch_auth_token(partial(TracFetcher.fetch_all_resolved_tickets, self))

    def fetch_bugs_for_query(self, ticket_ids, project_selector, component_selector, version):
        self.fetch_auth_token(partial(TracFetcher.fetch_bugs_for_query, self, ticket_ids, project_selector, component_selector, version))

    def fetch_resolved_bugs_for_query(self, ticket_ids, project_selector, component_selector, version):
        # BUG FIX: ``ticket_ids`` was previously omitted from the delegated
        # partial, shifting every later argument by one position (compare
        # fetch_bugs_for_query above and this method's own signature).
        self.fetch_auth_token(partial(TracFetcher.fetch_resolved_bugs_for_query, self, ticket_ids, project_selector, component_selector, version))

    def fetch_dependons_for_ticket_ids(self, ticket_ids):
        self.fetch_auth_token(partial(TracFetcher.fetch_dependons_for_ticket_ids, self, ticket_ids))

    def fetch_bug_titles_and_depends_on(self, ticket_ids):
        self.fetch_auth_token(partial(TracFetcher.fetch_bug_titles_and_depends_on, self, ticket_ids))

    def fetch_auth_token(self, callback):
        """Request /login and invoke ``callback`` once a token is cached."""
        headers = self.get_headers()
        self.request(self.tracker.url.encode('utf-8') + '/login', headers, partial(self.on_auth_token_responded, callback))

    def on_auth_token_responded(self, callback, resp):
        """Extract the trac_auth cookie from the 302 login redirect."""
        DEBUG(u'Auth token response code %s' % (resp.code, ))
        if resp.code == 302:
            headers = resp.headers
            if headers.hasHeader('Set-Cookie'):
                header = resp.headers.getRawHeaders('Set-Cookie')
                if header:
                    # Multiple Set-Cookie headers may arrive; join so
                    # SimpleCookie can parse them all at once.
                    header = '; '.join(header)
                    cookie = SimpleCookie(header)
                    token = cookie.get('trac_auth')
                    if not token:
                        self.fail(ValueError(u'Auth token not found'))
                    else:
                        self.auth_token = token.value
                        DEBUG(u'Issuing on_auth_token_responded callback')
                        callback()
                else:
                    self.fail(ValueError(u'No cookie found'))
        else:
            self.fail(FetchException(u'Received response %s' % (resp.code, )))
| {
"repo_name": "pytlakp/intranetref",
"path": "src/intranet3/asyncfetchers/cookietrac.py",
"copies": "1",
"size": "3421",
"license": "mit",
"hash": -4869953910187609000,
"line_mean": 44.0131578947,
"line_max": 137,
"alpha_frac": 0.6407483192,
"autogenerated": false,
"ratio": 3.6706008583690988,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.971745931424916,
"avg_score": 0.01877797266398765,
"num_lines": 76
} |
from functools import partial
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from dimagi.utils.dates import force_to_datetime
import fluff
from corehq.fluff.calculators.case import CasePropertyFilter
from custom.world_vision import WORLD_VISION_DOMAINS
from corehq.apps.users.models import CommCareUser
from custom.utils.utils import flat_field
from custom.world_vision import user_calcs
from casexml.apps.case.models import CommCareCase
from django.utils.dateformat import format
# Case doc_types treated as deleted and excluded from the indicators below.
WV_DELETED_TYPES = ('CommCareCase-Deleted', )
class WorldVisionMotherFluff(fluff.IndicatorDocument):
    # Indicator document over 'ttc_mother' cases in the World Vision domains.

    def case_property(property):
        # Class-body helper: a flat field reading the named case property.
        # NOTE(review): the parameter shadows the `property` builtin.
        return flat_field(lambda case: case.get_case_property(property))

    document_class = CommCareCase
    document_filter = CasePropertyFilter(type='ttc_mother')
    deleted_types = WV_DELETED_TYPES
    domains = WORLD_VISION_DOMAINS
    group_by = ('domain', 'user_id')
    name = flat_field(lambda case: case.name)
    # Location hierarchy (most to least specific).
    lvl_4 = case_property('phc')
    lvl_3 = case_property('block')
    lvl_2 = case_property('district')
    lvl_1 = case_property('state')
    # Falls back to 'unknown' when the property is absent or empty.
    reason_for_mother_closure = flat_field(lambda case: case.reason_for_mother_closure if hasattr(case, 'reason_for_mother_closure')
                                           and case.reason_for_mother_closure else 'unknown')
    mother_state = case_property('mother_state')
    fp_method = case_property('fp_method')
    # Antenatal care visits and tetanus immunizations.
    anc_1 = case_property('anc_1')
    anc_2 = case_property('anc_2')
    anc_3 = case_property('anc_3')
    anc_4 = case_property('anc_4')
    tt_1 = case_property('tt_1')
    tt_2 = case_property('tt_2')
    tt_booster = case_property('tt_booster')
    iron_folic = case_property('iron_folic')
    completed_100_ifa = case_property('completed_100_ifa')
    anemia_signs = case_property('anemia_signs')
    currently_referred = case_property('currently_referred')
    knows_closest_facility = case_property('knows_closest_facility')
    edd = case_property('edd')
    previous_tetanus = case_property('previous_tetanus')
    # Postpartum visits.
    pp_1_done = case_property('pp_1_done')
    pp_2_done = case_property('pp_2_done')
    pp_3_done = case_property('pp_3_done')
    pp_4_done = case_property('pp_4_done')
    delivery_date = case_property('delivery_date')
    cause_of_death_maternal = case_property('cause_of_death_maternal')
    place_of_birth = case_property('place_of_birth')
    birth_attendant_during_delivery = case_property('birth_attendant_during_delivery')
    type_of_delivery = case_property('type_of_delivery')
    date_of_mother_death = case_property('date_of_mother_death')
    # Calculators defined in custom.world_vision.user_calcs.
    number_of_children = user_calcs.NumberChildren()
    number_of_boys = user_calcs.NumberBoys()
    number_of_girls = user_calcs.NumberGirls()
    number_of_children_born_dead = user_calcs.StillBirth()
    opened_on = flat_field(lambda case: case.opened_on.date() if case.opened_on else None)
    closed_on = flat_field(lambda case: case.closed_on.date() if case.closed_on else None)
    women_registered = user_calcs.MotherRegistered()
def referenced_case_attribute(case, field_name):
    """Return ``field_name`` from the case's first referenced (index) case.

    Returns "" when the case has no index, the index has no referenced_id,
    or the referenced case lacks the attribute.
    """
    if not (case.indices and case.indices[0]['referenced_id']):
        return ""
    referenced_case = CaseAccessors(case.domain).get_case(case.indices[0]['referenced_id'])
    # Idiom: single getattr with a default replaces the hasattr/getattr pair.
    return getattr(referenced_case, field_name, "")
def get_datepart(case, t='n'):
    """Format part of the child's date of death with django's dateformat.

    ``t`` is a django dateformat specifier ('n' = month number without a
    leading zero, 'Y' = four-digit year).  Returns "" when the case has no
    'child_date_of_death' property.
    """
    child_date_of_death = case.get_case_property('child_date_of_death')
    if child_date_of_death:
        return format(force_to_datetime(child_date_of_death), t)
    else:
        return ""
def calculate_weight(case):
    """Birth weight in kilograms as a string; "" when unset.

    Values above 10 were presumably recorded in grams and are converted
    to kilograms.
    """
    raw = case.get_case_property('weight_birth')
    if not raw:
        return ""
    grams_threshold = 10
    weight = float(raw)
    return str(weight / 1000.0) if weight > grams_threshold else raw
# This calculator is necessary to generate the 'date' field which is
# required in the database; it emits a single null value per document.
class Numerator(fluff.Calculator):
    @fluff.null_emitter
    def numerator(self, case):
        yield None
class WorldVisionHierarchyFluff(fluff.IndicatorDocument):
    # Indicator document over CommCareUser docs: records each user's
    # location hierarchy from their user_data.

    def user_data(property):
        """
        returns a flat field with a callable looking for `property` on the user
        """
        return flat_field(lambda user: user.user_data.get(property))

    document_class = CommCareUser
    domains = WORLD_VISION_DOMAINS
    group_by = ('domain',)
    # Placeholder calculator so the row gets a 'date' column.
    numerator = Numerator()
    lvl_4 = user_data('phc')
    lvl_3 = user_data('block')
    lvl_2 = user_data('district')
    lvl_1 = user_data('state')
class WorldVisionChildFluff(fluff.IndicatorDocument):
    # Indicator document over 'ttc_child' cases; location fields are read
    # from the referenced (mother) case via referenced_case_attribute.

    def case_property(property):
        # Class-body helper: a flat field reading the named case property.
        return flat_field(lambda case: case.get_case_property(property))

    document_class = CommCareCase
    document_filter = CasePropertyFilter(type='ttc_child')
    deleted_types = WV_DELETED_TYPES
    domains = WORLD_VISION_DOMAINS
    group_by = ('domain', 'user_id')
    name = flat_field(lambda case: case.name)
    mother_id = flat_field(lambda case: case.indices[0]['referenced_id'] if case.indices else None)
    # Location hierarchy copied from the referenced mother case.
    lvl_4 = flat_field(partial(referenced_case_attribute, field_name='phc'))
    lvl_3 = flat_field(partial(referenced_case_attribute, field_name='block'))
    lvl_2 = flat_field(partial(referenced_case_attribute, field_name='district'))
    lvl_1 = flat_field(partial(referenced_case_attribute, field_name='state'))
    reason_for_child_closure = case_property('reason_for_child_closure')
    # Immunization schedule properties.
    bcg = case_property('bcg')
    opv0 = case_property('opv0')
    hepb0 = case_property('hepb0')
    opv1 = case_property('opv1')
    hepb1 = case_property('hepb1')
    dpt1 = case_property('dpt1')
    opv2 = case_property('opv2')
    hepb2 = case_property('hepb2')
    dpt2 = case_property('dpt2')
    opv3 = case_property('opv3')
    hepb3 = case_property('hepb3')
    dpt3 = case_property('dpt3')
    measles = case_property('measles')
    vita1 = case_property('vita1')
    vita2 = case_property('vita2')
    dpt_opv_booster = case_property('dpt_opv_booster')
    vita3 = case_property('vita3')
    type_of_child_death = case_property('type_of_child_death')
    cause_of_death_child = case_property('cause_of_death_child')
    pneumonia_since_last_visit = case_property('pneumonia_since_last_visit')
    has_diarrhea_since_last_visit = case_property('has_diarrhea_since_last_visit')
    # NOTE: 'dairrhea' is the actual (misspelled) case-property key in the
    # source data; do not "fix" it here.
    dairrhea_treated_with_ors = case_property('dairrhea_treated_with_ors')
    dairrhea_treated_with_zinc = case_property('dairrhea_treated_with_zinc')
    # Normalized to kilograms by calculate_weight.
    weight_birth = flat_field(calculate_weight)
    breastfeed_1_hour = case_property('breastfeed_1_hour')
    exclusive_breastfeeding = case_property('exclusive_breastfeeding')
    comp_breastfeeding = case_property('comp_breastfeeding')
    supplementary_feeding_baby = case_property('supplementary_feeding_baby')
    deworm = case_property('deworm')
    ebf_stop_age_month = case_property('ebf_stop_age_month')
    gender = case_property('gender')
    opened_on = flat_field(lambda case: case.opened_on)
    closed_on = flat_field(lambda case: case.closed_on)
    dob = flat_field(lambda case: case.dob)
    date_of_death = case_property('child_date_of_death')
    month_of_death = flat_field(get_datepart)
    year_of_death = flat_field(partial(get_datepart, t='Y'))
    women_registered = user_calcs.ChildRegistered()
# Pillow instances built from the indicator documents above.
# NOTE(review): presumably referenced by the pillowtop registry via their
# module paths -- verify against settings before renaming.
WorldVisionMotherFluffPillow = WorldVisionMotherFluff.pillow()
WorldVisionChildFluffPillow = WorldVisionChildFluff.pillow()
WorldVisionHierarchyFluffPillow = WorldVisionHierarchyFluff.pillow()
| {
"repo_name": "qedsoftware/commcare-hq",
"path": "custom/world_vision/models.py",
"copies": "1",
"size": "7686",
"license": "bsd-3-clause",
"hash": 3901417431859225000,
"line_mean": 38.0152284264,
"line_max": 140,
"alpha_frac": 0.691907364,
"autogenerated": false,
"ratio": 3.2065081351689613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9395225520124705,
"avg_score": 0.0006379958088513071,
"num_lines": 197
} |
from functools import partial
from .core import unify, reify
from .variable import var
from .dispatch import dispatch
def unifiable(cls):
    """ Register standard unify and reify operations on class
    This uses the type and __dict__ or __slots__ attributes to define the
    nature of the term
    See Also:
    >>> class A(object):
    ...     def __init__(self, a, b):
    ...         self.a = a
    ...         self.b = b
    >>> unifiable(A)
    <class 'unification.more.A'>
    >>> x = var('x')
    >>> a = A(1, 2)
    >>> b = A(1, x)
    >>> unify(a, b, {})
    {~x: 2}
    """
    # _unify/_reify are the multiple-dispatch functions created by the
    # @dispatch decorators later in this module; registration keys are the
    # argument-type tuples each handler accepts.
    _unify.add((cls, cls, dict), unify_object)
    _reify.add((cls, dict), reify_object)
    return cls
#########
# Reify #
#########
def reify_object(o, s):
    """ Reify a Python object with a substitution

    Dispatches on storage model: objects with __slots__ are walked
    slot-by-slot, everything else through __dict__.

    >>> class Foo(object):
    ...     def __init__(self, a, b):
    ...         self.a = a
    ...         self.b = b
    ...     def __str__(self):
    ...         return "Foo(%s, %s)"%(str(self.a), str(self.b))
    >>> x = var('x')
    >>> f = Foo(1, x)
    >>> print(f)
    Foo(1, ~x)
    >>> print(reify_object(f, {x: 2}))
    Foo(1, 2)
    """
    reifier = (_reify_object_slots if hasattr(o, '__slots__')
               else _reify_object_dict)
    return reifier(o, s)
def _reify_object_dict(o, s):
    """Reify through __dict__; return the original object when nothing
    in the substitution applied."""
    reified = reify(o.__dict__, s)
    if reified == o.__dict__:
        return o
    clone = object.__new__(type(o))
    clone.__dict__.update(reified)
    return clone
def _reify_object_slots(o, s):
    """Reify through __slots__; return the original object when nothing
    in the substitution applied."""
    old_values = [getattr(o, slot) for slot in o.__slots__]
    new_values = reify(old_values, s)
    if old_values == new_values:
        return o
    clone = object.__new__(type(o))
    for slot, value in zip(o.__slots__, new_values):
        setattr(clone, slot, value)
    return clone
@dispatch(slice, dict)
def _reify(o, s):
    """ Reify a Python ``slice`` object """
    # Reify the three components as a tuple, then rebuild the slice.
    return slice(*reify((o.start, o.stop, o.step), s))
#########
# Unify #
#########
def unify_object(u, v, s):
    """ Unify two Python objects
    Unifies their type and ``__dict__`` attributes
    >>> class Foo(object):
    ...     def __init__(self, a, b):
    ...         self.a = a
    ...         self.b = b
    ...     def __str__(self):
    ...         return "Foo(%s, %s)"%(str(self.a), str(self.b))
    >>> x = var('x')
    >>> f = Foo(1, x)
    >>> g = Foo(1, 2)
    >>> unify_object(f, g, {})
    {~x: 2}
    """
    # Exact type match required -- a subclass never unifies with its base.
    if type(u) != type(v):
        return False
    if hasattr(u, '__slots__'):
        # Slotted objects: unify the slot values positionally.
        return unify([getattr(u, slot) for slot in u.__slots__],
                     [getattr(v, slot) for slot in v.__slots__],
                     s)
    else:
        return unify(u.__dict__, v.__dict__, s)
@dispatch(slice, slice, dict)
def _unify(u, v, s):
    """ Unify a Python ``slice`` object """
    # Component-wise unification of (start, stop, step).
    return unify((u.start, u.stop, u.step), (v.start, v.stop, v.step), s)
| {
"repo_name": "mrocklin/unification",
"path": "unification/more.py",
"copies": "1",
"size": "2874",
"license": "bsd-3-clause",
"hash": -7403693045690377000,
"line_mean": 21.992,
"line_max": 73,
"alpha_frac": 0.4846903271,
"autogenerated": false,
"ratio": 3.0770877944325483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9060444788199215,
"avg_score": 0.0002666666666666667,
"num_lines": 125
} |
from functools import partial
from corpustools.corpus.classes import Word
from corpustools.symbolsim.khorsi import khorsi
from corpustools.symbolsim.edit_distance import edit_distance
from corpustools.symbolsim.phono_edit_distance import phono_edit_distance
from corpustools.exceptions import StringSimilarityError
def khorsi_wrapper(w1, w2, freq_base, sequence_type, max_distance):
    """Run the Khorsi relatedness measure with a lower-bound cutoff.

    Khorsi scores grow with similarity, so scores at or above
    ``max_distance`` are kept; anything lower yields ``None``.
    """
    score = khorsi(w1, w2, freq_base=freq_base, sequence_type=sequence_type)
    return score if score >= max_distance else None
def edit_distance_wrapper(w1, w2, sequence_type, max_distance):
    """Run edit distance, returning the score only when within the cutoff.

    Edit distance shrinks with similarity, so scores above ``max_distance``
    yield ``None``.
    """
    score = edit_distance(w1, w2, sequence_type)
    return score if score <= max_distance else None
def phono_edit_distance_wrapper(w1, w2, sequence_type, features, max_distance):
    """Run phonological edit distance with an upper-bound cutoff.

    Like plain edit distance, lower scores mean closer words, so scores
    above ``max_distance`` yield ``None``.
    """
    score = phono_edit_distance(
        w1, w2, sequence_type=sequence_type, features=features)
    return score if score <= max_distance else None
def string_similarity(corpus_context, query, algorithm, **kwargs):
    """
    This function computes similarity of pairs of words across a corpus.

    Parameters
    ----------
    corpus_context : CorpusContext
        Context manager for a corpus
    query: string, tuple, or list of tuples
        If this is a string, every word in the corpus will be compared to it,
        if this is a tuple with two strings, those words will be compared to
        each other,
        if this is a list of tuples, each tuple's strings will be compared to
        each other.
    algorithm: string
        The algorithm of string similarity to be used, currently supports
        'khorsi', 'edit_distance', and 'phono_edit_distance'
    max_rel: double
        Filters out all words that are higher than max_rel from a relatedness measure
    min_rel: double
        Filters out all words that are lower than min_rel from a relatedness measure
    stop_check : callable or None
        Optional function to check whether to gracefully terminate early
    call_back : callable or None
        Optional function to supply progress information during the function

    Returns
    -------
    list of tuples:
        The first two elements of the tuple are the words that were compared
        and the final element is their relatedness score

    Raises
    ------
    StringSimilarityError
        If ``algorithm`` is not one of the supported algorithm names.
    """
    stop_check = kwargs.get('stop_check', None)
    call_back = kwargs.get('call_back', None)
    min_rel = kwargs.get('min_rel', None)
    max_rel = kwargs.get('max_rel', None)

    if algorithm == 'khorsi':
        freq_base = corpus_context.get_frequency_base()
        try:
            # Exclude word-boundary symbols ('#') from the frequency base so
            # they do not skew relatedness scores.
            bound_count = freq_base['#']
            freq_base = {k: v for k, v in freq_base.items() if k != '#'}
            freq_base['total'] -= bound_count
        except KeyError:
            pass
        relate_func = partial(khorsi, freq_base=freq_base,
                              sequence_type=corpus_context.sequence_type)
    elif algorithm == 'edit_distance':
        relate_func = partial(edit_distance,
                              sequence_type=corpus_context.sequence_type)
    elif algorithm == 'phono_edit_distance':
        relate_func = partial(phono_edit_distance,
                              sequence_type=corpus_context.sequence_type,
                              features=corpus_context.specifier)
    else:
        raise StringSimilarityError(
            '{} is not a possible string similarity algorithm.'.format(algorithm))

    related_data = []
    if isinstance(query, Word):
        # Single target word: compare it to every word in the corpus.
        if call_back is not None:
            total = len(corpus_context)
            if min_rel is not None or max_rel is not None:
                total *= 2
            cur = 0
            call_back('Calculating string similarity...')
            call_back(cur, total)
        targ_word = query
        for word in corpus_context:
            if stop_check is not None and stop_check():
                return
            if call_back is not None:
                cur += 1
                if cur % 50 == 0:
                    call_back(cur)
            relatedness = relate_func(targ_word, word)
            if min_rel is not None and relatedness < min_rel:
                continue
            if max_rel is not None and relatedness > max_rel:
                continue
            related_data.append((targ_word, word, relatedness))
        # Sort the list by most morphologically related.
        related_data.sort(key=lambda t: t[-1])
        # Guard against an empty result (e.g. when min_rel/max_rel filtered
        # everything out) — the unguarded index previously raised IndexError.
        # If the best-sorted entry is not the self-comparison, the measure
        # must be a similarity (higher = closer), so flip to descending.
        if related_data and related_data[0][1] != targ_word:
            related_data.reverse()
    elif isinstance(query, tuple):
        # Single pair of words.
        w1 = query[0]
        w2 = query[1]
        relatedness = relate_func(w1, w2)
        related_data.append((w1, w2, relatedness))
    elif hasattr(query, '__iter__'):
        # Iterable of (word, word) pairs.
        if call_back is not None:
            total = len(query)
            cur = 0
            call_back('Calculating string similarity...')
            if total:
                call_back(cur, total)
        for q1, q2 in query:
            if stop_check is not None and stop_check():
                return
            if call_back is not None:
                cur += 1
                if cur % 50 == 0:
                    call_back(cur)
            relatedness = relate_func(q1, q2)
            if min_rel is not None and relatedness < min_rel:
                continue
            if max_rel is not None and relatedness > max_rel:
                continue
            related_data.append((q1, q2, relatedness))

    return related_data
| {
"repo_name": "PhonologicalCorpusTools/CorpusTools",
"path": "corpustools/symbolsim/string_similarity.py",
"copies": "1",
"size": "5556",
"license": "bsd-3-clause",
"hash": 5981188909054042000,
"line_mean": 37.3172413793,
"line_max": 107,
"alpha_frac": 0.5966522678,
"autogenerated": false,
"ratio": 4.118606375092662,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5215258642892662,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from couchdbkit import ResourceNotFound
from couchdbkit.ext.django.schema import *
import itertools
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.decorators.memoized import memoized
from datetime import datetime
from django.db import models
import json_field
from casexml.apps.case.cleanup import close_case
from corehq.apps.commtrack.const import COMMTRACK_USERNAME
from corehq.apps.domain.models import Domain
from corehq.apps.products.models import SQLProduct
from corehq.toggles import LOCATION_TYPE_STOCK_RATES
from mptt.models import MPTTModel, TreeForeignKey
# Prefixes used to build deterministic group ids for locations: one for the
# case-sharing group and one for the reporting group (see
# SQLLocation._make_group_object and Location.group_id below).
LOCATION_SHARING_PREFIX = 'locationgroup-'
LOCATION_REPORTING_PREFIX = 'locationreportinggroup-'
class LocationTypeManager(models.Manager):
    """Manager providing hierarchy-aware queries over LocationType rows."""

    def full_hierarchy(self, domain):
        """
        Returns a graph of the form
        {
           '<loc_type_id>: (
               loc_type,
               {'<child_loc_type_id>': (child_loc_type, [...])}
           )
        }
        """
        hierarchy = {}

        def insert_loc_type(loc_type):
            """
            Get parent location's hierarchy, insert loc_type into it, and return
            hierarchy below loc_type
            """
            if not loc_type.parent_type:
                # Root types hang directly off the top-level dict.
                lt_hierarchy = hierarchy
            else:
                # Recursively ensure all ancestors are inserted first.
                lt_hierarchy = insert_loc_type(loc_type.parent_type)
            if loc_type.id not in lt_hierarchy:
                lt_hierarchy[loc_type.id] = (loc_type, {})
            return lt_hierarchy[loc_type.id][1]

        for loc_type in self.filter(domain=domain).all():
            insert_loc_type(loc_type)

        return hierarchy

    def by_domain(self, domain):
        """
        Sorts location types by hierarchy
        """
        ordered_loc_types = []

        def step_through_graph(hierarchy):
            # Depth-first walk so each parent precedes its children.
            for _, (loc_type, children) in hierarchy.items():
                ordered_loc_types.append(loc_type)
                step_through_graph(children)

        step_through_graph(self.full_hierarchy(domain))
        return ordered_loc_types
# Field factory for the stock-threshold columns declared on LocationType.
StockLevelField = partial(models.DecimalField, max_digits=10, decimal_places=1)
class LocationType(models.Model):
    """A named level ("type") in a domain's location hierarchy."""

    domain = models.CharField(max_length=255, db_index=True)
    name = models.CharField(max_length=255)
    # Slug derived from ``name`` on first save when left blank (see save()).
    code = models.SlugField(db_index=False, null=True)
    parent_type = models.ForeignKey('self', null=True)
    administrative = models.BooleanField(default=False)
    shares_cases = models.BooleanField(default=False)
    view_descendants = models.BooleanField(default=False)

    # CommTrack stock thresholds; may be overwritten from the domain's
    # CommtrackConfig in _populate_stock_levels() on save.
    emergency_level = StockLevelField(default=0.5)
    understock_threshold = StockLevelField(default=1.5)
    overstock_threshold = StockLevelField(default=3.0)

    objects = LocationTypeManager()

    def _populate_stock_levels(self):
        """Copy stock thresholds from the domain's CommtrackConfig, unless
        commtrack is disabled or the domain uses per-location-type rates."""
        from corehq.apps.commtrack.models import CommtrackConfig
        ct_config = CommtrackConfig.for_domain(self.domain)
        if (
            (ct_config is None)
            or (not Domain.get_by_name(self.domain).commtrack_enabled)
            or LOCATION_TYPE_STOCK_RATES.enabled(self.domain)
        ):
            return
        config = ct_config.stock_levels_config
        self.emergency_level = config.emergency_level
        self.understock_threshold = config.understock_threshold
        self.overstock_threshold = config.overstock_threshold

    def save(self, *args, **kwargs):
        # Lazily derive the slug from the display name on first save.
        if not self.code:
            from corehq.apps.commtrack.util import unicode_slug
            self.code = unicode_slug(self.name)
        self._populate_stock_levels()
        return super(LocationType, self).save(*args, **kwargs)

    def __unicode__(self):
        return self.name

    @property
    @memoized
    def can_have_children(self):
        # True when at least one type declares this one as its parent.
        return LocationType.objects.filter(parent_type=self).exists()
class SQLLocation(MPTTModel):
    """SQL mirror of the couch ``Location`` document (MPTT tree node).

    Rows are created/updated one-way from Location.save() via
    Location._sync_location().
    """

    domain = models.CharField(max_length=255, db_index=True)
    name = models.CharField(max_length=100, null=True)
    location_id = models.CharField(max_length=100, db_index=True, unique=True)
    location_type = models.ForeignKey(LocationType, null=True)
    site_code = models.CharField(max_length=255)
    external_id = models.CharField(max_length=255, null=True)
    # NOTE(review): mutable default {} is shared across instances; Django
    # convention is default=dict — confirm json_field accepts a callable.
    metadata = json_field.JSONField(default={})
    created_at = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    is_archived = models.BooleanField(default=False)
    latitude = models.DecimalField(max_digits=20, decimal_places=10, null=True)
    longitude = models.DecimalField(max_digits=20, decimal_places=10, null=True)
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children')

    # Use getter and setter below to access this value
    # since stocks_all_products can cause an empty list to
    # be what is stored for a location that actually has
    # all products available.
    _products = models.ManyToManyField(SQLProduct, null=True)
    stocks_all_products = models.BooleanField(default=True)

    supply_point_id = models.CharField(max_length=255, db_index=True, unique=True, null=True)

    @property
    def products(self):
        """
        If there are no products specified for this location, assume all
        products for the domain are relevant.
        """
        if self.stocks_all_products:
            return SQLProduct.by_domain(self.domain)
        else:
            return self._products.all()

    @products.setter
    def products(self, value):
        # this will set stocks_all_products to true if the user
        # has added all products in the domain to this location
        self.stocks_all_products = (set(value) ==
                                    set(SQLProduct.by_domain(self.domain)))
        self._products = value

    class Meta:
        unique_together = ('domain', 'site_code',)

    def __unicode__(self):
        return u"{} ({})".format(self.name, self.domain)

    def __repr__(self):
        return "<SQLLocation(domain=%s, name=%s)>" % (
            self.domain,
            self.name
        )

    @property
    def display_name(self):
        return u"{} [{}]".format(self.name, self.location_type.name)

    def archived_descendants(self):
        """
        Returns a list of archived descendants for this location.
        """
        return self.get_descendants().filter(is_archived=True)

    def child_locations(self, include_archive_ancestors=False):
        """
        Returns a list of this location's children.
        """
        children = self.get_children()
        return _filter_for_archived(children, include_archive_ancestors)

    @classmethod
    def root_locations(cls, domain, include_archive_ancestors=False):
        """Return the domain's root locations, optionally including archived
        roots (or roots with archived descendants)."""
        roots = cls.objects.root_nodes().filter(domain=domain)
        return _filter_for_archived(roots, include_archive_ancestors)

    def _make_group_object(self, user_id, case_sharing):
        """Build an unsavable Group stand-in for this location, either a
        case-sharing group or a reporting group depending on ``case_sharing``."""
        def group_name():
            # Group name is the slash-joined ancestor path ending in this name.
            return '/'.join(
                list(self.get_ancestors().values_list('name', flat=True)) +
                [self.name]
            )

        from corehq.apps.groups.models import UnsavableGroup

        g = UnsavableGroup()
        g.domain = self.domain
        g.users = [user_id] if user_id else []
        g.last_modified = datetime.utcnow()

        if case_sharing:
            g.name = group_name() + '-Cases'
            g._id = LOCATION_SHARING_PREFIX + self.location_id
            g.case_sharing = True
            g.reporting = False
        else:
            # reporting groups
            g.name = group_name()
            g._id = LOCATION_REPORTING_PREFIX + self.location_id
            g.case_sharing = False
            g.reporting = True

        g.metadata = {
            'commcare_location_type': self.location_type.name,
            'commcare_location_name': self.name,
        }
        # Copy the location's custom metadata onto the group, namespaced.
        for key, val in self.metadata.items():
            g.metadata['commcare_location_' + key] = val

        return g

    def case_sharing_group_object(self, user_id=None):
        """
        Returns a fake group object that cannot be saved.

        This is used for giving users access via case
        sharing groups, without having a real group
        for every location that we have to manage/hide.
        """
        return self._make_group_object(
            user_id,
            True,
        )

    def reporting_group_object(self, user_id=None):
        """
        Returns a fake group object that cannot be saved.

        Similar to case_sharing_group_object method, but for
        reporting groups.
        """
        return self._make_group_object(
            user_id,
            False,
        )

    @property
    @memoized
    def couch_location(self):
        # The couch Location document this SQL row mirrors.
        return Location.get(self.location_id)
def _filter_for_archived(locations, include_archive_ancestors):
"""
Perform filtering on a location queryset.
include_archive_ancestors toggles between selecting only active
children and selecting any child that is archived or has
archived descendants.
"""
if include_archive_ancestors:
return [
item for item in locations
if item.is_archived or item.archived_descendants()
]
else:
return locations.filter(is_archived=False)
class Location(CachedCouchDocumentMixin, Document):
    """Couch-backed location document.

    Saving this document one-way syncs its data into the SQLLocation model
    (see ``save`` / ``_sync_location``). Parentage is stored in ``lineage``.
    """

    domain = StringProperty()
    name = StringProperty()
    location_type = StringProperty()
    site_code = StringProperty()  # should be unique, not yet enforced
    # unique id from some external data source
    external_id = StringProperty()
    metadata = DictProperty()
    last_modified = DateTimeProperty()
    is_archived = BooleanProperty(default=False)

    latitude = FloatProperty()
    longitude = FloatProperty()

    # a list of doc ids, referring to the parent location, then the
    # grand-parent, and so on up to the root location in the hierarchy
    # TODO: in future, support multiple types of parentage with
    # independent hierarchies
    lineage = StringListProperty()
    previous_parents = StringListProperty()

    def __init__(self, *args, **kwargs):
        # Accept a 'parent' kwarg (a Document or a doc id) and translate it
        # into the stored 'lineage' list before delegating to couchdbkit.
        if 'parent' in kwargs:
            parent = kwargs['parent']
            if parent:
                if not isinstance(parent, Document):
                    # 'parent' is a doc id
                    parent = Location.get(parent)
                lineage = list(reversed(parent.path))
            else:
                lineage = []
            kwargs['lineage'] = lineage
            del kwargs['parent']

        # NOTE(review): super(Document, self) starts MRO lookup *after*
        # Document, skipping both CachedCouchDocumentMixin.__init__ and
        # Document.__init__ — looks like it should be super(Location, self);
        # confirm against couchdbkit semantics before changing.
        super(Document, self).__init__(*args, **kwargs)

    def __repr__(self):
        return "%s (%s)" % (self.name, self.location_type)

    def __eq__(self, other):
        # Two locations are equal iff both are Locations with the same _id.
        if isinstance(other, Location):
            return self._id == other._id
        else:
            return False

    def __hash__(self):
        return hash(self._id)

    # Returns an *unsaved* SQLLocation object: when syncing runs inside a
    # task it can be killed mid-way (e.g. leaving a location in SQL without
    # a location type), which used to 500 the locations page. The SQL object
    # is therefore saved together with the couch object in save().
    def _sync_location(self):
        # Couch properties copied verbatim to the SQL row; tuples map
        # (sql_attribute, couch_attribute).
        properties_to_sync = [
            ('location_id', '_id'),
            'domain',
            'name',
            'site_code',
            'external_id',
            'latitude',
            'longitude',
            'is_archived',
            'metadata'
        ]

        try:
            is_new = False
            sql_location = SQLLocation.objects.get(location_id=self._id)
        except SQLLocation.DoesNotExist:
            is_new = True
            sql_location = SQLLocation(domain=self.domain, site_code=self.site_code)

        # (Re)point the SQL row at the right LocationType, creating it lazily.
        if is_new or (sql_location.location_type.name != self.location_type):
            sql_location.location_type, _ = LocationType.objects.get_or_create(
                domain=self.domain,
                name=self.location_type,
            )

        for prop in properties_to_sync:
            if isinstance(prop, tuple):
                sql_prop, couch_prop = prop
            else:
                sql_prop = couch_prop = prop
            if hasattr(self, couch_prop):
                setattr(sql_location, sql_prop, getattr(self, couch_prop))

        # sync supply point id
        sp = self.linked_supply_point()
        if sp:
            sql_location.supply_point_id = sp._id

        # sync parent connection
        parent_id = self.parent_id
        if parent_id:
            sql_location.parent = SQLLocation.objects.get(location_id=parent_id)

        return sql_location

    @property
    def sql_location(self):
        # The synced SQL mirror of this document (raises DoesNotExist if
        # syncing never happened).
        return SQLLocation.objects.get(location_id=self._id)

    def _archive_single_location(self):
        """
        Archive a single location, caller is expected to handle
        archiving children as well.

        This is just used to prevent having to do recursive
        couch queries in `archive()`.
        """
        self.is_archived = True
        self.save()

        sp = self.linked_supply_point()
        # sanity check that the supply point exists and is still open.
        # this is important because if you archive a child, then try
        # to archive the parent, we don't want to try to close again
        if sp and not sp.closed:
            close_case(sp._id, self.domain, COMMTRACK_USERNAME)

    def archive(self):
        """
        Mark a location and its dependants as archived.

        This will cause it (and its data) to not show up in default
        Couch and SQL views.
        """
        for loc in [self] + self.descendants:
            loc._archive_single_location()

    def _unarchive_single_location(self):
        """
        Unarchive a single location, caller is expected to handle
        unarchiving children as well.

        This is just used to prevent having to do recursive
        couch queries in `unarchive()`.
        """
        self.is_archived = False
        self.save()

        # reopen supply point case if needed
        sp = self.linked_supply_point()
        # sanity check that the supply point exists and is not open.
        # this is important because if you unarchive a child, then try
        # to unarchive the parent, we don't want to try to open again
        if sp and sp.closed:
            # Reopen by archiving the xform that closed the case.
            for action in sp.actions:
                if action.action_type == 'close':
                    action.xform.archive(user=COMMTRACK_USERNAME)
                    break

    def unarchive(self):
        """
        Unarchive a location and reopen supply point case if it
        exists.
        """
        for loc in [self] + self.descendants:
            loc._unarchive_single_location()

    def save(self, *args, **kwargs):
        """
        Saving a couch version of Location will trigger
        one way syncing to the SQLLocation version of this
        location.
        """
        self.last_modified = datetime.utcnow()

        # lazy migration for site_code
        if not self.site_code:
            from corehq.apps.commtrack.util import generate_code
            self.site_code = generate_code(
                self.name,
                Location.site_codes_for_domain(self.domain)
            )
        sql_location = None
        result = super(Location, self).save(*args, **kwargs)

        # Try to sync; if no SQLLocation comes back (e.g. the syncing task
        # was killed inside _sync_location), delete the couch document so
        # the two stores don't diverge, and report the save as failed.
        try:
            sql_location = self._sync_location()
        finally:
            if sql_location:
                sql_location.save()
            else:
                self.delete()
                result = None
        return result

    @classmethod
    def filter_by_type(cls, domain, loc_type, root_loc=None):
        """Yield non-archived locations of ``loc_type``, optionally limited
        to descendants of ``root_loc``."""
        loc_id = root_loc._id if root_loc else None
        relevant_ids = [r['id'] for r in cls.get_db().view('locations/by_type',
            reduce=False,
            startkey=[domain, loc_type, loc_id],
            endkey=[domain, loc_type, loc_id, {}],
        ).all()]
        return (
            cls.wrap(l) for l in iter_docs(cls.get_db(), list(relevant_ids))
            if not l.get('is_archived', False)
        )

    @classmethod
    def filter_by_type_count(cls, domain, loc_type, root_loc=None):
        # Count via the view's reduce; includes archived locations.
        loc_id = root_loc._id if root_loc else None
        return cls.get_db().view('locations/by_type',
            reduce=True,
            startkey=[domain, loc_type, loc_id],
            endkey=[domain, loc_type, loc_id, {}],
        ).one()['value']

    @classmethod
    def by_domain(cls, domain, include_docs=True):
        """Return all non-archived locations in ``domain`` (or just their
        ids when ``include_docs`` is False — ids are not archive-filtered)."""
        relevant_ids = set([r['id'] for r in cls.get_db().view(
            'locations/by_type',
            reduce=False,
            startkey=[domain],
            endkey=[domain, {}],
        ).all()])
        if not include_docs:
            return relevant_ids
        else:
            return (
                cls.wrap(l) for l in iter_docs(cls.get_db(), list(relevant_ids))
                if not l.get('is_archived', False)
            )

    @classmethod
    def site_codes_for_domain(cls, domain):
        """
        This method is only used in management commands and lazy
        migrations so DOES NOT exclude archived locations.
        """
        return set([r['key'][1] for r in cls.get_db().view(
            'locations/prop_index_site_code',
            reduce=False,
            startkey=[domain],
            endkey=[domain, {}],
        ).all()])

    @classmethod
    def by_site_code(cls, domain, site_code):
        """
        This method directly looks up a single location
        and can return archived locations.
        """
        result = cls.get_db().view(
            'locations/prop_index_site_code',
            reduce=False,
            startkey=[domain, site_code],
            endkey=[domain, site_code, {}],
        ).first()
        return Location.get(result['id']) if result else None

    @classmethod
    def root_locations(cls, domain):
        """
        Return all active top level locations for this domain
        """
        return root_locations(domain)

    @classmethod
    def all_locations(cls, domain):
        # Includes archived locations (delegates to the module-level helper).
        return all_locations(domain)

    @classmethod
    def get_in_domain(cls, domain, id):
        """Fetch a location by id, returning None unless it exists and
        belongs to ``domain``."""
        if id:
            try:
                loc = Location.get(id)
                assert domain == loc.domain
                return loc
            except (ResourceNotFound, AssertionError):
                pass
        return None

    @property
    def is_root(self):
        return not self.lineage

    @property
    def parent_id(self):
        # lineage[0] is the immediate parent's doc id.
        if self.is_root:
            return None
        return self.lineage[0]

    @property
    def parent(self):
        parent_id = self.parent_id
        return Location.get(parent_id) if parent_id else None

    def siblings(self, parent=None):
        # All other children of this location's parent (roots for a root).
        if not parent:
            parent = self.parent
        return [loc for loc in (parent.children if parent else root_locations(self.domain)) if loc._id != self._id]

    @property
    def path(self):
        # Root-first list of doc ids ending with this location's own id.
        _path = list(reversed(self.lineage))
        _path.append(self._id)
        return _path

    @property
    def _key_bounds(self):
        # Start/end keys bracketing this location's subtree in the
        # locations/hierarchy view.
        startkey = list(itertools.chain([self.domain], self.path, ['']))
        endkey = list(itertools.chain(startkey[:-1], [{}]))
        return startkey, endkey

    @property
    def descendants(self):
        """return list of all locations that have this location as an ancestor"""
        startkey, endkey = self._key_bounds
        return self.view('locations/hierarchy', startkey=startkey, endkey=endkey, reduce=False, include_docs=True).all()

    @property
    def children(self):
        """return list of immediate children of this location"""
        startkey, endkey = self._key_bounds
        depth = len(self.path) + 2  # 1 for domain, 1 for next location level
        q = self.view('locations/hierarchy', startkey=startkey, endkey=endkey, group_level=depth)
        keys = [e['key'] for e in q if len(e['key']) == depth]
        return self.view('locations/hierarchy', keys=keys, reduce=False, include_docs=True).all()

    @property
    def _geopoint(self):
        # "lat lon" string, or None when either coordinate is missing.
        return '%s %s' % (self.latitude, self.longitude) if self.latitude is not None and self.longitude is not None else None

    def linked_supply_point(self):
        from corehq.apps.commtrack.models import SupplyPointCase
        return SupplyPointCase.get_by_location(self)

    @property
    def group_id(self):
        """
        Returns the id with a prefix because this is
        the magic id we are force setting the locations
        case sharing group to be.

        This is also the id that owns supply point cases.
        """
        return LOCATION_SHARING_PREFIX + self._id

    @property
    def location_type_object(self):
        # The LocationType row, reached through the synced SQLLocation.
        return self.sql_location.location_type
def root_locations(domain):
    """Return the active (non-archived) top-level locations of ``domain``."""
    rows = Location.get_db().view('locations/hierarchy',
                                  startkey=[domain], endkey=[domain, {}],
                                  reduce=True, group_level=2)
    top_level = [Location.get(row['key'][-1]) for row in rows]
    return [loc for loc in top_level if not loc.is_archived]
def all_locations(domain):
    """Return every location in ``domain``, including archived ones."""
    query = Location.view('locations/hierarchy',
                          startkey=[domain], endkey=[domain, {}],
                          reduce=False, include_docs=True)
    return query.all()
| {
"repo_name": "benrudolph/commcare-hq",
"path": "corehq/apps/locations/models.py",
"copies": "1",
"size": "21779",
"license": "bsd-3-clause",
"hash": 7514116045174328000,
"line_mean": 32.8709175739,
"line_max": 126,
"alpha_frac": 0.6025070022,
"autogenerated": false,
"ratio": 4.124810606060606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5227317608260605,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from ...custom_tag_index import custom_tag_index
from ... import utils
# Accent color for the documentation popup's side bar (Sublime color() blend).
SIDE_COLOR = "color(var(--orangish) blend(var(--background) 60%))"


def get_inline_documentation(cfml_view, doc_type):
    """Return inline documentation for the custom tag under the cursor.

    Produces a ``cfml_view.Documentation`` when the cursor sits on an
    indexed custom tag inside a known project, otherwise ``None``.
    """
    if not cfml_view.project_name:
        return None
    if not cfml_view.view.match_selector(cfml_view.position, "meta.tag.custom.cfml"):
        return None
    tag_name = utils.get_tag_name(cfml_view.view, cfml_view.position)
    file_path, tag_info = custom_tag_index.get_index_by_tag_name(
        cfml_view.project_name, tag_name
    )
    if not file_path:
        return None
    doc, callback = get_documentation(
        cfml_view.view, tag_name, file_path, tag_info
    )
    return cfml_view.Documentation(None, doc, callback, 2)


def get_documentation(view, tag_name, file_path, tag_info):
    """Assemble the documentation model for a custom tag.

    Returns a ``(doc, callback)`` pair: ``doc`` is the popup model dict and
    ``callback`` opens the tag's source file when its link is clicked.
    """
    closing = "true" if tag_info["has_end_tag"] else "false"
    body = "<br>"
    body += "<strong>Closing tag:</strong> " + closing + "<br>"
    body += "<strong>Attributes:</strong> " + ", ".join(tag_info["attributes"])
    doc = {
        "side_color": SIDE_COLOR,
        "html": {
            "links": [],
            "header": tag_name,
            "description": (
                '<strong>path</strong>: <a class="plain-link" href="__go_to_customtag">'
                + file_path
                + "</a>"
            ),
            "body": body,
        },
    }
    return doc, partial(on_navigate, view, file_path)


def on_navigate(view, file_path, href):
    # ``href`` is ignored: clicking the link always opens the tag's file.
    view.window().open_file(file_path)
| {
"repo_name": "jcberquist/sublimetext-cfml",
"path": "src/plugins_/custom_tags/documentation.py",
"copies": "2",
"size": "1671",
"license": "mit",
"hash": -4433611294972765000,
"line_mean": 29.9444444444,
"line_max": 82,
"alpha_frac": 0.5954518253,
"autogenerated": false,
"ratio": 3.263671875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48591237003000004,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from datetime import datetime
from logbook import Logger
from .sync import sync, fast_sync, regular
# ``regular`` is imported only for its module-level registration side
# effects; the assert keeps linters from flagging it as unused.
assert regular
from .mod import mod
log = Logger(__name__)
def has(backend, kind, text):
    """Return whether ``backend`` already holds a filled query (kind, text)."""
    return backend.has_filled_query_bi_kind_and_text(kind, text)
def query(backend, kind, text, timeout, **kargs):
    """Run a live search for ``text`` via the module registered for ``kind``."""
    searcher = mod(kind)
    return searcher.search(text=text, timeout=timeout, backend=backend, **kargs)
def _search(backend, kind, text, timeout, sync_on_expire=None, **kargs):
    '''return None means first sync failed'''
    # Only forward the kwargs that the sync machinery understands.
    sync_options = {key: kargs[key] for key in kargs if key in [
        'good',
        'sync_interval'
    ]}

    def _sync():
        # Try the cheap fast path first, fall back to a full sync.
        options = dict(kind=kind, text=text, timeout=timeout, backend=backend)
        options.update(sync_options)
        return fast_sync(**options) or sync(**options)

    get_query = partial(
        backend.get_query_bi_kind_and_text,
        kind=kind,
        text=text
    )
    if not has(backend, kind, text):
        # First time we see this query: it only exists after a successful sync.
        log.info('query {} of {} dosn\'t exist', text, kind)
        if _sync():
            query = get_query()
        else:
            query = None
    else:
        query = get_query()
        if mod(query.kind).expired(query):
            log.debug('query {} of {} expired', text, kind)
            # Either the caller or the kind's module decides whether an
            # expired query is re-synced now or merely flagged for later.
            if (
                mod(query.kind).sync_on_expire(query) if sync_on_expire is None
                else sync_on_expire
            ):
                if _sync():
                    query = get_query()
                else:
                    # Sync failed: fall back to serving the stale query.
                    log.debug(
                        'sync {} of {} timeout or meet expected error',
                        text,
                        kind
                    )
            else:
                mark_need_sync(backend, kind, text)
    assert query is None or query.result, 'invalid query: {}'.format(query)
    return query
def mark_need_sync(backend, kind, text):
    """Force query (kind, text) to sync on its next pickup by moving its
    next-sync time to the present."""
    log.debug('mark query {} of {} need sync', text, kind)
    now = datetime.utcnow()
    backend.set_next_sync_time_bi_kind_and_text(kind, text, now)
| {
"repo_name": "Answeror/torabot",
"path": "torabot/core/query.py",
"copies": "1",
"size": "2054",
"license": "mit",
"hash": -4385720599757784000,
"line_mean": 29.2058823529,
"line_max": 81,
"alpha_frac": 0.558909445,
"autogenerated": false,
"ratio": 3.9198473282442747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49787567732442745,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from datetime import datetime, timedelta, time
from collections import MutableMapping
import numpy as np
from pandas._libs import tslib
from pandas._libs.tslibs.strptime import array_strptime
from pandas._libs.tslibs import parsing, conversion
from pandas._libs.tslibs.parsing import ( # noqa
parse_time_string,
DateParseError,
_format_is_iso,
_guess_datetime_format)
from pandas.core.dtypes.common import (
_ensure_object,
is_datetime64_ns_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_integer,
is_float,
is_list_like,
is_scalar,
is_numeric_dtype)
from pandas.core.dtypes.generic import (
ABCIndexClass, ABCSeries,
ABCDataFrame)
from pandas.core.dtypes.missing import notna
from pandas.core import algorithms
from pandas.compat import zip
def _guess_datetime_format_for_array(arr, **kwargs):
    """Guess a datetime format from the first non-NaN element of ``arr``.

    Returns ``None`` when every element is NaN (or the array is empty).
    """
    valid_positions = notna(arr).nonzero()[0]
    if not len(valid_positions):
        return None
    return _guess_datetime_format(arr[valid_positions[0]], **kwargs)
def _maybe_cache(arg, format, cache, convert_listlike):
"""
Create a cache of unique dates from an array of dates
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
format : string
Strftime format to parse time
cache : boolean
True attempts to create a cache of converted values
convert_listlike : function
Conversion function to apply on dates
Returns
-------
cache_array : Series
Cache of converted, unique dates. Can be empty
"""
from pandas import Series
cache_array = Series()
if cache:
# Perform a quicker unique check
from pandas import Index
if not Index(arg).is_unique:
unique_dates = algorithms.unique(arg)
cache_dates = convert_listlike(unique_dates, True, format)
cache_array = Series(cache_dates, index=unique_dates)
return cache_array
def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
"""
Convert array of dates with a cache and box the result
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
box : boolean
True boxes result as an Index-like, False returns an ndarray
errors : string
'ignore' plus box=True will convert result to Index
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : datetime of converted dates
Returns:
- Index-like if box=True
- ndarray if box=False
"""
from pandas import Series, DatetimeIndex, Index
result = Series(arg).map(cache_array)
if box:
if errors == 'ignore':
return Index(result)
else:
return DatetimeIndex(result, name=name)
return result.values
def _return_parsed_timezone_results(result, timezones, box, tz):
"""
Return results from array_strptime if a %z or %Z directive was passed.
Parameters
----------
result : ndarray
int64 date representations of the dates
timezones : ndarray
pytz timezone objects
box : boolean
True boxes result as an Index-like, False returns an ndarray
tz : object
None or pytz timezone object
Returns
-------
tz_result : ndarray of parsed dates with timezone
Returns:
- Index-like if box=True
- ndarray of Timestamps if box=False
"""
if tz is not None:
raise ValueError("Cannot pass a tz argument when "
"parsing strings with timezone "
"information.")
tz_results = np.array([tslib.Timestamp(res).tz_localize(zone) for res, zone
in zip(result, timezones)])
if box:
from pandas import Index
return Index(tz_results)
return tz_results
def _convert_listlike_datetimes(arg, box, format, name=None, tz=None,
                                unit=None, errors=None,
                                infer_datetime_format=None, dayfirst=None,
                                yearfirst=None, exact=None):
    """
    Helper function for to_datetime. Performs the conversions of 1D listlike
    of dates

    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        date to be parced
    box : boolean
        True boxes result as an Index-like, False returns an ndarray
    name : object
        None or string for the Index name
    tz : object
        None or 'utc'
    unit : string
        None or string of the frequency of the passed data
    errors : string
        error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore'
    infer_datetime_format : boolean
        inferring format behavior from to_datetime
    dayfirst : boolean
        dayfirst parsing behavior from to_datetime
    yearfirst : boolean
        yearfirst parsing behavior from to_datetime
    exact : boolean
        exact format matching behavior from to_datetime

    Returns
    -------
    ndarray of parsed dates
        Returns:

        - Index-like if box=True
        - ndarray of Timestamps if box=False
    """
    from pandas import DatetimeIndex
    if isinstance(arg, (list, tuple)):
        arg = np.array(arg, dtype='O')

    # these are shortcutable
    if is_datetime64tz_dtype(arg):
        # Already tz-aware datetime64: box/convert without reparsing.
        if not isinstance(arg, DatetimeIndex):
            return DatetimeIndex(arg, tz=tz, name=name)
        if tz == 'utc':
            arg = arg.tz_convert(None).tz_localize(tz)
        return arg

    elif is_datetime64_ns_dtype(arg):
        # Already datetime64[ns]: at most needs boxing into an Index.
        if box and not isinstance(arg, DatetimeIndex):
            try:
                return DatetimeIndex(arg, tz=tz, name=name)
            except ValueError:
                pass
        return arg

    elif unit is not None:
        # Numeric epochs with an explicit unit (e.g. 's', 'ms').
        if format is not None:
            raise ValueError("cannot specify both format and unit")
        arg = getattr(arg, 'values', arg)
        result = tslib.array_with_unit_to_datetime(arg, unit,
                                                   errors=errors)
        if box:
            if errors == 'ignore':
                from pandas import Index
                return Index(result)

            return DatetimeIndex(result, tz=tz, name=name)
        return result

    elif getattr(arg, 'ndim', 1) > 1:
        raise TypeError('arg must be a string, datetime, list, tuple, '
                        '1-d array, or Series')

    arg = _ensure_object(arg)
    require_iso8601 = False

    if infer_datetime_format and format is None:
        format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)

    if format is not None:
        # There is a special fast-path for iso8601 formatted
        # datetime strings, so in those cases don't use the inferred
        # format because this path makes process slower in this
        # special case
        format_is_iso8601 = _format_is_iso(format)
        if format_is_iso8601:
            require_iso8601 = not infer_datetime_format
            format = None

    try:
        result = None

        if format is not None:
            # shortcut formatting here
            if format == '%Y%m%d':
                try:
                    result = _attempt_YYYYMMDD(arg, errors=errors)
                # NOTE(review): bare except swallows everything, including
                # KeyboardInterrupt — consider narrowing to Exception.
                except:
                    raise ValueError("cannot convert the input to "
                                     "'%Y%m%d' date format")

            # fallback
            if result is None:
                try:
                    result, timezones = array_strptime(
                        arg, format, exact=exact, errors=errors)
                    # %Z/%z directives carry per-value zones and need
                    # dedicated boxing.
                    if '%Z' in format or '%z' in format:
                        return _return_parsed_timezone_results(
                            result, timezones, box, tz)
                except tslib.OutOfBoundsDatetime:
                    if errors == 'raise':
                        raise
                    result = arg
                except ValueError:
                    # if format was inferred, try falling back
                    # to array_to_datetime - terminate here
                    # for specified formats
                    if not infer_datetime_format:
                        if errors == 'raise':
                            raise
                        result = arg

        if result is None and (format is None or infer_datetime_format):
            # General-purpose parser (no usable explicit format).
            result = tslib.array_to_datetime(
                arg,
                errors=errors,
                utc=tz == 'utc',
                dayfirst=dayfirst,
                yearfirst=yearfirst,
                require_iso8601=require_iso8601
            )

        if is_datetime64_dtype(result) and box:
            result = DatetimeIndex(result, tz=tz, name=name)
        return result

    except ValueError as e:
        # Last resort: the input may be an array of datetime objects.
        try:
            values, tz = conversion.datetime_to_datetime64(arg)
            return DatetimeIndex._simple_new(values, name=name, tz=tz)
        except (ValueError, TypeError):
            raise e
def _adjust_to_origin(arg, origin, unit):
    """
    Helper function for to_datetime.
    Adjust input argument to the specified origin

    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        date to be adjusted
    origin : 'julian' or Timestamp
        origin offset for the arg
    unit : string
        passed unit from to_datetime, must be 'D'

    Returns
    -------
    ndarray or scalar of adjusted date(s)

    Raises
    ------
    ValueError
        if unit is not 'D' for origin='julian', arg is non-numeric for a
        Timestamp origin, or origin cannot be converted / is tz-aware
    tslib.OutOfBoundsDatetime
        if the adjusted values fall outside the Timestamp-representable range
    """
    if origin == 'julian':
        original = arg
        j0 = tslib.Timestamp(0).to_julian_date()
        if unit != 'D':
            raise ValueError("unit must be 'D' for origin='julian'")
        try:
            arg = arg - j0
        except TypeError:
            # narrowed from a bare `except:` — only a type mismatch
            # (e.g. strings) can make this subtraction fail
            raise ValueError("incompatible 'arg' type for given "
                             "'origin'='julian'")
        # premptively check this for a nice range
        j_max = tslib.Timestamp.max.to_julian_date() - j0
        j_min = tslib.Timestamp.min.to_julian_date() - j0
        if np.any(arg > j_max) or np.any(arg < j_min):
            raise tslib.OutOfBoundsDatetime(
                "{original} is Out of Bounds for "
                "origin='julian'".format(original=original))
    else:
        # arg must be numeric
        if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or
                is_numeric_dtype(np.asarray(arg))):
            raise ValueError(
                "'{arg}' is not compatible with origin='{origin}'; "
                "it must be numeric with a unit specified ".format(
                    arg=arg,
                    origin=origin))
        # we are going to offset back to unix / epoch time
        try:
            offset = tslib.Timestamp(origin)
        except tslib.OutOfBoundsDatetime:
            raise tslib.OutOfBoundsDatetime(
                "origin {origin} is Out of Bounds".format(origin=origin))
        except ValueError:
            raise ValueError("origin {origin} cannot be converted "
                             "to a Timestamp".format(origin=origin))
        if offset.tz is not None:
            raise ValueError(
                "origin offset {} must be tz-naive".format(offset))
        offset -= tslib.Timestamp(0)
        # convert the offset to the unit of the arg
        # this should be lossless in terms of precision
        offset = offset // tslib.Timedelta(1, unit=unit)
        # scalars & ndarray-like can handle the addition
        if is_list_like(arg) and not isinstance(
                arg, (ABCSeries, ABCIndexClass, np.ndarray)):
            arg = np.asarray(arg)
        arg = arg + offset
    return arg
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
                utc=None, box=True, format=None, exact=True,
                unit=None, infer_datetime_format=False, origin='unix',
                cache=False):
    """
    Convert argument to datetime.
    Parameters
    ----------
    arg : integer, float, string, datetime, list, tuple, 1-d array, Series
        .. versionadded:: 0.18.1
           or DataFrame/dict-like
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaT
        - If 'ignore', then invalid parsing will return the input
    dayfirst : boolean, default False
        Specify a date parse order if `arg` is str or its list-likes.
        If True, parses dates with the day first, eg 10/11/12 is parsed as
        2012-11-10.
        Warning: dayfirst=True is not strict, but will prefer to parse
        with day first (this is a known bug, based on dateutil behavior).
    yearfirst : boolean, default False
        Specify a date parse order if `arg` is str or its list-likes.
        - If True parses dates with the year first, eg 10/11/12 is parsed as
          2010-11-12.
        - If both dayfirst and yearfirst are True, yearfirst is preceded (same
          as dateutil).
        Warning: yearfirst=True is not strict, but will prefer to parse
        with year first (this is a known bug, based on dateutil behavior).
        .. versionadded:: 0.16.1
    utc : boolean, default None
        Return UTC DatetimeIndex if True (converting any tz-aware
        datetime.datetime objects as well).
    box : boolean, default True
        - If True returns a DatetimeIndex
        - If False returns ndarray of values.
    format : string, default None
        strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
        all the way up to nanoseconds.
    exact : boolean, True by default
        - If True, require an exact format match.
        - If False, allow the format to match anywhere in the target string.
    unit : string, default 'ns'
        unit of the arg (D,s,ms,us,ns) denote the unit, which is an
        integer or float number. This will be based off the origin.
        Example, with unit='ms' and origin='unix' (the default), this
        would calculate the number of milliseconds to the unix epoch start.
    infer_datetime_format : boolean, default False
        If True and no `format` is given, attempt to infer the format of the
        datetime strings, and if it can be inferred, switch to a faster
        method of parsing them. In some cases this can increase the parsing
        speed by ~5-10x.
    origin : scalar, default is 'unix'
        Define the reference date. The numeric values would be parsed as number
        of units (defined by `unit`) since this reference date.
        - If 'unix' (or POSIX) time; origin is set to 1970-01-01.
        - If 'julian', unit must be 'D', and origin is set to beginning of
          Julian Calendar. Julian day number 0 is assigned to the day starting
          at noon on January 1, 4713 BC.
        - If Timestamp convertible, origin is set to Timestamp identified by
          origin.
        .. versionadded:: 0.20.0
    cache : boolean, default False
        If True, use a cache of unique, converted dates to apply the datetime
        conversion. May produce significant speed-up when parsing duplicate
        date strings, especially ones with timezone offsets.
        .. versionadded:: 0.23.0
    Returns
    -------
    ret : datetime if parsing succeeded.
        Return type depends on input:
        - list-like: DatetimeIndex
        - Series: Series of datetime64 dtype
        - scalar: Timestamp
        In case when it is not possible to return designated types (e.g. when
        any element of input is before Timestamp.min or after Timestamp.max)
        return will have datetime.datetime type (or corresponding
        array/Series).
    Examples
    --------
    Assembling a datetime from multiple columns of a DataFrame. The keys can be
    common abbreviations like ['year', 'month', 'day', 'minute', 'second',
    'ms', 'us', 'ns']) or plurals of the same
    >>> df = pd.DataFrame({'year': [2015, 2016],
                           'month': [2, 3],
                           'day': [4, 5]})
    >>> pd.to_datetime(df)
    0   2015-02-04
    1   2016-03-05
    dtype: datetime64[ns]
    If a date does not meet the `timestamp limitations
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html
    #timeseries-timestamp-limits>`_, passing errors='ignore'
    will return the original input instead of raising any exception.
    Passing errors='coerce' will force an out-of-bounds date to NaT,
    in addition to forcing non-dates (or non-parseable dates) to NaT.
    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
    datetime.datetime(1300, 1, 1, 0, 0)
    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
    NaT
    Passing infer_datetime_format=True can often-times speedup a parsing
    if its not an ISO8601 format exactly, but in a regular format.
    >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000)
    >>> s.head()
    0    3/11/2000
    1    3/12/2000
    2    3/13/2000
    3    3/11/2000
    4    3/12/2000
    dtype: object
    >>> %timeit pd.to_datetime(s,infer_datetime_format=True)
    100 loops, best of 3: 10.4 ms per loop
    >>> %timeit pd.to_datetime(s,infer_datetime_format=False)
    1 loop, best of 3: 471 ms per loop
    Using a unix epoch time
    >>> pd.to_datetime(1490195805, unit='s')
    Timestamp('2017-03-22 15:16:45')
    >>> pd.to_datetime(1490195805433502912, unit='ns')
    Timestamp('2017-03-22 15:16:45.433502912')
    .. warning:: For float arg, precision rounding might happen. To prevent
        unexpected behavior use a fixed-width exact type.
    Using a non-unix epoch origin
    >>> pd.to_datetime([1, 2, 3], unit='D',
                       origin=pd.Timestamp('1960-01-01'))
    0    1960-01-02
    1    1960-01-03
    2    1960-01-04
    See also
    --------
    pandas.DataFrame.astype : Cast argument to a specified dtype.
    pandas.to_timedelta : Convert argument to timedelta.
    """
    # None passes straight through
    if arg is None:
        return None
    # shift numeric inputs so they are relative to the requested origin
    if origin != 'unix':
        arg = _adjust_to_origin(arg, origin, unit)
    tz = 'utc' if utc else None
    # bind every shared option once; each branch below only supplies
    # (arg, box, format)
    convert_listlike = partial(_convert_listlike_datetimes, tz=tz, unit=unit,
                               dayfirst=dayfirst, yearfirst=yearfirst,
                               errors=errors, exact=exact,
                               infer_datetime_format=infer_datetime_format)
    # dispatch on the input type; list-like branches may first consult a
    # unique-value cache built by _maybe_cache (empty when cache=False or
    # when there are no duplicates)
    if isinstance(arg, tslib.Timestamp):
        result = arg
    elif isinstance(arg, ABCSeries):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = arg.map(cache_array)
        else:
            from pandas import Series
            values = convert_listlike(arg._values, True, format)
            result = Series(values, index=arg.index, name=arg.name)
    elif isinstance(arg, (ABCDataFrame, MutableMapping)):
        # column/key-wise assembly from year/month/day (+ optional units)
        result = _assemble_from_unit_mappings(arg, errors=errors)
    elif isinstance(arg, ABCIndexClass):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = _convert_and_box_cache(arg, cache_array, box, errors,
                                            name=arg.name)
        else:
            convert_listlike = partial(convert_listlike, name=arg.name)
            result = convert_listlike(arg, box, format)
    elif is_list_like(arg):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = _convert_and_box_cache(arg, cache_array, box, errors)
        else:
            result = convert_listlike(arg, box, format)
    else:
        # scalar: convert via a 1-element array, then unbox
        result = convert_listlike(np.array([arg]), box, format)[0]
    return result
# mappings for assembling units
_unit_map = {'year': 'year',
'years': 'year',
'month': 'month',
'months': 'month',
'day': 'day',
'days': 'day',
'hour': 'h',
'hours': 'h',
'minute': 'm',
'minutes': 'm',
'second': 's',
'seconds': 's',
'ms': 'ms',
'millisecond': 'ms',
'milliseconds': 'ms',
'us': 'us',
'microsecond': 'us',
'microseconds': 'us',
'ns': 'ns',
'nanosecond': 'ns',
'nanoseconds': 'ns'
}
def _assemble_from_unit_mappings(arg, errors):
    """
    assemble the unit specified fields from the arg (DataFrame)
    Return a Series for actual parsing
    Parameters
    ----------
    arg : DataFrame
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaT
        - If 'ignore', then invalid parsing will return the input
    Returns
    -------
    Series
    """
    from pandas import to_timedelta, to_numeric, DataFrame
    arg = DataFrame(arg)
    if not arg.columns.is_unique:
        raise ValueError("cannot assemble with duplicate keys")
    # replace passed unit with _unit_map
    def f(value):
        # exact match first so 'ms'/'us'/'ns' keep their meaning
        if value in _unit_map:
            return _unit_map[value]
        # m is case significant
        if value.lower() in _unit_map:
            return _unit_map[value.lower()]
        return value
    unit = {k: f(k) for k in arg.keys()}
    # reverse map: canonical unit code -> original column name
    unit_rev = {v: k for k, v in unit.items()}
    # we require at least Ymd
    required = ['year', 'month', 'day']
    req = sorted(list(set(required) - set(unit_rev.keys())))
    if len(req):
        raise ValueError("to assemble mappings requires at least that "
                         "[year, month, day] be specified: [{required}] "
                         "is missing".format(required=','.join(req)))
    # keys we don't recognize
    excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))
    if len(excess):
        raise ValueError("extra keys have been passed "
                         "to the datetime assemblage: "
                         "[{excess}]".format(excess=','.join(excess)))
    def coerce(values):
        # we allow coercion to if errors allows
        values = to_numeric(values, errors=errors)
        # prevent overflow in case of int8 or int16
        if is_integer_dtype(values):
            values = values.astype('int64', copy=False)
        return values
    # pack Y/M/D into a single YYYYMMDD integer and parse it in one shot
    values = (coerce(arg[unit_rev['year']]) * 10000 +
              coerce(arg[unit_rev['month']]) * 100 +
              coerce(arg[unit_rev['day']]))
    try:
        values = to_datetime(values, format='%Y%m%d', errors=errors)
    except (TypeError, ValueError) as e:
        raise ValueError("cannot assemble the "
                         "datetimes: {error}".format(error=e))
    # fold in any sub-day components as timedeltas
    for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
        value = unit_rev.get(u)
        if value is not None and value in arg:
            try:
                values += to_timedelta(coerce(arg[value]),
                                       unit=u,
                                       errors=errors)
            except (TypeError, ValueError) as e:
                raise ValueError("cannot assemble the datetimes [{value}]: "
                                 "{error}".format(value=value, error=e))
    return values
def _attempt_YYYYMMDD(arg, errors):
    """ try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
    arg is a passed in as an object dtype, but could really be ints/strings
    with nan-like/or floats (e.g. with nan)

    Parameters
    ----------
    arg : passed value
    errors : 'raise','ignore','coerce'

    Returns
    -------
    datetime64[ns] ndarray of parsed values, or None if no strategy applied
    """
    def calc(carg):
        # calculate the actual result
        carg = carg.astype(object)
        parsed = parsing.try_parse_year_month_day(carg / 10000,
                                                  carg / 100 % 100,
                                                  carg % 100)
        return tslib.array_to_datetime(parsed, errors=errors)

    def calc_with_mask(carg, mask):
        # like calc, but positions where `mask` is False become NaT
        result = np.empty(carg.shape, dtype='M8[ns]')
        iresult = result.view('i8')
        iresult[~mask] = tslib.iNaT
        result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)). \
            astype('M8[ns]')
        return result

    # try intlike / strings that are ints
    try:
        return calc(arg.astype(np.int64))
    except (ValueError, OverflowError, TypeError):
        # narrowed from a bare `except:` so that KeyboardInterrupt /
        # SystemExit and genuine bugs propagate instead of being swallowed
        pass

    # a float with actual np.nan
    try:
        carg = arg.astype(np.float64)
        return calc_with_mask(carg, notna(carg))
    except (ValueError, OverflowError, TypeError):
        pass

    # string with NaN-like
    try:
        mask = ~algorithms.isin(arg, list(tslib.nat_strings))
        return calc_with_mask(arg, mask)
    except (ValueError, OverflowError, TypeError):
        pass

    return None
# Fixed time formats for time parsing
_time_formats = ["%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
"%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"]
def _guess_time_format_for_array(arr):
# Try to guess the format based on the first non-NaN element
non_nan_elements = notna(arr).nonzero()[0]
if len(non_nan_elements):
element = arr[non_nan_elements[0]]
for time_format in _time_formats:
try:
datetime.strptime(element, time_format)
return time_format
except ValueError:
pass
return None
def to_time(arg, format=None, infer_time_format=False, errors='raise'):
    """
    Parse time strings to time objects using fixed strptime formats ("%H:%M",
    "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
    "%I%M%S%p")
    Use infer_time_format if all the strings are in the same format to speed
    up conversion.
    Parameters
    ----------
    arg : string in time format, datetime.time, list, tuple, 1-d array, Series
    format : str, default None
        Format used to convert arg into a time object. If None, fixed formats
        are used.
    infer_time_format: bool, default False
        Infer the time format based on the first non-NaN element. If all
        strings are in the same format, this will speed up conversion.
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as None
        - If 'ignore', then invalid parsing will return the input
    Returns
    -------
    datetime.time
    """
    from pandas.core.series import Series

    def _convert_listlike(arg, format):
        # normalize list/tuple to an object ndarray; reject >1-dim inputs
        if isinstance(arg, (list, tuple)):
            arg = np.array(arg, dtype='O')
        elif getattr(arg, 'ndim', 1) > 1:
            raise TypeError('arg must be a string, datetime, list, tuple, '
                            '1-d array, or Series')
        arg = _ensure_object(arg)
        if infer_time_format and format is None:
            format = _guess_time_format_for_array(arg)
        times = []
        if format is not None:
            # single known format: parse each element with it, applying the
            # errors policy on failure
            for element in arg:
                try:
                    times.append(datetime.strptime(element, format).time())
                except (ValueError, TypeError):
                    if errors == 'raise':
                        msg = ("Cannot convert {element} to a time with given "
                               "format {format}").format(element=element,
                                                         format=format)
                        raise ValueError(msg)
                    elif errors == 'ignore':
                        return arg
                    else:
                        times.append(None)
        else:
            # no format given: try the fixed formats for each element,
            # promoting whichever matched first to the front of the list so
            # homogeneous input is parsed quickly
            formats = _time_formats[:]
            format_found = False
            for element in arg:
                time_object = None
                for time_format in formats:
                    try:
                        time_object = datetime.strptime(element,
                                                        time_format).time()
                        if not format_found:
                            # Put the found format in front
                            fmt = formats.pop(formats.index(time_format))
                            formats.insert(0, fmt)
                            format_found = True
                        break
                    except (ValueError, TypeError):
                        continue
                if time_object is not None:
                    times.append(time_object)
                elif errors == 'raise':
                    raise ValueError("Cannot convert arg {arg} to "
                                     "a time".format(arg=arg))
                elif errors == 'ignore':
                    return arg
                else:
                    times.append(None)
        return times

    # dispatch on input type, mirroring to_datetime's structure
    if arg is None:
        return arg
    elif isinstance(arg, time):
        return arg
    elif isinstance(arg, Series):
        values = _convert_listlike(arg._values, format)
        return Series(values, index=arg.index, name=arg.name)
    elif isinstance(arg, ABCIndexClass):
        return _convert_listlike(arg, format)
    elif is_list_like(arg):
        return _convert_listlike(arg, format)
    # scalar: parse via a 1-element array, then unbox
    return _convert_listlike(np.array([arg]), format)[0]
def format(dt):
    """Returns date in YYYYMMDD format."""
    # NOTE: shadows the builtin `format`; name kept for backward
    # compatibility with existing callers.
    return '{:%Y%m%d}'.format(dt)
# Day zero of the OLE Automation / Excel serial-date system.
OLE_TIME_ZERO = datetime(1899, 12, 30, 0, 0, 0)


def ole2datetime(oledt):
    """function for converting excel date to normal date format"""
    serial = float(oledt)
    # Excel has a bug where it thinks the date 2/29/1900 exists,
    # so any serial before 61 (i.e. before 3/1/1900) is rejected.
    if serial < 61:
        msg = "Value is outside of acceptable range: {value}".format(
            value=serial)
        raise ValueError(msg)
    return OLE_TIME_ZERO + timedelta(days=serial)
| {
"repo_name": "pratapvardhan/pandas",
"path": "pandas/core/tools/datetimes.py",
"copies": "1",
"size": "30078",
"license": "bsd-3-clause",
"hash": 7349405014616780000,
"line_mean": 33.4536082474,
"line_max": 79,
"alpha_frac": 0.570117694,
"autogenerated": false,
"ratio": 4.220289041672513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5290406735672513,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from datetime import datetime, timedelta, time
from collections import MutableMapping
import numpy as np
from pandas._libs import tslib, tslibs
from pandas._libs.tslibs.strptime import array_strptime
from pandas._libs.tslibs import parsing, conversion, Timestamp
from pandas._libs.tslibs.parsing import ( # noqa
parse_time_string,
DateParseError,
_format_is_iso,
_guess_datetime_format)
from pandas.core.dtypes.common import (
ensure_object,
is_datetime64_ns_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_integer,
is_float,
is_list_like,
is_scalar,
is_numeric_dtype)
from pandas.core.dtypes.generic import (
ABCIndexClass, ABCSeries,
ABCDataFrame)
from pandas.core.dtypes.missing import notna
from pandas.core import algorithms
from pandas.compat import zip
def _guess_datetime_format_for_array(arr, **kwargs):
    """Guess a datetime format from the first non-NaN element of arr;
    returns None when every element is NaN-like."""
    valid = notna(arr).nonzero()[0]
    if not len(valid):
        return None
    return _guess_datetime_format(arr[valid[0]], **kwargs)
def _maybe_cache(arg, format, cache, convert_listlike):
"""
Create a cache of unique dates from an array of dates
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
format : string
Strftime format to parse time
cache : boolean
True attempts to create a cache of converted values
convert_listlike : function
Conversion function to apply on dates
Returns
-------
cache_array : Series
Cache of converted, unique dates. Can be empty
"""
from pandas import Series
cache_array = Series()
if cache:
# Perform a quicker unique check
from pandas import Index
if not Index(arg).is_unique:
unique_dates = algorithms.unique(arg)
cache_dates = convert_listlike(unique_dates, True, format)
cache_array = Series(cache_dates, index=unique_dates)
return cache_array
def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
"""
Convert array of dates with a cache and box the result
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
box : boolean
True boxes result as an Index-like, False returns an ndarray
errors : string
'ignore' plus box=True will convert result to Index
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : datetime of converted dates
Returns:
- Index-like if box=True
- ndarray if box=False
"""
from pandas import Series, DatetimeIndex, Index
result = Series(arg).map(cache_array)
if box:
if errors == 'ignore':
return Index(result)
else:
return DatetimeIndex(result, name=name)
return result.values
def _return_parsed_timezone_results(result, timezones, box, tz):
"""
Return results from array_strptime if a %z or %Z directive was passed.
Parameters
----------
result : ndarray
int64 date representations of the dates
timezones : ndarray
pytz timezone objects
box : boolean
True boxes result as an Index-like, False returns an ndarray
tz : object
None or pytz timezone object
Returns
-------
tz_result : ndarray of parsed dates with timezone
Returns:
- Index-like if box=True
- ndarray of Timestamps if box=False
"""
if tz is not None:
raise ValueError("Cannot pass a tz argument when "
"parsing strings with timezone "
"information.")
tz_results = np.array([Timestamp(res).tz_localize(zone) for res, zone
in zip(result, timezones)])
if box:
from pandas import Index
return Index(tz_results)
return tz_results
def _convert_listlike_datetimes(arg, box, format, name=None, tz=None,
                                unit=None, errors=None,
                                infer_datetime_format=None, dayfirst=None,
                                yearfirst=None, exact=None):
    """
    Helper function for to_datetime. Performs the conversions of 1D listlike
    of dates

    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        date to be parsed
    box : boolean
        True boxes result as an Index-like, False returns an ndarray
    name : object
        None or string for the Index name
    tz : object
        None or 'utc'
    unit : string
        None or string of the frequency of the passed data
    errors : string
        error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore'
    infer_datetime_format : boolean
        inferring format behavior from to_datetime
    dayfirst : boolean
        dayfirst parsing behavior from to_datetime
    yearfirst : boolean
        yearfirst parsing behavior from to_datetime
    exact : boolean
        exact format matching behavior from to_datetime

    Returns
    -------
    ndarray of parsed dates
        Returns:

        - Index-like if box=True
        - ndarray of Timestamps if box=False
    """
    from pandas import DatetimeIndex
    if isinstance(arg, (list, tuple)):
        arg = np.array(arg, dtype='O')
    # these are shortcutable
    if is_datetime64tz_dtype(arg):
        # already tz-aware datetime64 data: just (re)box and, when asked,
        # normalize to UTC; no parsing needed
        if not isinstance(arg, DatetimeIndex):
            return DatetimeIndex(arg, tz=tz, name=name)
        if tz == 'utc':
            arg = arg.tz_convert(None).tz_localize(tz)
        return arg
    elif is_datetime64_ns_dtype(arg):
        # already datetime64[ns]; boxing can still fail (ValueError) in
        # which case the input is handed back unchanged
        if box and not isinstance(arg, DatetimeIndex):
            try:
                return DatetimeIndex(arg, tz=tz, name=name)
            except ValueError:
                pass
        return arg
    elif unit is not None:
        # numeric epoch values interpreted as multiples of `unit`
        if format is not None:
            raise ValueError("cannot specify both format and unit")
        arg = getattr(arg, 'values', arg)
        result = tslib.array_with_unit_to_datetime(arg, unit,
                                                   errors=errors)
        if box:
            if errors == 'ignore':
                from pandas import Index
                return Index(result)
            return DatetimeIndex(result, tz=tz, name=name)
        return result
    elif getattr(arg, 'ndim', 1) > 1:
        raise TypeError('arg must be a string, datetime, list, tuple, '
                        '1-d array, or Series')
    arg = ensure_object(arg)
    require_iso8601 = False
    if infer_datetime_format and format is None:
        format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
    if format is not None:
        # There is a special fast-path for iso8601 formatted
        # datetime strings, so in those cases don't use the inferred
        # format because this path makes process slower in this
        # special case
        format_is_iso8601 = _format_is_iso(format)
        if format_is_iso8601:
            require_iso8601 = not infer_datetime_format
            format = None
    try:
        result = None
        if format is not None:
            # shortcut formatting here
            if format == '%Y%m%d':
                try:
                    result = _attempt_YYYYMMDD(arg, errors=errors)
                except (ValueError, OverflowError, TypeError):
                    # narrowed from a bare `except:` so that
                    # KeyboardInterrupt/SystemExit and genuine bugs are not
                    # re-raised as a bogus parse failure
                    raise ValueError("cannot convert the input to "
                                     "'%Y%m%d' date format")
            # fallback
            if result is None:
                try:
                    result, timezones = array_strptime(
                        arg, format, exact=exact, errors=errors)
                    if '%Z' in format or '%z' in format:
                        return _return_parsed_timezone_results(
                            result, timezones, box, tz)
                except tslibs.OutOfBoundsDatetime:
                    if errors == 'raise':
                        raise
                    result = arg
                except ValueError:
                    # if format was inferred, try falling back
                    # to array_to_datetime - terminate here
                    # for specified formats
                    if not infer_datetime_format:
                        if errors == 'raise':
                            raise
                        result = arg
        if result is None and (format is None or infer_datetime_format):
            result = tslib.array_to_datetime(
                arg,
                errors=errors,
                utc=tz == 'utc',
                dayfirst=dayfirst,
                yearfirst=yearfirst,
                require_iso8601=require_iso8601
            )
        if is_datetime64_dtype(result) and box:
            result = DatetimeIndex(result, tz=tz, name=name)
        return result
    except ValueError as e:
        # last resort: the input may be datetime objects convertible
        # directly to datetime64 without string parsing
        try:
            values, tz = conversion.datetime_to_datetime64(arg)
            return DatetimeIndex._simple_new(values, name=name, tz=tz)
        except (ValueError, TypeError):
            raise e
def _adjust_to_origin(arg, origin, unit):
    """
    Helper function for to_datetime.
    Adjust input argument to the specified origin

    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        date to be adjusted
    origin : 'julian' or Timestamp
        origin offset for the arg
    unit : string
        passed unit from to_datetime, must be 'D'

    Returns
    -------
    ndarray or scalar of adjusted date(s)

    Raises
    ------
    ValueError
        if unit is not 'D' for origin='julian', arg is non-numeric for a
        Timestamp origin, or origin cannot be converted / is tz-aware
    tslibs.OutOfBoundsDatetime
        if the adjusted values fall outside the Timestamp-representable range
    """
    if origin == 'julian':
        original = arg
        j0 = Timestamp(0).to_julian_date()
        if unit != 'D':
            raise ValueError("unit must be 'D' for origin='julian'")
        try:
            arg = arg - j0
        except TypeError:
            # narrowed from a bare `except:` — only a type mismatch
            # (e.g. strings) can make this subtraction fail
            raise ValueError("incompatible 'arg' type for given "
                             "'origin'='julian'")
        # premptively check this for a nice range
        j_max = Timestamp.max.to_julian_date() - j0
        j_min = Timestamp.min.to_julian_date() - j0
        if np.any(arg > j_max) or np.any(arg < j_min):
            raise tslibs.OutOfBoundsDatetime(
                "{original} is Out of Bounds for "
                "origin='julian'".format(original=original))
    else:
        # arg must be numeric
        if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or
                is_numeric_dtype(np.asarray(arg))):
            raise ValueError(
                "'{arg}' is not compatible with origin='{origin}'; "
                "it must be numeric with a unit specified ".format(
                    arg=arg,
                    origin=origin))
        # we are going to offset back to unix / epoch time
        try:
            offset = Timestamp(origin)
        except tslibs.OutOfBoundsDatetime:
            raise tslibs.OutOfBoundsDatetime(
                "origin {origin} is Out of Bounds".format(origin=origin))
        except ValueError:
            raise ValueError("origin {origin} cannot be converted "
                             "to a Timestamp".format(origin=origin))
        if offset.tz is not None:
            raise ValueError(
                "origin offset {} must be tz-naive".format(offset))
        offset -= Timestamp(0)
        # convert the offset to the unit of the arg
        # this should be lossless in terms of precision
        offset = offset // tslibs.Timedelta(1, unit=unit)
        # scalars & ndarray-like can handle the addition
        if is_list_like(arg) and not isinstance(
                arg, (ABCSeries, ABCIndexClass, np.ndarray)):
            arg = np.asarray(arg)
        arg = arg + offset
    return arg
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
utc=None, box=True, format=None, exact=True,
unit=None, infer_datetime_format=False, origin='unix',
cache=False):
"""
Convert argument to datetime.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
.. versionadded:: 0.18.1
or DataFrame/dict-like
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
dayfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
If True, parses dates with the day first, eg 10/11/12 is parsed as
2012-11-10.
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug, based on dateutil behavior).
yearfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
- If True parses dates with the year first, eg 10/11/12 is parsed as
2010-11-12.
- If both dayfirst and yearfirst are True, yearfirst is preceded (same
as dateutil).
Warning: yearfirst=True is not strict, but will prefer to parse
with year first (this is a known bug, based on dateutil behavior).
.. versionadded:: 0.16.1
utc : boolean, default None
Return UTC DatetimeIndex if True (converting any tz-aware
datetime.datetime objects as well).
box : boolean, default True
- If True returns a DatetimeIndex
- If False returns ndarray of values.
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
exact : boolean, True by default
- If True, require an exact format match.
- If False, allow the format to match anywhere in the target string.
unit : string, default 'ns'
unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with unit='ms' and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : boolean, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
origin : scalar, default is 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If 'unix' (or POSIX) time; origin is set to 1970-01-01.
- If 'julian', unit must be 'D', and origin is set to beginning of
Julian Calendar. Julian day number 0 is assigned to the day starting
at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
.. versionadded:: 0.20.0
cache : boolean, default False
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
.. versionadded:: 0.23.0
Returns
-------
ret : datetime if parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
In case when it is not possible to return designated types (e.g. when
any element of input is before Timestamp.min or after Timestamp.max)
return will have datetime.datetime type (or corresponding
array/Series).
Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns']) or plurals of the same
>>> df = pd.DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
If a date does not meet the `timestamp limitations
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html
#timeseries-timestamp-limits>`_, passing errors='ignore'
will return the original input instead of raising any exception.
Passing errors='coerce' will force an out-of-bounds date to NaT,
in addition to forcing non-dates (or non-parseable dates) to NaT.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
Passing infer_datetime_format=True can often-times speedup a parsing
if its not an ISO8601 format exactly, but in a regular format.
>>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> %timeit pd.to_datetime(s,infer_datetime_format=True)
100 loops, best of 3: 10.4 ms per loop
>>> %timeit pd.to_datetime(s,infer_datetime_format=False)
1 loop, best of 3: 471 ms per loop
Using a unix epoch time
>>> pd.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pd.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
.. warning:: For float arg, precision rounding might happen. To prevent
unexpected behavior use a fixed-width exact type.
Using a non-unix epoch origin
>>> pd.to_datetime([1, 2, 3], unit='D',
origin=pd.Timestamp('1960-01-01'))
0 1960-01-02
1 1960-01-03
2 1960-01-04
See also
--------
pandas.DataFrame.astype : Cast argument to a specified dtype.
pandas.to_timedelta : Convert argument to timedelta.
"""
if arg is None:
return None
if origin != 'unix':
arg = _adjust_to_origin(arg, origin, unit)
tz = 'utc' if utc else None
convert_listlike = partial(_convert_listlike_datetimes, tz=tz, unit=unit,
dayfirst=dayfirst, yearfirst=yearfirst,
errors=errors, exact=exact,
infer_datetime_format=infer_datetime_format)
if isinstance(arg, Timestamp):
result = arg
elif isinstance(arg, ABCSeries):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = arg.map(cache_array)
else:
from pandas import Series
values = convert_listlike(arg._values, True, format)
result = Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, MutableMapping)):
result = _assemble_from_unit_mappings(arg, errors=errors)
elif isinstance(arg, ABCIndexClass):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = _convert_and_box_cache(arg, cache_array, box, errors,
name=arg.name)
else:
convert_listlike = partial(convert_listlike, name=arg.name)
result = convert_listlike(arg, box, format)
elif is_list_like(arg):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = _convert_and_box_cache(arg, cache_array, box, errors)
else:
result = convert_listlike(arg, box, format)
else:
result = convert_listlike(np.array([arg]), box, format)[0]
return result
# mappings for assembling units
# Canonical unit codes keyed by every accepted spelling (singular,
# plural, abbreviation) of each datetime component; consumed by
# _assemble_from_unit_mappings when building datetimes from the
# columns of a DataFrame / dict-like.
_unit_map = {'year': 'year',
             'years': 'year',
             'month': 'month',
             'months': 'month',
             'day': 'day',
             'days': 'day',
             'hour': 'h',
             'hours': 'h',
             # NOTE: bare 'm'/'M' is case significant in the consumer;
             # only the spelled-out minute forms are listed here
             'minute': 'm',
             'minutes': 'm',
             'second': 's',
             'seconds': 's',
             'ms': 'ms',
             'millisecond': 'ms',
             'milliseconds': 'ms',
             'us': 'us',
             'microsecond': 'us',
             'microseconds': 'us',
             'ns': 'ns',
             'nanosecond': 'ns',
             'nanoseconds': 'ns'
             }
def _assemble_from_unit_mappings(arg, errors):
    """
    Build a datetime Series from the unit columns of ``arg``.

    The year/month/day columns are mandatory; hour through nanosecond
    columns are optional and are added on as timedeltas.

    Parameters
    ----------
    arg : DataFrame or dict-like of columns
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        Forwarded to ``to_numeric`` / ``to_datetime`` / ``to_timedelta``
        for each component.

    Returns
    -------
    Series
    """
    from pandas import to_timedelta, to_numeric, DataFrame
    arg = DataFrame(arg)
    if not arg.columns.is_unique:
        raise ValueError("cannot assemble with duplicate keys")

    def _canonicalize(label):
        # translate a column label to its canonical unit code
        if label in _unit_map:
            return _unit_map[label]
        # m is case significant
        if label.lower() in _unit_map:
            return _unit_map[label.lower()]
        return label

    unit = {label: _canonicalize(label) for label in arg.keys()}
    unit_rev = {canon: label for label, canon in unit.items()}

    # year/month/day are the minimum required components
    missing = sorted(list(set(['year', 'month', 'day']) -
                          set(unit_rev.keys())))
    if len(missing):
        raise ValueError("to assemble mappings requires at least that "
                         "[year, month, day] be specified: [{required}] "
                         "is missing".format(required=','.join(missing)))

    # anything we could not map onto a unit code is an error
    unknown = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))
    if len(unknown):
        raise ValueError("extra keys have been passed "
                         "to the datetime assemblage: "
                         "[{excess}]".format(excess=','.join(unknown)))

    def _coerce(column):
        # we allow coercion to if errors allows
        column = to_numeric(column, errors=errors)
        # prevent overflow in case of int8 or int16
        if is_integer_dtype(column):
            column = column.astype('int64', copy=False)
        return column

    values = (_coerce(arg[unit_rev['year']]) * 10000 +
              _coerce(arg[unit_rev['month']]) * 100 +
              _coerce(arg[unit_rev['day']]))
    try:
        values = to_datetime(values, format='%Y%m%d', errors=errors)
    except (TypeError, ValueError) as e:
        raise ValueError("cannot assemble the "
                         "datetimes: {error}".format(error=e))

    for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
        label = unit_rev.get(u)
        if label is not None and label in arg:
            try:
                values += to_timedelta(_coerce(arg[label]), unit=u,
                                       errors=errors)
            except (TypeError, ValueError) as e:
                raise ValueError("cannot assemble the datetimes [{value}]: "
                                 "{error}".format(value=label, error=e))
    return values
def _attempt_YYYYMMDD(arg, errors):
    """ try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
    arg is a passed in as an object dtype, but could really be ints/strings
    with nan-like/or floats (e.g. with nan)

    Three strategies are tried in order (int-like, float-with-nan,
    string-with-NaT-like); the first that succeeds wins.

    Parameters
    ----------
    arg : passed value
    errors : 'raise','ignore','coerce'

    Returns
    -------
    parsed datetime64 result, or None if no strategy applied
    """
    def calc(carg):
        # calculate the actual result
        carg = carg.astype(object)
        parsed = parsing.try_parse_year_month_day(carg / 10000,
                                                  carg / 100 % 100,
                                                  carg % 100)
        return tslib.array_to_datetime(parsed, errors=errors)

    def calc_with_mask(carg, mask):
        # parse only the unmasked entries; masked ones become NaT
        result = np.empty(carg.shape, dtype='M8[ns]')
        iresult = result.view('i8')
        iresult[~mask] = tslibs.iNaT
        converted = calc(carg[mask].astype(np.float64).astype(np.int64))
        result[mask] = converted.astype('M8[ns]')
        return result

    # try intlike / strings that are ints
    # NOTE: the original bare ``except:`` clauses also swallowed
    # SystemExit/KeyboardInterrupt and hid real bugs; only the expected
    # conversion failures should fall through to the next strategy.
    try:
        return calc(arg.astype(np.int64))
    except (ValueError, OverflowError, TypeError):
        pass

    # a float with actual np.nan
    try:
        carg = arg.astype(np.float64)
        return calc_with_mask(carg, notna(carg))
    except (ValueError, OverflowError, TypeError):
        pass

    # string with NaN-like
    try:
        mask = ~algorithms.isin(arg, list(tslib.nat_strings))
        return calc_with_mask(arg, mask)
    except (ValueError, OverflowError, TypeError):
        pass

    return None
# Fixed time formats for time parsing
# 24-hour variants first, then 12-hour (AM/PM) variants, each with and
# without ':' separators; order matters for _guess_time_format_for_array.
_time_formats = ["%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
                 "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"]
def _guess_time_format_for_array(arr):
    """Return the first fixed time format that parses the first non-null
    element of ``arr``, or None when nothing matches (or all-null)."""
    valid_positions = notna(arr).nonzero()[0]
    if not len(valid_positions):
        return None
    sample = arr[valid_positions[0]]
    for fmt in _time_formats:
        try:
            datetime.strptime(sample, fmt)
        except ValueError:
            continue
        return fmt
    return None
def to_time(arg, format=None, infer_time_format=False, errors='raise'):
    """
    Parse time strings to time objects using fixed strptime formats ("%H:%M",
    "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
    "%I%M%S%p")
    Use infer_time_format if all the strings are in the same format to speed
    up conversion.
    Parameters
    ----------
    arg : string in time format, datetime.time, list, tuple, 1-d array, Series
    format : str, default None
        Format used to convert arg into a time object. If None, fixed formats
        are used.
    infer_time_format: bool, default False
        Infer the time format based on the first non-NaN element. If all
        strings are in the same format, this will speed up conversion.
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as None
        - If 'ignore', then invalid parsing will return the input
    Returns
    -------
    datetime.time
    """
    from pandas.core.series import Series
    # inner worker: convert a 1-d listlike of values to datetime.time objects
    def _convert_listlike(arg, format):
        if isinstance(arg, (list, tuple)):
            arg = np.array(arg, dtype='O')
        elif getattr(arg, 'ndim', 1) > 1:
            raise TypeError('arg must be a string, datetime, list, tuple, '
                            '1-d array, or Series')
        arg = ensure_object(arg)
        if infer_time_format and format is None:
            # sniff a format from the first non-null element; may stay None
            format = _guess_time_format_for_array(arg)
        times = []
        if format is not None:
            # a single known format: every element must parse with it
            for element in arg:
                try:
                    times.append(datetime.strptime(element, format).time())
                except (ValueError, TypeError):
                    if errors == 'raise':
                        msg = ("Cannot convert {element} to a time with given "
                               "format {format}").format(element=element,
                                                         format=format)
                        raise ValueError(msg)
                    elif errors == 'ignore':
                        return arg
                    else:
                        # errors == 'coerce'
                        times.append(None)
        else:
            # no format known: probe the fixed formats; the first format
            # that matches is promoted to the front so subsequent elements
            # try the likely winner first
            formats = _time_formats[:]
            format_found = False
            for element in arg:
                time_object = None
                for time_format in formats:
                    try:
                        time_object = datetime.strptime(element,
                                                        time_format).time()
                        if not format_found:
                            # Put the found format in front
                            fmt = formats.pop(formats.index(time_format))
                            formats.insert(0, fmt)
                            format_found = True
                        break
                    except (ValueError, TypeError):
                        continue
                if time_object is not None:
                    times.append(time_object)
                elif errors == 'raise':
                    raise ValueError("Cannot convert arg {arg} to "
                                     "a time".format(arg=arg))
                elif errors == 'ignore':
                    return arg
                else:
                    # errors == 'coerce'
                    times.append(None)
        return times
    # dispatch on the input type, mirroring to_datetime's structure
    if arg is None:
        return arg
    elif isinstance(arg, time):
        # already a time object: pass through unchanged
        return arg
    elif isinstance(arg, Series):
        values = _convert_listlike(arg._values, format)
        return Series(values, index=arg.index, name=arg.name)
    elif isinstance(arg, ABCIndexClass):
        return _convert_listlike(arg, format)
    elif is_list_like(arg):
        return _convert_listlike(arg, format)
    # scalar: convert via a length-1 array and unbox
    return _convert_listlike(np.array([arg]), format)[0]
def format(dt):
    """Format *dt* as a YYYYMMDD string."""
    return "{:%Y%m%d}".format(dt)
OLE_TIME_ZERO = datetime(1899, 12, 30, 0, 0, 0)


def ole2datetime(oledt):
    """Convert an Excel (OLE automation) serial date to a datetime."""
    days = float(oledt)
    # Excel has a bug where it thinks the date 2/29/1900 exists
    # we just reject any date before 3/1/1900.
    if days < 61:
        raise ValueError("Value is outside of acceptable range: {value}"
                         .format(value=days))
    return OLE_TIME_ZERO + timedelta(days=days)
| {
"repo_name": "kdebrab/pandas",
"path": "pandas/core/tools/datetimes.py",
"copies": "1",
"size": "30058",
"license": "bsd-3-clause",
"hash": -3882714463341317000,
"line_mean": 33.43069874,
"line_max": 79,
"alpha_frac": 0.5700312729,
"autogenerated": false,
"ratio": 4.228756330894766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5298787603794766,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from datetime import datetime, time
from collections import MutableMapping
import numpy as np
from pandas._libs import tslib, tslibs
from pandas._libs.tslibs.strptime import array_strptime
from pandas._libs.tslibs import parsing, conversion, Timestamp
from pandas._libs.tslibs.parsing import ( # noqa
parse_time_string,
DateParseError,
_format_is_iso,
_guess_datetime_format)
from pandas.core.dtypes.common import (
ensure_object,
is_datetime64_ns_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_integer,
is_float,
is_list_like,
is_scalar,
is_numeric_dtype,
is_object_dtype)
from pandas.core.dtypes.generic import (
ABCIndexClass, ABCSeries,
ABCDataFrame)
from pandas.core.dtypes.missing import notna
from pandas.core import algorithms
from pandas.compat import zip
def _guess_datetime_format_for_array(arr, **kwargs):
    """Guess a datetime format from the first non-null element of ``arr``.

    Returns None when every element is null or no format can be guessed.
    """
    valid_positions = notna(arr).nonzero()[0]
    if not len(valid_positions):
        return None
    return _guess_datetime_format(arr[valid_positions[0]], **kwargs)
def _maybe_cache(arg, format, cache, convert_listlike):
    """
    Build a Series mapping unique input dates to their converted values.

    Parameters
    ----------
    arg : integer, float, string, datetime, list, tuple, 1-d array, Series
    format : string
        Strftime format to parse time
    cache : boolean
        True attempts to create a cache of converted values
    convert_listlike : function
        Conversion function to apply on dates

    Returns
    -------
    cache_array : Series
        Cache of converted, unique dates.  Empty when caching is disabled
        or when ``arg`` contains no duplicates.
    """
    from pandas import Index, Series
    cache_array = Series()
    if not cache:
        return cache_array
    # caching only pays off when there are repeated values
    if Index(arg).is_unique:
        return cache_array
    unique_dates = algorithms.unique(arg)
    converted = convert_listlike(unique_dates, True, format)
    return Series(converted, index=unique_dates)
def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
    """
    Map ``arg`` through a cache of converted dates and box the result.

    Parameters
    ----------
    arg : integer, float, string, datetime, list, tuple, 1-d array, Series
    cache_array : Series
        Cache of converted, unique dates
    box : boolean
        True boxes result as an Index-like, False returns an ndarray
    errors : string
        'ignore' plus box=True will convert result to Index
    name : string, default None
        Name for a DatetimeIndex

    Returns
    -------
    result : datetime of converted dates
        - Index-like if box=True
        - ndarray if box=False
    """
    from pandas import Series, DatetimeIndex, Index
    mapped = Series(arg).map(cache_array)
    if not box:
        return mapped.values
    if errors == 'ignore':
        return Index(mapped, name=name)
    return DatetimeIndex(mapped, name=name)
def _return_parsed_timezone_results(result, timezones, box, tz, name):
    """
    Box the output of array_strptime when a %z or %Z directive was present.

    Parameters
    ----------
    result : ndarray
        int64 date representations of the dates
    timezones : ndarray
        pytz timezone objects
    box : boolean
        True boxes result as an Index-like, False returns an ndarray
    tz : object
        None or pytz timezone object
    name : string, default None
        Name for a DatetimeIndex

    Returns
    -------
    tz_result : ndarray of parsed dates with timezone
        - Index-like if box=True
        - ndarray of Timestamps if box=False

    Raises
    ------
    ValueError
        If ``tz`` was passed while the strings carry their own zones.
    """
    if tz is not None:
        raise ValueError("Cannot pass a tz argument when "
                         "parsing strings with timezone "
                         "information.")
    localized = [Timestamp(value).tz_localize(zone)
                 for value, zone in zip(result, timezones)]
    tz_results = np.array(localized)
    if not box:
        return tz_results
    from pandas import Index
    return Index(tz_results, name=name)
def _convert_listlike_datetimes(arg, box, format, name=None, tz=None,
                                unit=None, errors=None,
                                infer_datetime_format=None, dayfirst=None,
                                yearfirst=None, exact=None):
    """
    Helper function for to_datetime. Performs the conversions of 1D listlike
    of dates
    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        date to be parced
    box : boolean
        True boxes result as an Index-like, False returns an ndarray
    name : object
        None or string for the Index name
    tz : object
        None or 'utc'
    unit : string
        None or string of the frequency of the passed data
    errors : string
        error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore'
    infer_datetime_format : boolean
        inferring format behavior from to_datetime
    dayfirst : boolean
        dayfirst parsing behavior from to_datetime
    yearfirst : boolean
        yearfirst parsing behavior from to_datetime
    exact : boolean
        exact format matching behavior from to_datetime
    Returns
    -------
    ndarray of parsed dates
        Returns:
        - Index-like if box=True
        - ndarray of Timestamps if box=False
    """
    from pandas import DatetimeIndex
    if isinstance(arg, (list, tuple)):
        arg = np.array(arg, dtype='O')
    # these are shortcutable
    if is_datetime64tz_dtype(arg):
        # already tz-aware datetime64: only (re)boxing / tz conversion needed
        if not isinstance(arg, DatetimeIndex):
            return DatetimeIndex(arg, tz=tz, name=name)
        if tz == 'utc':
            arg = arg.tz_convert(None).tz_localize(tz)
        return arg
    elif is_datetime64_ns_dtype(arg):
        # already datetime64[ns]: at most boxing into an Index is required
        if box and not isinstance(arg, DatetimeIndex):
            try:
                return DatetimeIndex(arg, tz=tz, name=name)
            except ValueError:
                pass
        return arg
    elif unit is not None:
        # numeric epoch values interpreted via the given unit
        if format is not None:
            raise ValueError("cannot specify both format and unit")
        arg = getattr(arg, 'values', arg)
        result = tslib.array_with_unit_to_datetime(arg, unit,
                                                   errors=errors)
        if box:
            if errors == 'ignore':
                from pandas import Index
                return Index(result, name=name)
            return DatetimeIndex(result, tz=tz, name=name)
        return result
    elif getattr(arg, 'ndim', 1) > 1:
        raise TypeError('arg must be a string, datetime, list, tuple, '
                        '1-d array, or Series')
    arg = ensure_object(arg)
    require_iso8601 = False
    if infer_datetime_format and format is None:
        format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
    if format is not None:
        # There is a special fast-path for iso8601 formatted
        # datetime strings, so in those cases don't use the inferred
        # format because this path makes process slower in this
        # special case
        format_is_iso8601 = _format_is_iso(format)
        if format_is_iso8601:
            require_iso8601 = not infer_datetime_format
            format = None
    try:
        result = None
        if format is not None:
            # shortcut formatting here
            if format == '%Y%m%d':
                try:
                    result = _attempt_YYYYMMDD(arg, errors=errors)
                except (ValueError, TypeError, tslibs.OutOfBoundsDatetime):
                    raise ValueError("cannot convert the input to "
                                     "'%Y%m%d' date format")
            # fallback
            if result is None:
                try:
                    result, timezones = array_strptime(
                        arg, format, exact=exact, errors=errors)
                    if '%Z' in format or '%z' in format:
                        return _return_parsed_timezone_results(
                            result, timezones, box, tz, name)
                except tslibs.OutOfBoundsDatetime:
                    if errors == 'raise':
                        raise
                    result = arg
                except ValueError:
                    # if format was inferred, try falling back
                    # to array_to_datetime - terminate here
                    # for specified formats
                    if not infer_datetime_format:
                        if errors == 'raise':
                            raise
                        result = arg
        if result is None and (format is None or infer_datetime_format):
            # generic string parsing path
            result, tz_parsed = tslib.array_to_datetime(
                arg,
                errors=errors,
                utc=tz == 'utc',
                dayfirst=dayfirst,
                yearfirst=yearfirst,
                require_iso8601=require_iso8601
            )
            if tz_parsed is not None:
                if box:
                    # We can take a shortcut since the datetime64 numpy array
                    # is in UTC
                    return DatetimeIndex._simple_new(result, name=name,
                                                     tz=tz_parsed)
                else:
                    # Convert the datetime64 numpy array to an numpy array
                    # of datetime objects
                    result = [Timestamp(ts, tz=tz_parsed).to_pydatetime()
                              for ts in result]
                    return np.array(result, dtype=object)
        if box:
            # Ensure we return an Index in all cases where box=True
            if is_datetime64_dtype(result):
                return DatetimeIndex(result, tz=tz, name=name)
            elif is_object_dtype(result):
                # e.g. an Index of datetime objects
                from pandas import Index
                return Index(result, name=name)
        return result
    except ValueError as e:
        # last resort: a mix of datetime objects may still be convertible
        try:
            values, tz = conversion.datetime_to_datetime64(arg)
            return DatetimeIndex._simple_new(values, name=name, tz=tz)
        except (ValueError, TypeError):
            raise e
def _adjust_to_origin(arg, origin, unit):
    """
    Helper function for to_datetime.
    Adjust input argument to the specified origin
    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        date to be adjusted
    origin : 'julian' or Timestamp
        origin offset for the arg
    unit : string
        passed unit from to_datetime, must be 'D'
    Returns
    -------
    ndarray or scalar of adjusted date(s)
    Raises
    ------
    ValueError
        for incompatible arg/unit/origin combinations
    tslibs.OutOfBoundsDatetime
        if the shifted values cannot be represented
    """
    if origin == 'julian':
        original = arg
        j0 = Timestamp(0).to_julian_date()
        if unit != 'D':
            raise ValueError("unit must be 'D' for origin='julian'")
        # shift from julian day number to days since the unix epoch
        try:
            arg = arg - j0
        except TypeError:
            raise ValueError("incompatible 'arg' type for given "
                             "'origin'='julian'")
        # premptively check this for a nice range
        j_max = Timestamp.max.to_julian_date() - j0
        j_min = Timestamp.min.to_julian_date() - j0
        if np.any(arg > j_max) or np.any(arg < j_min):
            raise tslibs.OutOfBoundsDatetime(
                "{original} is Out of Bounds for "
                "origin='julian'".format(original=original))
    else:
        # arg must be numeric
        if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or
                is_numeric_dtype(np.asarray(arg))):
            raise ValueError(
                "'{arg}' is not compatible with origin='{origin}'; "
                "it must be numeric with a unit specified ".format(
                    arg=arg,
                    origin=origin))
        # we are going to offset back to unix / epoch time
        try:
            offset = Timestamp(origin)
        except tslibs.OutOfBoundsDatetime:
            raise tslibs.OutOfBoundsDatetime(
                "origin {origin} is Out of Bounds".format(origin=origin))
        except ValueError:
            raise ValueError("origin {origin} cannot be converted "
                             "to a Timestamp".format(origin=origin))
        if offset.tz is not None:
            raise ValueError(
                "origin offset {} must be tz-naive".format(offset))
        offset -= Timestamp(0)
        # convert the offset to the unit of the arg
        # this should be lossless in terms of precision
        offset = offset // tslibs.Timedelta(1, unit=unit)
        # scalars & ndarray-like can handle the addition
        if is_list_like(arg) and not isinstance(
                arg, (ABCSeries, ABCIndexClass, np.ndarray)):
            arg = np.asarray(arg)
        arg = arg + offset
    return arg
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
                utc=None, box=True, format=None, exact=True,
                unit=None, infer_datetime_format=False, origin='unix',
                cache=False):
    """
    Convert argument to datetime.
    Parameters
    ----------
    arg : integer, float, string, datetime, list, tuple, 1-d array, Series
        .. versionadded:: 0.18.1
           or DataFrame/dict-like
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaT
        - If 'ignore', then invalid parsing will return the input
    dayfirst : boolean, default False
        Specify a date parse order if `arg` is str or its list-likes.
        If True, parses dates with the day first, eg 10/11/12 is parsed as
        2012-11-10.
        Warning: dayfirst=True is not strict, but will prefer to parse
        with day first (this is a known bug, based on dateutil behavior).
    yearfirst : boolean, default False
        Specify a date parse order if `arg` is str or its list-likes.
        - If True parses dates with the year first, eg 10/11/12 is parsed as
          2010-11-12.
        - If both dayfirst and yearfirst are True, yearfirst is preceded (same
          as dateutil).
        Warning: yearfirst=True is not strict, but will prefer to parse
        with year first (this is a known bug, based on dateutil behavior).
        .. versionadded:: 0.16.1
    utc : boolean, default None
        Return UTC DatetimeIndex if True (converting any tz-aware
        datetime.datetime objects as well).
    box : boolean, default True
        - If True returns a DatetimeIndex or Index-like object
        - If False returns ndarray of values.
    format : string, default None
        strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
        all the way up to nanoseconds.
    exact : boolean, True by default
        - If True, require an exact format match.
        - If False, allow the format to match anywhere in the target string.
    unit : string, default 'ns'
        unit of the arg (D,s,ms,us,ns) denote the unit, which is an
        integer or float number. This will be based off the origin.
        Example, with unit='ms' and origin='unix' (the default), this
        would calculate the number of milliseconds to the unix epoch start.
    infer_datetime_format : boolean, default False
        If True and no `format` is given, attempt to infer the format of the
        datetime strings, and if it can be inferred, switch to a faster
        method of parsing them. In some cases this can increase the parsing
        speed by ~5-10x.
    origin : scalar, default is 'unix'
        Define the reference date. The numeric values would be parsed as number
        of units (defined by `unit`) since this reference date.
        - If 'unix' (or POSIX) time; origin is set to 1970-01-01.
        - If 'julian', unit must be 'D', and origin is set to beginning of
          Julian Calendar. Julian day number 0 is assigned to the day starting
          at noon on January 1, 4713 BC.
        - If Timestamp convertible, origin is set to Timestamp identified by
          origin.
        .. versionadded:: 0.20.0
    cache : boolean, default False
        If True, use a cache of unique, converted dates to apply the datetime
        conversion. May produce significant speed-up when parsing duplicate
        date strings, especially ones with timezone offsets.
        .. versionadded:: 0.23.0
    Returns
    -------
    ret : datetime if parsing succeeded.
        Return type depends on input:
        - list-like: DatetimeIndex
        - Series: Series of datetime64 dtype
        - scalar: Timestamp
        In case when it is not possible to return designated types (e.g. when
        any element of input is before Timestamp.min or after Timestamp.max)
        return will have datetime.datetime type (or corresponding
        array/Series).
    Examples
    --------
    Assembling a datetime from multiple columns of a DataFrame. The keys can be
    common abbreviations like ['year', 'month', 'day', 'minute', 'second',
    'ms', 'us', 'ns']) or plurals of the same
    >>> df = pd.DataFrame({'year': [2015, 2016],
                           'month': [2, 3],
                           'day': [4, 5]})
    >>> pd.to_datetime(df)
    0   2015-02-04
    1   2016-03-05
    dtype: datetime64[ns]
    If a date does not meet the `timestamp limitations
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html
    #timeseries-timestamp-limits>`_, passing errors='ignore'
    will return the original input instead of raising any exception.
    Passing errors='coerce' will force an out-of-bounds date to NaT,
    in addition to forcing non-dates (or non-parseable dates) to NaT.
    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
    datetime.datetime(1300, 1, 1, 0, 0)
    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
    NaT
    Passing infer_datetime_format=True can often-times speedup a parsing
    if its not an ISO8601 format exactly, but in a regular format.
    >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000)
    >>> s.head()
    0    3/11/2000
    1    3/12/2000
    2    3/13/2000
    3    3/11/2000
    4    3/12/2000
    dtype: object
    >>> %timeit pd.to_datetime(s,infer_datetime_format=True)
    100 loops, best of 3: 10.4 ms per loop
    >>> %timeit pd.to_datetime(s,infer_datetime_format=False)
    1 loop, best of 3: 471 ms per loop
    Using a unix epoch time
    >>> pd.to_datetime(1490195805, unit='s')
    Timestamp('2017-03-22 15:16:45')
    >>> pd.to_datetime(1490195805433502912, unit='ns')
    Timestamp('2017-03-22 15:16:45.433502912')
    .. warning:: For float arg, precision rounding might happen. To prevent
        unexpected behavior use a fixed-width exact type.
    Using a non-unix epoch origin
    >>> pd.to_datetime([1, 2, 3], unit='D',
                       origin=pd.Timestamp('1960-01-01'))
    0    1960-01-02
    1    1960-01-03
    2    1960-01-04
    See also
    --------
    pandas.DataFrame.astype : Cast argument to a specified dtype.
    pandas.to_timedelta : Convert argument to timedelta.
    """
    if arg is None:
        return None
    if origin != 'unix':
        # shift numeric inputs so they are relative to the requested origin
        arg = _adjust_to_origin(arg, origin, unit)
    tz = 'utc' if utc else None
    # partial with all the per-call options bound; the positional args
    # (arg, box, format) are supplied per input type below
    convert_listlike = partial(_convert_listlike_datetimes, tz=tz, unit=unit,
                               dayfirst=dayfirst, yearfirst=yearfirst,
                               errors=errors, exact=exact,
                               infer_datetime_format=infer_datetime_format)
    if isinstance(arg, Timestamp):
        # already parsed; nothing to do
        result = arg
    elif isinstance(arg, ABCSeries):
        # convert (possibly via the unique-value cache), rewrap as Series
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = arg.map(cache_array)
        else:
            from pandas import Series
            values = convert_listlike(arg._values, True, format)
            result = Series(values, index=arg.index, name=arg.name)
    elif isinstance(arg, (ABCDataFrame, MutableMapping)):
        # assemble one datetime per row from unit columns (year/month/day...)
        result = _assemble_from_unit_mappings(arg, errors=errors)
    elif isinstance(arg, ABCIndexClass):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = _convert_and_box_cache(arg, cache_array, box, errors,
                                            name=arg.name)
        else:
            convert_listlike = partial(convert_listlike, name=arg.name)
            result = convert_listlike(arg, box, format)
    elif is_list_like(arg):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = _convert_and_box_cache(arg, cache_array, box, errors)
        else:
            result = convert_listlike(arg, box, format)
    else:
        # scalar: convert via a length-1 array and unbox
        result = convert_listlike(np.array([arg]), box, format)[0]
    return result
# mappings for assembling units
# Canonical unit codes keyed by every accepted spelling (singular,
# plural, abbreviation) of each datetime component; consumed by
# _assemble_from_unit_mappings when building datetimes from the
# columns of a DataFrame / dict-like.
_unit_map = {'year': 'year',
             'years': 'year',
             'month': 'month',
             'months': 'month',
             'day': 'day',
             'days': 'day',
             'hour': 'h',
             'hours': 'h',
             # NOTE: bare 'm'/'M' is case significant in the consumer;
             # only the spelled-out minute forms are listed here
             'minute': 'm',
             'minutes': 'm',
             'second': 's',
             'seconds': 's',
             'ms': 'ms',
             'millisecond': 'ms',
             'milliseconds': 'ms',
             'us': 'us',
             'microsecond': 'us',
             'microseconds': 'us',
             'ns': 'ns',
             'nanosecond': 'ns',
             'nanoseconds': 'ns'
             }
def _assemble_from_unit_mappings(arg, errors):
    """
    Build a datetime Series from the unit columns of ``arg``.

    The year/month/day columns are mandatory; hour through nanosecond
    columns are optional and are added on as timedeltas.

    Parameters
    ----------
    arg : DataFrame or dict-like of columns
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        Forwarded to ``to_numeric`` / ``to_datetime`` / ``to_timedelta``
        for each component.

    Returns
    -------
    Series
    """
    from pandas import to_timedelta, to_numeric, DataFrame
    arg = DataFrame(arg)
    if not arg.columns.is_unique:
        raise ValueError("cannot assemble with duplicate keys")

    def _canonicalize(label):
        # translate a column label to its canonical unit code
        if label in _unit_map:
            return _unit_map[label]
        # m is case significant
        if label.lower() in _unit_map:
            return _unit_map[label.lower()]
        return label

    unit = {label: _canonicalize(label) for label in arg.keys()}
    unit_rev = {canon: label for label, canon in unit.items()}

    # year/month/day are the minimum required components
    missing = sorted(list(set(['year', 'month', 'day']) -
                          set(unit_rev.keys())))
    if len(missing):
        raise ValueError("to assemble mappings requires at least that "
                         "[year, month, day] be specified: [{required}] "
                         "is missing".format(required=','.join(missing)))

    # anything we could not map onto a unit code is an error
    unknown = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))
    if len(unknown):
        raise ValueError("extra keys have been passed "
                         "to the datetime assemblage: "
                         "[{excess}]".format(excess=','.join(unknown)))

    def _coerce(values):
        # we allow coercion to if errors allows
        values = to_numeric(values, errors=errors)
        # prevent overflow in case of int8 or int16
        if is_integer_dtype(values):
            values = values.astype('int64', copy=False)
        return values

    values = (_coerce(arg[unit_rev['year']]) * 10000 +
              _coerce(arg[unit_rev['month']]) * 100 +
              _coerce(arg[unit_rev['day']]))
    try:
        values = to_datetime(values, format='%Y%m%d', errors=errors)
    except (TypeError, ValueError) as e:
        raise ValueError("cannot assemble the "
                         "datetimes: {error}".format(error=e))

    for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
        label = unit_rev.get(u)
        if label is not None and label in arg:
            try:
                values += to_timedelta(_coerce(arg[label]), unit=u,
                                       errors=errors)
            except (TypeError, ValueError) as e:
                raise ValueError("cannot assemble the datetimes [{value}]: "
                                 "{error}".format(value=label, error=e))
    return values
def _attempt_YYYYMMDD(arg, errors):
    """ try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
    arg is a passed in as an object dtype, but could really be ints/strings
    with nan-like/or floats (e.g. with nan)

    Three strategies are tried in order (int-like, float-with-nan,
    string-with-NaT-like); the first that succeeds wins.

    Parameters
    ----------
    arg : passed value
    errors : 'raise','ignore','coerce'

    Returns
    -------
    parsed datetime64 ndarray, or None if no strategy applied
    """
    def calc(carg):
        # calculate the actual result
        carg = carg.astype(object)
        parsed = parsing.try_parse_year_month_day(carg / 10000,
                                                  carg / 100 % 100,
                                                  carg % 100)
        return tslib.array_to_datetime(parsed, errors=errors)[0]

    def calc_with_mask(carg, mask):
        # parse only the unmasked entries; masked ones become NaT
        result = np.empty(carg.shape, dtype='M8[ns]')
        iresult = result.view('i8')
        iresult[~mask] = tslibs.iNaT
        masked_result = calc(carg[mask].astype(np.float64).astype(np.int64))
        result[mask] = masked_result.astype('M8[ns]')
        return result

    # try intlike / strings that are ints
    # NOTE: catching only ValueError here skipped the fallback strategies
    # whenever astype raised TypeError (e.g. None elements) or
    # OverflowError; all three should fall through to the next strategy.
    try:
        return calc(arg.astype(np.int64))
    except (ValueError, OverflowError, TypeError):
        pass

    # a float with actual np.nan
    try:
        carg = arg.astype(np.float64)
        return calc_with_mask(carg, notna(carg))
    except (ValueError, OverflowError, TypeError):
        pass

    # string with NaN-like
    try:
        mask = ~algorithms.isin(arg, list(tslib.nat_strings))
        return calc_with_mask(arg, mask)
    except (ValueError, OverflowError, TypeError):
        pass

    return None
# Fixed time formats for time parsing
# 24-hour variants first, then 12-hour (AM/PM) variants, each with and
# without ':' separators; order matters for _guess_time_format_for_array.
_time_formats = ["%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
                 "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"]
def _guess_time_format_for_array(arr):
    """Return the first fixed time format that parses the first non-null
    element of ``arr``, or None when nothing matches (or all-null)."""
    valid_positions = notna(arr).nonzero()[0]
    if not len(valid_positions):
        return None
    sample = arr[valid_positions[0]]
    for fmt in _time_formats:
        try:
            datetime.strptime(sample, fmt)
        except ValueError:
            continue
        return fmt
    return None
def to_time(arg, format=None, infer_time_format=False, errors='raise'):
    """
    Parse time strings to time objects using fixed strptime formats ("%H:%M",
    "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
    "%I%M%S%p")
    Use infer_time_format if all the strings are in the same format to speed
    up conversion.
    Parameters
    ----------
    arg : string in time format, datetime.time, list, tuple, 1-d array, Series
    format : str, default None
        Format used to convert arg into a time object. If None, fixed formats
        are used.
    infer_time_format: bool, default False
        Infer the time format based on the first non-NaN element. If all
        strings are in the same format, this will speed up conversion.
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as None
        - If 'ignore', then invalid parsing will return the input
    Returns
    -------
    datetime.time
    """
    # local import to avoid a circular dependency at module import time
    from pandas.core.series import Series
    def _convert_listlike(arg, format):
        if isinstance(arg, (list, tuple)):
            arg = np.array(arg, dtype='O')
        elif getattr(arg, 'ndim', 1) > 1:
            raise TypeError('arg must be a string, datetime, list, tuple, '
                            '1-d array, or Series')
        arg = ensure_object(arg)
        if infer_time_format and format is None:
            format = _guess_time_format_for_array(arg)
        times = []
        if format is not None:
            # a given/inferred format: every element must parse with it
            for element in arg:
                try:
                    times.append(datetime.strptime(element, format).time())
                except (ValueError, TypeError):
                    if errors == 'raise':
                        msg = ("Cannot convert {element} to a time with given "
                               "format {format}").format(element=element,
                                                         format=format)
                        raise ValueError(msg)
                    elif errors == 'ignore':
                        return arg
                    else:
                        times.append(None)
        else:
            # no format: probe each fixed format; once one matches, move
            # it to the front so homogeneous input hits it immediately
            formats = _time_formats[:]
            format_found = False
            for element in arg:
                time_object = None
                for time_format in formats:
                    try:
                        time_object = datetime.strptime(element,
                                                        time_format).time()
                        if not format_found:
                            # Put the found format in front
                            fmt = formats.pop(formats.index(time_format))
                            formats.insert(0, fmt)
                            format_found = True
                        break
                    except (ValueError, TypeError):
                        continue
                if time_object is not None:
                    times.append(time_object)
                elif errors == 'raise':
                    raise ValueError("Cannot convert arg {arg} to "
                                     "a time".format(arg=arg))
                elif errors == 'ignore':
                    return arg
                else:
                    times.append(None)
        return times
    if arg is None:
        return arg
    elif isinstance(arg, time):
        return arg
    elif isinstance(arg, Series):
        values = _convert_listlike(arg._values, format)
        return Series(values, index=arg.index, name=arg.name)
    elif isinstance(arg, ABCIndexClass):
        return _convert_listlike(arg, format)
    elif is_list_like(arg):
        return _convert_listlike(arg, format)
    # scalar fall-through: wrap, convert, unwrap
    return _convert_listlike(np.array([arg]), format)[0]
| {
"repo_name": "cython-testbed/pandas",
"path": "pandas/core/tools/datetimes.py",
"copies": "4",
"size": "30680",
"license": "bsd-3-clause",
"hash": 4453117511039858700,
"line_mean": 34.0628571429,
"line_max": 79,
"alpha_frac": 0.5672425033,
"autogenerated": false,
"ratio": 4.278343327290475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6845585830590475,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from datetime import timedelta
from tornado import gen
from tornadoredis import Client
from tornadoredis.exceptions import ResponseError
from tornadoredis.pubsub import BaseSubscriber
class CelerySubscriber(BaseSubscriber):
    def unsubscribe_channel(self, channel_name):
        """Drop all local callbacks for ``channel_name`` and tell the
        redis client to stop listening on that channel."""
        self.subscribers.pop(channel_name)
        self.subscriber_count.pop(channel_name)
        self.redis.unsubscribe(channel_name)
    def on_message(self, msg):
        """Dispatch a published message body to every callback registered
        for its channel, then delegate to the base implementation."""
        if not msg:
            return
        if msg.kind.decode('utf-8') == 'message' and msg.body:
            channel = msg.channel.decode('utf-8')
            # invoke every callback registered for this channel
            for callback in self.subscribers[channel].keys():
                callback(msg.body)
        super(CelerySubscriber, self).on_message(msg)
class RedisClient(Client):
    """tornadoredis Client that keeps bulk replies as raw bytes instead
    of decoding them to unicode, so non-utf8 payloads survive intact."""
    @gen.engine
    def _consume_bulk(self, tail, callback=None):
        # read the announced payload length plus the trailing CRLF
        response = yield gen.Task(self.connection.read, int(tail) + 2)
        if isinstance(response, Exception):
            raise response
        if not response:
            raise ResponseError('EmptyResponse')
        else:
            # We don't try to convert to unicode here as the response
            # may not be utf-8 encoded, for example if using msgpack as a
            # serializer
            # response = to_unicode(response)
            response = response[:-2]  # strip the protocol's trailing CRLF
        callback(response)
class RedisConsumer(object):
    """Subscribes to the Celery Redis result backend and relays each
    task's published result to a per-task callback."""
    def __init__(self, producer):
        self.producer = producer
        backend = producer.app.backend
        # mirror the backend's connection settings onto tornado's IOLoop
        self.client = RedisClient(host=backend.connparams['host'],
                                  port=backend.connparams['port'],
                                  password=backend.connparams['password'],
                                  selected_db=backend.connparams['db'],
                                  io_loop=producer.conn_pool.io_loop)
        self.client.connect()
        self.subscriber = CelerySubscriber(self.client)
    def wait_for(self, task_id, callback, expires=None, persistent=None):
        # 'persistent' is accepted for interface compatibility but unused here
        # key under which the backend publishes this task's result
        key = self.producer.app.backend.get_key_for_task(task_id).decode('utf-8')
        if expires:
            # NOTE(review): 'expires' is interpreted as *microseconds*
            # here -- confirm the unit callers actually pass
            timeout = self.producer.conn_pool.io_loop.add_timeout(
                timedelta(microseconds=expires), self.on_timeout, key)
        else:
            timeout = None
        self.subscriber.subscribe(
            key, partial(self.on_result, key, callback, timeout))
    def on_result(self, key, callback, timeout, result):
        # result arrived: cancel the timeout (if any), stop listening,
        # and hand the raw result to the caller
        if timeout:
            self.producer.conn_pool.io_loop.remove_timeout(timeout)
        self.subscriber.unsubscribe_channel(key)
        callback(result)
    def on_timeout(self, key):
        # deadline passed without a result: just stop listening
        self.subscriber.unsubscribe_channel(key)
| {
"repo_name": "mher/tornado-celery",
"path": "tcelery/redis.py",
"copies": "1",
"size": "2812",
"license": "bsd-3-clause",
"hash": 5428786467046942000,
"line_mean": 37.5205479452,
"line_max": 83,
"alpha_frac": 0.6233997155,
"autogenerated": false,
"ratio": 4.299694189602446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5423093905102446,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from dateutil import rrule
from corehq.apps.locations.dbaccessors import get_one_user_at_location
from corehq.apps.locations.models import SQLLocation, Location
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.users.models import CommCareUser
from custom.ilsgateway.filters import ProgramFilter, ILSDateFilter
from custom.ilsgateway.models import OrganizationSummary, GroupSummary, SupplyPointStatusTypes, DeliveryGroups
from custom.ilsgateway.tanzania import ILSData, DetailsReport
from custom.ilsgateway.tanzania.reports.mixins import RandRSubmissionData
from custom.ilsgateway.tanzania.reports.utils import randr_value, get_span, \
rr_format_percent, link_format, make_url
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.reports.filters.fixtures import AsyncLocationFilter
from custom.ilsgateway.tanzania.reports.facility_details import FacilityDetailsReport, InventoryHistoryData, \
RegistrationData, RandRHistory, Notes, RecentMessages
from django.utils.translation import ugettext as _
class RRStatus(ILSData):
    """Table of R&R submission percentages (on time / late / not
    submitted / not responding) for each child of the selected location,
    plus the historical response rate."""
    show_table = True
    title = "R&R Status"
    slug = "rr_status"
    show_chart = False
    @property
    def rows(self):
        rows = []
        if not self.config['org_summary']:
            return rows
        locations = SQLLocation.objects.filter(parent__location_id=self.config['location_id'])
        for child in locations:
            # .filter() returns a (possibly empty) queryset and never raises
            # DoesNotExist, so the old try/except around this call was dead
            # code and its `return []` branch was unreachable.
            org_summary = OrganizationSummary.objects.filter(
                date__range=(self.config['startdate'], self.config['enddate']),
                location_id=child.location_id
            )
            # downstream providers read org_summary from the shared config
            self.config['org_summary'] = org_summary
            rr_data = RandRSubmissionData(config=self.config).rows[0]
            fp_partial = partial(rr_format_percent, denominator=rr_data.total)
            total_responses = 0
            total_possible = 0
            group_summaries = GroupSummary.objects.filter(
                org_summary__date__lte=self.config['startdate'],
                org_summary__location_id=child.location_id,
                title=SupplyPointStatusTypes.R_AND_R_FACILITY
            )
            for group_summary in group_summaries:
                if group_summary:
                    total_responses += group_summary.responded
                    total_possible += group_summary.total
            hist_resp_rate = rr_format_percent(total_responses, total_possible)
            url = make_url(RRreport, self.config['domain'],
                           '?location_id=%s&filter_by_program=%s&'
                           'datespan_type=%s&datespan_first=%s&datespan_second=%s',
                           (child.location_id,
                            self.config['program'], self.config['datespan_type'],
                            self.config['datespan_first'], self.config['datespan_second']))
            rows.append(
                [
                    link_format(child.name, url),
                    fp_partial(rr_data.on_time),
                    fp_partial(rr_data.late),
                    fp_partial(rr_data.not_submitted),
                    fp_partial(rr_data.not_responding),
                    hist_resp_rate
                ]
            )
        return rows
    @property
    def headers(self):
        return DataTablesHeader(
            DataTablesColumn(_('Name')),
            DataTablesColumn(_('% Facilities Submitting R&R On Time')),
            DataTablesColumn(_("% Facilities Submitting R&R Late")),
            DataTablesColumn(_("% Facilities With R&R Not Submitted")),
            DataTablesColumn(_("% Facilities Not Responding To R&R Reminder")),
            DataTablesColumn(_("Historical Response Rate"))
        )
class RRReportingHistory(ILSData):
    """Per-facility R&R reporting history for the facilities that were
    due to submit in the selected period, with contact details and the
    historical response rate."""
    show_table = True
    slug = "rr_reporting_history"
    show_chart = False
    def __init__(self, config=None, css_class='row_chart'):
        super(RRReportingHistory, self).__init__(config, css_class)
        self.config = config or {}
        self.css_class = css_class
        datespan_type = self.config.get('datespan_type')
        # datespan_type 1 means a single month; include the submitting
        # delivery group for that month in the title
        if datespan_type == 1:
            self.title = "R&R Reporting History (Group %s)" %\
                DeliveryGroups(int(self.config['datespan_first'])).current_submitting_group()
        else:
            self.title = "R&R Reporting History"
    @property
    def rows(self):
        rows = []
        locations = SQLLocation.objects.filter(parent__location_id=self.config['location_id'])
        # collect the facilities whose delivery group was due to submit
        # in any month of the selected date range
        dg = []
        for date in list(rrule.rrule(rrule.MONTHLY, dtstart=self.config['startdate'],
                                     until=self.config['enddate'])):
            dg.extend(DeliveryGroups().submitting(locations, date.month))
        for child in dg:
            total_responses = 0
            total_possible = 0
            submitted, rr_value = randr_value(child.location_id, self.config['startdate'], self.config['enddate'])
            # skip archived facilities that never reported in the period
            if child.is_archived and not rr_value:
                continue
            group_summaries = GroupSummary.objects.filter(
                org_summary__date__lte=self.config['startdate'],
                org_summary__location_id=child.location_id,
                title=SupplyPointStatusTypes.R_AND_R_FACILITY
            )
            for group_summary in group_summaries:
                if group_summary:
                    total_responses += group_summary.responded
                    total_possible += group_summary.total
            hist_resp_rate = rr_format_percent(total_responses, total_possible)
            url = make_url(FacilityDetailsReport, self.config['domain'],
                           '?location_id=%s&filter_by_program=%s&'
                           'datespan_type=%s&datespan_first=%s&datespan_second=%s',
                           (self.config['location_id'],
                            self.config['program'], self.config['datespan_type'],
                            self.config['datespan_first'], self.config['datespan_second']))
            contact = get_one_user_at_location(child.location_id)
            if contact:
                role = contact.user_data.get('role') or ""
                args = (contact.first_name, contact.last_name, role, contact.default_phone_number)
                contact_string = "%s %s (%s) %s" % args
            else:
                contact_string = ""
            rows.append(
                [
                    child.site_code,
                    link_format(child.name, url),
                    get_span(submitted) % (rr_value.strftime("%d %b %Y") if rr_value else "Not reported"),
                    contact_string,
                    hist_resp_rate
                ]
            )
        return rows
    @property
    def headers(self):
        return DataTablesHeader(
            DataTablesColumn(_('Code')),
            DataTablesColumn(_('Facility Name')),
            DataTablesColumn(_('R&R Status')),
            DataTablesColumn(_('Contact')),
            DataTablesColumn(_('Historical Response Rate'))
        )
class RRreport(DetailsReport):
    """R&R report; the data providers shown depend on the level of the
    selected location (region-like, facility, or district-like)."""
    slug = "rr_report"
    name = 'R & R'
    use_datatables = True
    @property
    def title(self):
        # NOTE(review): formatting inside _() translates the already
        # formatted string rather than a stable template -- confirm
        # this is intended for i18n
        title = _('R & R {0}'.format(self.title_month))
        if self.location and self.location.location_type.name.upper() == 'FACILITY':
            return "{0} ({1}) Group {2}".format(self.location.name,
                                                self.location.site_code,
                                                self.location.metadata.get('group', '---'))
        return title
    @property
    def fields(self):
        # facility-level views show no filters at all
        fields = [AsyncLocationFilter, ILSDateFilter, ProgramFilter]
        if self.location and self.location.location_type.name.upper() == 'FACILITY':
            fields = []
        return fields
    @property
    @memoized
    def data_providers(self):
        config = self.report_config
        data_providers = []
        if config['location_id']:
            data_providers = [RandRSubmissionData(config=config, css_class='row_chart_all')]
            location = Location.get(config['location_id'])
            if location.location_type in ['REGION', 'MSDZONE', 'MOHSW']:
                # region-level and above: aggregate status per child location
                data_providers.append(RRStatus(config=config, css_class='row_chart_all'))
            elif location.location_type == 'FACILITY':
                # facility: replace the providers entirely with detail views
                return [
                    InventoryHistoryData(config=config),
                    RandRHistory(config=config),
                    Notes(config=config),
                    RecentMessages(config=config),
                    RegistrationData(config=dict(loc_type='FACILITY', **config), css_class='row_chart_all'),
                    RegistrationData(config=dict(loc_type='DISTRICT', **config), css_class='row_chart_all'),
                    RegistrationData(config=dict(loc_type='REGION', **config), css_class='row_chart_all')
                ]
            else:
                # district-level: per-facility reporting history
                data_providers.append(RRReportingHistory(config=config, css_class='row_chart_all'))
        return data_providers
    @property
    def report_context(self):
        ret = super(RRreport, self).report_context
        ret['view_mode'] = 'ror'
        return ret
| {
"repo_name": "puttarajubr/commcare-hq",
"path": "custom/ilsgateway/tanzania/reports/randr.py",
"copies": "1",
"size": "9532",
"license": "bsd-3-clause",
"hash": -3221698666377305600,
"line_mean": 41.7443946188,
"line_max": 114,
"alpha_frac": 0.5675618968,
"autogenerated": false,
"ratio": 4.303386004514673,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5370947901314672,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from dateutil.parser import parse
from xml.etree import ElementTree as ET
from pyramid.decorator import reify
from intranet3.asyncfetchers.base import ( BaseFetcher, CSVParserMixin,
SimpleProtocol, BasicAuthMixin,
FetchException, Bug, cached_bug_fetcher )
from intranet3 import helpers as h
from intranet3.log import EXCEPTION_LOG, INFO_LOG
LOG = INFO_LOG(__name__)
EXCEPTION = EXCEPTION_LOG(__name__)
class BugzillaBug(Bug):
    """Bug subclass with Bugzilla-specific URL building and the blocked
    heuristic based on the whiteboard flag / unresolved dependencies."""
    def get_url(self, number=None):
        """Return the show_bug.cgi URL for ``number`` (this bug's id by
        default)."""
        bug_id = number or self.id
        return '%s/show_bug.cgi?id=%s' % (self.tracker.url, bug_id)
    def is_unassigned(self):
        """True when the bug is assigned to the placeholder 'nobody' user."""
        return bool(self.owner and self.owner.email == 'nobody@example.com')
    @reify
    def is_blocked(self):
        """Whether this bug is blocked: explicitly via the whiteboard
        'blocked' flag, or -- only when the flag is absent -- implicitly
        by any unresolved depends-on bug."""
        flag = self.whiteboard.get('blocked')
        if flag in h.positive_values:
            return True
        if flag is None:  # blocked param is not set
            unresolved = [data for data in self.dependson.values()
                          if data.get('resolved', True) is False]
            if unresolved:
                return True
        return False
    def get_status(self):
        return self.status
    def get_resolution(self):
        return self.resolution
# Maps raw Bugzilla CSV column names onto the generic Bug attribute
# names; callables post-process the raw row dict (defaulting missing
# keys, parsing dates).
bugzilla_converter = h.Converter(
    id='bug_id',
    desc='short_desc',
    reporter='reporter',
    owner='assigned_to',
    priority=lambda d: d.get('priority', ''),  # + '/' + d['priority'],
    severity=lambda d: d.get('bug_severity', ''),
    status=lambda d: d.get('bug_status', ''),  # + '/' + d['resolution'],
    resolution=lambda d: d.get('resolution', ''),
    project_name='product',
    component_name='component',
    deadline='deadline',
    opendate=lambda d: parse(d.get('opendate', '')),
    changeddate=lambda d: parse(d.get('changeddate', '')),
    whiteboard='status_whiteboard',
    version='version',
)
def _fetcher_function(resolved, single):
    """Build a cached BugzillaFetcher method that lists bugs,
    parameterized on resolved-vs-open status and single-user-vs-all-users
    scope. The cache key encodes both flags."""
    @cached_bug_fetcher(lambda: u'resolved-%s-single-%s' % (resolved, single))
    def fetcher(self):
        # status params depend on `resolved`; email params on `single`
        params = self.resolved_common_url_params() if resolved else self.common_url_params()
        params.update(self.single_user_params() if single else self.all_users_params())
        url = h.serialize_url(self.tracker.url + '/buglist.cgi?', **params)
        self.fetch(url)
    return fetcher
def _query_fetcher_function(**conditions):
    """Build a fetcher method that queries buglist.cgi as CSV with the
    given fixed conditions, restricted either to explicit ticket ids or
    to a project/component selection."""
    def fetcher(self, ticket_ids, project_selector, component_selector, version):
        params = {'ctype': 'csv'}
        params.update(conditions)
        if ticket_ids:
            # explicit ids win over project/component selectors
            params['bug_id'] = ','.join(str(ticket_id) for ticket_id in ticket_ids)
        else:
            if project_selector:
                params['product'] = project_selector
            if component_selector:
                params['component'] = component_selector
        url = h.serialize_url(self.tracker.url + '/buglist.cgi?', **params)
        self.fetch(url)
    return fetcher
class BugzillaFetcher(BasicAuthMixin, CSVParserMixin, BaseFetcher):
    """ Fetcher for Bugzilla bugs.

    Lists bugs through buglist.cgi (CSV) and resolves dependency /
    blocked information through show_bug.cgi (XML). All requests are
    asynchronous: `fetch*` methods start a request whose response is
    handled by the corresponding `parse*`/`received` callback.
    """
    redirect_support = True
    # CSV columns requested from buglist.cgi via the COLUMNLIST cookie.
    # NOTE(review): 'bug_severity' and 'product' appear twice in this
    # tuple -- harmless but probably unintended.
    COLUMNS = ('bug_severity', 'assigned_to', 'version',
               'bug_status', 'resolution', 'product', 'op_sys', 'short_desc',
               'reporter', 'opendate', 'changeddate', 'component', 'deadline',
               'bug_severity', 'product', 'priority', 'status_whiteboard')
    COLUMNS_COOKIE = 'COLUMNLIST=' + "%20".join(COLUMNS)
    bug_class = BugzillaBug
    get_converter = lambda self: bugzilla_converter
    def get_headers(self):
        # every request carries the COLUMNLIST cookie so the CSV has a
        # predictable set of columns
        headers = super(BugzillaFetcher, self).get_headers()
        headers['Cookie'] = [self.COLUMNS_COOKIE]
        return headers
    def fetch(self, url):
        """Start an async GET of `url`; `self.responded` handles it."""
        headers = self.get_headers()
        self.request(url, headers, self.responded)
    # --- URL parameter builders -------------------------------------
    def common_url_params(self):
        # open (not yet resolved) bugs, CSV output
        return dict(
            bug_status=['NEW', 'ASSIGNED', 'REOPENED', 'UNCONFIRMED', 'CONFIRMED', 'WAITING'],
            ctype='csv',
            emailassigned_to1='1'
        )
    def resolved_common_url_params(self):
        # resolved/verified bugs except those resolved as LATER
        return {
            'bug_status':['RESOLVED', 'VERIFIED'],
            'ctype':'csv',
            'emailreporter1':'1',
            'field0-0-0':'resolution',
            'type0-0-0':'notequals',
            'value0-0-0':'LATER'
        }
    def single_user_params(self):
        # restrict by exact match on the logged-in user's email
        return dict(
            emailtype1='exact',
            email1=self.login
        )
    def all_users_params(self):
        # restrict by regexp matching any known login
        return dict(
            emailtype1='regexp',
            email1='(' + '|'.join(self.login_mapping.keys()) + ')'
        )
    # --- prebuilt fetchers (see _fetcher_function above) -------------
    fetch_user_tickets = _fetcher_function(resolved=False, single=True)
    """ Start fetching tickets for current user """
    fetch_all_tickets = _fetcher_function(resolved=False, single=False)
    """ Start fetching tickets for all users in mapping """
    fetch_user_resolved_tickets = _fetcher_function(resolved=True, single=True)
    fetch_all_resolved_tickets = _fetcher_function(resolved=True, single=False)
    fetch_bugs_for_query = _query_fetcher_function(bug_status=['NEW', 'ASSIGNED', 'REOPENED', 'UNCONFIRMED', 'CONFIRMED', 'WAITING'])
    fetch_resolved_bugs_for_query = _query_fetcher_function(bug_status=['RESOLVED', 'VERIFIED'])
    fetch_all_bugs_for_query = _query_fetcher_function()
    def fetch_scrum(self, sprint_name, project_id=None):
        """Fetch all bugs whose whiteboard tags them with the sprint."""
        params = dict(
            ctype='csv',
            status_whiteboard_type='substring',
            status_whiteboard='s=%s' % sprint_name,
            bug_status=['NEW', 'ASSIGNED', 'REOPENED', 'UNCONFIRMED', 'CONFIRMED', 'WAITING', 'RESOLVED', 'VERIFIED', 'CLOSED'],
        )
        url = h.serialize_url(self.tracker.url + '/buglist.cgi?', **params)
        self.fetch(url)
    # --- XML queries for titles and dependency graphs ----------------
    def fetch_bug_titles_and_depends_on(self, ticket_ids):
        params = dict(
            ctype='xml',
            field=['dependson', 'bug_id', 'short_desc', 'bug_severity', 'resolution'],
            id=[str(id) for id in ticket_ids],
            #bug_status=['NEW', 'ASSIGNED', 'REOPENED', 'UNCONFIRMED', 'RESOLVED', 'VERIFIED']
        )
        url = h.serialize_url(self.tracker.url + '/show_bug.cgi?', **params)
        headers = self.get_headers()
        self.request(url, headers, partial(self.xml_response, self.parse_response_of_bug_titles_and_depends_on))
    def fetch_dependons_for_ticket_ids(self, ticket_ids):
        params = dict(
            ctype='xml',
            field=['dependson', 'bug_id', 'bug_severity', 'resolution']
        )
        if ticket_ids:
            # drop falsy ids before serializing
            ids = []
            for id in ticket_ids:
                if id:
                    ids.append(str(id))
            params.update(id=ids)
            if not ids:
                return self.fail(FetchException(u'Ticket ids list is empty'))
        url = h.serialize_url(self.tracker.url + '/show_bug.cgi?', **params)
        headers = self.get_headers()
        self.request(url, headers, partial(self.xml_response, self.parse_response_of_dependons_for_ticket_ids))
    def _parse_xml_response(self, data, bug_callback, success_callback):
        """ Parse xml: run `bug_callback` on each <bug> element, then
        `success_callback`; any exception fails the whole fetch. """
        try:
            # igozilla returns iso-8859-2, but does not declare it
            data = data.replace('<?xml version="1.0" standalone="yes"?>', '<?xml version="1.0" encoding="iso-8859-2" standalone="yes"?>')
            xml = ET.fromstring(data)
            for bug in xml.findall('bug'):
                bug_callback(bug)
        except BaseException, e:
            EXCEPTION(u'Parse xml response failed for data %r' % data)
            self.failed(e)
        else:
            success_callback()
    def parse_response_of_bug_titles_and_depends_on(self, data):
        """ Parse response for query of bug titles and depends on """
        def handle(bug):
            bug_id = bug.find('bug_id').text
            short_desc = getattr(bug.find('short_desc'), 'text', '')
            depends_on = [item.text for item in bug.findall('dependson')]
            is_bug = self.is_bug(bug)
            self.bugs[bug_id] = {'title': short_desc, 'depends_on': depends_on, 'is_bug': is_bug, 'severity': getattr(bug.find('bug_severity'), 'text', '')}
        self._parse_xml_response(data, handle, self.success)
    def get_severity(self, bug):
        return getattr(bug.find('bug_severity'), 'text', '')
    def is_bug(self, bug):
        """ Check if given XML bug definition adheres to "bug" definition from #69234 """
        severity = self.get_severity(bug)
        resolution = getattr(bug.find('resolution'), 'text', '')
        # enhancements and INVALID resolutions do not count as bugs
        return (not severity in ('enhancement high', 'enhancement medium', 'enhancement low')) \
            and (not resolution == 'INVALID')
    def parse_response_of_dependons_for_ticket_ids(self, data):
        """ Parse response for query of depends on; recursively fetches
        dependencies that have not been seen yet. """
        dependsons = []
        def handle(bug):
            if self.is_bug(bug):
                bug_id = bug.find('bug_id').text
                self.bugs[bug_id] = True
                for item in bug.findall('dependson'):
                    id = item.text
                    if not self.bugs.get(id) and id:
                        dependsons.append(id)
        def on_success():
            if not dependsons:
                self.success()
            else:
                # walk the dependency graph one level deeper
                self.fetch_dependons_for_ticket_ids(dependsons)
        self._parse_xml_response(data, handle, on_success)
    def update_bugs_statuses(self, xml):
        # record resolved-ness and description of every bug in the
        # dependson/blocked status map
        for bug in xml.findall('bug'):
            bug_id = bug.find('bug_id').text
            status = getattr(bug.find('bug_status'), 'text', None)
            description = getattr(bug.find('short_desc'), 'text', None)
            if status:
                self.dependson_and_blocked_status[bug_id]['resolved'] = status in ('CLOSED', 'RESOLVED', 'VERIFIED')
            if description:
                self.dependson_and_blocked_status[bug_id]['desc'] = description
    def parse_dependson_and_blocked_bugs_xml(self, data):
        try:
            # igozilla returns iso-8859-2, but does not declare it
            data = data.replace('<?xml version="1.0" standalone="yes"?>', '<?xml version="1.0" encoding="iso-8859-2" standalone="yes"?>')
            xml = ET.fromstring(data)
            self.update_bugs_statuses(xml)
        except BaseException, e:
            self.failed(e)
        else:
            self.update_depensons_and_blocked_status()
            self.success()
    def get_status_of_dependson_and_blocked_bugs(self):
        # fetch current status for every bug referenced as dependson/blocked
        bug_ids = self.dependson_and_blocked_status.keys()
        if bug_ids:
            url = h.serialize_url('%s/show_bug.cgi?' % self.tracker.url,
                                  ctype='xml',
                                  id=bug_ids,
                                  field=['bug_status', 'bug_id', 'short_desc'])
            headers = self.get_headers()
            self.request(url, headers, partial(self.xml_response, self.parse_dependson_and_blocked_bugs_xml))
        else:
            self.success()
    def parse_xml(self, data):
        # attach blocked/dependson maps to already-fetched bugs, then
        # go fetch the status of every referenced bug
        try:
            # igozilla returns iso-8859-2, but does not declare it
            data = data.replace('<?xml version="1.0" standalone="yes"?>', '<?xml version="1.0" encoding="iso-8859-2" standalone="yes"?>')
            xml = ET.fromstring(data.decode(self.encoding))
            for bug in xml.findall('bug'):
                bug_id = bug.find('bug_id').text
                obj = self.bugs.get(bug_id)
                if obj:
                    for key in ('blocked', 'dependson'):
                        results = dict((item.text, {'resolved': False}) for item in bug.findall(key))
                        self.dependson_and_blocked_status.update(results)
                        if results:
                            setattr(obj, key, results)
        except BaseException, e:
            self.failed(e)
        else:
            self.get_status_of_dependson_and_blocked_bugs()
    def xml_failed(self, err):
        self.fail(err)
        EXCEPTION(u"XML for tracker %s failed: %s" % (self.tracker.name, err))
    def xml_response(self, on_success, resp):
        """ Called when server returns response headers """
        if resp.code == 200:
            resp.deliverBody(SimpleProtocol(on_success, self.xml_failed))
        else:
            self.fail(FetchException(u'Received xml response %s' % (resp.code, )))
    def get_dependson_and_blocked_by(self):
        url = h.serialize_url('%s/show_bug.cgi?' % self.tracker.url,
                              ctype='xml',
                              id=self.bugs.keys(),
                              field=['blocked', 'dependson', 'bug_id'])
        headers = self.get_headers()
        self.request(url, headers, partial(self.xml_response, self.parse_xml))
    def received(self, data):
        """ Called when server returns whole response body """
        try:
            for bug in self.parse(data):
                self.bugs[bug.id] = bug
            self.get_dependson_and_blocked_by()
        except BaseException, e:
            EXCEPTION(u"Could not parse tracker response")
            self.fail(e)
| {
"repo_name": "pytlakp/intranetref",
"path": "src/intranet3/asyncfetchers/bugzilla.py",
"copies": "1",
"size": "13440",
"license": "mit",
"hash": -5482245253591781000,
"line_mean": 38.884272997,
"line_max": 156,
"alpha_frac": 0.5636160714,
"autogenerated": false,
"ratio": 3.8609594943981613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9830440403108573,
"avg_score": 0.01882703253791749,
"num_lines": 337
} |
from functools import partial
from difflib import HtmlDiff
import random
import string
import base64
import copy
from pathlib import Path
from collections.abc import Mapping
import black
import pandas as pd
from IPython.display import HTML, Image
from jinja2 import Environment, PackageLoader
from .NotebookIntrospector import NotebookIntrospector
from .sets import differences
from ..table import Table
_env = Environment(loader=PackageLoader('sklearn_evaluation', 'assets/nb'))
_fm = black.FileMode(string_normalization=False, line_length=40)
_htmldiff = HtmlDiff()
class NotebookCollection(Mapping):
    """Compare output from a collection of notebooks
    To access output, notebooks must tag the cells (one tag per cell). For
    instructions on tagging cells, `see this <https://jupyterbook.org/advanced/advanced.html#how-should-i-add-cell-tags-and-metadata-to-my-notebooks>`_.
    :doc:`Click here <../user_guide/NotebookCollection>` to see the user guide.
    Parameters
    ----------
    paths : list
        Paths to notebooks to load
    ids : list or 'filenames', default=None
        List of ids (one per notebook), if None, paths are used as identifiers,
        if 'filenames', the file name is extracted from each path and used
        as identifier (ignores extension)
    scores : bool or list, default=False
        Which metrics to treat as scores (higher is better) when building
        the "Compare" tab: True treats all metrics as scores, a list
        treats only the listed metrics as scores, False treats all
        metrics as errors
    """
    def __init__(self, paths, ids=None, scores=False):
        if ids is None:
            ids = paths
        elif ids == 'filenames':
            ids = [_get_filename(path) for path in paths]
        self.nbs = {
            id_: NotebookIntrospector(path, to_df=False)
            for id_, path in zip(ids, paths)
        }
        # the available tags are taken from the first notebook; assumes
        # every notebook shares the same set of tags
        nb = list(self.nbs.values())[0]
        self._keys = list(nb.tag2output.keys())
        self._scores = scores
    def __getitem__(self, key):
        # gather the tagged output from each notebook and prepend a
        # "Compare" tab when a comparison is supported for its type
        raw = [nb[key] for nb in self.nbs.values()]
        e, ids_out = add_compare_tab(raw, list(self.nbs.keys()), self._scores)
        mapping = {k: v for k, v in zip(ids_out, e)}
        html = tabs_html_from_content(ids_out, e)
        return HTMLMapping(mapping, html)
    def __iter__(self):
        for k in self._keys:
            yield k
    def _ipython_key_completions_(self):
        return self._keys
    def __len__(self):
        return len(self._keys)
class HTMLMapping(Mapping):
    """A read-only mapping that also renders itself as HTML in notebooks."""
    def __init__(self, mapping, html):
        self._mapping = mapping
        self._html = html
    def __getitem__(self, key):
        return self._mapping[key]
    def _ipython_key_completions_(self):
        return self._mapping.keys()
    def __iter__(self):
        return iter(self._mapping)
    def __len__(self):
        return len(self._mapping)
    def _repr_html_(self):
        return self._html
def _get_filename(path):
path = Path(path)
return path.name.replace(path.suffix, '')
def add_compare_tab(elements, ids, scores_arg):
    """
    Return copies of ``elements`` and ``ids`` with a leading "Compare"
    entry summarizing the outputs, when a comparison is supported for
    their type
    """
    first = elements[0]
    if isinstance(first, (HTML, pd.DataFrame)):
        summary = compare_df(elements, ids, scores_arg)
    elif isinstance(first, Mapping):
        summary = compare_diff(elements)
    elif isinstance(first, (list, set)):
        # lists with dicts fail because they are not hashable
        summary = compare_sets(elements, ids=ids)
    else:
        summary = None
    out_elements = copy.copy(elements)
    out_ids = copy.copy(ids)
    if summary is not None:
        out_elements.insert(0, summary)
        out_ids.insert(0, 'Compare')
    return out_elements, out_ids
def tabs_html_from_content(names, contents):
    """
    Render ``contents`` as an HTML tab widget with one tab per name
    """
    # random prefix so multiple tab widgets on one page don't collide
    prefix = ''.join(random.choice(string.ascii_lowercase) for _ in range(3))
    rendered = [to_html_str(content) for content in contents]
    template = _env.get_template('template.html')
    return template.render(names=names,
                           zip=zip,
                           contents=rendered,
                           prefix=prefix)
def to_df(obj):
    """
    Return ``obj`` as a pandas.DataFrame: DataFrames pass through
    untouched, HTML outputs are parsed with ``pd.read_html``. Raises an
    error when more than one table is detected
    """
    if isinstance(obj, pd.DataFrame):
        return obj
    tables = pd.read_html(obj.data)
    if len(tables) > 1:
        raise NotImplementedError('More than one table detected, only outputs'
                                  ' with a single table are supported')
    df = tables[0]
    df.columns = process_columns(df.columns)
    # first column becomes the index (its label was blanked out above)
    return df.set_index(df.columns[0])
def process_columns(columns):
    """
    Normalize column labels parsed from an HTML table: flatten any
    MultiIndex and blank out the first (index) column name
    """
    if isinstance(columns, pd.MultiIndex):
        return [process_multi_index_col(col) for col in columns]
    return [None] + list(columns[1:])
def process_multi_index_col(col):
    """
    Pick the first level of a MultiIndex column label that is not a
    pandas auto-generated 'Unnamed: N' placeholder
    """
    named = [level for level in col if 'Unnamed:' not in level]
    return named[0]
def color_neg_green(s):
    """Style negative values green and non-negative values red."""
    is_negative = s < 0
    return is_negative.replace({True: 'color: green', False: 'color: red'})
def color_neg_red(s):
    """Style negative values red and non-negative values green."""
    is_negative = s < 0
    return is_negative.replace({True: 'color: red', False: 'color: green'})
def color(s, which, color):
    """
    pandas.DataFrame styling function: return per-cell CSS highlighting
    the cells equal to the ``which`` ('max' or 'min') of the non-null
    values of ``s``
    """
    extreme = getattr(s[~s.isna()], which)()
    styles = []
    for matches in (s == extreme):
        styles.append(f'color: {color}' if matches else '')
    return styles
# Styling functions per metric kind: for error metrics the max is bad
# (red) and the min is good (green); for score metrics the colors are
# reversed.
_color_map = {
    'error': {
        'max': partial(color, which='max', color='red'),
        'min': partial(color, which='min', color='green'),
    },
    'score': {
        'max': partial(color, which='max', color='green'),
        'min': partial(color, which='min', color='red'),
    }
}
def is_in(elements, value):
    """
    Membership test that tolerates degenerate ``elements`` values:
    True matches every value, None/False/empty match nothing, anything
    else is a normal ``in`` check
    """
    if elements is True:
        return True
    if not elements:
        return False
    return value in elements
def split_errors_and_scores(axis, scores_arg, axis_second, transpose=False):
    """
    Split ``axis`` into error metrics and score metrics (according to
    ``scores_arg``) and return a pd.IndexSlice for each, with the slice
    dimensions reversed when ``transpose`` is set
    """
    scores = [metric for metric in axis if is_in(scores_arg, metric)]
    errors = list(set(axis) - set(scores))
    if transpose:
        errors_slice = pd.IndexSlice[axis_second, errors]
        scores_slice = pd.IndexSlice[axis_second, scores]
    else:
        errors_slice = pd.IndexSlice[errors, axis_second]
        scores_slice = pd.IndexSlice[scores, axis_second]
    return errors_slice, scores_slice
def compare_diff(mappings):
    """
    Generate an HTML side-by-side diff of exactly two mappings
    (returns None otherwise)
    """
    if len(mappings) != 2:
        return None
    left, right = mappings
    left_lines = black.format_str(str(left), mode=_fm).splitlines()
    right_lines = black.format_str(str(right), mode=_fm).splitlines()
    return HTML(_htmldiff.make_file(left_lines, right_lines))
def compare_sets(sets, ids):
    """
    Generate a three-column table comparing two collections: elements in
    both, elements only in the first and elements only in the second.
    Returns None unless exactly two collections are given
    """
    if len(sets) != 2:
        return None
    header = ['Both'] + ['Only in {}'.format(id_) for id_ in ids]
    return Table.from_columns(content=differences(*sets), header=header)
def compare_df(tables, ids, scores_arg):
    """
    Generates a comparison from a list of tables. Tables can be either a
    pandas.DataFrame or a str with an HTML table. The output depends
    on the number of tables and rows. Returns a pandas.DataFrame with style
    added (colors)
    """
    dfs = [to_df(table) for table in tables]
    # Single-row data frames, each metric is a single number
    # TODO: check dims are consistent
    if len(dfs[0]) == 1:
        # one column per notebook id, one row per metric
        out = pd.concat(dfs)
        out.index = ids
        out = out.T
        errors, scores = split_errors_and_scores(out.index,
                                                 scores_arg,
                                                 axis_second=out.columns)
        if len(tables) == 2:
            # with exactly two runs, add absolute/relative diff and ratio
            c1, c2 = out.columns
            out['diff'] = out[c2] - out[c1]
            out['diff_relative'] = (out[c2] - out[c1]) / out[c2]
            out['ratio'] = out[c2] / out[c1]
        # highlight best/worst value per metric row; the color meaning
        # depends on whether the metric is an error or a score
        styled = out.style.apply(_color_map['error']['max'],
                                 subset=errors,
                                 axis='columns')
        styled = styled.apply(_color_map['error']['min'],
                              subset=errors,
                              axis='columns')
        styled = styled.apply(_color_map['score']['max'],
                              subset=scores,
                              axis='columns')
        styled = styled.apply(_color_map['score']['min'],
                              subset=scores,
                              axis='columns')
        styled = styled.format({'diff_relative': '{:.2%}'})
    # Multiple rows, each metric is a vector
    else:
        # we can only return a summary if dealing with two tables
        if len(tables) == 2:
            # TODO: generate "Compare diff", "Compare diff relative"
            # and "Compare ratio"
            # element-wise difference; sign coloring depends on whether
            # the column is an error or a score
            out = dfs[1] - dfs[0]
            errors, scores = split_errors_and_scores(out.columns,
                                                     scores_arg,
                                                     axis_second=out.index,
                                                     transpose=True)
            styled = out.style.apply(color_neg_green,
                                     subset=errors,
                                     axis='rows')
            styled = styled.apply(color_neg_red, subset=scores, axis='rows')
        else:
            styled = None
    return styled
def data2html_img(data):
    """Return an HTML ``<img>`` tag embedding a png image (bytes) as base64."""
    encoded = base64.encodebytes(data).decode('utf-8')
    return f'<img src="data:image/png;base64, {encoded}"/>'
def to_html_str(content):
    """Returns an HTML string representation of the content

    Handles, in order: Image (embedded as a base64 <img> tag), HTML (raw
    data), any object exposing ``_repr_html_``, mappings (pretty-printed
    with black inside <pre>), and falls back to ``str`` otherwise.
    """
    if isinstance(content, Image):
        return data2html_img(content.data)
    elif isinstance(content, HTML):
        return content.data
    elif hasattr(content, '_repr_html_'):
        return content._repr_html_()
    elif isinstance(content, Mapping):
        # format dict-likes with black so nested structures are readable
        c = black.format_str(str(content), mode=_fm)
        # add <pre></pre> to keep whitespace
        return f'<pre>{c}</pre>'
    else:
        return str(content)
| {
"repo_name": "edublancas/sklearn-evaluation",
"path": "src/sklearn_evaluation/nb/NotebookCollection.py",
"copies": "1",
"size": "10961",
"license": "mit",
"hash": 6919255241869113000,
"line_mean": 29.7030812325,
"line_max": 152,
"alpha_frac": 0.5897272147,
"autogenerated": false,
"ratio": 3.955611692529773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5045338907229773,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from dimagi.utils.dates import force_to_datetime
import fluff
from corehq.fluff.calculators.case import CasePropertyFilter
from custom.world_vision import WORLD_VISION_DOMAINS
from corehq.apps.users.models import CommCareUser, CommCareCase
from custom.utils.utils import flat_field
from custom.world_vision import user_calcs
from django.utils.dateformat import format
WV_DELETED_TYPES = ('CommCareCase-Deleted', )
class WorldVisionMotherFluff(fluff.IndicatorDocument):
    """Fluff indicator document over 'ttc_mother' cases.

    Flattens mother-case properties (ANC/TT visits, postpartum checks,
    delivery and death details) into SQL columns, grouped by domain and
    user. Rows are written directly to SQL (save_direct_to_sql).
    """
    def case_property(property):
        # Class-body helper (runs at class-definition time): builds a flat
        # field that reads the named case property.
        return flat_field(lambda case: case.get_case_property(property))

    document_class = CommCareCase
    document_filter = CasePropertyFilter(type='ttc_mother')
    deleted_types = WV_DELETED_TYPES

    domains = WORLD_VISION_DOMAINS
    group_by = ('domain', 'user_id')
    save_direct_to_sql = True

    name = flat_field(lambda case: case.name)
    # Location hierarchy, stored as case properties on the mother case.
    lvl_4 = case_property('phc')
    lvl_3 = case_property('block')
    lvl_2 = case_property('district')
    lvl_1 = case_property('state')
    # Defaults to 'unknown' when the property is missing or empty.
    reason_for_mother_closure = flat_field(lambda case: case.reason_for_mother_closure if hasattr(case, 'reason_for_mother_closure')
                                           and case.reason_for_mother_closure else 'unknown')
    mother_state = case_property('mother_state')
    fp_method = case_property('fp_method')
    # Antenatal care and tetanus vaccination milestones.
    anc_1 = case_property('anc_1')
    anc_2 = case_property('anc_2')
    anc_3 = case_property('anc_3')
    anc_4 = case_property('anc_4')
    tt_1 = case_property('tt_1')
    tt_2 = case_property('tt_2')
    tt_booster = case_property('tt_booster')
    iron_folic = case_property('iron_folic')
    completed_100_ifa = case_property('completed_100_ifa')
    anemia_signs = case_property('anemia_signs')
    currently_referred = case_property('currently_referred')
    knows_closest_facility = case_property('knows_closest_facility')
    edd = case_property('edd')
    previous_tetanus = case_property('previous_tetanus')
    # Postpartum visit milestones.
    pp_1_done = case_property('pp_1_done')
    pp_2_done = case_property('pp_2_done')
    pp_3_done = case_property('pp_3_done')
    pp_4_done = case_property('pp_4_done')
    # Delivery and (maternal) death details.
    delivery_date = case_property('delivery_date')
    cause_of_death_maternal = case_property('cause_of_death_maternal')
    place_of_birth = case_property('place_of_birth')
    birth_attendant_during_delivery = case_property('birth_attendant_during_delivery')
    type_of_delivery = case_property('type_of_delivery')
    date_of_mother_death = case_property('date_of_mother_death')
    # Child-count calculators defined in custom.world_vision.user_calcs.
    number_of_children = user_calcs.NumberChildren()
    number_of_boys = user_calcs.NumberBoys()
    number_of_girls = user_calcs.NumberGirls()
    number_of_children_born_dead = user_calcs.StillBirth()
    opened_on = flat_field(lambda case: case.opened_on.date() if case.opened_on else None)
    closed_on = flat_field(lambda case: case.closed_on.date() if case.closed_on else None)
    women_registered = user_calcs.MotherRegistered()
def referenced_case_attribute(case, field_name):
    """
    Look up ``field_name`` on the case referenced by ``case``'s first index.

    Returns "" when the case has no indices, the first index has no
    referenced id, or the referenced case lacks the attribute.
    """
    # Guard against cases with no index at all: the original code indexed
    # case.indices[0] unconditionally and raised IndexError in that case.
    if not case.indices or not case.indices[0]['referenced_id']:
        return ""
    referenced_case = CommCareCase.get(case.indices[0]['referenced_id'])
    # getattr with a default replaces the hasattr/else branch.
    return getattr(referenced_case, field_name, "")
def get_datepart(case, t='n'):
    """Return the child's date of death formatted with Django date-format
    code ``t`` (default 'n' = month number), or "" when no date is set."""
    death_date = case.get_case_property('child_date_of_death')
    if not death_date:
        return ""
    return format(force_to_datetime(death_date), t)
def calculate_weight(case):
    """Return the birth weight as a string in kilograms.

    Values above 10 are assumed to have been recorded in grams and are
    converted; returns "" when the property is missing or empty.
    """
    raw = case.get_case_property('weight_birth')
    if not raw:
        return ""
    value = float(raw)
    # > 10 kg is implausible for a newborn, so treat it as grams.
    return str(value / 1000.0) if value > 10 else raw
# This calculator is necessary to generate 'date' field which is required in the database
class Numerator(fluff.Calculator):
    """Trivial calculator that emits a single null value per case."""
    @fluff.null_emitter
    def numerator(self, case):
        yield None
class WorldVisionHierarchyFluff(fluff.IndicatorDocument):
    """Fluff indicator document over CommCareUser docs exposing each user's
    location hierarchy (state/district/block/phc) from their user data."""
    def user_data(property):
        """
        returns a flat field with a callable looking for `property` on the user
        """
        return flat_field(lambda user: user.user_data.get(property))

    document_class = CommCareUser
    domains = WORLD_VISION_DOMAINS
    group_by = ('domain',)
    save_direct_to_sql = True

    # Required so the generated row has a 'date' column (see Numerator).
    numerator = Numerator()
    lvl_4 = user_data('phc')
    lvl_3 = user_data('block')
    lvl_2 = user_data('district')
    lvl_1 = user_data('state')
class WorldVisionChildFluff(fluff.IndicatorDocument):
    """Fluff indicator document over 'ttc_child' cases.

    Flattens child-case properties (immunisations, feeding practices, death
    details) into SQL columns; the location hierarchy is read from the
    referenced mother case via ``referenced_case_attribute``.
    """
    def case_property(property):
        # Class-body helper (runs at class-definition time): builds a flat
        # field that reads the named case property.
        return flat_field(lambda case: case.get_case_property(property))

    document_class = CommCareCase
    document_filter = CasePropertyFilter(type='ttc_child')
    deleted_types = WV_DELETED_TYPES

    domains = WORLD_VISION_DOMAINS
    group_by = ('domain', 'user_id')
    save_direct_to_sql = True

    name = flat_field(lambda case: case.name)
    # First index is assumed to point at the mother case.
    mother_id = flat_field(lambda case: case.indices[0]['referenced_id'])
    # Location hierarchy pulled from the referenced mother case.
    lvl_4 = flat_field(partial(referenced_case_attribute, field_name='phc'))
    lvl_3 = flat_field(partial(referenced_case_attribute, field_name='block'))
    lvl_2 = flat_field(partial(referenced_case_attribute, field_name='district'))
    lvl_1 = flat_field(partial(referenced_case_attribute, field_name='state'))
    reason_for_child_closure = case_property('reason_for_child_closure')
    # Immunisation schedule milestones.
    bcg = case_property('bcg')
    opv0 = case_property('opv0')
    hepb0 = case_property('hepb0')
    opv1 = case_property('opv1')
    hepb1 = case_property('hepb1')
    dpt1 = case_property('dpt1')
    opv2 = case_property('opv2')
    hepb2 = case_property('hepb2')
    dpt2 = case_property('dpt2')
    opv3 = case_property('opv3')
    hepb3 = case_property('hepb3')
    dpt3 = case_property('dpt3')
    measles = case_property('measles')
    vita1 = case_property('vita1')
    vita2 = case_property('vita2')
    dpt_opv_booster = case_property('dpt_opv_booster')
    vita3 = case_property('vita3')
    # Death and illness details.
    type_of_child_death = case_property('type_of_child_death')
    cause_of_death_child = case_property('cause_of_death_child')
    pneumonia_since_last_visit = case_property('pneumonia_since_last_visit')
    has_diarrhea_since_last_visit = case_property('has_diarrhea_since_last_visit')
    dairrhea_treated_with_ors = case_property('dairrhea_treated_with_ors')
    dairrhea_treated_with_zinc = case_property('dairrhea_treated_with_zinc')
    # Normalised to kilograms by calculate_weight.
    weight_birth = flat_field(calculate_weight)
    # Feeding practices.
    breastfeed_1_hour = case_property('breastfeed_1_hour')
    exclusive_breastfeeding = case_property('exclusive_breastfeeding')
    comp_breastfeeding = case_property('comp_breastfeeding')
    supplementary_feeding_baby = case_property('supplementary_feeding_baby')
    deworm = case_property('deworm')
    ebf_stop_age_month = case_property('ebf_stop_age_month')
    gender = case_property('gender')
    opened_on = flat_field(lambda case: case.opened_on)
    closed_on = flat_field(lambda case: case.closed_on)
    dob = flat_field(lambda case: case.dob)
    date_of_death = case_property('child_date_of_death')
    # Month/year split of the death date (Django format codes 'n' and 'Y').
    month_of_death = flat_field(get_datepart)
    year_of_death = flat_field(partial(get_datepart, t='Y'))
    women_registered = user_calcs.ChildRegistered()
# Module-level pillow instances; presumably picked up by the pillowtop
# process registry by name -- verify against settings.
WorldVisionMotherFluffPillow = WorldVisionMotherFluff.pillow()
WorldVisionChildFluffPillow = WorldVisionChildFluff.pillow()
WorldVisionHierarchyFluffPillow = WorldVisionHierarchyFluff.pillow()
| {
"repo_name": "puttarajubr/commcare-hq",
"path": "custom/world_vision/models.py",
"copies": "2",
"size": "7599",
"license": "bsd-3-clause",
"hash": 2811377786907437600,
"line_mean": 38.578125,
"line_max": 140,
"alpha_frac": 0.68838005,
"autogenerated": false,
"ratio": 3.1915161696766066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9875491351563935,
"avg_score": 0.0008809736225344174,
"num_lines": 192
} |
from functools import partial
from django.conf import settings
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
from django import template
import pytz
from django.utils.html import escape
from django.utils.translation import ugettext as _
from couchdbkit.exceptions import ResourceNotFound
from corehq import privileges
from corehq.apps.hqwebapp.templatetags.hq_shared_tags import toggle_enabled
from corehq.apps.receiverwrapper.auth import AuthContext
from corehq.apps.hqwebapp.doc_info import get_doc_info_by_id, DocInfo
from corehq.apps.reports.formdetails.readable import get_readable_data_for_submission
from corehq import toggles
from corehq.util.timezones.conversions import ServerTime
from couchforms.models import XFormInstance
from casexml.apps.case.xform import extract_case_blocks
from casexml.apps.case import const
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.templatetags.case_tags import case_inline_display
from corehq.apps.hqwebapp.templatetags.proptable_tags import (
get_tables_as_columns, get_definition)
from django_prbac.utils import has_privilege
register = template.Library()
@register.simple_tag
def render_form_xml(form):
    """Render a form's raw XML as a syntax-highlighted, escaped <pre> block."""
    raw_xml = form.get_xml() or ''
    # Break the one-line XML into one tag per line before escaping.
    pretty = escape(raw_xml.replace("><", ">\n<"))
    return ('<pre class="fancy-code prettyprint linenums">'
            '<code class="language-xml">%s</code></pre>' % pretty)
@register.simple_tag
def form_inline_display(form_id, timezone=pytz.utc):
    """One-line description of a form: '<received date>: <xmlns>'.

    Falls back to a 'missing form' label when the id cannot be resolved,
    and an 'empty form id' label when no id was given.
    """
    if form_id:
        try:
            form = XFormInstance.get(form_id)
            if form:
                return "%s: %s" % (ServerTime(form.received_on).user_time(timezone).done().date(), form.xmlns)
        except ResourceNotFound:
            pass
        # id was given but the form doc could not be loaded
        return "%s: %s" % (_("missing form"), form_id)
    return _("empty form id found")
def sorted_case_update_keys(keys):
    """Put common @ attributes at the bottom"""
    def sort_key(key):
        # '@'-prefixed attributes sort after everything else, then alphabetical.
        return (key[0] == '@', key)
    return sorted(keys, key=sort_key)
def sorted_form_metadata_keys(keys):
    """Sort form metadata keys alphabetically, except that 'timeStart'
    always precedes 'timeEnd' and 'username' always precedes 'userID';
    a key from either pair compares equal to any unrelated key so it
    keeps its relative position."""
    def mycmp(x, y):
        foo = ('timeStart', 'timeEnd')
        bar = ('username', 'userID')
        if x in foo and y in foo:
            return -1 if foo.index(x) == 0 else 1
        elif x in foo or y in foo:
            return 0
        if x in bar and y in bar:
            return -1 if bar.index(x) == 0 else 1
        # BUG FIX: this branch previously tested `x in bar and y in bar`,
        # duplicating the condition above and making it unreachable; `or`
        # mirrors the handling of the `foo` pair two branches up.
        elif x in bar or y in bar:
            return 0
        return cmp(x, y)
    return sorted(keys, cmp=mycmp)
@register.simple_tag
def render_form(form, domain, options):
    """
    Render the single-form report template for ``form``.

    Uses options since Django 1.3 doesn't seem to support templatetag kwargs.
    Change to kwargs when we're on a version of Django that does.

    Recognised options: case_id, side_pane, user, request, form_meta.
    Builds three tabs (form data, case changes, form metadata) plus
    auth/user info and edit/resave permission flags.
    """
    # don't actually use the passed in timezone since we assume form submissions already come
    # in in local time.
    # todo: we should revisit this when we properly handle timezones in form processing.
    timezone = pytz.utc
    case_id = options.get('case_id')
    side_pane = options.get('side_pane', False)
    user = options.get('user', None)

    # attribute key holding the case id inside a case block, e.g. '@case_id'
    case_id_attr = "@%s" % const.CASE_TAG_ID

    _get_tables_as_columns = partial(get_tables_as_columns, timezone=timezone)

    # Form Data tab
    form_data, question_list_not_found = get_readable_data_for_submission(form)

    # Case Changes tab
    case_blocks = extract_case_blocks(form)
    # Move the block for the "current" case to the front; iterate over a
    # copy (list(...)) since the original list is mutated in the loop.
    for i, block in enumerate(list(case_blocks)):
        if case_id and block.get(case_id_attr) == case_id:
            case_blocks.pop(i)
            case_blocks.insert(0, block)
    cases = []
    for b in case_blocks:
        this_case_id = b.get(case_id_attr)
        try:
            this_case = CommCareCase.get(this_case_id) if this_case_id else None
            valid_case = True
        except ResourceNotFound:
            this_case = None
            valid_case = False

        if this_case and this_case._id:
            url = reverse('case_details', args=[domain, this_case._id])
        else:
            url = "#"

        definition = get_definition(sorted_case_update_keys(b.keys()))
        cases.append({
            "is_current_case": case_id and this_case_id == case_id,
            "name": case_inline_display(this_case),
            "table": _get_tables_as_columns(b, definition),
            "url": url,
            "valid_case": valid_case
        })

    # Form Metadata tab
    meta = form.top_level_tags().get('meta', None) or {}
    definition = get_definition(sorted_form_metadata_keys(meta.keys()))
    form_meta_data = _get_tables_as_columns(meta, definition)
    # Resolve the submitting auth context; fall back to an unauthenticated
    # context when the form carries none.
    if 'auth_context' in form:
        auth_context = AuthContext(form.auth_context)
        auth_context_user_id = auth_context.user_id
        auth_user_info = get_doc_info_by_id(domain, auth_context_user_id)
    else:
        auth_user_info = get_doc_info_by_id(domain, None)
        auth_context = AuthContext(
            user_id=None,
            authenticated=False,
            domain=domain,
        )
    meta_userID = meta.get('userID')
    meta_username = meta.get('username')
    # Special-case the synthetic demo/admin users, which have no user doc.
    if meta_userID == 'demo_user':
        user_info = DocInfo(
            domain=domain,
            display='demo_user',
        )
    elif meta_username == 'admin':
        user_info = DocInfo(
            domain=domain,
            display='admin',
        )
    else:
        user_info = get_doc_info_by_id(domain, meta_userID)

    edit_session_data = {'user_id': meta_userID}
    # Only pre-fill a case id for edit when the form touches exactly one case.
    if len(case_blocks) == 1 and case_blocks[0].get(case_id_attr):
        edit_session_data["case_id"] = case_blocks[0].get(case_id_attr)

    request = options.get('request', None)
    user_can_edit = (
        request and user and request.domain
        and (user.can_edit_data() or user.is_commcare_user())
    )
    show_edit_submission = (
        user_can_edit
        and has_privilege(request, privileges.CLOUDCARE)
        and toggle_enabled(request, toggles.EDIT_SUBMISSIONS)
    )
    # stuffing this in the same flag as case rebuild
    show_resave = (
        user_can_edit and toggle_enabled(request, toggles.CASE_REBUILD)
    )

    return render_to_string("reports/form/partials/single_form.html", {
        "context_case_id": case_id,
        "instance": form,
        "form_meta": options.get('form_meta', {}),
        "maps_api_key": settings.GMAPS_API_KEY,
        "is_archived": form.doc_type == "XFormArchived",
        "domain": domain,
        'question_list_not_found': question_list_not_found,
        "form_data": form_data,
        "cases": cases,
        "form_table_options": {
            # todo: wells if display config has more than one column
            "put_loners_in_wells": False
        },
        "form_meta_data": form_meta_data,
        "auth_context": auth_context,
        "auth_user_info": auth_user_info,
        "user_info": user_info,
        "side_pane": side_pane,
        "user": user,
        "edit_session_data": edit_session_data,
        "show_edit_submission": show_edit_submission,
        "show_resave": show_resave,
    })
| {
"repo_name": "benrudolph/commcare-hq",
"path": "corehq/apps/reports/templatetags/xform_tags.py",
"copies": "1",
"size": "6974",
"license": "bsd-3-clause",
"hash": 3550223074278264300,
"line_mean": 34.2222222222,
"line_max": 136,
"alpha_frac": 0.6304846573,
"autogenerated": false,
"ratio": 3.5491094147582696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46795940720582696,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from django.contrib.auth.models import User
from rest_framework import serializers, fields
from contact.models import (Contact, Skill, ContactMail, ContactPhone,
ContactAddress, Group,
ContactErrorDuplicate, ContactErrorMultiple)
from member.models import MemberShip
class QuasarSerializerMixin(object):
    '''Implement hack on serializer to match quasar format and ease js code

    Quasar date widgets send full ISO datetimes ("1983-03-19T23:00:00.000Z")
    while DRF DateFields expect plain dates ("1983-03-19"); this mixin
    truncates the value on input and re-expands it on output.
    '''

    def run_validation(self, data=fields.empty):
        """Truncate incoming ISO datetime strings to dates for DateFields."""
        # BUG FIX: `fields.empty` is a sentinel class without `.items()`;
        # the original code crashed with AttributeError when no data was
        # supplied. Only rewrite values when real data is present.
        if data is not fields.empty:
            for key, value in data.items():
                if isinstance(self.fields[key], fields.DateField):
                    # cut the value from "1983-03-19T23:00:00.000Z" to
                    # "1983-03-19"
                    data[key] = value[:10] if value else None
        return super().run_validation(data)

    def to_representation(self, instance):
        """Expand outgoing dates back to the datetime format quasar expects."""
        data = super().to_representation(instance)
        for key, value in data.items():
            if isinstance(self.fields[key], fields.DateField):
                # add to the value from "1983-03-19" to
                # "1983-03-19T00:00:00.000Z"
                data[key] = value + 'T00:00:00.000Z' if value else ''
        return data
class SkillSerializer(serializers.ModelSerializer):
    """Skill representation; writable ``pk`` supports nested updates."""
    class Meta:
        model = Skill
        fields = ('pk', 'title')
        extra_kwargs = {'pk': {'read_only': False,
                               'required': False}}
class ContactMailSmallSerializer(serializers.ModelSerializer):
    """Minimal email representation (address only)."""
    class Meta:
        model = ContactMail
        fields = ('email', )
class ContactMailInContactSerializer(serializers.ModelSerializer):
    """Email nested inside a Contact (no ``contact`` FK; writable ``pk``)."""
    class Meta:
        model = ContactMail
        fields = ('pk', 'email', 'kind', 'validity', 'primary', 'verified')
        extra_kwargs = {'pk': {'read_only': False,
                               'required': False}}
class ContactMailSerializer(serializers.ModelSerializer):
    """Standalone email representation including the ``contact`` FK."""
    class Meta:
        model = ContactMail
        fields = ('pk', 'contact', 'email', 'kind', 'validity', 'primary', 'verified')
        extra_kwargs = {'pk': {'read_only': False,
                               'required': False}}
class ContactPhoneSmallSerializer(serializers.ModelSerializer):
    """Minimal phone representation (number only)."""
    class Meta:
        model = ContactPhone
        fields = ('phone', )
class ContactPhoneInContactSerializer(serializers.ModelSerializer):
    """Phone nested inside a Contact (no ``contact`` FK; writable ``pk``)."""
    class Meta:
        model = ContactPhone
        fields = ('pk', 'phone', 'kind', 'validity')
        extra_kwargs = {'pk': {'read_only': False,
                               'required': False}}
class ContactPhoneSerializer(serializers.ModelSerializer):
    """Standalone phone representation including the ``contact`` FK."""
    class Meta:
        model = ContactPhone
        fields = ('pk', 'contact', 'phone', 'kind', 'validity')
        extra_kwargs = {'pk': {'read_only': False,
                               'required': False}}
class ContactAddressInContactSerializer(serializers.ModelSerializer):
    """Address nested inside a Contact (no ``contact`` FK; writable ``pk``)."""
    class Meta:
        model = ContactAddress
        fields = ('pk', 'kind', 'validity', 'line1', 'line2', 'line3',
                  'city', 'region', 'postal_code', 'country')
        extra_kwargs = {'pk': {'read_only': False,
                               'required': False}}
class ContactAddressSerializer(serializers.ModelSerializer):
    """Standalone address representation including the ``contact`` FK."""
    class Meta:
        model = ContactAddress
        fields = ('pk', 'contact', 'kind', 'validity', 'line1', 'line2', 'line3',
                  'city', 'region', 'postal_code', 'country')
        extra_kwargs = {'pk': {'read_only': False,
                               'required': False}}
class UserSerializer(serializers.ModelSerializer):
    """Minimal auth User representation (pk + email)."""
    class Meta:
        model = User
        fields = ('pk', 'email')
        extra_kwargs = {'pk': {'read_only': False,
                               'required': False}}
class ContactSmallSerializer(serializers.HyperlinkedModelSerializer):
    """Compact contact: name plus at most one valid email and two valid
    phone numbers."""
    emails = serializers.SerializerMethodField()
    phones = serializers.SerializerMethodField()

    class Meta:
        model = Contact
        fields = ('pk', 'name', 'emails', 'phones')

    def get_emails(self, contact):
        """Return the first valid email as a zero-or-one-element list."""
        email = [email for email in contact.emails
                 if email.validity][0:1]
        return ContactMailSmallSerializer(instance=email, many=True).data

    def get_phones(self, contact):
        """Return up to two valid phone numbers."""
        phones = [phone for phone in contact.phones
                  if phone.validity][0:2]
        return ContactPhoneSmallSerializer(instance=phones, many=True).data
class ContactSerializer(QuasarSerializerMixin,
                        serializers.HyperlinkedModelSerializer):
    """Full read/write Contact serializer with writable nested relations
    (skills, emails, phones, addresses) and link-only user relations."""
    users = serializers.HyperlinkedRelatedField(
        many=True, queryset=User.objects.all(),
        view_name='api:user-detail', required=False)
    delegates = serializers.HyperlinkedRelatedField(
        many=True, queryset=User.objects.all(),
        view_name='api:user-detail', required=False)
    skills = SkillSerializer(many=True, required=False)
    emails = ContactMailInContactSerializer(many=True, required=False)
    phones = ContactPhoneInContactSerializer(many=True, required=False)
    addresses = ContactAddressInContactSerializer(many=True, required=False)

    class Meta:
        model = Contact
        fields = ('pk', 'users', 'delegates',
                  'family_name', 'given_name', 'name_prefix', 'name',
                  'birthday', 'gender', 'occupation', 'hobby', 'notes',
                  'teacher', 'vip_official', 'vip_fpmt', 'vip_other',
                  'sangha', 'ordination_date', 'ordination_master',
                  'deleted', 'kind',
                  'benevolent', 'availability', 'skills',
                  'emails', 'phones', 'addresses',
                  )
        extra_kwargs = {'pk': {'read_only': False,
                               'required': False}, }

    def create(self, validated_data):
        """Create a contact together with all nested related objects."""
        # Extract relation related data
        users_data = validated_data.pop('users', [])
        delegates_data = validated_data.pop('delegates', [])
        skills_data = validated_data.pop('skills', [])
        emails_data = validated_data.pop('emails', [])
        phones_data = validated_data.pop('phones', [])
        addresses_data = validated_data.pop('addresses', [])
        # Create contact
        contact = Contact.objects.create(**validated_data)
        # And now related read-only
        for user_data in users_data:
            # Do not create user from here
            contact.users.add(User.objects.get(**user_data))
        for delegate_data in delegates_data:
            # Do not create user from here
            contact.delegates.add(User.objects.get(**delegate_data))
        # And now related read/write
        for skill_data in skills_data:
            contact.skills.add(Skill.objects.get_or_create(**skill_data)[0])
        for email_data in emails_data:
            email_data['contact'] = contact
            contact.emails_set.add(
                ContactMail.objects.get_or_create(**email_data)[0])
        for phone_data in phones_data:
            phone_data['contact'] = contact
            contact.phones_set.add(
                ContactPhone.objects.get_or_create(**phone_data)[0])
        for address_data in addresses_data:
            address_data['contact'] = contact
            contact.addresses_set.add(
                ContactAddress.objects.get_or_create(**address_data)[0])
        return contact

    def _update_nested_related(self, rels_data, obj_class, contact,
                               existing_set):
        """Sync a reverse-FK relation (emails/phones/addresses) to rels_data.

        NOTE(review): ``e['pk']`` raises KeyError for new (pk-less) items,
        and ``objs[index]`` below raises IndexError when rels_data is longer
        than the surviving queryset -- this method appears to support
        update/reorder only, not creation. Confirm against client usage.
        """
        # remove no longer needed
        existing_pks = set([e.pk for e in existing_set])
        rels_data_pks = set([e['pk'] for e in rels_data])
        to_remove_pks = existing_pks - rels_data_pks
        for pk in to_remove_pks:
            obj = obj_class.objects.get(pk=pk)
            obj.delete()
        # update existing in order
        objs = obj_class.objects.filter(contact=contact).order_by('pk')
        for index, rel_data in enumerate(rels_data):
            # remove pk from rel_data as I'm supporting reordering here
            rel_data.pop('pk', None)
            obj = objs[index]
            for key, value in rel_data.items():
                setattr(obj, key, value)
            obj.save()

    def _update_nested_m2m(self, rels_data, obj_class, contact, m2m_field):
        """Sync an m2m relation (skills): drop absent, create pk-less, add rest."""
        m2m = getattr(contact, m2m_field)
        existing_set = set(m2m.all())
        existing = set([obj_class.objects.get(pk=e['pk'])
                        for e in rels_data if 'pk' in e])
        to_remove = existing_set - existing
        m2m.remove(*to_remove)
        to_create = [e for e in rels_data if 'pk' not in e]
        to_create_obj = [obj_class.objects.create(**e) for e in to_create]
        m2m.add(*to_create_obj)
        to_add = existing - existing_set
        m2m.add(*to_add)

    def update(self, instance, validated_data):
        """Update a contact and any nested relations present in the payload.

        Relations absent from validated_data are left untouched.
        """
        contact = instance
        # Extract relation related data
        # related read-only
        if 'users' in validated_data:
            contact.users.clear()
            for user_data in validated_data.pop('users'):
                # Do not create user from here
                contact.users.add(user_data)
        if 'delegates' in validated_data:
            contact.delegates.clear()
            for delegate_data in validated_data.pop('delegates'):
                # Do not create user from here
                contact.delegates.add(delegate_data)
        # And now related fields
        if 'skills' in validated_data:
            self._update_nested_m2m(validated_data.pop('skills'), Skill,
                                    contact, 'skills')
        if 'emails' in validated_data:
            self._update_nested_related(validated_data.pop('emails'),
                                        ContactMail, contact,
                                        set(contact.emails))
        if 'phones' in validated_data:
            self._update_nested_related(validated_data.pop('phones'),
                                        ContactPhone, contact,
                                        set(contact.phones))
        if 'addresses' in validated_data:
            self._update_nested_related(validated_data.pop('addresses'),
                                        ContactAddress, contact,
                                        set(contact.addresses))
        # Set attributes of contact
        for key, value in validated_data.items():
            setattr(contact, key, value)
        contact.save()
        return contact
class MemberShipSerializer(QuasarSerializerMixin,
                           serializers.HyperlinkedModelSerializer):
    """Membership with an embedded compact contact; dates in quasar format."""
    contact = ContactSmallSerializer()

    class Meta:
        model = MemberShip
        fields = ('pk', 'contact', 'creation_date', 'kind')
        extra_kwargs = {'pk': {'read_only': False,
                               'required': False}, }
class GroupSmallSerializer(serializers.ModelSerializer):
    """Group summary exposing a member count instead of the member list."""
    members_count = serializers.SerializerMethodField(required=False)

    class Meta:
        model = Group
        fields = ('pk', 'name', 'description', 'system', 'members_count')

    def get_members_count(self, group):
        return group.members_count()
class GroupSerializer(serializers.ModelSerializer):
    """Group with its full member list (compact contact entries)."""
    members = serializers.SerializerMethodField()

    class Meta:
        model = Group
        fields = ('pk', 'name', 'description', 'system', 'members')

    def get_members(self, group):
        members = group.members()
        return ContactSmallSerializer(instance=members, many=True).data
class ContactErrorDuplicateSerializer(serializers.ModelSerializer):
    """Duplicate-contact error: both candidate contacts plus similarity ratios."""
    old = ContactSmallSerializer()
    new = ContactSmallSerializer()
    created = serializers.SerializerMethodField()

    class Meta:
        model = ContactErrorDuplicate
        fields = ('pk', 'solved', 'message', 'kind', 'old', 'new', 'created',
                  'family_name_ratio', 'given_name_ratio', 'email_ratio')

    def get_created(self, contact_error):
        """Creation timestamp from the first history record; '' when none."""
        try:
            h = contact_error.history.all()[0]
            return h.history_date.isoformat()
        except IndexError:
            return ''
class ContactErrorMultipleSerializer(serializers.ModelSerializer):
    """Multiple-value error on a contact field, with the offending contact."""
    contact = ContactSmallSerializer()
    created = serializers.SerializerMethodField()

    class Meta:
        model = ContactErrorMultiple
        fields = ('pk', 'solved', 'message', 'kind', 'contact', 'field_name',
                  'created')

    def get_created(self, contact_error):
        """Creation timestamp from the first history record; '' when none."""
        try:
            h = contact_error.history.all()[0]
            return h.history_date.isoformat()
        except IndexError:
            return ''
| {
"repo_name": "NoumirPoutipou/oanq",
"path": "api/serializers.py",
"copies": "1",
"size": "12780",
"license": "bsd-3-clause",
"hash": -368433609351312300,
"line_mean": 36.5882352941,
"line_max": 86,
"alpha_frac": 0.5781690141,
"autogenerated": false,
"ratio": 4.28140703517588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5359576049275879,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from django.contrib import admin, messages
from .models import Grain, Hop, UserOrder, Supplier, OrderItem, SupplierOrder, OrdersEnabled, Surcharge
from orders.forms import SupplierOrderAdminForm
class IngredientAdmin(admin.ModelAdmin):
    """Shared admin for Grain and Hop list views."""
    list_display = ("name", "supplier_name", "unit_cost_excl_gst", "unit_size")
class OrderItemInline(admin.TabularInline):
    """Inline editor for adding ingredient items to a UserOrder."""
    model = OrderItem
    extra = 1
    verbose_name_plural = "Add Ingredients"
    verbose_name = "Item"
    fields = ("ingredient", "quantity")
class ReadOnlyOrderItemInline(OrderItemInline):
    """Variant of OrderItemInline with every field read-only."""
    def get_readonly_fields(self, request, obj=None):
        # NOTE(review): Model._meta.get_all_field_names() was removed in
        # Django 1.10 -- confirm the project's Django version still has it.
        return self.model._meta.get_all_field_names()
def flag_as_paid(modeladmin, request, queryset):
    """Admin action: mark all selected user orders as paid."""
    queryset.update(status=UserOrder.STATUS_PAID)
class UserOrderAdmin(admin.ModelAdmin):
    """Admin for customer orders: inline items plus a bulk 'paid' action."""
    inlines = (OrderItemInline, )
    list_display = ("id", "username", "total_excl_gst", "status")
    readonly_fields = ("total_excl_gst", )
    actions = (flag_as_paid, )
class SupplierOrderAdmin(admin.ModelAdmin):
    """Admin for orders placed with suppliers.

    Supplier orders cannot be created or deleted by hand; ``get_queryset``
    keeps one pending order per supplier in sync with paid order items.
    """
    list_display = ("supplier_name", "status", "total_excl_gst")
    readonly_fields = ("supplier", "total_excl_gst", "total_incl_gst")
    change_form_template = 'orders/supplier_order_change_form.html'

    def get_queryset(self, request):
        # NOTE(review): this override mutates data on every changelist load:
        # it creates a pending order per supplier, attaches paid-but-unassigned
        # items to it, and detaches items whose user order is (back to) unpaid.
        supplier_orders = SupplierOrder.objects.filter(status=SupplierOrder.STATUS_PENDING)
        for supplier in Supplier.objects.all():
            order, _ = supplier_orders.get_or_create(supplier=supplier)
            OrderItem.objects.filter(
                supplier_order=None,
                user_order__status=UserOrder.STATUS_PAID,
                ingredient__supplier=supplier).update(supplier_order=order)
            OrderItem.objects.filter(
                supplier_order=order,
                user_order__status=UserOrder.STATUS_UNPAID,
                ingredient__supplier=supplier).update(supplier_order=None)
        return SupplierOrder.objects.all()

    def has_add_permission(self, request):
        # Orders are generated automatically in get_queryset.
        return False

    def has_delete_permission(self, request, obj=None):
        return False
def add_to_supplier_order(supplier_order, modeladmin, request, queryset):
    """Admin action body (bound to a specific supplier_order via partial in
    OrderItemAdmin.get_actions): attach the selected *paid* order items to
    ``supplier_order``, warning about unpaid ones that were skipped."""
    # .exists() avoids a full COUNT(*) just to test for non-emptiness.
    if queryset.filter(user_order__status=UserOrder.STATUS_UNPAID).exists():
        # Typo fixed in the user-facing message: "havn't" -> "haven't".
        messages.warning(request, "Some order items selected could not be added to supplier order as they haven't been paid for")
    queryset.filter(user_order__status=UserOrder.STATUS_PAID).update(supplier_order=supplier_order)
def remove_from_supplier_orders(modeladmin, request, queryset):
    """Admin action: detach the selected items from any supplier order."""
    queryset.update(supplier_order=None)
class OrderItemAdmin(admin.ModelAdmin):
    """Admin for individual order items with one dynamically generated
    'Add to ...' action per pending supplier order."""
    list_display = ("ingredient", "quantity", "unit_size", "user_order", "supplier_order", "paid")
    list_per_page = 200
    list_filter = ("ingredient__supplier__name", "user_order__status")
    readonly_fields = ("user_order", )
    actions = (remove_from_supplier_orders, )

    def get_actions(self, request):
        actions = super(OrderItemAdmin, self).get_actions(request)
        # One action per pending supplier order; partial() binds the order
        # eagerly so each generated action targets the right one.
        for order in SupplierOrder.pending_orders():
            name = "Add to %s" % order
            actions[name] = (partial(add_to_supplier_order, order), name, name)
        return actions

    def has_add_permission(self, request):
        # Items are created through user orders, never directly.
        return False
class OrdersEnabledAdmin(admin.ModelAdmin):
    """Admin toggle for the global 'orders enabled' flag."""
    list_display = ("name", "enabled", )

    def name(self, *args, **kwargs):
        # Fixed display label for the changelist's first column.
        return "Orders Enabled"
class SurchargeAdmin(admin.ModelAdmin):
    """Admin for surcharge configuration."""
    list_display = ("name", "surcharge_percentage", "order_surcharge")

    def name(self, *args, **kwargs):
        # Fixed display label for the changelist's first column.
        return "Surcharges"
# Register models with the default admin site; Grain and Hop share
# IngredientAdmin, Supplier uses the stock ModelAdmin.
admin.site.register(Grain, IngredientAdmin)
admin.site.register(Hop, IngredientAdmin)
admin.site.register(UserOrder, UserOrderAdmin)
admin.site.register(Supplier)
admin.site.register(SupplierOrder, SupplierOrderAdmin)
admin.site.register(OrderItem, OrderItemAdmin)
admin.site.register(OrdersEnabled, OrdersEnabledAdmin)
admin.site.register(Surcharge, SurchargeAdmin)
| {
"repo_name": "gkampjes/ucbc",
"path": "orders/admin.py",
"copies": "2",
"size": "4003",
"license": "mit",
"hash": -6952299718924593000,
"line_mean": 35.3909090909,
"line_max": 128,
"alpha_frac": 0.6989757682,
"autogenerated": false,
"ratio": 3.6523722627737225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004394354356207468,
"num_lines": 110
} |
from functools import partial
from django.db.models import Avg
from django.http import HttpResponse
import json
from core.models import DBDataSource, GameResult, Entity
from logic.Akinator import Akinator
from logic.AkinatorDataSource import ANSWERS
# In-memory registry of active games keyed by game id.
# NOTE(review): assumes a single process; sessions are lost on restart.
sessions_pool = {}

# Maps the integer answer codes sent by the client to ANSWERS values.
ANSWERS_MAP = {
    0: ANSWERS.YES,
    1: ANSWERS.NO,
    2: ANSWERS.DOES_NOT_MATTER,
    3: ANSWERS.DO_NOT_KNOW,
}
class JsonResponse(HttpResponse):
    """HttpResponse that serializes its content to JSON encoded as UTF-16."""
    def __init__(self, content, **kwargs):
        kwargs.setdefault('content_type', 'application/json; charset=utf-16')
        # ensure_ascii=False keeps non-latin text readable in the payload
        jContent = json.dumps(content, ensure_ascii=False).encode('utf-16')
        super(JsonResponse, self).__init__(jContent, **kwargs)
def _remove_game(session_id):
    """Drop a finished game from the sessions pool (no-op when absent)."""
    if session_id in sessions_pool:
        del sessions_pool[session_id]
def start_game(request):
    """Create a new Akinator session, register it in the pool, and return
    its id plus the first question as JSON."""
    session = Akinator(DBDataSource())
    sessions_pool[session.game_id] = session
    # The session removes itself from the pool once the game finishes.
    session.set_finish_callback(partial(_remove_game, session.game_id))

    content = {
        'sessionId': session.game_id,
        'firstQuestion': session.current_question.text,
    }
    return JsonResponse(content)
def get_game(request):
    """
    Helper function to fetch game based on request data

    Raises KeyError when the session id is missing or unknown.
    """
    session_id = request.REQUEST['sessionId']
    return sessions_pool[session_id]
def process_response(request):
    """Feed the player's answer into the game; respond with either the
    next question or, once a hypothesis is reached, the guessed entity."""
    akinator = get_game(request)

    answer = ANSWERS_MAP[int(request.REQUEST['answer'])]
    entity, question = akinator.process_answer(answer)

    response = {
        'nextQuestion': None,
        'result': None,
    }
    # Exactly one of (entity, question) is populated per step.
    if entity:
        response['result'] = {
            'name': entity.name,
            'description': entity.description,
        }
    else:
        response['nextQuestion'] = question.text

    return JsonResponse(response)
def current_stats(request):
    """Return the top-N current hypotheses (entity name + score) as JSON."""
    content = {
        'entities': []
    }

    count = int(request.REQUEST['count'])
    akinator = get_game(request)
    top = akinator.get_top_hypothesis(count)
    for entity, score in top:
        content['entities'].append({
            'name': entity.name,
            'score': score,
        })

    return JsonResponse(content)
def end_game(request):
    """Finish a game: answer code 4 means the guess was accepted; otherwise
    the player supplies the correct entity by key or by name/description."""
    akinator = get_game(request)

    status = request.REQUEST['answer']
    if int(status) == 4:
        akinator.hypothesis_accepted()
    else:
        if 'key' in request.REQUEST:
            # Player picked an existing entity.
            key = int(request.REQUEST['key'])
            akinator.hypothesis_declined(key=key)
        else:
            # Player described a brand-new entity.
            name = request.REQUEST['name']
            description = request.REQUEST['description']
            akinator.hypothesis_declined(name=name, description=description)

    return JsonResponse({'status': 'OK'})
def statistics(request):
    """Return overall game statistics as JSON: total games played, win rate
    (percentage) and average game length."""
    games_count = GameResult.objects.count()
    # BUG FIX: with zero recorded games the original division raised
    # ZeroDivisionError; report 0% / no average instead.
    if games_count:
        win_rate = GameResult.objects.filter(success=True).count() * 100.0 / games_count
        avg_game_length = GameResult.objects.all().aggregate(Avg('game_length'))['game_length__avg']
    else:
        win_rate = 0.0
        avg_game_length = None
    content = {
        'gamesCount': games_count,
        'winRate': win_rate,
        'avgLength': avg_game_length,
    }
    return JsonResponse(content)
def entities_list(request):
    """Return all known entities as {key, name} pairs."""
    return JsonResponse({
        'entities': [
            {'key': entity.pk, 'name': entity.name}
            for entity in Entity.objects.all()
        ],
    })
| {
"repo_name": "igor-kondratiev/akinator",
"path": "akinator/core/views.py",
"copies": "1",
"size": "3372",
"license": "mit",
"hash": 2209695644066472400,
"line_mean": 23.4347826087,
"line_max": 96,
"alpha_frac": 0.631376038,
"autogenerated": false,
"ratio": 3.7466666666666666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48780427046666663,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from django import template
from django.template.base import TagHelperNode, token_kwargs
from django.template.defaultfilters import safe
register = template.Library()
class EmailContentTransform(TagHelperNode):
    """Template node that renders its body through an email "shard" template
    named after the tag itself (one of a/p/h1/h2, registered below).

    The shard template is looked up at
    ``alerts/email_shards/<alert_shardtype>/<tag>.<alert_shard_ext>``, so the
    same tag can render differently per shard namespace and per txt/html
    output flavour.
    """

    def __init__(self, tag_name, parser, token):
        # NOTE(review): TagHelperNode.__init__ is deliberately not called;
        # this node only relies on the attributes assigned below.
        self.tag_name = tag_name
        # First bit is the tag name itself; the rest are key=value pairs.
        bits = token.split_contents()[1:]
        # Keeps the .var of each parsed kwarg; these objects are later pushed
        # into the context as-is in render() -- presumably the shard templates
        # resolve them; confirm against the shard templates.
        self.kwargs = {k: v.var for k,v in token_kwargs(bits, parser).items()}
        # Consume everything up to the matching end tag (e.g. {% endp %}).
        nodelist = parser.parse(('end{0}'.format(tag_name),))
        parser.delete_first_token()
        self.nodelist = nodelist

    def render(self, context):
        # Shard namespace/extension default to "default"/"txt"; the shardtype
        # tag in this module can override the namespace via the context.
        email_template_namespace = context.get("alert_shardtype", "default")
        shard_ext = context.get("alert_shard_ext", "txt")
        template_file = "alerts/email_shards/{0}/{1}.{2}".format(
            email_template_namespace,
            self.tag_name,
            shard_ext
        )
        t = context.template.engine.get_template(template_file)
        # Render the tag body first, then expose it to the shard template as
        # {{ content }} together with the tag's keyword arguments.
        content = self.nodelist.render(context)
        with context.push(content=content, **self.kwargs):
            rendered = t.render(context)
        if shard_ext == "html":
            # Preserve line breaks in HTML output; safe() marks the string so
            # it will not be autoescaped again.
            rendered = safe(rendered.replace("\n", "<br>"))
        return rendered
# Register one transform tag (with a matching end-tag) per supported element.
email_tags = [
    "a", "p", "h1", "h2"
]

for tag_name in email_tags:
    # partial() binds the tag name immediately, so each registered tag keeps
    # its own name (no late-binding closure issue).
    register.tag(name=tag_name)(partial(EmailContentTransform, tag_name))
class EmailShardTypeNode(template.Node):
    """Renders its children with ``alert_shardtype`` overridden in context."""

    def __init__(self, shard_type, nodelist):
        self.shard_type = shard_type
        self.nodelist = nodelist

    def render(self, context):
        with context.push(alert_shardtype=self.shard_type):
            rendered = self.nodelist.render(context)
        return rendered
@register.tag
def shardtype(parser, token):
    """``{% shardtype "name" %}...{% endshardtype %}`` -- set the email shard
    namespace for the enclosed content."""
    pieces = token.split_contents()  # split_contents() keeps quoted strings whole
    if len(pieces) != 2:
        raise template.TemplateSyntaxError(
            "%r tag requires a single argument" % token.contents.split()[0]
        )
    tag_name, shard_type = pieces
    properly_quoted = (shard_type[0] == shard_type[-1]
                       and shard_type[0] in ('"', "'"))
    if not properly_quoted:
        raise template.TemplateSyntaxError(
            "%r tag's argument should be in quotes" % tag_name
        )
    nodelist = parser.parse(('end{0}'.format(tag_name),))
    parser.delete_first_token()
    return EmailShardTypeNode(shard_type[1:-1], nodelist)
"repo_name": "jiaaro/django-alert",
"path": "alert/templatetags/alert_email_tags.py",
"copies": "1",
"size": "2410",
"license": "mit",
"hash": 2340435386034712600,
"line_mean": 29.5189873418,
"line_max": 78,
"alpha_frac": 0.6273858921,
"autogenerated": false,
"ratio": 3.843700159489633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9969043895198155,
"avg_score": 0.00040843127829564766,
"num_lines": 79
} |
from functools import partial
from django import template
from django.template import Context
from django.template.base import Node, TemplateSyntaxError
from ..compat import generic_tag_compiler, getargspec
register = template.Library()
def lazy_tag(self, func=None, takes_context=None, name=None, node_class=None):
    """
    A tag function decorator, injected on Django's template tag library, similar to simple_tag().
    The decorated function gets called when the template node tree is built and should return
    another function, responsible for the output, that later will be called within the rendering phase.
    Note: if decorated with takes_context=True, context will not be available in the init phase.
    @register.lazy_tag(takes_context=True)
    def x(context, a, b, c=True, d=False):
        # Init phase (no context)
        def render(context):
            # Render phase
            return u'Content of argument a: %s' % a
        return render
    """
    def dec(func):
        params, varargs, varkw, defaults = getargspec(func)

        class SimpleNode(Node):
            def __init__(self, takes_context, args, kwargs):
                self.takes_context = takes_context
                self.args = args
                self.kwargs = kwargs
                # Arguments are resolved once at parse time against an empty
                # context, so only literal arguments survive intact here (and
                # takes_context hands func an *empty* Context, per docstring).
                resolved_args, resolved_kwargs = self.get_resolved_arguments(Context({}))
                self.resolved_args = resolved_args
                self.resolved_kwargs = resolved_kwargs
                # "Init phase": func must return the render-phase callable.
                self.render_func = func(*resolved_args, **resolved_kwargs)

            def get_resolved_arguments(self, context):
                resolved_args = [var.resolve(context) for var in self.args]
                if self.takes_context:
                    resolved_args = [context] + resolved_args
                resolved_kwargs = dict((k, v.resolve(context)) for k, v in self.kwargs.items())
                return resolved_args, resolved_kwargs

            def render(self, context):
                # "Render phase": delegate to the callable produced by func.
                return self.render_func(context)

        function_name = (name or
                         getattr(func, '_decorated_function', func).__name__)
        # Reuse Django's generic compiler so argument parsing matches simple_tag.
        compile_func = partial(generic_tag_compiler,
                               params=params, varargs=varargs, varkw=varkw,
                               defaults=defaults, name=function_name,
                               takes_context=takes_context, node_class=node_class or SimpleNode)
        compile_func.__doc__ = func.__doc__
        self.tag(function_name, compile_func)
        return func
    # Support both @register.lazy_tag and @register.lazy_tag(...) usage.
    if func is None:
        return dec  # @register.lazy_tag(...)
    elif callable(func):
        return dec(func)  # @register.lazy_tag
    else:
        raise TemplateSyntaxError("Invalid arguments provided to lazy_tag")
template.Library.lazy_tag = lazy_tag
| {
"repo_name": "5monkeys/djedi-cms",
"path": "djedi/templatetags/template.py",
"copies": "1",
"size": "2813",
"license": "bsd-3-clause",
"hash": 3192461349134118000,
"line_mean": 37.0135135135,
"line_max": 103,
"alpha_frac": 0.6139353004,
"autogenerated": false,
"ratio": 4.486443381180223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5600378681580223,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from django.shortcuts import render_to_response
from django.template import RequestContext
from . import models
def blog_list(request):
    """List every blog.

    NOTE: locals() is passed as the template context, so the template sees
    'blogs' (and 'request'); renaming locals here would change the context.
    """
    blogs = models.Blog.objects.all()
    return render_to_response('fn_blog/blog_list.html', locals(),
                              context_instance=RequestContext(request))
def blog(request, id):
    """Show one blog with the entries visible to the current user.

    locals() is the template context -- do not rename local variables.
    """
    blog = models.Blog.objects.get(id=int(id))
    entries = blog.entries_by_user(request.user)
    # Template helper for per-category views.  NOTE(review): partial() binds
    # blog.id into blog_category's *first* positional slot, which is its
    # `request` parameter -- confirm against the template how this is called.
    fn_category_view = partial(blog_category, blog.id)
    return render_to_response('fn_blog/blog.html', locals(),
                              context_instance=RequestContext(request))
def blog_category(request, id, category_id):
    """Show a blog filtered to one category (including its descendants).

    locals() is the template context -- do not rename local variables.
    """
    blog = models.Blog.objects.get(id=int(id))
    category = models.Category.objects.get(id=int(category_id))
    entries = blog.entries_by_user(request.user)
    # Include entries of the category itself plus all sub-categories.
    entries = entries.filter(categories__in=category.descendants(inclusive=True))
    fn_category_view = partial(blog_category, blog.id)
    # Marks which category is highlighted in the template.
    fn_category_selected = [category]
    return render_to_response('fn_blog/blog.html', locals(),
                              context_instance=RequestContext(request))
def entry(request, id):
    """Show a single entry; all of the entry's categories count as selected.

    locals() is the template context -- do not rename local variables.
    """
    entry = models.Entry.objects.get(id=int(id))
    blog = entry.blog
    fn_category_view = partial(blog_category, blog.id)
    fn_category_selected = set(entry.categories.iterator())
    return render_to_response('fn_blog/entry.html', locals(),
                              context_instance=RequestContext(request))
| {
"repo_name": "fengb/fn",
"path": "fn_blog/views.py",
"copies": "1",
"size": "1519",
"license": "bsd-3-clause",
"hash": -5024991397985506000,
"line_mean": 34.3255813953,
"line_max": 81,
"alpha_frac": 0.6741277156,
"autogenerated": false,
"ratio": 3.8553299492385786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002836074872376631,
"num_lines": 43
} |
from functools import partial
from email.utils import unquote
from email.mime.image import MIMEImage
from django.core.mail import make_msgid
from django.utils.module_loading import import_string
from django.conf import settings
import six
def _get_klass_from_config(config_variable, default):
    """Resolve a class from settings: dotted-path strings are imported,
    anything already a class (or other object) is returned as-is."""
    configured = getattr(settings, config_variable, default)
    if isinstance(configured, six.string_types):
        return import_string(configured)
    return configured
# Lazily resolved e-mail message classes, overridable via Django settings.
get_emailmessage_klass = partial(
    _get_klass_from_config,
    'TEMPLATED_EMAIL_EMAIL_MESSAGE_CLASS',
    'django.core.mail.EmailMessage'
)

get_emailmultialternatives_klass = partial(
    _get_klass_from_config,
    'TEMPLATED_EMAIL_EMAIL_MULTIALTERNATIVES_CLASS',
    'django.core.mail.EmailMultiAlternatives',
)
class InlineImage(object):
    """An image embedded into an e-mail and referenced via ``cid:``.

    The Content-ID is generated lazily and invalidated whenever the image
    content is replaced.
    """

    def __init__(self, filename, content, subtype=None, domain=None):
        self.filename = filename
        self._content = content
        self.subtype = subtype
        self.domain = domain
        self._content_id = None

    @property
    def content(self):
        """Raw image bytes."""
        return self._content

    @content.setter
    def content(self, value):
        # New content must get a fresh Content-ID.
        self._content_id = None
        self._content = value

    def attach_to_message(self, message):
        """Attach this image as an inline MIME part of *message*."""
        if not self._content_id:
            self.generate_cid()
        part = MIMEImage(self.content, self.subtype)
        part.add_header('Content-Disposition', 'inline', filename=self.filename)
        part.add_header('Content-ID', self._content_id)
        message.attach(part)

    def generate_cid(self):
        """Create a fresh Content-ID for this image."""
        self._content_id = make_msgid('img', self.domain)

    def __str__(self):
        # Renders as a src-ready cid: URL; unquote() strips the <> brackets.
        if not self._content_id:
            self.generate_cid()
        return 'cid:' + unquote(self._content_id)
| {
"repo_name": "BradWhittington/django-templated-email",
"path": "templated_email/utils.py",
"copies": "1",
"size": "1791",
"license": "mit",
"hash": 9175526399067090000,
"line_mean": 26.5538461538,
"line_max": 81,
"alpha_frac": 0.6700167504,
"autogenerated": false,
"ratio": 3.7705263157894735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49405430661894734,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from enum import Enum
from os import rename
import subprocess
# File names for the interactive package-review workflow: the source list is
# rewritten into the results file, then rotated over the original at the end.
SOURCE_FILE_NAME = 'packages.txt'
OUTPUT_FILE_NAME = 'packages.results.txt'
OLD_FILE_NAME = 'packages.old.txt'
class bcolors:
    # ANSI escape sequences used to style terminal output.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class PkgMinipultor:
    """Wraps one installed package: lazily queries apt for (recursive)
    reverse dependencies and tracks the keep/remove decision.

    Fix vs. original: ``== None`` comparisons replaced with ``is None``
    (PEP 8); behavior is unchanged.
    """

    class Status(Enum):
        UNDECIDED = 0
        KEEP = 1
        REMOVE = 2

    def __init__(self, name):
        self.name = name
        # Lazily populated subprocess results (see the getters below).
        self.rdeps = None
        self.rrdeps = None
        self.show = None
        self.status = PkgMinipultor.Status.UNDECIDED

    def has_rdeps(self):
        """True when 'apt rdepends --installed' lists at least one reverse
        dependency (the command always prints a two-line header)."""
        if self.rdeps is None:
            self.get_rdeps()
        return self.rdeps.stdout.decode('utf-8').count('\n') > 2

    def get_show_info(self):
        """Return (and cache) 'apt-cache show <pkg>' output."""
        if self.show is None:
            self.show = subprocess.run(['apt-cache', 'show', self.name], stdout=subprocess.PIPE)
        return self.show.stdout.decode('utf-8')

    def mark_auto(self):
        """Mark the package as automatically installed via apt-mark, and flag
        it for removal from the source list."""
        term_auto = subprocess.run(['sudo', 'apt-mark', 'auto', self.name], stdout=subprocess.PIPE)
        self.mark_remove()
        return term_auto.stdout.decode('utf-8')

    def mark_keep(self):
        self.status = PkgMinipultor.Status.KEEP

    def mark_remove(self):
        self.status = PkgMinipultor.Status.REMOVE

    def get_rdeps(self):
        """Return (and cache) direct installed reverse dependencies."""
        if self.rdeps is None:
            self.rdeps = subprocess.run(['apt', 'rdepends', '--installed', self.name], stdout=subprocess.PIPE)
        return self.rdeps.stdout.decode('utf-8')

    def get_rrdeps(self):
        """Return (and cache) recursive installed reverse dependencies."""
        if self.rrdeps is None:
            self.rrdeps = subprocess.run(['apt', 'rdepends', '--installed', '--recurse', self.name], stdout=subprocess.PIPE)
        return self.rrdeps.stdout.decode('utf-8')
class Menu:
    """Interactive command menu driving the keep/remove decision for one
    package at a time."""

    def __init__(self):
        self.quit = False

    def __str__(self):
        return (" s -> apt show package\n"
                " p -> pass on package (do nothing)\n"
                " h -> help\n"
                " a -> sudo apt-make auto package\n"
                " c -> confirm manual\n"
                " q -> quit\n"
                " r -> apt rdepends --installed\n"
                " rr -> apt rdepends --installed --recurse\n")

    def handle_response(self, pkg):
        """Prompt repeatedly until the user quits or decides pkg's fate."""
        def respond_quit():
            self.quit = True

        # NOTE: the partial(print, ...) entries evaluate their apt queries
        # right here; PkgMinipultor caches the results, so this runs each
        # query at most once per package.
        responses = {
            'h': partial(print, str(self)),
            's': partial(print, pkg.get_show_info()),
            'r': partial(print, pkg.get_rdeps()),
            'rr': partial(print, pkg.get_rrdeps()),
            'p': pkg.mark_keep,
            'a': pkg.mark_auto,
            'c': pkg.mark_remove,
            'q': respond_quit,
        }
        while (not self.quit) and pkg.status == PkgMinipultor.Status.UNDECIDED:
            response = input(bcolors.BOLD + 'Enter a command (h for help): ' + bcolors.ENDC)
            try:
                responses[response]()
            # FIX: only an unknown command is "invalid".  The original bare
            # `except:` also swallowed real failures from the apt subcommands
            # (and even KeyboardInterrupt), hiding bugs behind this message.
            except KeyError:
                print('Invalid response')
# Interactive driver: walk the package list, ask the user about every package
# that still has installed reverse dependencies, and copy the survivors to
# the results file.
with open(SOURCE_FILE_NAME) as source_file, open(OUTPUT_FILE_NAME, 'w+') as output_file:
    menu = Menu()
    for line in source_file:
        pkg = PkgMinipultor(line.strip())
        # Once the user has quit, remaining packages are copied through
        # unprompted (menu.quit short-circuits before querying apt).
        if (not menu.quit) and pkg.has_rdeps():
            print(pkg.get_rdeps())
            menu.handle_response(pkg)
        if pkg.status == PkgMinipultor.Status.REMOVE:
            continue  # Do not write pkg to output_file.
        output_file.write(line)

# Rotate files: keep the previous list as a backup, promote the results.
rename(SOURCE_FILE_NAME, OLD_FILE_NAME)
rename(OUTPUT_FILE_NAME, SOURCE_FILE_NAME)
| {
"repo_name": "kriswithank/apt-marker",
"path": "apt-marker.py",
"copies": "1",
"size": "3636",
"license": "mit",
"hash": -6699307571378592000,
"line_mean": 24.7872340426,
"line_max": 125,
"alpha_frac": 0.5533553355,
"autogenerated": false,
"ratio": 3.4661582459485225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9493276488485851,
"avg_score": 0.0052474185925341105,
"num_lines": 141
} |
from collections import defaultdict, deque
from enum import Enum
from functools import partial

from sentinels import NOTHING
class InvalidCommand(DocDescribed, ValueError):
    # NOTE(review): DocDescribed (defined elsewhere in the package)
    # presumably formats the docstring with the instance attributes, so the
    # docstring below is the error-message template -- do not edit casually.
    "Command {name} was not found"

    def __init__(self, name):
        self.name = name
class InvalidRevision(DocDescribed, Exception):
    # Docstring doubles as the error-message template (see DocDescribed).
    "Expected {expected} revision but got {got}"

    def __init__(self, got, expected):
        self.got = got
        self.expected = expected
class Existence(Enum):
    """
    Self-describing replacement for a plain boolean existence flag; bare
    True/False would not convey created-vs-removed intent at call sites.
    """
    CREATED = True   # the object exists after the operation
    REMOVED = False  # the object is gone after the operation
class Cell(object):
    """A single value slot; the NOTHING sentinel marks an unset cell."""

    def __init__(self):
        self._value = NOTHING

    @property
    def empty(self):
        """True while no value has been stored yet."""
        return self._value is NOTHING

    def set(self, value):
        self._value = value

    def get(self):
        return self._value
class Branch(object):
    """Maps revision numbers to value cells; each revision is written once."""

    cell_factory = Cell

    def __init__(self):
        self._cells = defaultdict(self.cell_factory)

    def add(self, rev, value):
        cell = self._cells[rev]
        assert cell.empty  # a revision may only be written once
        cell.set(value)

    def get(self, rev):
        cell = self._cells[rev]
        assert not cell.empty  # reading an unwritten revision is a bug
        return cell.get()

    def top(self):
        """Highest revision number recorded so far."""
        return max(self._cells)
class Repo(object):
    """Per-object revision store: a "their" branch of confirmed values, a
    "conflict" branch of locally-initiated values, and the FIFO queue of
    local revisions still awaiting server confirmation."""

    branch_factory = Branch

    def __init__(self):
        self._their = self.branch_factory()
        self._conflict = self.branch_factory()
        self._unresolved = deque()
        self._receivers = []

    def resolve(self, rev, origin, value):
        """Confirm the oldest locally-initiated revision.

        Raises InvalidRevision when the confirmed origin is not the oldest
        unresolved one.
        """
        self._their.add(rev, value)
        expected = self._unresolved.popleft()
        if expected != origin:
            raise InvalidRevision(origin, expected)

    def initiate(self, rev, value):
        """Record a locally-made change that still awaits confirmation."""
        self._conflict.add(rev, value)
        self._unresolved.append(rev)

    def store(self, rev, value):
        """Record a remote change that needs no conflict tracking."""
        self._their.add(rev, value)

    def _publish(self, event, *args, **kwargs):
        # BUG FIX: the original passed the undefined name `this` (a C++/JS-ism),
        # raising NameError on the first publish; `self` is what was intended.
        for callback in self._receivers:
            callback(self, event, *args, **kwargs)

    def subscribe(self, callback):
        if callback in self._receivers:
            return
        self._receivers.append(callback)

    def unsubscribe(self, callback):
        if callback not in self._receivers:
            return
        self._receivers.remove(callback)
class ClientGraph(object):
    """Client-side mirror of the graph: nodes, ports, states and links, each
    tracked in its own Repo keyed by id (or by link-endpoint tuple)."""

    repo_factory = Repo

    def __init__(self):
        self._rev = 0
        self._nodes = defaultdict(self.repo_factory)
        self._ports = defaultdict(self.repo_factory)
        self._states = defaultdict(self.repo_factory)
        self._links = defaultdict(self.repo_factory)

    # ==== local (unconfirmed) mutations ===================================== #

    def create_node(self, id):
        self._nodes[id].initiate(self._rev, Existence.CREATED)

    def remove_node(self, id):
        self._nodes[id].initiate(self._rev, Existence.REMOVED)

    def add_link(self, start_id, start_name, end_id, end_name):
        key = (start_id, start_name, end_id, end_name)
        self._links[key].initiate(self._rev, Existence.CREATED)

    def remove_link(self, start_id, start_name, end_id, end_name):
        key = (start_id, start_name, end_id, end_name)
        self._links[key].initiate(self._rev, Existence.REMOVED)

    # ==== server-confirmed mutations ======================================== #

    @staticmethod
    def _apply(repo, rev, origin, value):
        # Shared resolve-or-store dispatch: an origin means this confirms one
        # of our own changes; otherwise it is a plain remote change.
        if origin is not None:
            repo.resolve(rev, origin, value)
        else:
            repo.store(rev, value)

    def node_added(self, id, rev, origin):
        self._apply(self._nodes[id], rev, origin, Existence.CREATED)

    def node_removed(self, id, rev, origin):
        self._apply(self._nodes[id], rev, origin, Existence.REMOVED)

    def ports_changed(self, id, ports, rev, origin):
        # BUG FIX: the original wrote port updates into self._states (leaving
        # self._ports created but never used), which also collides with
        # state_changed writes; port updates belong in self._ports.
        self._apply(self._ports[id], rev, origin, ports)

    def state_changed(self, id, state, rev, origin):
        self._apply(self._states[id], rev, origin, state)

    def link_added(self, start_id, start_name, end_id, end_name, rev, origin):
        key = (start_id, start_name, end_id, end_name)
        self._apply(self._links[key], rev, origin, Existence.CREATED)

    def link_removed(self, start_id, start_name, end_id, end_name, rev, origin):
        # BUG FIX: this was named `link_remove`, but ClientGraphModel's
        # on_removeLink calls `link_removed` (matching `node_removed`), which
        # raised AttributeError.  The old name is kept as an alias below.
        key = (start_id, start_name, end_id, end_name)
        self._apply(self._links[key], rev, origin, Existence.REMOVED)

    # Backwards-compatible alias for the misnamed original method.
    link_remove = link_removed
class ClientGraphModel(object):
    """Applies an ordered stream of server commands to a ClientGraph."""

    graph_factory = ClientGraph

    def __init__(self):
        self._graph = self.graph_factory()
        self._server_rev = 0

    @property
    def graph(self):
        return self._graph

    def dispatch(self, name, *args, **kwargs):
        """Route command `name` to the matching on_<name> handler."""
        handler = getattr(self, "on_" + name, None)
        if handler is None:
            raise InvalidCommand(name)
        handler(*args, **kwargs)

    def _check_rev(self, rev):
        # Commands must arrive strictly in revision order.
        if rev != self._server_rev:
            raise InvalidRevision(rev, self._server_rev)
        self._server_rev = rev + 1

    def on_nop(self, rev):
        self._check_rev(rev)

    def on_createNode(self, id, rev, origin=None):
        self._check_rev(rev)
        self.graph.node_added(id, rev, origin)

    def on_removeNode(self, id, rev, origin=None):
        self._check_rev(rev)
        self.graph.node_removed(id, rev, origin)

    def on_changeState(self, id, state, rev, origin=None):
        self._check_rev(rev)
        self.graph.state_changed(id, state, rev, origin)

    def on_changePorts(self, id, ports, rev, origin=None):
        self._check_rev(rev)
        self.graph.ports_changed(id, ports, rev, origin)

    def on_addLink(self, start_id, start_name, end_id, end_name, rev, origin=None):
        self._check_rev(rev)
        self.graph.link_added(start_id, start_name, end_id, end_name, rev, origin)

    def on_removeLink(self, start_id, start_name, end_id, end_name, rev, origin=None):
        self._check_rev(rev)
        self.graph.link_removed(start_id, start_name, end_id, end_name, rev, origin)
| {
"repo_name": "Evgenus/revigred-server",
"path": "revigred/model/graph/client.py",
"copies": "1",
"size": "6493",
"license": "bsd-3-clause",
"hash": -1981543705146031000,
"line_mean": 29.919047619,
"line_max": 86,
"alpha_frac": 0.5858616972,
"autogenerated": false,
"ratio": 3.6559684684684686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47418301656684686,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from .flacro import FlacroFor
from flask import url_for
class LiItem(FlacroFor):
    """A list item containing a link. Any kwargs left over will be passed to
    url creation
    :param li_tag: a tag for item
    :param kind: type of li item: plain(tag only), link, macro
    :param li: is an actual html li, defaults True, set False to eliminate <li></li>
    :param css_class: a css class
    :param for_url: the flask route passed to url_for or a url
    :param external: for_url is external
    """

    def __init__(self, li_tag, **kwargs):
        self.li_tag = li_tag
        self.kind = kwargs.pop('kind', 'plain')
        self.li = kwargs.pop('li', True)
        self.css_class = kwargs.pop('css_class', None)
        self._route = kwargs.pop('for_url', None)
        self._route_external = kwargs.pop('external', False)
        # Whatever kwargs remain after the pops above become url_for() args.
        self._route_add = kwargs
        if self._route:
            # Bind everything now; the macro calls self.url() at render time.
            self.url = partial(self.generate_url, self._route, self._route_add, external=self._route_external)
        # Macro name is derived from the item kind, e.g. "plainitem".
        super(LiItem, self).__init__(mname="{}item".format(self.kind), mwhere="macros/list.html")

    @staticmethod
    def generate_url(route, route_add, external=False):
        # External "routes" are already complete URLs; internal ones go
        # through Flask's url_for with the leftover kwargs.
        if external:
            return route
        else:
            return url_for(route, **route_add)
class ListMacro(FlacroFor):
    """A generalized list (links, macros, or anything)

    :param list_tag: a tag for the list
    :param kind: type of list ul or ol
    :param list_items: a list of LiItem items
    :param css_class: a css class
    :param css_id: a css id
    """

    def __init__(self, list_items, **kwargs):
        self.list_items = list_items
        self.list_tag = kwargs.get('list_tag', None)
        self.kind = kwargs.get('kind', 'ul')
        self.css_class = kwargs.get('css_class', None)
        self.css_id = kwargs.get('css_id', None)
        super(ListMacro, self).__init__(mname='listmacro', mwhere="macros/list.html")
class AccordionItem(object):
    """One collapsible section: a group label plus its interior content."""

    def __init__(self,
                 group_label,
                 interior,
                 is_open=False,
                 display_label=None):
        self.group_label = group_label
        self.interior = interior
        self.is_open = is_open
        # Assigned by AccordionGroupMacro when the item joins a group.
        self.of_accordion = None
        self.display_label = display_label

    @property
    def label(self):
        """Label shown in the header; falls back to the group label."""
        return self.display_label if self.display_label else self.group_label
class AccordionGroupMacro(FlacroFor):
    """Renders a set of AccordionItems as one accordion widget."""

    def __init__(self, accordions, tag='accordionset', **kwargs):
        self.accordions = accordions
        self.tag = tag
        self.kind = get_kind(kwargs.get('kind', None))
        self.mname = "{}_accordion".format(self.kind)
        self.mwhere = kwargs.get('mwhere', "macros/accordion.html")
        self.css_class = self.tag
        # BUG FIX: the original iterated over the *string* 'close_others',
        # creating bogus one-letter attributes ('c', 'l', 'o', ...) instead
        # of the option itself.  A tuple was intended, cf. TabGroupMacro.
        for opt in ('close_others',):
            setattr(self, opt, kwargs.get(opt, False))
        if self.accordions and self.kind == 'bootstrap':
            # Bootstrap markup requires each item to reference its group.
            for g in self.accordions:
                g.of_accordion = self.tag
        super(AccordionGroupMacro, self).__init__(tag=self.tag,
                                                  mname=self.mname,
                                                  mwhere=self.mwhere)
class TabItem(object):
    """One tab: a label pair plus exactly one content source, supplied as a
    single keyword argument named 'minimal', 'external', 'static',
    'independent' or 'content'."""

    def __init__(self, label, tab_label, **kwargs):
        self.label = label
        self.tab_label = tab_label
        self.set_tab(kwargs)

    def set_tab(self, kwargs):
        # NOTE(review): if kwargs is non-empty but contains none of the
        # recognised keys, tab_type is never bound and the setattr calls
        # below raise NameError -- callers appear to always pass one valid
        # key; confirm before relying on this.
        if kwargs:
            for k, v in kwargs.items():
                if k in ('minimal', 'external', 'static', 'independent', 'content'):
                    tab_type = k
                    tab_content = v
        else:
            tab_type = 'content'
            tab_content = "None"
        setattr(self, 'kind', tab_type)
        # _li holds the renderable <li> macro for this tab type.
        setattr(self, '_li', getattr(self, "make_li", None)(tab_type))
        setattr(self, tab_type, tab_content)

    def make_li(self, tab_type):
        """Return the renderable macro named '<tab_type>_li' from the tab
        macro file."""
        tab = "{}_li".format(tab_type)
        return FlacroFor(mwhere="macros/tab.html",
                         mname=tab).renderable
class TabGroupMacro(FlacroFor):
    """Renders a collection of TabItems as one tab widget."""

    def __init__(self, tabs, tag='tabset', **kwargs):
        self.tabs = tabs
        self.tag = tag
        self.kind = get_kind(kwargs.get('kind', None))
        self.mname = "{}_tabs".format(self.kind)
        self.mwhere = kwargs.get('mwhere', "macros/tab.html")
        self.css_class = self.tag
        # Optional layout flags, all off by default.
        for flag in ('vertical', 'justified', 'pills'):
            setattr(self, flag, kwargs.get(flag, False))
        super(TabGroupMacro, self).__init__(tag=self.tag,
                                            mname=self.mname,
                                            mwhere=self.mwhere)
def get_kind(kind):
    """Normalize a requested widget kind; anything unknown maps to 'minimal'."""
    return kind if kind in ('minimal', 'bootstrap') else 'minimal'
| {
"repo_name": "thrisp/flacro",
"path": "flask_flacro/packaged_macros.py",
"copies": "1",
"size": "4965",
"license": "mit",
"hash": -8157126750519315000,
"line_mean": 35.2408759124,
"line_max": 110,
"alpha_frac": 0.552265861,
"autogenerated": false,
"ratio": 3.669623059866962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47218889208669623,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from fn.op import identity
default_cmp = (lambda a,b: -1 if (a < b) else 1)
class _MergeBased(object):
def __nonzero__(self):
return self.root is not None
def __bool__(self):
return self.__nonzero__()
def __iter__(self):
"""Extract elements one-by-one.
Note, that list(*Heap()) gives you sorted list as result.
"""
curr = self
while curr:
r, curr = curr.extract()
yield r
def __lt__(self, other):
if (not self) and (not other): return False
if not self: return True
if not other: return False
return self.cmpfn(self.keyfn(self.root), self.keyfn(other.root)) < 0
class SkewHeap(_MergeBased):
    """Persistent skew heap (self-adjusting binary-tree heap).

    Defined recursively: a one-element heap is a skew heap, and the skew
    merge of two skew heaps is again a skew heap.  Amortized analysis shows
    every operation runs in O(log n).  Haskell-style type:

        data Skew a = Empty | Node a (Skew a) (Skew a)

    See http://en.wikipedia.org/wiki/Skew_heap and the "Union-based heaps"
    slides: http://goo.gl/VMgdG2

    >>> from fn.immutable import SkewHeap
    >>> s = SkewHeap(10)
    >>> s = s.insert(20)
    >>> s.extract()
    (10, <fn.immutable.heap.SkewHeap object at ...>)
    """

    __slots__ = ("root", "left", "right", "keyfn", "cmpfn", "_make_heap")

    def __init__(self, el=None, left=None, right=None, key=None, cmp=None):
        """Create a skew heap holding one element (or an empty one)."""
        self.root = el
        self.left = left
        self.right = right
        self.keyfn = key or identity
        self.cmpfn = cmp or default_cmp
        # Derived heaps are built with the same key/cmp configuration.
        self._make_heap = partial(self.__class__, key=self.keyfn, cmp=self.cmpfn)

    def insert(self, el):
        """Return a new heap that also contains `el` (self is unchanged)."""
        singleton = self._make_heap(el)
        return singleton.union(self)

    def extract(self):
        """Return (top element, heap without it), or (None, empty heap)
        when self is empty."""
        if not self:
            return None, self._make_heap()
        remainder = self.left.union(self.right) if self.left else self._make_heap()
        return self.root, remainder

    def union(self, other):
        """Skew-merge two heaps into a new one; neither input is modified."""
        if not self:
            return other
        if not other:
            return self
        # The losing heap is merged into the winner's right subtree and the
        # children are swapped -- that swap is the "skew" step.
        if self < other:
            return self._make_heap(self.root, other.union(self.right), self.left)
        return self._make_heap(other.root, self.union(other.right), other.left)
class PairingHeap(_MergeBased):
    """Persistent pairing heap: either empty, or a root element plus a list
    of sub-heaps whose roots all compare after (or equal to) it.

    Haskell-style type:

        data Pairing a = Empty | Node a [Pairing a]

    find-min/insert/merge are O(1); extract stays below O(log n) amortized.
    See "The Pairing Heap: A New Form of Self-Adjusting Heap"
    (http://www.cs.cmu.edu/afs/cs.cmu.edu/user/sleator/www/papers/pairing-heaps.pdf),
    http://en.wikipedia.org/wiki/Pairing_heap and the "Union-based heaps"
    slides: http://goo.gl/VMgdG2

    >>> from fn.immutable import PairingHeap
    >>> ph = PairingHeap("a")
    >>> ph = ph.insert("b")
    >>> ph.extract()
    ('a', <fn.immutable.heap.PairingHeap object at ...>)
    """

    __slots__ = ("root", "subs", "keyfn", "cmpfn", "_make_heap")

    def __init__(self, el=None, subs=None, key=None, cmp=None):
        """Create a pairing heap with one element (or an empty one)."""
        self.root = el
        # subs is a cons-style linked list: (heap, rest) pairs, or None.
        self.subs = subs
        self.keyfn = key or identity
        self.cmpfn = cmp or default_cmp
        self._make_heap = partial(self.__class__, key=self.keyfn, cmp=self.cmpfn)

    def insert(self, el):
        """Return a new heap that also contains `el` (self is unchanged)."""
        return self.union(self._make_heap(el))

    def extract(self):
        """Return (top element, heap without it), or (None, empty heap)
        when self is empty."""
        if not self:
            return None, self._make_heap()
        return self.root, PairingHeap._pairing(self._make_heap, self.subs)

    def union(self, other):
        """Merge two heaps into a new one.

        Known as "meld" in the pairing-heap literature; named "union" here
        for consistency with the other heap implementations.
        """
        if not self:
            return other
        if not other:
            return self
        # The heap with the smaller root adopts the other as a new sub-heap.
        if self < other:
            return self._make_heap(self.root, (other, self.subs))
        return self._make_heap(other.root, (self, other.subs))

    @staticmethod
    def _pairing(heap, hs):
        # Recursively pair up the sub-heap list; `heap` constructs an empty
        # heap when the list is exhausted.
        if hs is None:
            return heap()
        (h1, tail) = hs
        if tail is None:
            return h1
        (h2, tail) = tail
        return PairingHeap._pairing(heap, (h1.union(h2), tail))
| {
"repo_name": "FunctionalX/FunctionalX.py",
"path": "FunctionalX/src/fn/immutable/heap.py",
"copies": "2",
"size": "6562",
"license": "mit",
"hash": -5672335863462397000,
"line_mean": 34.6630434783,
"line_max": 89,
"alpha_frac": 0.6266382201,
"autogenerated": false,
"ratio": 3.5956164383561644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5222254658456165,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from _Framework.Util import group
from Ubermap import UbermapDevices
ubermap = UbermapDevices.UbermapDevices()
"""
Ubermap alpha v0.1: modified devices.py
---------------------------------------
This file has been modified from the version edited by Stray on 10/26/13 to allow
easy mapping of plugins, using custom configuration files. It also contains the
Live Community Mapping, which enhances the original mappings for Ableton's devices
(https://forum.ableton.com/viewtopic.php?f=55&t=198946&p=1562395#p1562395).
All modified variables and functions are prefixed with ubermap.
For more information, see http://github.com/tomduncalf/ubermap
Much credit due to Stray, TomViolenz and any others who figured out how to modify
mappings using this file in the first place.
---------------------------------------
Enjoy :)
Tom
"""
# Rack devices: the eight macro controls form a single parameter bank.
# Each *_BANK* tuple throughout this file holds exactly 8 parameter-name
# strings (one per encoder); the strings must match Live's internal
# parameter names byte-for-byte, so they are never edited here.
# *_BOBS appears to be the "best of banks" page and *_BNK_NAMES the
# display names for each bank -- convention from Live's remote scripts,
# verify against the consuming code.
RCK_BANK1 = ('Macro 1', 'Macro 2', 'Macro 3', 'Macro 4', 'Macro 5', 'Macro 6', 'Macro 7', 'Macro 8')
RCK_BANKS = (RCK_BANK1,)
RCK_BOBS = (RCK_BANK1,)
RCK_BNK_NAMES = ('Macros',)
# ALG: presumably Live's Analog instrument (OSC/FEG/AEG/LFO naming) --
# 18 banks plus one best-of-bank page.
ALG_BANK1 = ('OSC1 Level', 'OSC1 Octave', 'OSC1 Semi', 'OSC1 Detune', 'OSC1 Shape', 'OSC1 Balance', 'PEG1 Amount', 'PEG1 Time')
ALG_BANK2 = ('F1 Type', 'F1 Freq', 'F1 Resonance', 'F1 To F2', 'FEG1 Attack', 'FEG1 Decay', 'FEG1 Sustain', 'FEG1 Rel')
ALG_BANK3 = ('AMP1 Level', 'AMP1 Pan', 'AEG1 S Time', 'AEG1 Loop', 'AEG1 Attack', 'AEG1 Decay', 'AEG1 Sustain', 'AEG1 Rel')
ALG_BANK4 = ('LFO1 Shape', 'LFO1 Sync', 'LFO1 SncRate', 'LFO1 Speed', 'LFO1 PW', 'LFO1 Phase', 'LFO1 Delay', 'LFO1 Fade In')
ALG_BANK5 = ('OSC2 Level', 'OSC2 Octave', 'OSC2 Semi', 'OSC2 Detune', 'OSC2 Shape', 'OSC2 Balance', 'PEG2 Amount', 'PEG2 Time')
ALG_BANK6 = ('F2 Type', 'F2 Freq', 'F2 Resonance', 'F2 Slave', 'FEG2 Attack', 'FEG2 Decay', 'FEG2 Sustain', 'FEG2 Rel')
ALG_BANK7 = ('AMP2 Level', 'AMP2 Pan', 'AEG2 S Time', 'AEG2 Loop', 'AEG2 Attack', 'AEG2 Decay', 'AEG2 Sustain', 'AEG2 Rel')
ALG_BANK8 = ('LFO2 Shape', 'LFO2 Sync', 'LFO2 SncRate', 'LFO2 Speed', 'LFO2 PW', 'LFO2 Phase', 'LFO2 Delay', 'LFO2 Fade In')
ALG_BANK9 = ('OSC1 < LFO', 'OSC1 PW', 'O1 PW < LFO', 'LFO1 On/Off', 'F1 Freq < LFO', 'F1 Res < LFO', 'A1 Pan < LFO', 'AMP1 < LFO')
ALG_BANK10 = ('OSC2 < LFO', 'OSC2 PW', 'O2 PW < LFO', 'LFO2 On/Off', 'F2 Freq < LFO', 'F2 Res < LFO', 'A2 Pan < LFO', 'AMP2 < LFO')
ALG_BANK11 = ('Noise On/Off', 'Noise Level', 'Noise Balance', 'Noise Color', 'O1 Sub/Sync', 'O2 Sub/Sync', 'F1 Drive', 'F2 Drive')
ALG_BANK12 = ('Vib On/Off', 'Vib Amount', 'Vib Speed', 'Vib Delay', 'Vib Error', 'Vib Fade-In', 'Vib < ModWh', 'Volume')
ALG_BANK13 = ('Unison On/Off', 'Unison Detune', 'Unison Delay', 'Unison Voices', 'Glide On/Off', 'Glide Time', 'Glide Legato', 'Glide Mode')
ALG_BANK14 = ('Noise On/Off', 'OSC2 On/Off', 'F1 On/Off', 'F2 On/Off', 'AMP1 On/Off', 'AMP2 On/Off', 'LFO1 On/Off', 'LFO2 On/Off')
ALG_BANK15 = ('FEG1 Attack', 'FEG1 Decay', 'FEG1 Sustain', 'FEG1 Rel', 'FEG2 Attack', 'FEG2 Decay', 'FEG2 Sustain', 'FEG2 Rel')
ALG_BANK16 = ('AEG1 Attack', 'AEG1 Decay', 'AEG1 Sustain', 'AEG1 Rel', 'AEG2 Attack', 'AEG2 Decay', 'AEG2 Sustain', 'AEG2 Rel')
ALG_BANK17 = ('LFO1 SncRate', 'LFO1 Speed', 'LFO1 Fade In', 'LFO1 Phase', 'LFO2 SncRate', 'LFO2 Speed', 'LFO2 Fade In', 'LFO2 Phase')
ALG_BANK18 = ('PEG1 Amount', 'PEG1 Time', 'OSC1 Semi', 'OSC1 Level', 'PEG2 Amount', 'PEG2 Time', 'OSC2 Semi', 'OSC2 Level')
ALG_BOB = ('OSC1 Level', 'OSC1 Semi', 'OSC1 Balance', 'F1 Freq', 'OSC2 Level', 'OSC2 Semi', 'OSC2 Balance', 'F2 Freq')
ALG_BANKS = (ALG_BANK1,
             ALG_BANK2,
             ALG_BANK3,
             ALG_BANK4,
             ALG_BANK5,
             ALG_BANK6,
             ALG_BANK7,
             ALG_BANK8,
             ALG_BANK9,
             ALG_BANK10,
             ALG_BANK11,
             ALG_BANK12,
             ALG_BANK13,
             ALG_BANK14,
             ALG_BANK15,
             ALG_BANK16,
             ALG_BANK17,
             ALG_BANK18)
ALG_BOBS = (ALG_BOB,)
# 18 display names, one per bank in ALG_BANKS order.
ALG_BNK_NAMES = ('Osc1', 'Filter1', 'Amp1', 'LFO1', 'Osc2', 'Filter2', 'Amp2', 'LFO2', 'LFO1Rout', 'LFO2Rout', 'NsSubDrv', 'Vibrato', 'UniGlide', 'ON', '2FiltEnv.', '2AmpEnv.', '2LFOs', '2Osc')
# COL: presumably the Collision instrument (mallet/noise/resonator model).
# Empty strings leave the corresponding encoder unassigned.
COL_BANK1 = ('Mallet Volume', 'Mallet Volume < Key', 'Mallet Volume < Vel', 'Mallet Noise Amount', 'Mallet Noise Amount < Key', 'Mallet Noise Amount < Vel', 'Mallet Stiffness', 'Mallet Noise Color')
COL_BANK2 = ('Noise Volume', 'Noise Filter Type', 'Noise Filter Freq', 'Noise Filter Q', 'Noise Attack', 'Noise Decay', 'Noise Sustain', 'Noise Release')
COL_BANK3 = ('Res 1 On/Off', 'Res 1 Tune', 'Res 1 Fine Tune', 'Res 1 Pitch Env.', 'Res 1 Pitch Env. Time', 'Res 1 Bleed', 'Panorama', 'Res 1 Volume')
COL_BANK4 = ('Res 2 On/Off', 'Res 2 Tune', 'Res 2 Fine Tune', 'Res 2 Pitch Env.', 'Res 2 Pitch Env. Time', 'Res 2 Bleed', 'Panorama', 'Res 2 Volume')
COL_BANK5 = ('Res 1 Type', 'Res 1 Ratio', 'Res 1 Brightness', 'Res 1 Opening', 'Res 1 Inharmonics', 'Res 1 Listening L', 'Res 1 Listening R', 'Res 1 Hit')
COL_BANK6 = ('Res 2 Type', 'Res 2 Ratio', 'Res 2 Brightness', 'Res 2 Opening', 'Res 2 Inharmonics', 'Res 2 Listening L', 'Res 2 Listening R', 'Res 2 Hit')
COL_BANK7 = ('Res 1 Decay', 'Res 1 Radius', 'Res 1 Material', 'Res 1 Quality', 'Res 2 Decay', 'Res 2 Material', 'Res 2 Radius', 'Res 2 Quality')
COL_BANK8 = ('LFO 1 Shape', 'LFO 1 Sync', 'LFO 1 Sync Rate', 'LFO 1 Rate', 'LFO 1 Destination A', 'LFO 1 Destination A Amount', 'LFO 1 Destination B', 'LFO 1 Destination B Amount')
COL_BANK9 = ('LFO 2 Shape', 'LFO 2 Sync', 'LFO 2 Sync Rate', 'LFO 2 Rate', 'LFO 2 Destination A', 'LFO 2 Destination A Amount', 'LFO 2 Destination B', 'LFO 2 Destination B Amount')
COL_BANK10 = ('Mallet Volume', 'Mallet Noise Amount', 'Noise Volume', 'Res 1 Volume', 'Res 2 Volume', 'Structure', 'Voices', 'Volume')
COL_BANK11 = ('PB Destination A', 'PB Destination A Amount', 'MW Destination A', 'MW Destination A Amount', 'AT Destination A', 'AT Destination A Amount', 'AT Destination B', 'AT Destination B Amount')
COL_BANK12 = ('Mallet Volume < Vel', 'Mallet Stiffness < Vel', 'Mallet Noise Amount < Vel', 'Noise Volume < Vel', 'Noise Freq < Vel', '', '', '')
COL_BANK13 = ('Res 1 Pitch Env. < Vel', 'Res 1 Decay < Vel', 'Res 1 Material < Vel', 'Res 1 Radius < Vel', 'Res 1 Inharmonics < Vel', 'Res 1 Opening < Vel', '', '')
COL_BANK14 = ('Res 2 Pitch Env. < Vel', 'Res 2 Decay < Vel', 'Res 2 Material < Vel', 'Res 2 Radius < Vel', 'Res 2 Inharmonics < Vel', 'Res 2 Opening < Vel', '', '')
COL_BOB = ('Res 1 Brightness', 'Res 1 Type', 'Mallet Stiffness', 'Mallet Noise Amount', 'Res 1 Inharmonics', 'Res 1 Decay', 'Res 1 Tune', 'Volume')
COL_BANKS = (COL_BANK1,
             COL_BANK2,
             COL_BANK3,
             COL_BANK4,
             COL_BANK5,
             COL_BANK6,
             COL_BANK7,
             COL_BANK8,
             COL_BANK9,
             COL_BANK10,
             COL_BANK11,
             COL_BANK12,
             COL_BANK13,
             COL_BANK14)
COL_BOBS = (COL_BOB,)
COL_BNK_NAMES = ('Mallet', 'Noise', 'Res1 Pitch/Mix', 'Res2 Pitch/Mix', 'Res1 Material', 'Res2 Material', 'Res1+2 Decay/Quality', 'LFO 1', 'LFO 2', 'All Vol/Global', 'PB/MW/AT', 'Ml/Ns Vel', 'Res1 Vel', 'Res2 Vel')
# ELC: presumably the Electric instrument (mallet/tine/damper/pickup model).
ELC_BANK1 = ('M Stiffness', 'M Stiff < Vel', 'M Force', 'M Force < Vel', 'Noise Pitch', 'Noise Decay', 'Noise Amount', 'Noise < Key')
ELC_BANK2 = ('F Tine Color', 'F Tine Decay', 'F Tine Vol', 'F Tine < Key', 'F Tone Decay', 'F Release', 'F Tone Vol', 'Volume')
ELC_BANK3 = ('Damp Tone', 'Damp Balance', 'Damp Amount', 'P Symmetry', 'P Amp In', 'P Distance', 'P Amp Out', 'P Amp < Key')
ELC_BANK4 = ('Voices', 'Semitone', 'Detune', 'KB Stretch', 'PB Range', 'M Stiff < Key', 'M Force < Key', 'Volume')
ELC_BANK5 = ('Noise Amount', 'F Tine Vol', 'F Tone Vol', 'Damp Amount', 'P Amp In', 'P Amp Out', '', 'Volume')
ELC_BOB = ('M Stiffness', 'M Force', 'Noise Amount', 'F Tine Vol', 'F Tone Vol', 'F Release', 'P Symmetry', 'Volume')
ELC_BANKS = (ELC_BANK1,
             ELC_BANK2,
             ELC_BANK3,
             ELC_BANK4,
             ELC_BANK5)
ELC_BOBS = (ELC_BOB,)
ELC_BNK_NAMES = ('Mallet', 'TineTone', 'DampPick', 'Global', 'AllLevel')
# IMP: presumably the Impulse drum sampler -- one bank per pad (1-8) plus
# one velocity/random modulation bank per pad.
IMP_BANK1 = ('1 Start', '1 Transpose', '1 Stretch Factor', '1 Saturator Drive', '1 Filter Freq', '1 Filter Res', '1 Pan', '1 Volume')
IMP_BANK2 = ('2 Start', '2 Transpose', '2 Stretch Factor', '2 Saturator Drive', '2 Filter Freq', '2 Filter Res', '2 Pan', '2 Volume')
IMP_BANK3 = ('3 Start', '3 Transpose', '3 Stretch Factor', '3 Saturator Drive', '3 Filter Freq', '3 Filter Res', '3 Pan', '3 Volume')
IMP_BANK4 = ('4 Start', '4 Transpose', '4 Stretch Factor', '4 Saturator Drive', '4 Filter Freq', '4 Filter Res', '4 Pan', '4 Volume')
IMP_BANK5 = ('5 Start', '5 Transpose', '5 Stretch Factor', '5 Saturator Drive', '5 Filter Freq', '5 Filter Res', '5 Pan', '5 Volume')
IMP_BANK6 = ('6 Start', '6 Transpose', '6 Stretch Factor', '6 Saturator Drive', '6 Filter Freq', '6 Filter Res', '6 Pan', '6 Volume')
IMP_BANK7 = ('7 Start', '7 Transpose', '7 Stretch Factor', '7 Saturator Drive', '7 Filter Freq', '7 Filter Res', '7 Pan', '7 Volume')
IMP_BANK8 = ('8 Start', '8 Transpose', '8 Stretch Factor', '8 Saturator Drive', '8 Filter Freq', '8 Filter Res', '8 Pan', '8 Volume')
IMP_BANK9 = ('1 Transpose <- Vel', '1 Transpose <- Random', '1 Stretch <- Vel', '1 Filter <- Vel', '1 Filter <- Random', '1 Pan <- Vel', '1 Pan <- Random', '1 Volume <- Vel')
IMP_BANK10 = ('2 Transpose <- Vel', '2 Transpose <- Random', '2 Stretch <- Vel', '2 Filter <- Vel', '2 Filter <- Random', '2 Pan <- Vel', '2 Pan <- Random', '2 Volume <- Vel')
IMP_BANK11 = ('3 Transpose <- Vel', '3 Transpose <- Random', '3 Stretch <- Vel', '3 Filter <- Vel', '3 Filter <- Random', '3 Pan <- Vel', '3 Pan <- Random', '3 Volume <- Vel')
IMP_BANK12 = ('4 Transpose <- Vel', '4 Transpose <- Random', '4 Stretch <- Vel', '4 Filter <- Vel', '4 Filter <- Random', '4 Pan <- Vel', '4 Pan <- Random', '4 Volume <- Vel')
IMP_BANK13 = ('5 Transpose <- Vel', '5 Transpose <- Random', '5 Stretch <- Vel', '5 Filter <- Vel', '5 Filter <- Random', '5 Pan <- Vel', '5 Pan <- Random', '5 Volume <- Vel')
IMP_BANK14 = ('6 Transpose <- Vel', '6 Transpose <- Random', '6 Stretch <- Vel', '6 Filter <- Vel', '6 Filter <- Random', '6 Pan <- Vel', '6 Pan <- Random', '6 Volume <- Vel')
IMP_BANK15 = ('7 Transpose <- Vel', '7 Transpose <- Random', '7 Stretch <- Vel', '7 Filter <- Vel', '7 Filter <- Random', '7 Pan <- Vel', '7 Pan <- Random', '7 Volume <- Vel')
IMP_BANK16 = ('8 Transpose <- Vel', '8 Transpose <- Random', '8 Stretch <- Vel', '8 Filter <- Vel', '8 Filter <- Random', '8 Pan <- Vel', '8 Pan <- Random', '8 Volume <- Vel')
IMP_BOB = ('Global Time', 'Global Transpose', '1 Transpose', '2 Transpose', '3 Transpose', '4 Transpose', '5 Transpose', '6 Transpose')
IMP_BANKS = (IMP_BANK1,
             IMP_BANK2,
             IMP_BANK3,
             IMP_BANK4,
             IMP_BANK5,
             IMP_BANK6,
             IMP_BANK7,
             IMP_BANK8,
             IMP_BANK9,
             IMP_BANK10,
             IMP_BANK11,
             IMP_BANK12,
             IMP_BANK13,
             IMP_BANK14,
             IMP_BANK15,
             IMP_BANK16)
IMP_BOBS = (IMP_BOB,)
IMP_BNK_NAMES = ('Pad 1', 'Pad 2', 'Pad 3', 'Pad 4', 'Pad 5', 'Pad 6', 'Pad 7', 'Pad 8', 'Pad1Rand', 'Pad2Rand', 'Pad3Rand', 'Pad4Rand', 'Pad5Rand', 'Pad6Rand', 'Pad7Rand', 'Pad8Rand')
# OPR: presumably the Operator FM synth.  NOTE: several names carry a
# trailing space ('A Fix On ' etc.); they are preserved verbatim since the
# strings must match Live's internal parameter names exactly.
OPR_BANK1 = ('A Fix Freq', 'A Fix Freq Mul', 'A Coarse', 'A Fine', 'A Fix On ', 'Osc-A On', 'Osc-A Wave', 'Osc-A Level')
OPR_BANK2 = ('Ae Mode', 'Osc-A Phase', 'Ae Init', 'Ae Peak', 'Ae Attack', 'Ae Decay', 'Ae Sustain', 'Ae Release')
OPR_BANK3 = ('B Fix Freq', 'B Fix Freq Mul', 'B Coarse', 'B Fine', 'B Fix On ', 'Osc-B On', 'Osc-B Wave', 'Osc-B Level')
OPR_BANK4 = ('Be Mode', 'Osc-B Phase', 'Be Init', 'Be Peak', 'Be Attack', 'Be Decay', 'Be Sustain', 'Be Release')
OPR_BANK5 = ('C Fix Freq', 'C Fix Freq Mul', 'C Coarse', 'C Fine', 'C Fix On ', 'Osc-C On', 'Osc-C Wave', 'Osc-C Level')
OPR_BANK6 = ('Ce Mode', 'Osc-C Phase', 'Ce Init', 'Ce Peak', 'Ce Attack', 'Ce Decay', 'Ce Sustain', 'Ce Release')
OPR_BANK7 = ('D Fix Freq', 'D Fix Freq Mul', 'D Coarse', 'D Fine', 'D Fix On ', 'Osc-D On', 'Osc-D Wave', 'Osc-D Level')
OPR_BANK8 = ('De Mode', 'Osc-D Phase', 'De Init', 'De Peak', 'De Attack', 'De Decay', 'De Sustain', 'De Release')
OPR_BANK9 = ('Filter On', 'Filter Type', 'Filter Freq', 'Filter Res', 'Shaper Type', 'Shaper Amt', 'Fe Amount', 'Filt < LFO')
OPR_BANK10 = ('Fe Mode', 'Fe End', 'Fe Init', 'Fe Peak', 'Fe Attack', 'Fe Decay', 'Fe Sustain', 'Fe Release')
OPR_BANK11 = ('Pe Amount', 'Pe End', 'Pe Init', 'Pe Peak', 'Pe Attack', 'Pe Decay', 'Pe Sustain', 'Pe Release')
OPR_BANK12 = ('Osc-A < Pe', 'Osc-B < Pe', 'Osc-C < Pe', 'Osc-D < Pe', 'LFO < Pe', 'Pe Amt A', 'Pe Dst B', 'Pe Amt B')
OPR_BANK13 = ('LFO On', 'LFO Type', 'LFO Range', 'LFO Sync', 'LFO Rate', 'LFO Amt', 'LFO Retrigger', 'LFO < Vel')
OPR_BANK14 = ('Le Mode', 'Le End', 'Le Init', 'Le Peak', 'Le Attack', 'Le Decay', 'Le Sustain', 'Le Release')
OPR_BANK15 = ('Osc-A < LFO', 'Osc-B < LFO', 'Osc-C < LFO', 'Osc-D < LFO', 'Filt < LFO', 'LFO Amt A', 'LFO Dst B', 'LFO Amt B')
OPR_BANK16 = ('Ae Loop', 'Ae Retrig', 'Fe Loop', 'Fe Retrig', 'Pe Loop', 'Pe Retrig', 'Le Loop', 'Le Retrig')
OPR_BANK17 = ('Glide On', 'Glide Time', 'Spread', 'Transpose', 'Algorithm', 'Time', 'Tone', 'Volume')
OPR_BANK18 = ('Ae Decay', 'Be Decay', 'Ce Decay', 'De Decay', 'Fe Decay', 'Pe Decay', 'Le Decay', '')
OPR_BANK19 = ('Ae Attack', 'Be Attack', 'Ce Attack', 'De Attack', 'Fe Attack', 'Pe Attack', 'Le Attack', '')
OPR_BANK20 = ('Ae Sustain', 'Be Sustain', 'Ce Sustain', 'De Sustain', 'Fe Sustain', 'Pe Sustain', 'Le Sustain', '')
OPR_BANK21 = ('Ae Release', 'Be Release', 'Ce Release', 'De Release', 'Fe Release', 'Pe Release', 'Le Release', '')
OPR_BANK22 = ('A Coarse', 'A Fine', 'B Coarse', 'B Fine', 'C Coarse', 'C Fine', 'D Coarse', 'D Fine')
OPR_BANK23 = ('A Fix Freq', 'A Fix Freq Mul', 'B Fix Freq', 'B Fix Freq Mul', 'C Fix Freq', 'C Fix Freq Mul', 'D Fix Freq', 'D Fix Freq Mul')
OPR_BANK24 = ('Osc-A Level', 'Osc-A Wave', 'Osc-B Level', 'Osc-B Wave', 'Osc-C Level', 'Osc-C Wave', 'Osc-D Level', 'Osc-D Wave')
OPR_BANK25 = ('Osc-A Feedb', 'Osc-A Phase', 'Osc-B Feedb', 'Osc-B Phase', 'Osc-C Feedb', 'Osc-C Phase', 'Osc-D Feedb', 'Osc-D Phase')
OPR_BOB = ('LFO Amt', 'Filter Freq', 'Fe Amount', 'Pe Amount', 'Algorithm', 'Transpose', 'Time', 'Volume')
# NOTE(review): BANK19 (attacks) is deliberately listed before BANK18
# (decays) so the order lines up with OPR_BNK_NAMES
# ('All Att' before 'All Dec') -- do not "fix" the ordering.
OPR_BANKS = (OPR_BANK1,
             OPR_BANK2,
             OPR_BANK3,
             OPR_BANK4,
             OPR_BANK5,
             OPR_BANK6,
             OPR_BANK7,
             OPR_BANK8,
             OPR_BANK9,
             OPR_BANK10,
             OPR_BANK11,
             OPR_BANK12,
             OPR_BANK13,
             OPR_BANK14,
             OPR_BANK15,
             OPR_BANK16,
             OPR_BANK17,
             OPR_BANK19,
             OPR_BANK18,
             OPR_BANK20,
             OPR_BANK21,
             OPR_BANK22,
             OPR_BANK23,
             OPR_BANK24,
             OPR_BANK25)
OPR_BOBS = (OPR_BOB,)
OPR_BNK_NAMES = ('OscA', 'OscA Env', 'OscB', 'OscB Env', 'OscC', 'OscC Env', 'OscD', 'OscD Env', 'Filter', 'FiltEnv', 'PitchEnv', 'PtchDest', 'LFO', 'LFO Env', 'LFO Dest', 'EnvLoop', 'Misc', 'All Att', 'All Dec', 'All Sust', 'All Rel', 'CrseFine', 'FreqMult', 'LevPhase', 'FB/Phase')
# SAM: presumably the Sampler instrument (20 banks).
SAM_BANK1 = ('Spread', 'Transpose', 'Detune', 'Glide Time', 'Pe < Env', 'Pe Mode', 'Pe Loop', 'Pe Retrig')
SAM_BANK2 = ('Pe Retrig', 'Pe End', 'Pe Init', 'Pe Peak', 'Pe Attack', 'Pe Decay', 'Pe Sustain', 'Pe Release')
SAM_BANK3 = ('O Type', 'O Volume', 'O Mode', 'O Fix On', 'O Coarse', 'O Fine', 'O Fix Freq', 'O Fix Freq Mul')
SAM_BANK4 = ('Oe Mode', 'Oe End', 'Oe Init', 'Oe Peak', 'Oe Attack', 'Oe Decay', 'Oe Sustain', 'Oe Release')
SAM_BANK5 = ('Filter Type', 'Filter Freq', 'Filter Res', 'Filter Morph', 'Shaper On', 'Shaper Pre-Filter', 'Shaper Type', 'Shaper Amt')
SAM_BANK6 = ('Fe < Env', 'Fe End', 'Fe Init', 'Fe Peak', 'Fe Attack', 'Fe Decay', 'Fe Sustain', 'Fe Release')
SAM_BANK7 = ('Ve Retrig', 'Ve Loop', 'Ve Init', 'Ve Peak', 'Ve Attack', 'Ve Decay', 'Ve Sustain', 'Ve Release')
SAM_BANK8 = ('Ae On', 'Ae End', 'Ae Init', 'Ae Peak', 'Ae Attack', 'Ae Decay', 'Ae Sustain', 'Ae Release')
SAM_BANK9 = ('L 1 On', 'L 1 Sync', 'L 1 Sync Rate', 'L 1 Rate', 'L 1 Attack', 'L 1 Retrig', 'L 1 Offset', 'L 1 Wave')
SAM_BANK10 = ('L 1 Wave', 'L 1 Sync', 'L 1 Sync Rate', 'L 1 Rate', 'Vol < LFO', 'Pan < LFO', 'Filt < LFO', 'Pitch < LFO')
SAM_BANK11 = ('L 2 On', 'L 2 Sync', 'L 2 Sync Rate', 'L 2 Rate', 'L 2 Attack', 'L 2 Retrig', 'L 2 Offset', 'L 2 Wave')
SAM_BANK12 = ('L 3 On', 'L 3 Sync', 'L 3 Sync Rate', 'L 3 Rate', 'L 3 Attack', 'L 3 Retrig', 'L 3 Offset', 'L 3 Wave')
SAM_BANK13 = ('L 2 St Mode', 'L 2 Phase', 'L 2 Spin', 'L 2 Width', 'L 3 St Mode', 'L 3 Phase', 'L 3 Spin', 'L 3 Width')
SAM_BANK14 = ('Oe Mode', 'Oe Retrig', 'Oe Loop', 'Ae Mode', 'Ae Retrig', 'Pe Mode', 'Pe Retrig', 'Pe Loop')
SAM_BANK15 = ('Fe Mode', 'Fe Retrig', 'Fe Loop', 'Ae Mode', 'Ae Loop', 'Ve Mode', 'Ve Retrig', 'Ve Loop')
SAM_BANK16 = ('Osc On', 'Pe On', 'F On', 'Fe On', 'Ae On', 'L 1 On', 'L 2 On', 'L 3 On')
SAM_BANK17 = ('Oe Attack', 'Oe Decay', 'Pe Attack', 'Pe Decay', 'Fe Attack', 'Fe Decay', 'Ve Attack', 'Ve Decay')
SAM_BANK18 = ('Oe Sustain', 'Oe Release', 'Pe Sustain', 'Pe Release', 'Fe Sustain', 'Fe Release', 'Ve Sustain', 'Ve Release')
SAM_BANK19 = ('L 1 Sync Rate', 'L 1 Rate', 'L 2 Sync Rate', 'L 2 Rate', 'L 3 Sync Rate', 'L 3 Rate', 'L 2 Phase', 'L 3 Phase')
SAM_BANK20 = ('L 1 Attack', 'L 1 Offset', 'L 2 Attack', 'L 2 Offset', 'L 3 Attack', 'L 3 Offset', 'L 2 Spin', 'L 3 Spin')
SAM_BOB = ('Volume', 'Sample Selector', 'Ve Init', 'Ve Peak', 'Ve Attack', 'Ve Decay', 'Ve Sustain', 'Ve Release')
SAM_BANKS = (SAM_BANK1,
             SAM_BANK2,
             SAM_BANK3,
             SAM_BANK4,
             SAM_BANK5,
             SAM_BANK6,
             SAM_BANK7,
             SAM_BANK8,
             SAM_BANK9,
             SAM_BANK10,
             SAM_BANK11,
             SAM_BANK12,
             SAM_BANK13,
             SAM_BANK14,
             SAM_BANK15,
             SAM_BANK16,
             SAM_BANK17,
             SAM_BANK18,
             SAM_BANK19,
             SAM_BANK20)
SAM_BOBS = (SAM_BOB,)
SAM_BNK_NAMES = ('Pitch', 'PitchEnv', 'Osc', 'Osc Env', 'Filter', 'FiltEnv', 'VolEnv.', 'Aux Env.', 'LFO 1', 'LFO1Rout', 'LFO 2', 'LFO 3', 'LFO2/3St', 'LpOeAePe', 'LpFeAeVe', 'ON', 'AttDec', 'SustRel', 'LFORates', 'LFOAttOf')
# SIM: presumably the Simpler instrument.
SIM_BANK1 = ('S Start', 'S Loop Length', 'S Length', 'S Loop Fade', 'S Loop On', 'Snap', 'Pan', 'Volume')
SIM_BANK2 = ('Volume', 'Pan', 'Pan < Rnd', 'Vol < Vel', 'Ve Attack', 'Ve Decay', 'Ve Sustain', 'Ve Release')
SIM_BANK3 = ('Filter Type', 'Filter Freq', 'Filter Res', 'Fe < Env', 'Fe Attack', 'Fe Decay', 'Fe Sustain', 'Fe Release')
SIM_BANK4 = ('Transpose', 'Detune', 'Spread', 'Pe < Env', 'Pe Attack', 'Pe Decay', 'Pe Sustain', 'Pe Release')
SIM_BANK5 = ('L On', 'L Sync', 'L Sync Rate', 'L Rate', 'L Attack', 'L R < Key', 'L Offset', 'L Wave')
SIM_BANK6 = ('Glide Mode', 'Glide Time', 'Vol < Vel', 'Filt < Vel', 'Vol < LFO', 'Pan < LFO', 'Filt < LFO', 'Pitch < LFO')
SIM_BOB = ('Filter Freq', 'Filter Res', 'S Start', 'S Length', 'Ve Attack', 'Ve Release', 'Transpose', 'Volume')
SIM_BANKS = (SIM_BANK1,
             SIM_BANK2,
             SIM_BANK3,
             SIM_BANK4,
             SIM_BANK5,
             SIM_BANK6)
SIM_BOBS = (SIM_BOB,)
SIM_BNK_NAMES = ('Loop', 'Volume', 'Filter', 'Pitch', 'LFO', 'LFORout')
# TNS: presumably the Tension string-physical-modelling instrument.
TNS_BANK1 = ('Exc On/Off', 'Excitator Type', 'Exc Force', 'Exc Friction', 'Exc Velocity', 'E Pos', 'Exc Damping', 'E Pos Abs')
TNS_BANK2 = ('Damper On', 'Damper Gated', 'Damper Mass', 'D Stiffness', 'D Velocity', 'Damp Pos', 'D Damping', 'D Pos Abs')
TNS_BANK3 = ('Term On/Off', 'T Mass < Vel', 'T Mass < Key', 'Term Mass', 'Term Fng Stiff', 'Term Fret Stiff', '', 'Volume')
TNS_BANK4 = ('Body On/Off', 'Body Type', 'Body Size', 'Body Decay', 'Body Low-Cut', 'Body High-Cut', 'Body Mix', 'Volume')
TNS_BANK5 = ('String Decay', 'S Decay < Key', 'S Decay Ratio', 'Str Inharmon', 'Str Damping', 'S Damp < Key', 'Pickup On/Off', 'Pickup Pos')
TNS_BANK6 = ('Vibrato On/Off', 'Vib Delay', 'Vib Fade-In', 'Vib Speed', 'Vib Amount', 'Vib < ModWh', 'Vib Error', 'Volume')
TNS_BANK7 = ('Filter On/Off', 'Filter Type', 'Filter Freq', 'Filter Reso', 'Freq < Env', 'Reso < Env', 'Freq < LFO', 'Reso < LFO')
TNS_BANK8 = ('FEG On/Off', '', 'FEG Att < Vel', 'FEG < Vel', 'FEG Attack', 'FEG Decay', 'FEG Sustain', 'FEG Release')
TNS_BANK9 = ('LFO On/Off', 'LFO Sync On', 'LFO SyncRate', 'LFO Speed', 'LFO Delay', 'LFO Fade In', '', 'LFO Shape')
TNS_BANK10 = ('Octave', 'Semitone', 'Fine Tune', 'Voices', 'PB Depth', 'Stretch', 'Error', 'Key Priority')
TNS_BANK11 = ('Unison Voices', 'Uni Detune', 'Uni Delay', 'Porta On/Off', 'Porta Time', 'Porta Legato', 'Porta Prop', 'Volume')
TNS_BANK12 = ('Exc Force < Vel', 'Exc Force < Key', 'Exc Fric < Vel', 'Exc Fric < Key', 'Exc Vel < Vel', 'Exc Vel < Key', 'E Pos < Vel', 'E Pos < Key')
TNS_BANK13 = ('D Mass < Key', 'D Stiff < Key', 'D Velo < Key', 'D Pos < Key', 'T Mass < Vel', 'T Mass < Key', 'Freq < Key', 'Reso < Key')
TNS_BOB = ('Filter Freq', 'Filter Reso', 'Filter Type', 'Excitator Type', 'E Pos', 'String Decay', 'Str Damping', 'Volume')
TNS_BANKS = (TNS_BANK1,
             TNS_BANK2,
             TNS_BANK3,
             TNS_BANK4,
             TNS_BANK5,
             TNS_BANK6,
             TNS_BANK7,
             TNS_BANK8,
             TNS_BANK9,
             TNS_BANK10,
             TNS_BANK11,
             TNS_BANK12,
             TNS_BANK13)
TNS_BOBS = (TNS_BOB,)
TNS_BNK_NAMES = ('Excite', 'Damper', 'TermPick', 'Body', 'String', 'Vibrato', 'Filter', 'FiltEnv', 'LFO', 'Keyboard', 'UniPort', 'ExVelKey', 'RestKey')
# ARP: presumably the Arpeggiator MIDI effect.
# NOTE(review): 'Tranpose Mode'/'Tranpose Key' look misspelled, but these
# strings must match Live's internal parameter names -- verify against the
# device before changing.
ARP_BANK1 = ('Style', 'Groove', 'Offset', 'Hold On', '', '', '', 'Device On')
ARP_BANK2 = ('Sync On', 'Free Rate', 'Synced Rate', 'Gate', 'Retrigger Mode', 'Ret. Interval', 'Repeats', 'Device On')
ARP_BANK3 = ('Tranpose Mode', 'Tranpose Key', 'Transp. Steps', 'Transp. Dist.', 'Velocity Decay', 'Velocity Target', 'Velocity On', 'Vel. Retrigger')
ARP_BOB = ('Sync On', 'Free Rate', 'Synced Rate', 'Gate', 'Groove', 'Transp. Steps', 'Transp. Dist.', 'Velocity Decay')
ARP_BANKS = (ARP_BANK1, ARP_BANK2, ARP_BANK3)
ARP_BOBS = (ARP_BOB,)
ARP_BNK_NAMES = ('Style', 'Rate/Retrigger', 'Transp./Vel.')
# CRD: presumably the Chord MIDI effect (six shift/velocity pairs).
CRD_BANK1 = ('Shift1', 'Shift2', 'Shift3', 'Shift4', 'Shift5', 'Shift6', '', 'Device On')
CRD_BANK2 = ('Velocity1', 'Velocity2', 'Velocity3', 'Velocity4', 'Velocity5', 'Velocity6', '', 'Device On')
CRD_BANK3 = ('Shift1', 'Velocity1', 'Shift2', 'Velocity2', 'Shift3', 'Velocity3', 'Shift4', 'Velocity4')
CRD_BOB = ('Shift1', 'Shift2', 'Shift3', 'Shift4', 'Velocity1', 'Velocity2', 'Velocity3', 'Velocity4')
CRD_BANKS = (CRD_BANK1, CRD_BANK2, CRD_BANK3)
CRD_BOBS = (CRD_BOB,)
CRD_BNK_NAMES = ('PtchShft', 'Velocity', 'StVel1-4')
# Single-bank MIDI effects: Note Length (NTL), Pitch (PIT), Random (RND),
# Scale (SCL) and Velocity (VEL) -- presumed device identities from the
# parameter names; verify against the consuming lookup table.
NTL_BANK1 = ('Trigger Mode', 'Sync On', 'Synced Length', 'Time Length', 'Gate', 'On/Off-Balance', 'Decay Time', 'Decay Key Scale')
NTL_BANKS = (NTL_BANK1,)
NTL_BOBS = (NTL_BANK1,)
PIT_BANK1 = ('Pitch', 'Range', 'Lowest', '', '', '', '', 'Device On')
PIT_BANKS = (PIT_BANK1,)
PIT_BOBS = (PIT_BANK1,)
RND_BANK1 = ('Chance', 'Choices', 'Mode', 'Scale', 'Sign', '', '', 'Device On')
RND_BANKS = (RND_BANK1,)
RND_BOBS = (RND_BANK1,)
SCL_BANK1 = ('Base', 'Transpose', 'Range', 'Lowest', 'Fold', 'Map 0', 'Map 1', 'Device On')
SCL_BANK2 = ('Map 0', 'Map 1', 'Map 2', 'Map 3', 'Map 4', 'Map 5', 'Map 6', 'Map 7')
SCL_BANKS = (SCL_BANK1, SCL_BANK2)
SCL_BOBS = (SCL_BANK1,)
SCL_BNK_NAMES = ('Scale', 'Maps')
VEL_BANK1 = ('Mode', 'Drive', 'Compand', 'Random', 'Out Hi', 'Out Low', 'Range', 'Lowest')
VEL_BANKS = (VEL_BANK1,)
VEL_BOBS = (VEL_BANK1,)
# Audio effects: Amp (AMP), Auto Filter (AFL), Auto Pan (APN) and
# Beat Repeat (BRP) -- presumed device identities from the parameter names.
AMP_BANK1 = ('Amp Type', 'Gain', 'Bass', 'Middle', 'Treble', 'Presence', 'Volume', 'Dry/Wet')
AMP_BANK2 = ('Dual Mono', '', '', '', '', '', '', '')
AMP_BANKS = (AMP_BANK1, AMP_BANK2)
AMP_BOBS = (AMP_BANK1,)
AMP_BNK_NAMES = ('Global', 'DualMono')
AFL_BANK1 = ('Env. Modulation', 'Env. Attack', 'Env. Release', 'Filter Type', 'Frequency', 'Resonance', 'LFO Quantize On', 'LFO Quantize Rate')
AFL_BANK2 = ('LFO Amount', 'LFO Sync', 'LFO Sync Rate', 'LFO Frequency', 'LFO Waveform', 'LFO Stereo Mode', 'LFO Phase', 'LFO Spin')
AFL_BANK3 = ('', '', '', '', 'LFO Offset', 'Ext. In On', 'Ext. In Gain', 'Ext. In Mix')
AFL_BOB = ('Frequency', 'Resonance', 'Filter Type', 'Env. Modulation', 'LFO Amount', 'LFO Waveform', 'LFO Frequency', 'LFO Phase')
AFL_BANKS = (AFL_BANK1, AFL_BANK2, AFL_BANK3)
AFL_BOBS = (AFL_BOB,)
AFL_BNK_NAMES = ('Env/Filt', 'LFO', 'SideChain')
APN_BANK1 = ('LFO Type', 'Frequency', 'Stereo Mode', 'Spin', 'Amount', 'Sync Rate', 'Phase', 'Offset')
APN_BANK2 = ('Waveform', 'Shape', 'Width (Random)', 'Invert', '', '', '', 'Device On')
APN_BOB = ('LFO Type', 'Frequency', 'Stereo Mode', 'Spin', 'Amount', 'Sync Rate', 'Phase', 'Offset')
APN_BANKS = (APN_BANK1, APN_BANK2)
APN_BOBS = (APN_BOB,)
APN_BNK_NAMES = ('LFORates', 'LFO Wave')
BRP_BANK1 = ('Interval', 'Offset', 'Chance', 'Gate', 'Repeat', '', '', 'Device On')
BRP_BANK2 = ('Grid', 'Variation', 'Variation Type', 'Block Triplets', 'Pitch', 'Pitch Decay', '', '')
BRP_BANK3 = ('Filter Freq', 'Filter Width', '', 'Filter On', 'Volume', 'Decay', '', 'Mix Type')
BRP_BOB = ('Grid', 'Interval', 'Offset', 'Gate', 'Pitch', 'Pitch Decay', 'Filter Freq', 'Decay')
BRP_BANKS = (BRP_BANK1, BRP_BANK2, BRP_BANK3)
BRP_BOBS = (BRP_BOB,)
BRP_BNK_NAMES = ('RepeatRt', 'GridPitch', 'Filt/Mix')
# Audio effects: Cabinet (CAB), Chorus (CHR), Compressor (CP3),
# Corpus (CRP) and Dynamic Tube (DTB) -- presumed device identities.
CAB_BANK1 = ('Cabinet Type', 'Microphone Position', 'Microphone Type', 'Dual Mono', '', '', 'Device On', 'Dry/Wet')
CAB_BANKS = (CAB_BANK1,)
CAB_BOBS = (CAB_BANK1,)
CHR_BANK1 = ('Delay 1 HiPass', 'Delay 1 Time', 'Delay 2 Time', 'Link On', 'Delay 2 Mode', 'LFO Amount', 'LFO Extend On', 'LFO Rate')
CHR_BANK2 = ('Feedback', '', '', 'Dry/Wet', 'Polarity', '', '', 'Device On')
CHR_BOB = ('Delay 1 HiPass', 'Delay 1 Time', 'Delay 2 Time', 'Feedback', 'Dry/Wet', 'LFO Amount', 'LFO Extend On', 'LFO Rate')
CHR_BANKS = (CHR_BANK1, CHR_BANK2)
CHR_BOBS = (CHR_BOB,)
CHR_BNK_NAMES = ('DelayMod', 'Mixer')
CP3_BANK1 = ('Ext. In On', 'Side Listen', 'Ext. In Gain', 'Ext. In Mix', 'EQ Mode', 'EQ Freq', 'EQ Q', 'EQ Gain')
CP3_BANK2 = ('Ratio', 'Expansion Ratio', 'Attack', 'Release', 'Threshold', 'Output Gain', 'Knee', 'LookAhead')
CP3_BANK3 = ('EQ On', 'Auto Release On/Off', 'Env Mode', 'Makeup', 'Model', '', '', 'Dry/Wet')
CP3_BOB = ('Ratio', 'Expansion Ratio', 'Attack', 'Release', 'Threshold', 'Output Gain', 'Knee', 'Dry/Wet')
CP3_BANKS = (CP3_BANK1, CP3_BANK2, CP3_BANK3)
CP3_BOBS = (CP3_BOB,)
CP3_BNK_NAMES = ('SideChain', 'Compress', 'Output')
# NOTE(review): CRP_BANK1 lists 'Width' twice -- verify whether the second
# one should be a different parameter name before changing.
CRP_BANK1 = ('Tune', 'Fine', 'Spread', 'Dry Wet', 'Mid Freq', 'Width', 'Bleed', 'Width')
CRP_BANK2 = ('LFO Sync', 'LFO Sync Rate', 'LFO Rate', 'LFO Amount', 'LFO Stereo Mode', 'Phase', 'Spin', 'LFO Shape')
CRP_BANK3 = ('Resonance Type', 'Listening L', 'Listening R', 'Hit', 'Inharmonics', 'Decay', 'Material', 'Brightness')
CRP_BANK4 = ('Transpose', 'Opening', 'Radius', 'Ratio', 'MIDI Frequency', 'PB Range', 'Note Off', 'Off Decay')
CRP_BOB = ('Brightness', 'Resonance Type', 'Material', 'Inharmonics', 'Decay', 'Ratio', 'Tune', 'Dry Wet')
CRP_BANKS = (CRP_BANK1, CRP_BANK2, CRP_BANK3, CRP_BANK4)
CRP_BOBS = (CRP_BOB,)
CRP_BNK_NAMES = ('TuneFilt', 'LFO', 'Body', 'SideChain')
DTB_BANK1 = ('Drive', 'Tube Type', 'Tone', 'Bias', 'Envelope', 'Attack', 'Release', 'Dry/Wet')
DTB_BANK2 = ('Output', '', '', '', '', '', '', 'Dry/Wet')
DTB_BOB = ('Drive', 'Tube Type', 'Tone', 'Bias', 'Envelope', 'Attack', 'Release', 'Dry/Wet')
DTB_BANKS = (DTB_BANK1, DTB_BANK2)
DTB_BOBS = (DTB_BOB,)
DTB_BNK_NAMES = ('Dynamics', 'Output')
# EQ8: presumably EQ Eight -- stereo (A) banks first, then mid/side (A+B)
# bank pairings.
EQ8_BANK1 = ('1 Filter On A', '2 Filter On A', '3 Filter On A', '4 Filter On A', '5 Filter On A', '6 Filter On A', '7 Filter On A', '8 Filter On A')
EQ8_BANK2 = ('1 Frequency A', '1 Resonance A', '1 Gain A', '1 Filter Type A', '8 Frequency A', '8 Resonance A', '8 Gain A', '8 Filter Type A')
EQ8_BANK3 = ('2 Frequency A', '2 Resonance A', '2 Gain A', '2 Filter Type A', '7 Frequency A', '7 Resonance A', '7 Gain A', '7 Filter Type A')
EQ8_BANK4 = ('3 Frequency A', '3 Resonance A', '3 Gain A', '3 Filter Type A', '6 Frequency A', '6 Resonance A', '6 Gain A', '6 Filter Type A')
EQ8_BANK5 = ('4 Frequency A', '4 Resonance A', '4 Gain A', '4 Filter Type A', '5 Frequency A', '5 Resonance A', '5 Gain A', '5 Filter Type A')
EQ8_BANK6 = ('1 Frequency A', '1 Gain A', '2 Frequency A', '2 Resonance A', '7 Frequency A', '7 Resonance A', '8 Frequency A', '8 Gain A')
EQ8_BANK7 = ('3 Frequency A', '3 Resonance A', '4 Frequency A', '4 Resonance A', '5 Frequency A', '5 Resonance A', '6 Frequency A', '6 Resonance A')
EQ8_BANK8 = ('', '', '', '', '', 'Adaptive Q', 'Scale', 'Output Gain')
EQ8_BANK9 = ('1 Filter On B', '2 Filter On B', '3 Filter On B', '4 Filter On B', '5 Filter On B', '6 Filter On B', '7 Filter On B', '8 Filter On B')
EQ8_BANK10 = ('1 Frequency A', '1 Resonance A', '1 Gain A', '1 Filter Type A', '1 Frequency B', '1 Resonance B', '1 Gain B', '1 Filter Type B')
EQ8_BANK11 = ('2 Frequency A', '2 Resonance A', '2 Gain A', '2 Filter Type A', '2 Frequency B', '2 Resonance B', '2 Gain B', '2 Filter Type B')
EQ8_BANK12 = ('3 Frequency A', '3 Resonance A', '3 Gain A', '3 Filter Type A', '3 Frequency B', '3 Resonance B', '3 Gain B', '3 Filter Type B')
EQ8_BANK13 = ('4 Frequency A', '4 Resonance A', '4 Gain A', '4 Filter Type A', '4 Frequency B', '4 Resonance B', '4 Gain B', '4 Filter Type B')
EQ8_BANK14 = ('5 Frequency A', '5 Resonance A', '5 Gain A', '5 Filter Type A', '5 Frequency B', '5 Resonance B', '5 Gain B', '5 Filter Type B')
EQ8_BANK15 = ('6 Frequency A', '6 Resonance A', '6 Gain A', '6 Filter Type A', '6 Frequency B', '6 Resonance B', '6 Gain B', '6 Filter Type B')
EQ8_BANK16 = ('7 Frequency A', '7 Resonance A', '7 Gain A', '7 Filter Type A', '7 Frequency B', '7 Resonance B', '7 Gain B', '7 Filter Type B')
EQ8_BANK17 = ('8 Frequency A', '8 Resonance A', '8 Gain A', '8 Filter Type A', '8 Frequency B', '8 Resonance B', '8 Gain B', '8 Filter Type B')
EQ8_BANK18 = ('1 Frequency A', '1 Resonance A', '1 Frequency B', '1 Resonance B', '8 Frequency A', '8 Resonance A', '8 Frequency B', '8 Resonance B')
EQ8_BANK19 = ('2 Frequency A', '2 Gain A', '2 Frequency B', '2 Gain B', '7 Frequency A', '7 Gain A', '7 Frequency B', '7 Gain B')
EQ8_BANK20 = ('3 Frequency A', '3 Gain A', '3 Frequency B', '3 Gain B', '6 Frequency A', '6 Gain A', '6 Frequency B', '6 Gain B')
EQ8_BANK21 = ('4 Frequency A', '4 Gain A', '4 Frequency B', '4 Gain B', '5 Frequency A', '5 Gain A', '5 Frequency B', '5 Gain B')
EQ8_BOB = ('1 Frequency A', '1 Gain A', '2 Frequency A', '2 Resonance A', '7 Frequency A', '7 Resonance A', '8 Frequency A', '8 Gain A')
EQ8_BANKS = (EQ8_BANK1,
             EQ8_BANK2,
             EQ8_BANK3,
             EQ8_BANK4,
             EQ8_BANK5,
             EQ8_BANK6,
             EQ8_BANK7,
             EQ8_BANK8,
             EQ8_BANK9,
             EQ8_BANK10,
             EQ8_BANK11,
             EQ8_BANK12,
             EQ8_BANK13,
             EQ8_BANK14,
             EQ8_BANK15,
             EQ8_BANK16,
             EQ8_BANK17,
             EQ8_BANK18,
             EQ8_BANK19,
             EQ8_BANK20,
             EQ8_BANK21)
EQ8_BOBS = (EQ8_BOB,)
EQ8_BNK_NAMES = ('SterAOn', 'SFilt1+8', 'SFilt2+7', 'SFilt3+6', 'SFilt4+5', 'S1/2+7/8', 'S3/4+5/6', 'OutputGn', 'M/S:B ON', 'MFlt1A+B', 'MFlt2A+B', 'MFlt3A+B', 'MFlt4A+B', 'MFlt5A+B', 'MFlt6A+B', 'MFlt7A+B', 'MFlt8A+B', 'MF1/8A+B', 'MF 2/7A+B', 'MF3/6A+B', 'MF4/5A+B')
# Audio effects: EQ Three (EQ3), Erosion (ERO), Filter Delay (FLD) and
# Flanger (FLG) -- presumed device identities.
EQ3_BANK1 = ('GainLo', 'GainMid', 'GainHi', 'LowOn', 'MidOn', 'HighOn', 'FreqLo', 'FreqHi')
EQ3_BANKS = (EQ3_BANK1,)
EQ3_BOBS = (EQ3_BANK1,)
ERO_BANK1 = ('Mode', 'Frequency', 'Width', 'Amount', '', '', '', '')
ERO_BANKS = (ERO_BANK1,)
ERO_BOBS = (ERO_BANK1,)
FLD_BANK1 = ('1 Input On', '1 Filter Freq', '1 Filter Width', '1 Delay Mode', '1 Beat Delay', '1 Feedback', '1 Pan', '1 Volume')
FLD_BANK2 = ('2 Input On', '2 Filter Freq', '2 Filter Width', '2 Delay Mode', '2 Beat Delay', '2 Feedback', '2 Pan', '2 Volume')
FLD_BANK3 = ('3 Input On', '3 Filter Freq', '3 Filter Width', '3 Delay Mode', '3 Beat Delay', '3 Feedback', '3 Pan', '3 Volume')
FLD_BANK4 = ('1 Filter Freq', '3 Filter Freq', '1 Beat Delay', '2 Beat Delay', '3 Beat Delay', '1 Feedback', '2 Feedback', '3 Feedback')
FLD_BANK5 = ('1 Pan', '3 Pan', '1 Volume', '2 Volume', '3 Volume', '1 Beat Swing', '2 Beat Swing', '3 Beat Swing')
FLD_BANK6 = ('1 Feedback', '3 Feedback', '1 Time Delay', '2 Time Delay', '3 Time Delay', '1 Pan', '3 Pan', 'Dry')
FLD_BOB = ('2 Filter Freq', '1 Beat Swing', '2 Beat Swing', '3 Beat Swing', '1 Feedback', '2 Feedback', '3 Feedback', 'Dry')
FLD_BANKS = (FLD_BANK1, FLD_BANK2, FLD_BANK3, FLD_BANK4, FLD_BANK5, FLD_BANK6)
FLD_BOBS = (FLD_BOB,)
FLD_BNK_NAMES = ('L Filter', 'L+R Filter', 'R Filter', 'FrqDlyFB', 'PanVolSw', 'FB/Dly/Pan')
FLG_BANK1 = ('Delay Time', 'Polarity', 'Feedback', 'Env. Modulation', 'Env. Attack', 'Env. Release', 'Hi Pass', 'Dry/Wet')
FLG_BANK2 = ('Sync', 'Frequency', 'Sync Rate', 'LFO Offset', 'LFO Stereo Mode', 'LFO Spin', 'LFO Amount', 'LFO Waveform')
FLG_BOB = ('Hi Pass', 'Delay Time', 'Frequency', 'Sync Rate', 'LFO Amount', 'Env. Modulation', 'Feedback', 'Dry/Wet')
FLG_BANKS = (FLG_BANK1, FLG_BANK2)
FLG_BOBS = (FLG_BOB,)
FLG_BNK_NAMES = ('DlyEnvMx', 'LFO/S&H')
# Audio effects: Frequency Shifter (FRS), Gate (GTE), Glue Compressor
# (GLU), Grain Delay (GRD), Looper (LPR) and Multiband Dynamics (MBD) --
# presumed device identities.  The MBD group continues past this section
# of the file (its *_BANKS tuple is defined further down).
FRS_BANK1 = ('Mode', 'Coarse', 'Ring Mod Frequency', 'Fine', 'Drive On/Off', 'Drive', 'Wide', 'Dry/Wet')
FRS_BANK2 = ('Sync', 'LFO Frequency', 'Sync Rate', 'LFO Offset', 'LFO Stereo Mode', 'LFO Spin', 'LFO Amount', 'LFO Waveform')
FRS_BANKS = (FRS_BANK1, FRS_BANK2)
FRS_BOBS = (FRS_BANK1,)
FRS_BNK_NAMES = ('FreqDrive', 'LFO/S&H')
GTE_BANK1 = ('Ext. In On', 'Side Listen', 'Ext. In Gain', 'Ext. In Mix', 'EQ Mode', 'EQ Freq', 'EQ Q', 'EQ Gain')
GTE_BANK2 = ('Threshold', 'Return', 'FlipMode', 'LookAhead', 'Attack', 'Hold', 'Release', 'Floor')
GTE_BOB = ('Ext. In Gain', 'Ext. In Mix', 'EQ On', 'EQ Freq', 'Threshold', 'Attack', 'Release', 'Floor')
GTE_BANKS = (GTE_BANK1, GTE_BANK2)
GTE_BOBS = (GTE_BOB,)
GTE_BNK_NAMES = ('SideChain', 'Gate')
GLU_BANK1 = ('Ext. In On', 'EQ On', 'Ext. In Gain', 'Ext. In Mix', 'EQ Mode', 'EQ Freq', 'EQ Q', 'EQ Gain')
GLU_BANK2 = ('Ratio', 'Peak Clip In', 'Attack', 'Release', 'Threshold', 'Makeup', 'Range', 'Dry/Wet')
GLU_BOB = ('Ratio', 'EQ Freq', 'Attack', 'Release', 'Threshold', 'Makeup', 'Range', 'Dry/Wet')
GLU_BANKS = (GLU_BANK1, GLU_BANK2)
GLU_BOBS = (GLU_BOB,)
GLU_BNK_NAMES = ('SideChain', 'Compress')
GRD_BANK1 = ('Frequency', 'Pitch', 'Time Delay', 'Beat Swing', 'Random', 'Spray', 'Feedback', 'DryWet')
GRD_BANK2 = ('Delay Mode', 'Beat Delay', '', '', '', '', '', 'Device On')
GRD_BANKS = (GRD_BANK1, GRD_BANK2)
GRD_BOBS = (GRD_BANK1,)
LPR_BANK1 = ('State', 'Quantization', 'Song Control', 'Tempo Control', 'Feedback', 'Monitor', 'Speed', 'Reverse')
LPR_BANKS = (LPR_BANK1,)
LPR_BOBS = (LPR_BANK1,)
MBD_BANK1 = ('Band Activator (High)', 'Mid-High Crossover', 'Input Gain (High)', 'Output Gain (High)', 'Master Output', 'Time Scaling', 'Amount', 'Soft Knee On/Off')
MBD_BANK2 = ('Attack Time (High)', 'Release Time (High)', 'Below Threshold (High)', 'Below Ratio (High)', 'Above Threshold (High)', 'Above Ratio (High)', 'Input Gain (High)', 'Output Gain (High)')
MBD_BANK3 = ('Band Activator (Mid)', '', 'Input Gain (Mid)', 'Output Gain (Mid)', 'Master Output', 'Time Scaling', 'Amount', 'Peak/RMS Mode')
MBD_BANK4 = ('Attack Time (Mid)', 'Release Time (Mid)', 'Below Threshold (Mid)', 'Below Ratio (Mid)', 'Above Threshold (Mid)', 'Above Ratio (Mid)', 'Input Gain (Mid)', 'Output Gain (Mid)')
MBD_BANK5 = ('Band Activator (Low)', 'Low-Mid Crossover', 'Input Gain (Low)', 'Output Gain (Low)', 'Master Output', 'Time Scaling', 'Amount', 'Soft Knee On/Off')
MBD_BANK6 = ('Attack Time (Low)', 'Release Time (Low)', 'Below Threshold (Low)', 'Below Ratio (Low)', 'Above Threshold (Low)', 'Above Ratio (Low)', 'Input Gain (Low)', 'Output Gain (Low)')
MBD_BANK7 = ('Attack Time (High)', 'Release Time (High)', 'Attack Time (Mid)', 'Release Time (Mid)', 'Attack Time (Low)', 'Release Time (Low)', 'Mid-High Crossover', 'Low-Mid Crossover')
MBD_BANK8 = ('Below Threshold (High)', 'Below Ratio (High)', 'Below Threshold (Mid)', 'Below Ratio (Mid)', 'Below Threshold (Low)', 'Below Ratio (Low)', 'Mid-High Crossover', 'Low-Mid Crossover')
MBD_BANK9 = ('Above Threshold (High)', 'Above Ratio (High)', 'Above Threshold (Mid)', 'Above Ratio (Mid)', 'Above Threshold (Low)', 'Above Ratio (Low)', 'Mid-High Crossover', 'Low-Mid Crossover')
MBD_BANK10 = ('Input Gain (High)', 'Output Gain (High)', 'Input Gain (Mid)', 'Output Gain (Mid)', 'Input Gain (Low)', 'Output Gain (Low)', 'Mid-High Crossover', 'Low-Mid Crossover')
MBD_BANK11 = ('Ext. In On', 'Ext. In Gain', 'Ext. In Mix', 'Soft Knee On/Off', 'Peak/RMS Mode', 'Master Output', 'Time Scaling', 'Amount')
MBD_BOB = ('Below Threshold (High)', 'Above Threshold (High)', 'Below Threshold (Mid)', 'Above Threshold (Mid)', 'Below Threshold (Low)', 'Above Threshold (Low)', 'Master Output', 'Amount')
MBD_BANKS = (MBD_BANK1,
MBD_BANK2,
MBD_BANK3,
MBD_BANK4,
MBD_BANK5,
MBD_BANK6,
MBD_BANK7,
MBD_BANK8,
MBD_BANK9,
MBD_BANK10,
MBD_BANK11)
MBD_BOBS = (MBD_BOB,)
MBD_BNK_NAMES = ('H:Filt+IO', 'H:T/B/A', 'M:Filt+IO', 'M: T/B/A', 'L:Filt+IO', 'L: T/B/A', 'All:Time', 'All:Below', 'All:Above', 'All:InOut', 'SdChnMix')
OVR_BANK1 = ('Filter Freq', 'Filter Width', 'Drive', 'Tone', 'Preserve Dynamics', '', '', 'Dry/Wet')
OVR_BANKS = (OVR_BANK1,)
OVR_BOBS = (OVR_BANK1,)
PHS_BANK1 = ('Poles', 'Color', 'Frequency', 'Feedback', 'Env. Modulation', 'Env. Attack', 'Env. Release', 'Dry/Wet')
PHS_BANK2 = ('LFO Sync', 'LFO Frequency', 'LFO Sync Rate', 'LFO Phase', 'LFO Stereo Mode', 'LFO Spin', 'LFO Amount', 'LFO Waveform')
PHS_BOB = ('Frequency', 'Feedback', 'Poles', 'Env. Modulation', 'Color', 'LFO Amount', 'LFO Frequency', 'Dry/Wet')
PHS_BANKS = (PHS_BANK1, PHS_BANK2)
PHS_BOBS = (PHS_BOB,)
PHS_BNK_NAMES = ('PFreqEnv', 'LFO/S&H')
PPG_BANK1 = ('Delay Mode', 'Beat Delay', 'Beat Swing', 'Time Delay', 'Filter Freq', 'Filter Width', 'Feedback', 'Dry/Wet')
PPG_BANKS = (PPG_BANK1,)
PPG_BOBS = (PPG_BANK1,)
RDX_BANK1 = ('Bit On', 'Bit Depth', 'Sample Mode', 'Sample Hard', 'Sample Soft', '', '', '')
RDX_BANKS = (RDX_BANK1,)
RDX_BOBS = (RDX_BANK1,)
RSN_BANK1 = ('Mode', 'I On', 'II On', 'III On', 'IV On', 'V On', 'Filter On', 'Dry/Wet')
RSN_BANK2 = ('Decay', 'I Note', 'II Pitch', 'III Pitch', 'IV Pitch', 'V Pitch', 'Frequency', 'Filter Type')
RSN_BANK3 = ('Const', 'I Tune', 'II Tune', 'III Tune', 'IV Tune', 'V Tune', 'Width', 'Dry/Wet')
RSN_BANK4 = ('Color', 'I Gain', 'II Gain', 'III Gain', 'IV Gain', 'V Gain', 'Global Gain', 'Dry/Wet')
RSN_BANK5 = ('Mode', 'Decay', 'Const', 'Color', 'I On', 'I Note', 'I Tune', 'I Gain')
RSN_BANK6 = ('II On', 'II Pitch', 'II Tune', 'II Gain', 'III On', 'III Pitch', 'III Tune', 'III Gain')
RSN_BANK7 = ('IV On', 'IV Pitch', 'IV Tune', 'IV Gain', 'V On', 'V Pitch', 'V Tune', 'V Gain')
RSN_BANK8 = ('Filter On', 'Frequency', 'Filter Type', 'Decay', 'Color', 'Width', 'Global Gain', 'Dry/Wet')
RSN_BOB = ('Frequency', 'Decay', 'Color', 'I Gain', 'II Gain', 'III Gain', 'Width', 'Dry/Wet')
RSN_BANKS = (RSN_BANK1,
RSN_BANK2,
RSN_BANK3,
RSN_BANK4,
RSN_BANK5,
RSN_BANK6,
RSN_BANK7,
RSN_BANK8)
RSN_BOBS = (RSN_BOB,)
RSN_BNK_NAMES = ('ON', 'Pitch', 'Tune', 'Gain', 'Mode I', 'Md:II+III', 'Md:IV+V', 'Filt/Mix')
RVB_BANK1 = ('In LowCut On', 'In HighCut On', 'ER Spin On', 'ER Shape', 'In Filter Freq', 'In Filter Width', 'ER Spin Rate', 'ER Spin Amount')
RVB_BANK2 = ('PreDelay', 'Room Size', 'Stereo Image', 'Chorus On', 'Chorus Rate', 'Chorus Amount', 'Density', 'Scale')
RVB_BANK3 = ('HiShelf On', 'HiShelf Freq', 'HiShelf Gain', 'LowShelf On', 'LowShelf Freq', 'LowShelf Gain', 'DecayTime', 'Freeze On')
RVB_BOB = ('PreDelay', 'ER Shape', 'Room Size', 'Stereo Image', 'Freeze On', 'ER Level', 'Diffuse Level', 'Dry/Wet')
RVB_BANKS = (RVB_BANK1, RVB_BANK2, RVB_BANK3)
RVB_BOBS = (RVB_BOB,)
RVB_BNK_NAMES = ('In/Reflc', 'GlobChrs', 'Diffusion')
SAT_BANK1 = ('Drive', 'Type', 'WS Drive', 'Color', 'Base', 'Frequency', 'Width', 'Depth')
SAT_BANK2 = ('Drive', 'Type', 'WS Drive', 'WS Curve', 'WS Depth', 'WS Lin', 'WS Damp', 'WS Period')
SAT_BOB = ('Drive', 'Type', 'Base', 'Frequency', 'Width', 'Depth', 'Output', 'Dry/Wet')
SAT_BANKS = (SAT_BANK1, SAT_BANK2)
SAT_BOBS = (SAT_BOB,)
SAT_BNK_NAMES = ('General', 'Waveshape')
SMD_BANK1 = ('L Delay Mode', 'L Beat Delay', 'L Beat Swing', 'L Time Delay', 'R Delay Mode', 'R Beat Delay', 'R Beat Swing', 'R Time Delay')
SMD_BOB = ('L Delay Mode', 'L Beat Delay', 'L Beat Swing', 'R Time Delay', 'R Beat Swing', 'Link On', 'Feedback', 'Dry/Wet')
SMD_BANKS = (SMD_BANK1,)
SMD_BOBS = (SMD_BOB,)
UTL_BANK1 = ('Gain', 'Mute', 'BlockDc', 'Signal Source', 'PhaseInvertL', 'PhaseInvertR', 'StereoSeparation', 'Panorama')
UTL_BANKS = (UTL_BANK1,)
UTL_BOBS = (UTL_BANK1,)
VDS_BANK1 = ('Tracing On', 'Tracing Drive', 'Tracing Freq.', 'Tracing Width', 'Pinch Soft On.', 'Pinch Mono On', 'Crackle Density', 'Crackle Volume')
VDS_BOB = ('Pinch Soft On.', 'Pinch Mono On', 'Pinch On', 'Crackle Density', 'Pinch Drive', 'Pinch Freq.', 'Pinch Width', 'Global Drive')
VDS_BANKS = (VDS_BANK1,)
VDS_BOBS = (VDS_BOB,)
VOC_BANK1 = ('Unvoiced Level', 'Ext. In Gain', 'Noise Rate', 'Noise Crackle', 'Upper Pitch Detection', 'Lower Pitch Detection', 'Oscillator Waveform', 'Oscillator Pitch')
VOC_BANK2 = ('Enhance', 'Unvoiced Sensitivity', 'Mono/Stereo', 'Envelope Depth', 'Attack Time', 'Release Time', 'Formant Shift', 'Dry/Wet')
VOC_BANK3 = ('Upper Filter Band', 'Lower Filter Band', 'Filter Bandwidth', 'Precise/Retro', 'Gate Threshold', 'Output Level', 'Envelope Depth', 'Dry/Wet')
VOC_BOB = ('Unvoiced Level', 'Filter Bandwidth', 'Gate Threshold', 'Formant Shift', 'Attack Time', 'Release Time', 'Envelope Depth', 'Dry/Wet')
VOC_BANKS = (VOC_BANK1, VOC_BANK2, VOC_BANK3)
VOC_BOBS = (VOC_BOB,)
VOC_BNK_NAMES = ('Carrier', 'Env/Mix', 'Filter')
# Maps a Live device class name to its full tuple of parameter banks.
# All four rack device classes share the generic rack bank layout.
DEVICE_DICT = {
    'AudioEffectGroupDevice': RCK_BANKS,
    'MidiEffectGroupDevice': RCK_BANKS,
    'InstrumentGroupDevice': RCK_BANKS,
    'DrumGroupDevice': RCK_BANKS,
    'InstrumentImpulse': IMP_BANKS,
    'Operator': OPR_BANKS,
    'UltraAnalog': ALG_BANKS,
    'OriginalSimpler': SIM_BANKS,
    'MultiSampler': SAM_BANKS,
    'MidiArpeggiator': ARP_BANKS,
    'LoungeLizard': ELC_BANKS,
    'StringStudio': TNS_BANKS,
    'Collision': COL_BANKS,
    'MidiChord': CRD_BANKS,
    'MidiNoteLength': NTL_BANKS,
    'MidiPitcher': PIT_BANKS,
    'MidiRandom': RND_BANKS,
    'MidiScale': SCL_BANKS,
    'MidiVelocity': VEL_BANKS,
    'AutoFilter': AFL_BANKS,
    'AutoPan': APN_BANKS,
    'BeatRepeat': BRP_BANKS,
    'Chorus': CHR_BANKS,
    'Compressor2': CP3_BANKS,
    'Corpus': CRP_BANKS,
    'Eq8': EQ8_BANKS,
    'FilterEQ3': EQ3_BANKS,
    'Erosion': ERO_BANKS,
    'FilterDelay': FLD_BANKS,
    'Flanger': FLG_BANKS,
    'FrequencyShifter': FRS_BANKS,
    'GrainDelay': GRD_BANKS,
    'Looper': LPR_BANKS,
    'MultibandDynamics': MBD_BANKS,
    'Overdrive': OVR_BANKS,
    'Phaser': PHS_BANKS,
    'Redux': RDX_BANKS,
    'Saturator': SAT_BANKS,
    'Resonator': RSN_BANKS,
    'CrossDelay': SMD_BANKS,
    'StereoGain': UTL_BANKS,
    'Tube': DTB_BANKS,
    'Reverb': RVB_BANKS,
    'Vinyl': VDS_BANKS,
    'Gate': GTE_BANKS,
    'PingPongDelay': PPG_BANKS,
    'Vocoder': VOC_BANKS,
    'Amp': AMP_BANKS,
    'Cabinet': CAB_BANKS,
    'GlueCompressor': GLU_BANKS,
}
# Maps a Live device class name to its "best of bank" tuple (a single bank
# of the eight most useful parameters for that device).
DEVICE_BOB_DICT = {
    'AudioEffectGroupDevice': RCK_BOBS,
    'MidiEffectGroupDevice': RCK_BOBS,
    'InstrumentGroupDevice': RCK_BOBS,
    'DrumGroupDevice': RCK_BOBS,
    'InstrumentImpulse': IMP_BOBS,
    'Operator': OPR_BOBS,
    'UltraAnalog': ALG_BOBS,
    'OriginalSimpler': SIM_BOBS,
    'MultiSampler': SAM_BOBS,
    'MidiArpeggiator': ARP_BOBS,
    'LoungeLizard': ELC_BOBS,
    'StringStudio': TNS_BOBS,
    'Collision': COL_BOBS,
    'MidiChord': CRD_BOBS,
    'MidiNoteLength': NTL_BOBS,
    'MidiPitcher': PIT_BOBS,
    'MidiRandom': RND_BOBS,
    'MidiScale': SCL_BOBS,
    'MidiVelocity': VEL_BOBS,
    'AutoFilter': AFL_BOBS,
    'AutoPan': APN_BOBS,
    'BeatRepeat': BRP_BOBS,
    'Chorus': CHR_BOBS,
    'Compressor2': CP3_BOBS,
    'Corpus': CRP_BOBS,
    'Eq8': EQ8_BOBS,
    'FilterEQ3': EQ3_BOBS,
    'Erosion': ERO_BOBS,
    'FilterDelay': FLD_BOBS,
    'Flanger': FLG_BOBS,
    'FrequencyShifter': FRS_BOBS,
    'GrainDelay': GRD_BOBS,
    'Looper': LPR_BOBS,
    'MultibandDynamics': MBD_BOBS,
    'Overdrive': OVR_BOBS,
    'Phaser': PHS_BOBS,
    'Redux': RDX_BOBS,
    'Saturator': SAT_BOBS,
    'Resonator': RSN_BOBS,
    'CrossDelay': SMD_BOBS,
    'StereoGain': UTL_BOBS,
    'Tube': DTB_BOBS,
    'Reverb': RVB_BOBS,
    'Vinyl': VDS_BOBS,
    'Gate': GTE_BOBS,
    'PingPongDelay': PPG_BOBS,
    'Vocoder': VOC_BOBS,
    'Amp': AMP_BOBS,
    'Cabinet': CAB_BOBS,
    'GlueCompressor': GLU_BOBS,
}
# Maps a Live device class name to display names for its banks.  Devices
# with a single bank are deliberately absent; callers fall back to a
# generated default name for those.
BANK_NAME_DICT = {
    'AudioEffectGroupDevice': RCK_BNK_NAMES,
    'MidiEffectGroupDevice': RCK_BNK_NAMES,
    'InstrumentGroupDevice': RCK_BNK_NAMES,
    'DrumGroupDevice': RCK_BNK_NAMES,
    'InstrumentImpulse': IMP_BNK_NAMES,
    'Operator': OPR_BNK_NAMES,
    'UltraAnalog': ALG_BNK_NAMES,
    'OriginalSimpler': SIM_BNK_NAMES,
    'MultiSampler': SAM_BNK_NAMES,
    'MidiArpeggiator': ARP_BNK_NAMES,
    'LoungeLizard': ELC_BNK_NAMES,
    'StringStudio': TNS_BNK_NAMES,
    'Collision': COL_BNK_NAMES,
    'MidiChord': CRD_BNK_NAMES,
    'BeatRepeat': BRP_BNK_NAMES,
    'Compressor2': CP3_BNK_NAMES,
    'Corpus': CRP_BNK_NAMES,
    'Eq8': EQ8_BNK_NAMES,
    'FilterDelay': FLD_BNK_NAMES,
    'Flanger': FLG_BNK_NAMES,
    'Gate': GTE_BNK_NAMES,
    'MultibandDynamics': MBD_BNK_NAMES,
    'Phaser': PHS_BNK_NAMES,
    'Saturator': SAT_BNK_NAMES,
    'Resonator': RSN_BNK_NAMES,
    'Reverb': RVB_BNK_NAMES,
    'Vocoder': VOC_BNK_NAMES,
    'Amp': AMP_BNK_NAMES,
    'GlueCompressor': GLU_BNK_NAMES,
    'AutoFilter': AFL_BNK_NAMES,
}
# Class names that identify Max for Live devices; these expose their banks
# dynamically via get_bank_count()/get_bank_parameters() instead of the
# static dictionaries above.
MAX_DEVICES = ('MxDeviceInstrument', 'MxDeviceAudioEffect', 'MxDeviceMidiEffect')
def device_parameters_to_map(device):
    """ Return the device's parameters as a tuple, skipping the first one
    (presumably the device on/off switch -- TODO confirm against Live API). """
    all_parameters = device.parameters
    return tuple(all_parameters[1:])
def parameter_bank_names(device, bank_name_dict = BANK_NAME_DICT, ubermap_skip = False):
    """ Determine the bank names to use for a device

    Resolution order: ubermap custom bank names (unless ubermap_skip),
    the static bank_name_dict, then Max for Live device bank name queries,
    and finally generated 'Bank N' defaults.  Returns [] for a None device.
    """
    if device != None:
        if not ubermap_skip:
            # Custom ubermap definitions take precedence over everything else
            ubermap_banks = ubermap.get_custom_device_banks(device)
            if ubermap_banks:
                return ubermap_banks
            # No custom mapping found; record the device so a mapping can be written
            ubermap.dump_device(device)
        if device.class_name in bank_name_dict.keys():
            return bank_name_dict[device.class_name]
        else:
            banks = number_of_parameter_banks(device)
            def _default_bank_name(bank_index):
                # Fallback label for banks without a device-provided name
                return 'Bank ' + str(bank_index + 1)
            if device.class_name in MAX_DEVICES and banks != 0:
                def _is_ascii(c):
                    return ord(c) < 128
                def _bank_name(bank_index):
                    # Ask the Max for Live device for its own bank name
                    try:
                        name = device.get_bank_name(bank_index)
                    except:
                        name = None
                    if name:
                        # Strip non-ASCII characters from the name.
                        # NOTE(review): str(filter(...)) only yields the filtered
                        # string under Python 2; under Python 3 this would produce
                        # a filter-object repr.  This file relies on Python 2 map/
                        # filter semantics throughout.
                        return str(filter(_is_ascii, name))
                    else:
                        return _default_bank_name(bank_index)
                return map(_bank_name, range(0, banks))
            else:
                return map(_default_bank_name, range(0, banks))
    return []
def parameter_banks(device, device_dict = DEVICE_DICT, ubermap_skip = False):
    """ Determine the parameters to use for a device

    Returns a list of banks, each a list of up to eight parameters (None for
    empty slots).  Resolution order: ubermap custom parameters (unless
    ubermap_skip), the static device_dict, Max for Live bank queries, and
    finally a plain chunking of the device's parameters into groups of 8.
    Returns [] for a None device.
    """
    if device != None:
        if not ubermap_skip:
            # Custom ubermap definitions take precedence over everything else
            ubermap_params = ubermap.get_custom_device_params(device)
            if ubermap_params:
                return ubermap_params
        if device.class_name in device_dict.keys():
            def names_to_params(bank):
                # Resolve each parameter name in the bank to the live parameter object
                return map(partial(get_parameter_by_name, device), bank)
            return map(names_to_params, device_dict[device.class_name])
        else:
            if device.class_name in MAX_DEVICES:
                try:
                    banks = device.get_bank_count()
                except:
                    banks = 0
                if banks != 0:
                    def _bank_parameters(bank_index):
                        try:
                            parameter_indices = device.get_bank_parameters(bank_index)
                        except:
                            parameter_indices = []
                        # A well-formed bank always reports exactly 8 slots;
                        # index -1 marks an empty slot
                        if len(parameter_indices) != 8:
                            return [ None for i in range(0, 8) ]
                        else:
                            return [ (device.parameters[i] if i != -1 else None) for i in parameter_indices ]
                    return map(_bank_parameters, range(0, banks))
            # Fallback: chunk all parameters into banks of 8
            # (group is a helper defined elsewhere in this module)
            return group(device_parameters_to_map(device), 8)
    return []
""" Original function, not working with M4L devices and plugins. Probably due to weird decompilation issues. """
#def best_of_parameter_bank(device, device_bob_dict = DEVICE_BOB_DICT):
    #bobs = device and device.class_name in device_bob_dict and device_bob_dict[device.class_name]
    #if not len(bobs) == 1:
        #raise AssertionError
    #return map(partial(get_parameter_by_name, device), bobs[0])
    #if device.class_name in MAX_DEVICES:
        #try:
            #parameter_indices = device.get_bank_parameters(-1)
            #return [ (device.parameters[i] if i != -1 else None) for i in parameter_indices ]
        #except:
            #return []
    #return []
def best_of_parameter_bank(device, device_bob_dict = DEVICE_BOB_DICT, ubermap_skip = False):
    """ Revised function by Stray that should work fine with any type of device.

    Returns the 'best of bank' parameter list for the given device, checking
    (in order) ubermap custom definitions, Max for Live 'best of' bank
    queries, and the static best-of dictionary.  Returns [] when nothing
    matches or the device is missing.
    """
    # Guard against a missing device.  The sibling helpers (parameter_banks,
    # number_of_parameter_banks) perform this check, but the original version
    # of this function crashed on device.class_name below when passed None.
    if device is None:
        return []
    if not ubermap_skip:
        ubermap_bank = ubermap.get_custom_device_params(device, ubermap.SECTION_BEST_OF)
        if ubermap_bank:
            return ubermap_bank[0]
    if device.class_name in MAX_DEVICES:
        try:
            # Bank index -1 asks a Max for Live device for its 'best of' bank;
            # index -1 within the result marks an empty slot
            parameter_indices = device.get_bank_parameters(-1)
            return [ (device.parameters[i] if i != -1 else None) for i in parameter_indices ]
        except:
            return []
    elif device.class_name in device_bob_dict and device_bob_dict[device.class_name]:
        # Resolve the first (and only) best-of bank's names to parameter objects
        return map(partial(get_parameter_by_name, device), device_bob_dict[device.class_name][0])
    return []
def number_of_parameter_banks(device, device_dict = DEVICE_DICT):
    """ Determine the amount of parameter banks the given device has

    Uses the static device_dict when possible, then Max for Live bank
    queries, and finally computes the bank count by chunking the device's
    parameters into groups of 8.  Returns 0 for a None device.
    """
    if device is not None:
        if device.class_name in device_dict.keys():
            device_bank = device_dict[device.class_name]
            return len(device_bank)
        else:
            if device.class_name in MAX_DEVICES:
                try:
                    banks = device.get_bank_count()
                except:
                    banks = 0
                if banks != 0:
                    return banks
            # Ceiling division: one bank per 8 parameters, skipping the first
            # parameter to match device_parameters_to_map().  '//' keeps the
            # result an int under both Python 2 and Python 3 (the original '/'
            # would yield a float under Python 3).
            param_count = len(device.parameters[1:])
            return param_count // 8 + (1 if param_count % 8 else 0)
    return 0
def get_parameter_by_name(device, name):
    """ Find the given device's parameter that belongs to the given name.

    Returns the first parameter whose original_name matches, or None when
    no parameter matches.
    """
    matches = (parameter for parameter in device.parameters
               if parameter.original_name == name)
    return next(matches, None)
| {
"repo_name": "jim-cooley/abletonremotescripts",
"path": "remote-scripts/samples/ubermap-master/Devices/Devices.py",
"copies": "1",
"size": "48999",
"license": "apache-2.0",
"hash": -5029983746829039000,
"line_mean": 58.034939759,
"line_max": 283,
"alpha_frac": 0.6156452172,
"autogenerated": false,
"ratio": 2.411605472979624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3527250690179624,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from functional.util import compose, parallelize
class ExecutionStrategies(object):
    """
    Enum like object listing the types of execution strategies.
    """
    # Transformation must receive its input fully materialized as a list
    PRE_COMPUTE = 0
    # Transformation may be run in parallel worker processes
    PARALLEL = 1
class ExecutionEngine(object):
    """
    Serial execution engine for evaluating a Sequence.
    """
    def evaluate(self, sequence, transformations):
        """
        Apply each transformation to the sequence, one after another.
        :param sequence: Sequence to evaluate
        :param transformations: Transformations to apply
        :return: Iterator over the resulting sequence or value
        """
        # pylint: disable=no-self-use
        current = sequence
        for step in transformations:
            strategies = step.execution_strategies
            # PRE_COMPUTE transformations require a fully materialized list
            materialize = strategies is not None and ExecutionStrategies.PRE_COMPUTE in strategies
            current = step.function(list(current) if materialize else current)
        return iter(current)
class ParallelExecutionEngine(ExecutionEngine):
    """
    Class to perform parallel execution of a Sequence evaluation.
    """
    def __init__(self, processes=None, partition_size=None):
        """
        Set the number of processes for parallel execution.
        :param processes: Number of parallel Processes
        :param partition_size: Size of the partitions handed to each worker
        """
        super(ParallelExecutionEngine, self).__init__()
        self.processes = processes
        self.partition_size = partition_size
    def evaluate(self, sequence, transformations):
        """
        Execute the sequence of transformations in parallel
        :param sequence: Sequence to evaluate
        :param transformations: Transformations to apply
        :return: Resulting sequence or value
        """
        result = sequence
        parallel = partial(
            parallelize, processes=self.processes, partition_size=self.partition_size
        )
        # Consecutive PARALLEL-capable transformations are accumulated in
        # 'staged' and executed together as one parallelized composite call.
        staged = []
        for transform in transformations:
            strategies = transform.execution_strategies or {}
            if ExecutionStrategies.PARALLEL in strategies:
                # Prepend so that compose(*staged) applies the earliest staged
                # function first.  NOTE(review): this assumes
                # functional.util.compose composes right-to-left -- confirm
                # against its implementation.
                staged.insert(0, transform.function)
            else:
                # A non-parallel transformation flushes staged parallel work first
                if staged:
                    result = parallel(compose(*staged), result)
                    staged = []
                if ExecutionStrategies.PRE_COMPUTE in strategies:
                    # PRE_COMPUTE transformations need a fully materialized list
                    result = list(result)
                result = transform.function(result)
        # Flush any parallel work still staged at the end of the pipeline
        if staged:
            result = parallel(compose(*staged), result)
        return iter(result)
| {
"repo_name": "EntilZha/ScalaFunctional",
"path": "functional/execution.py",
"copies": "2",
"size": "2583",
"license": "mit",
"hash": -2396390930550984700,
"line_mean": 32.9868421053,
"line_max": 88,
"alpha_frac": 0.6225319396,
"autogenerated": false,
"ratio": 5.207661290322581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.683019322992258,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from functools import partialmethod
from django.db import models
from django.db.models import Case
from django.db.models import Max
from django.db.models import Q
from django.db.models import Subquery
from django.db.models import When
from django.db.models.functions import Coalesce
from django.db.models.fields.related import resolve_relation, make_model_tuple
from django_types.utils import dependency_tuple
from ..expressions import BypassExpression
from .mixins import UniqueForFieldsMixin
class OrderByField(UniqueForFieldsMixin, models.Field):
    """ Integer that determine display or sort order of records

    Ordering values are assigned automatically on insert (next free number
    within the ordering group) and helper methods for navigating and
    reordering the group are installed on the model and on related models.
    NOTE(review): get_group(), group_attnames and unique_for_fields used
    below are provided elsewhere (presumably by UniqueForFieldsMixin) --
    confirm against the mixin's implementation.
    """
    # Function name templates
    # func_local_* are installed on the model owning this field,
    # func_remote_* on models reachable through grouping foreign keys.
    func_local_next = 'get_next_in_order'
    func_local_previous = 'get_previous_in_order'
    func_local_get_set = 'get_%(name)s_set'
    func_local_set_set = 'set_%(name)s_set'
    func_remote_get_set = 'get_%(model)s_set'
    func_remote_set_set = 'set_%(model)s_set'
    # Will use unique_for_fields if specified, otherwise unique by default
    def __init__(self, *args, **kwargs):
        """ Reject explicit defaults; ordering values are always computed. """
        if 'default' in kwargs:
            raise ValueError('OrderByField may not have a default value')
        # Default None suppresses migration requests to set a default
        # TODO Add automatically filling to migrations
        super().__init__(*args, default=None, **kwargs)
    def get_dependencies(self):
        """ Return migration dependency tuples for each grouping field. """
        return [
            dependency_tuple(
                app_label=self.model._meta.app_label,
                object_name=self.model._meta.object_name,
                field=field.name,
                created=True)
            for field_name in self.unique_for_fields
            for field in (self.model._meta.get_field(field_name), )]
    def contribute_to_class(self, cls, *args, **kwargs):
        """ Install the ordering helper methods onto the model class. """
        super().contribute_to_class(cls, *args, **kwargs)
        # Add order related methods to model
        # Applying partialmethod() to already bound methods will retain self and add the model_instance bound to
        subs = {'name': self.name, 'model': self.model.__name__.lower()}
        setattr(cls, self.func_local_next % subs, partialmethod(self.get_next_or_previous_in_order, is_next=True))
        setattr(cls, self.func_local_previous % subs, partialmethod(self.get_next_or_previous_in_order, is_next=False))
        setattr(cls, self.func_local_get_set % subs, partialmethod(self.get_group_order))
        setattr(cls, self.func_local_set_set % subs, partialmethod(self.set_group_order))
        if self.unique_for_fields:
            # Declare that this field has dependencies
            self.has_dependencies = True
            # Queue rest of work for when model is fully loaded
            cls._meta.apps.lazy_model_operation(
                self._lazy_contribute_to_class,
                (cls._meta.app_label, cls._meta.model_name))
    def _lazy_contribute_to_class(self, model):
        """ Once the model is fully loaded, queue installation of remote
        ordering accessors on models referenced by grouping foreign keys. """
        # Sanity check
        assert(self.model == model)
        # Get foreign keys in the grouping
        field_fks = {
            field.name: field
            for field_name in self.unique_for_fields
            for field in (model._meta.get_field(field_name), )
            if not field.auto_created and field.many_to_one}
        # Extract all associated generic relations
        generic_fks = {
            field.name: field
            for field in model._meta.local_fields
            if (field.many_to_one and not field.remote_field) # find generic fks
            and (field.name in field_fks or field.fk_field in field_fks) # associated with this grouping
            and field_fks.pop(field.name, True) # and discard their fields
            and field_fks.pop(field.fk_field, True)} # from the field_fks list
        # Queue creation of remote order accessors
        for field in field_fks.values():
            model._meta.apps.lazy_model_operation(
                partial(self.contribute_to_related_class, field=field),
                make_model_tuple(resolve_relation(model, field.remote_field.model)))
        # TODO Find GenericRelations and add accessors
    def contribute_to_related_class(self, cls, field):
        """ Install get/set ordering accessors on a related model class. """
        subs = {'name': self.name, 'model': self.model.__name__.lower(), 'remote_name': field.name}
        setattr(cls, self.func_remote_get_set % subs, partialmethod(self.get_group_order, field=field))
        setattr(cls, self.func_remote_set_set % subs, partialmethod(self.set_group_order, field=field))
    def get_internal_type(self):
        # Stored in the database as a positive integer column
        return "PositiveIntegerField"
    def deconstruct(self):
        """ Suppress the synthetic default so migrations stay clean. """
        name, path, args, kwargs = super().deconstruct()
        # Remove default from field definition
        kwargs.pop('default', None)
        return name, path, args, kwargs
    def get_next_or_previous_in_order(self, model_instance, is_next=True):
        """ Return the neighbouring record within the ordering group,
        or None when at either end. """
        if not model_instance.pk:
            raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
        group_qs = self.get_group(model_instance).order_by(self.attname)
        # Filter out everything on the wrong side of this record
        filter_clause = '{field}__{direction}'.format(
            field=self.attname,
            direction='gt' if is_next else 'lt')
        filtered = group_qs.filter(**{filter_clause: getattr(model_instance, self.attname)})
        # Return the right end based on direction
        if is_next:
            return filtered.first()
        return filtered.last()
    def pre_save(self, model_instance, add):
        """ On insert without an explicit value, assign the next free
        ordering number via a database-side expression. """
        # Default to the next number larger than existing records, or start from 0
        if add and not getattr(model_instance, self.attname):
            return self.get_next_expression(model_instance)
        else:
            return super().pre_save(model_instance, add)
    def get_next_expression(self, model_instance):
        """ Generate an expression that will evaluate to the next valid ordering value """
        # This will be the next number larger than existing records in the ordering set
        # If no records in the ordering set, start from 0
        # Evade any custom model managers
        qs = models.QuerySet(self.model).filter(**self.get_filter_kwargs_for_object(model_instance))
        qs = qs.annotate(_next=Max(self.attname) + 1).values('_next').order_by()
        # Hackishly clip group_by clause to guarantee single result
        qs.query.group_by = []
        return BypassExpression(Coalesce(Subquery(qs), 0, output_field=models.IntegerField()))
    def get_group_order(self, model_instance, *, field=None, limit_to=None):
        """ Get the ordered group associated with an object
        * model_instance :: (bound) Source instance of the call
        * field :: Local fk that connects to source model if it's remote
        * limit_to :: An optional self.model instance to limit to one group
            when doing a remote call into composite fk groupings
        """
        filters = Q()
        if field:
            # Apply filter from remote field calls
            filters &= Q(**field.forward_related_filter(model_instance))
            if limit_to:
                # Apply local additive filter for remote field calls
                filters &= Q(**self.get_filter_kwargs_for_object(limit_to))
        else:
            # Apply filter for local field calls
            filters &= Q(**self.get_filter_kwargs_for_object(model_instance))
        return self.model.objects.filter(filters).order_by(*self.group_attnames, self.attname).values_list('pk', flat=True)
    def set_group_order(self, model_instance, id_list, *, field=None, reset_values=False, using=None):
        """ Set the ordering for a group
        * model_instance :: (bound) Source instance of the call
        * id_list :: List of primary keys (or a queryset) that will be moved
            to the end of their ordering set in order
            Has the effect of reordering all listed to match order specified
        * field :: Local fk that connects to source model if it's remote
        * reset_values :: Boolean to indicate whether to freshly renumber
            entire group from 0
            Must be updating entire group to reset_values
        """
        # Case expression to number instances in correct order
        enum_case = Case(*[When(pk=pk, then=i) for i, pk in enumerate(id_list)])
        # Bulk update with next value + enumerated value
        group_qs = self.get_group(model_instance)
        update_qs = group_qs.filter(pk__in=id_list)
        update_count = update_qs.update(**{self.attname: self.get_next_expression(model_instance) + enum_case})
        # Can only safely reset up whole group was updated
        if reset_values and update_count == group_qs.count():
            # Bulk update with just enumerated value
            update_qs.update(**{self.attname: enum_case})
        # TODO Even better with enumerated CTE
        # NOTE Possible fallback for some dbs? Update sequentially
        # for pk in id_list:
        #     qs.filter(pk=pk).update(**{self.attname: value})
| {
"repo_name": "ashleywaite/django-more",
"path": "django_more/fields/orderbyfield.py",
"copies": "1",
"size": "9106",
"license": "bsd-3-clause",
"hash": 8983970108380691000,
"line_mean": 49.032967033,
"line_max": 123,
"alpha_frac": 0.6416648364,
"autogenerated": false,
"ratio": 4.061552185548617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5203217021948617,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from functools import update_wrapper
# Package metadata for the decorator module.
__author__ = "Christopher Henderson"
__copyright__ = "Copyright 2015, Christopher Henderson"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Christopher Henderson"
__email__ = "chris@chenderson.org"
class Decorator(object):
    '''
    Abstract base class for writing class based decorators.

    Subclasses override __decorator__ (rather than __call__) with their
    wrapping behavior; the wrapped callable is available as self.function.
    The base class handles both ways a decorator can be applied:

        @MyDecorator              # bare: the target arrives via __init__
        def f(): ...

        @MyDecorator(flag=True)   # with arguments: wrapping is deferred
        def g(): ...              # until the first __call__

    It also implements the non-data descriptor protocol so decorated
    instance methods automatically receive their instance as the first
    argument.

    SIMPLE EXAMPLE:

        class Logged(Decorator):
            def __decorator__(self, *args, **kwargs):
                print ("Now calling {FUNC}".format(FUNC=self.function.__name__))
                result = self.function(*args, **kwargs)
                print ("Finished {FUNC}".format(FUNC=self.function.__name__))
                return result

        @Logged
        def add(a, b):
            return a + b
    '''

    def __init__(self, function=None):
        '''
        Wrap the target immediately when given, otherwise defer wrapping
        until the first __call__ (argument-style decorator usage).
        '''
        self.function = function
        if function:
            self.__wrap__(function)

    def __decorator__(self, *args, **kwargs):
        '''
        Surrogate for __call__: subclasses place their decorating behavior
        here instead of overriding __call__ directly.
        '''
        raise NotImplementedError("Call behavior is not defined in this abstract class")

    def __wrap__(self, function):
        '''
        Adopt *function* as the wrapped callable, copying its metadata
        (__name__, __doc__, ...) onto this decorator instance.
        '''
        self.function = function
        update_wrapper(self, function)
        return self

    def __call__(self, *args, **kwargs):
        '''
        Either wrap the single positional argument as the target function
        (when wrapping was deferred) or run the decorated behavior.
        '''
        if not self.function:
            return self.__wrap__(args[0])
        return self.__decorator__(*args, **kwargs)

    def __get__(self, instance, klass=None):
        '''
        Non-data descriptor: bind the accessing instance as the first
        argument so decorated instance methods receive self.
        '''
        return self if instance is None else partial(self, instance)
| {
"repo_name": "christopher-henderson/Decorator",
"path": "decorator.py",
"copies": "1",
"size": "5627",
"license": "mit",
"hash": 712447320455724500,
"line_mean": 29.7486338798,
"line_max": 88,
"alpha_frac": 0.5661986849,
"autogenerated": false,
"ratio": 4.842512908777969,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5908711593677969,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from functools import wraps
from flask import current_app
from jsonschema import validate
from flask import request
from werkzeug.local import LocalProxy
# Proxy resolving, at access time, to the extension state that init_app()
# stored on the current Flask application.
_fja = LocalProxy(lambda: current_app.extensions['flask_jsonschema_ext'])
def generate_jsonschema(database_entity, parse_tree=None):
    """Generate a JSONSchema from a database entity"""
    driver = _fja.driver()
    return driver.convert_entity_tree(database_entity, parse_tree=parse_tree)
def jsonschema(schema_generation_fn, cached=True):
    """Decorate a method to be protected by a jsonschema using the schema generation function specified

    The incoming request's JSON body is validated against the schema before
    the wrapped view runs; jsonschema.validate raises on invalid input.
    """
    def decorator(func, cache=None):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Get the schema from the cache or generate it
            if cached and cache is not None:
                try:
                    schema = cache['schema']
                except KeyError:
                    # First request: generate once and memoize
                    schema = schema_generation_fn()
                    cache['schema'] = schema
            else:
                schema = schema_generation_fn()
            # Validate the request as it comes in
            validate(request.get_json(), schema)
            return func(*args, **kwargs)
        return wrapper
    # The partial supplies a fresh dict per decoration as the cache, shared
    # by all requests through that one decorated view.
    # NOTE(review): the identity check means truthy non-True values of
    # 'cached' (e.g. 1) get no cache dict and regenerate per request --
    # confirm whether that is intended.
    if cached is True:
        return partial(decorator, cache={})
    return decorator
def jsonschema_generate(database_entity, cached=True, parse_tree=None):
    """Shorthand: protect a method with a schema generated from *database_entity*."""
    schema_fn = partial(generate_jsonschema, database_entity, parse_tree=parse_tree)
    return jsonschema(schema_fn, cached=cached)


# Deprecated, here for backwards compatibility with 0.1.2
# TODO: remove in 1.0.0
schema_json = jsonschema_generate
def _get_state(app, driver, **kwargs):
    """Build the extension state object for *app* and *driver*."""
    # app/driver always override any same-named entries in kwargs.
    merged = dict(kwargs, app=app, driver=driver)
    return _State(**merged)


class _State(object):
    """Attribute bag for extension state; keyword names are lower-cased."""

    def __init__(self, **kwargs):
        self.__dict__.update({key.lower(): value for key, value in kwargs.items()})
class FlaskJsonSchemaExt(object):
    """Flask extension entry point.

    Supports both immediate initialization (``FlaskJsonSchemaExt(app, driver)``)
    and the deferred app-factory pattern (construct empty, call ``init_app``
    later).
    """

    def __init__(self, app=None, driver=None, **kwargs):
        # BUG FIX: _state was previously only assigned when app was given,
        # so deferred-init instances raised AttributeError on access.  It is
        # now always defined (None until init_app runs).
        self._state = None
        if app is not None:
            self._state = self.init_app(app, driver, **kwargs)

    def init_app(self, app, driver, **kwargs):
        """Create the extension state, register it on *app*, and return it."""
        state = _get_state(app, driver, **kwargs)
        app.extensions['flask_jsonschema_ext'] = state
        return state
| {
"repo_name": "rachekalmir/flask-jsonschema-ext",
"path": "flask_jsonschema_ext/core.py",
"copies": "2",
"size": "2393",
"license": "mit",
"hash": -4311446943045841000,
"line_mean": 30.0779220779,
"line_max": 110,
"alpha_frac": 0.645215211,
"autogenerated": false,
"ratio": 4.147313691507799,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5792528902507799,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from functools import wraps
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
def doNothing(*args, **kwargs):
    """Test double: accepts anything, records the call, returns None."""
    doNothing.called = True


# Call flag starts cleared at import time.
doNothing.called = False
def returnValue(return_value):
    """Test double: echoes back exactly what it is given."""
    result = return_value
    return result
def raiseException(exception=None):
    """Test double: always raises — *exception* if supplied, else a bare Exception."""
    raise exception if exception is not None else Exception
def decorator(func=None, **kwargs):
    """A Sleuth-style decorator for testing purposes.

    Works both bare (``@decorator``) and configured
    (``@decorator(option=...)``); the configured form defers wrapping by
    returning a partial of itself.
    """
    if func is None:
        return partial(decorator, **kwargs)

    def passthrough(*call_args, **call_kwargs):
        # Transparent forwarding — no behavior of its own.
        return func(*call_args, **call_kwargs)

    return wraps(func)(passthrough)
def doNothing_callback(func):
    """Callback test double: only records that it was invoked."""
    doNothing_callback.called = True


doNothing_callback.called = False


def returnValue_callback(func, result):
    """Callback test double: records invocation; ignores both arguments."""
    returnValue_callback.called = True


returnValue_callback.called = False
| {
"repo_name": "emrob/sleuth",
"path": "test/fakemodule.py",
"copies": "1",
"size": "1126",
"license": "mit",
"hash": -8598728797562778000,
"line_mean": 18.0847457627,
"line_max": 62,
"alpha_frac": 0.6731793961,
"autogenerated": false,
"ratio": 4.2330827067669174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016472803810789853,
"num_lines": 59
} |
from functools import partial
from glob import glob
from itertools import chain
from jsbuild.dependency import Dependency
from jsbuild.logging import logger
from jsbuild.manifest import Manifest
from jsbuild.maps import get_class_by_format
from jsbuild import templates
import os.path
import re
def clean_backdir(path):
    """Strip any leading '../' (or '..') components from *path*."""
    return re.sub(r'^(\.\.\/?)+', '', path)


def count_backdir(path):
    """Count '../' occurrences in the normalized leading back-reference.

    NOTE(review): os.path.normpath collapses the trailing separator
    (e.g. '../../' -> '../..'), so this undercounts multi-level
    back-references; behavior preserved as-is for compatibility.
    """
    return get_backdir(path).count('../')


def has_backdir(path):
    """Return True if *path* begins with '..'."""
    return bool(re.match(r'^\.\.', path))


def join_path(*args):
    """os.path.join all *args* and normalize the result."""
    return os.path.normpath(os.path.join(*args))


def get_backdir(path):
    """Return the normalized run of leading '../' components of *path*, or ''."""
    match = re.search(r'((?:\.\.\/)+)', path)
    return os.path.normpath(match.group(1)) if match else ''
class Index(Dependency):
    """A dependency that is itself a build manifest.

    Resolves the files listed in its manifest into child dependencies and
    can render the combined output (``content``) or write it to disk
    (``put``).  Indexes can nest; ``self.index`` points at the parent index
    (None for the outermost one).
    """

    def __init__(self,*args,**kwargs):
        super(Index,self).__init__(*args,**kwargs)
        # Lazily populated caches; None means "not computed yet".
        self._buffer_ = None
        self._manifest_ = None
        self._dependencies_ = None
        # Filenames for which a "main call" stanza is appended to the output.
        self.to_call = []

    @property
    def buffer(self):
        # Raw manifest text, read once and memoized.
        if not self._buffer_:
            self._buffer_ = self.read()
        return self._buffer_

    @property
    def content(self):
        # Walk up to the outermost index; its manifest name labels the package.
        root = self
        while root.index: root = root.index
        name = root.manifest.name
        # Concatenate child contents.  Sub-indexes that configure their own
        # output 'filename' are written to disk via put() instead and
        # contribute an empty string here.
        content = '\n'.join(map(lambda dep: dep.content if not isinstance(dep,Index) or not dep.get_config('filename',False) else dep.put() or '', self.dependencies))
        # Only the outermost index wraps everything in the package template.
        if not self.index:
            content = templates.package%{ "name":name, "content":content }
        # Append a "main call" stanza for each registered entry file.
        for flname in self.to_call:
            content = '%s\n%s'%(content,templates.maincall%{ "index_name":root.manifest.name, "filename":flname})
        # Apply configured regex replacements over the final output.
        for rpl in self.get_config('replacements',[]):
            content = re.sub(rpl['pattern'],rpl['replacement']%self.get_config('dict',{}),content,flags=re.DOTALL)
        return content

    @property
    def dependencies(self):
        # Child dependency list, built on first access from the manifest.
        if self._dependencies_ == None:
            self.import_manifest()
        return self._dependencies_

    @property
    def manifest(self):
        # Parsed manifest, memoized.
        # NOTE(review): parse() is declared below with a 'content' parameter
        # but is called here with no argument; subclasses presumably override
        # it with a no-argument signature — confirm.
        if self._manifest_ == None:
            self._manifest_ = Manifest(self.parse())
        return self._manifest_

    def get_config(self,key,default=None):
        # Look up *key* in the manifest's build section.
        # NOTE(review): the and/or idiom returns *default* for falsy stored
        # values ('', 0, False), not only for missing keys.
        return self.manifest.build.__contains__(key) and self.manifest['build'][key] or default

    @property
    def source_dir(self):
        # Source directory: working dir joined with the configured 'dir'.
        return os.path.normpath(os.path.join(self.working_dir,self.get_config('dir','')))

    @property
    def path(self):
        # Client-side (output) path of this index, computed by walking parent
        # indexes and resolving any leading '../' components of self.src
        # against them.
        logger.debug('Trying to find client-side path of "%s" (:working_dir %s :source_dir %s)'%(self.src,self.working_dir,self.source_dir))
        if not self.index: return ''
        parent = self.index
        parent_ref = get_backdir(self.src)
        # Climb while the accumulated back-reference still points above the
        # current parent's directory.
        while parent and has_backdir(parent_ref):
            parent_dir = join_path(os.path.dirname(parent.src) if parent.index else '',parent.get_config('dir',''))
            parent_dir_merged = join_path(clean_backdir(parent_dir),parent_ref)
            if len(parent_dir_merged)>0 and not parent_dir_merged=='.' and (not has_backdir(parent_dir_merged)):
                break
            parent_ref = join_path(parent_dir if parent.index and parent.index.index else clean_backdir(parent_dir),parent_ref)
            parent = parent.index
        path = join_path(parent.path if parent else '',clean_backdir(os.path.dirname(self.src)))
        return path if path!='.' else ''

    def import_manifest(self):
        # Expand the manifest's 'files' globs (relative to source_dir) into
        # dependency objects of the class matching each file's format.
        logger.debug('Importing manifest document')
        self._dependencies_ = []
        sdir = self.source_dir
        files = [ el for el in map(partial(lambda path: os.path.join(sdir,path)),self.get_config('files',[])) ]
        for depinfo in chain(*map(glob,files)):
            # Store src relative to source_dir when one is configured.
            src = depinfo if not self.source_dir else depinfo[len(self.source_dir)+1:]
            dp = get_class_by_format(src)(index=self)
            dp.src = src
            self.dependencies.append(dp)

    def parse(self,content):
        # Abstract: subclasses must turn manifest text into the dict that
        # Manifest() expects.
        raise Exception('Not Implemented')

    def put(self):
        # Write this index's rendered content to its configured 'filename'.
        filename = os.path.normpath(os.path.join(self.working_dir, self.get_config('filename')))
        with open('%s'%filename,'w',encoding='utf-8') as fl:
            fl.write(self.content)
        logger.info('Writing %s OK'%filename)
| {
"repo_name": "azer/jsbuild",
"path": "jsbuild/index.py",
"copies": "1",
"size": "3930",
"license": "mit",
"hash": 6466859445148958000,
"line_mean": 32.3050847458,
"line_max": 162,
"alpha_frac": 0.6699745547,
"autogenerated": false,
"ratio": 3.438320209973753,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4608294764673753,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from hamcrest import assert_that, contains_string
from hamcrest import not_
from allure_commons_test.report import has_test_case
from allure_commons_test.result import with_status
from allure_commons_test.result import has_step
from allure_commons_test.result import has_attachment
from allure_commons_test.result import has_parameter
from allure_commons_test.result import has_status_details
from allure_commons_test.result import with_message_contains
from allure_commons_test.result import has_link
from allure_commons_test.result import has_description
from allure_commons_test.container import has_container
from allure_commons_test.container import has_before, has_after
from allure_commons_test.label import has_severity
from allure_commons_test.label import has_tag
from allure_commons_test.label import has_label
def match(matcher, *args):
    """Fold *args* onto *matcher* via functools.partial and invoke it.

    Non-callable arguments are bound positionally in order.  The first
    callable argument is resolved recursively (consuming all remaining
    arguments) and bound as the final value.
    """
    for position, arg in enumerate(args):
        if hasattr(arg, '__call__'):
            matcher = partial(matcher, match(arg, *args[position + 1:]))
            break
        matcher = partial(matcher, arg)
    return matcher()
@then(u'allure report has a scenario with name "{scenario}"')
def step_scenario(context, scenario):
    # Remember the matcher so later steps can refine it.
    scenario_matcher = partial(match, has_test_case, scenario)
    context.scenario = scenario_matcher
    assert_that(context.allure_report, scenario_matcher())


# NOTE: intentionally reuses the name step_scenario — behave registers steps
# via the decorator, so rebinding the module name is harmless.
@then(u'allure report has not a scenario with name "{scenario}"')
def step_scenario(context, scenario):
    scenario_matcher = partial(match, not_, has_test_case, scenario)
    context.scenario = scenario_matcher
    assert_that(context.allure_report, scenario_matcher())
@then(u'scenario has before fixture "{fixture}"')
@then(u'this scenario has before fixture "{fixture}"')
def step_before_fixture(context, fixture):
    # Builds on the scenario matcher captured by a previous step.
    fixture_matcher = partial(context.scenario, has_container, context.allure_report, has_before, fixture)
    context.before = fixture_matcher
    assert_that(context.allure_report, fixture_matcher())


@then(u'scenario has after fixture "{fixture}"')
@then(u'this scenario has after fixture "{fixture}"')
def step_after_fixture(context, fixture):
    fixture_matcher = partial(context.scenario, has_container, context.allure_report, has_after, fixture)
    context.after = fixture_matcher
    assert_that(context.allure_report, fixture_matcher())
@then(u'scenario has not before fixture "{fixture}"')
@then(u'this scenario has not before fixture "{fixture}"')
def step_no_before_fixture(context, fixture):
    # Negated form: the report must NOT contain the before fixture.
    negated = partial(context.scenario, not_, has_container, context.allure_report, has_before, fixture)
    assert_that(context.allure_report, negated())


@then(u'scenario has not after fixture "{fixture}"')
@then(u'this scenario has not after fixture "{fixture}"')
def step_impl(context, fixture):
    negated = partial(context.scenario, not_, has_container, context.allure_report, has_after, fixture)
    assert_that(context.allure_report, negated())
@then(u'{item} contains step "{step}"')
@then(u'this {item} contains step "{step}"')
def step_step(context, item, step):
    # *item* names a previously stored matcher attribute on the context.
    base = getattr(context, item)
    step_matcher = partial(base, has_step, step)
    context.step = step_matcher
    assert_that(context.allure_report, step_matcher())


@then(u'{item} has "{status}" status')
@then(u'this {item} has "{status}" status')
def step_status(context, item, status):
    base = getattr(context, item)
    assert_that(context.allure_report, partial(base, with_status, status)())


# NOTE: intentionally rebinds step_status — behave registration happens in
# the decorator, so both steps stay active.
@then(u'{item} has status details with message "{message}"')
@then(u'this {item} has status details with message "{message}"')
def step_status(context, item, message):
    base = getattr(context, item)
    assert_that(context.allure_report, partial(base, has_status_details, with_message_contains, message)())
@then(u'scenario has "{severity}" severity')
@then(u'this scenario has "{severity}" severity')
def step_severity(context, severity):
context_matcher = context.scenario
matcher = partial(context_matcher, has_severity, severity)
assert_that(context.allure_report, matcher())
@then(u'scenario has "{tag}" tag')
@then(u'this scenario has "{tag}" tag')
def step_tag(context, tag):
context_matcher = context.scenario
matcher = partial(context_matcher, has_tag, tag)
assert_that(context.allure_report, matcher())
@then(u'scenario has "{url}" link')
@then(u'this scenario has "{url}" link')
@then(u'scenario has "{url}" link with type "{link_type}"')
@then(u'this scenario has "{url}" link with type "{link_type}"')
@then(u'scenario has "{url}" link with type "{link_type}" and name "{name}"')
@then(u'this scenario has "{url}" link with type "{link_type}" and name "{name}"')
def step_link(context, url, link_type=None, name=None,):
context_matcher = context.scenario
matcher = partial(context_matcher, has_link, url, link_type, name)
assert_that(context.allure_report, matcher())
@then(u'scenario has "{name}" label with value "{value}"')
@then(u'this scenario has "{name}" label with value "{value}"')
def step_label(context, name, value):
context_matcher = context.scenario
matcher = partial(context_matcher, has_label, name, value)
assert_that(context.allure_report, matcher())
@then(u'{item} has parameter "{name}" with value "{value}"')
@then(u'this {item} has parameter "{name}" with value "{value}"')
def step_parameter(context, item, name, value):
    base = getattr(context, item)
    assert_that(context.allure_report, partial(base, has_parameter, name, value)())


@then(u'{item} has attachment')
@then(u'this {item} has attachment')
def step_attachment(context, item):
    base = getattr(context, item)
    assert_that(context.allure_report, partial(base, has_attachment)())


@then(u'scenario has description "{description}"')
def step_description(context, description):
    assert_that(context.allure_report, partial(context.scenario, has_description, contains_string(description))())
| {
"repo_name": "allure-framework/allure-python",
"path": "allure-behave/features/steps/report_steps.py",
"copies": "1",
"size": "6256",
"license": "apache-2.0",
"hash": 6278565976207351000,
"line_mean": 38.3459119497,
"line_max": 103,
"alpha_frac": 0.7233056266,
"autogenerated": false,
"ratio": 3.5545454545454547,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9775508232302703,
"avg_score": 0.00046856976855012773,
"num_lines": 159
} |
from functools import partial
from hashlib import md5
import bleach
from django.conf import settings
from django.core.cache import cache
from django.utils.safestring import mark_safe
# One week, in seconds: default TTL for cached render results.
LONG_CACHE = 60 * 60 * 24 * 7
def cached_render(render_function, source, cache_tag, cache_time=LONG_CACHE):
    """Render *source* through *render_function*, caching the result.

    *render_function* must be a single-argument callable taking a byte or
    Unicode string and returning one.  *cache_tag* should be specific to the
    rendering function so distinct renderers never share cache entries while
    still surviving restarts.  The result is marked safe for direct HTML
    rendering.
    """
    # Cache keys must be byte strings, so encode Unicode sources first.
    if isinstance(source, unicode):
        encoded = source.encode('utf8')
    else:
        encoded = source
    cache_key = md5(encoded).hexdigest() + str(cache_tag)
    rendered = cache.get(cache_key)
    if not rendered:
        rendered = render_function(source)
        cache.set(cache_key, rendered, cache_time)
    return mark_safe(rendered)
# Bleach cache tag derived from the relevant settings, so changing TAGS or
# ALLOWED_ATTRIBUTES automatically invalidates previously cached output.
_bleach_settings_string = str(settings.TAGS) + str(settings.ALLOWED_ATTRIBUTES)
BLEACH_CACHE_TAG = md5(_bleach_settings_string).hexdigest()
def cached_bleach(source):
    """Render a string through the bleach library, caching the result."""
    clean = partial(bleach.clean,
                    tags=settings.TAGS,
                    attributes=settings.ALLOWED_ATTRIBUTES)
    return cached_render(clean, source, cache_tag=BLEACH_CACHE_TAG)
class _Missing(object):
    """Sentinel type whose single instance marks 'no cached value yet'."""

    def __repr__(self):
        return 'no value'

    def __reduce__(self):
        # Unpickles back to the module-level singleton.
        return '_missing'


_missing = _Missing()


class cached_property(object):
    """A decorator that converts a function into a lazy property.

    The wrapped function runs once, on first access; the computed value is
    then stored in the instance's `__dict__` and returned directly on every
    later access::

        class Foo(object):
            @cached_property
            def foo(self):
                # calculate something important here
                return 42

    The class has to have a `__dict__` in order for this property to work.

    .. versionchanged:: 0.6
        the `writeable` attribute and parameter was deprecated.  If a
        cached property is writeable or not has to be documented now.
        For performance reasons the implementation does not honor the
        writeable setting and will always make the property writeable.

    :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
    """

    # Implemented as a NON-data descriptor: once the value has been written
    # into the instance __dict__, ordinary attribute lookup finds it there
    # first and __get__ is never consulted again.  Manual __get__ calls still
    # behave correctly because the same lookup is replicated below.

    def __init__(self, func, name=None, doc=None, writeable=False):
        if writeable:
            from warnings import warn
            warn(DeprecationWarning('the writeable argument to the '
                                    'cached property is a noop since 0.6 '
                                    'because the property is writeable '
                                    'by default for performance reasons'))
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        cached = obj.__dict__.get(self.__name__, _missing)
        if cached is not _missing:
            return cached
        result = self.func(obj)
        obj.__dict__[self.__name__] = result
        return result
| {
"repo_name": "mozilla/mozilla-ignite",
"path": "apps/challenges/lib.py",
"copies": "1",
"size": "4237",
"license": "bsd-3-clause",
"hash": 1828373387150156800,
"line_mean": 34.906779661,
"line_max": 79,
"alpha_frac": 0.6492801511,
"autogenerated": false,
"ratio": 4.432008368200837,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008859784283513097,
"num_lines": 118
} |
from functools import partial
from html.parser import HTMLParser
from typing import Any, Callable, Dict, List, Tuple
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import UnexpectedWebhookEventType, check_send_webhook_message
from zerver.models import UserProfile
# Message templates for each supported Intercom event type.  These are
# runtime strings filled in via str.format; placeholders must match the
# payload fields supplied by the builder functions below.
COMPANY_CREATED = """
New company **{name}** created:
* **User count**: {user_count}
* **Monthly spending**: {monthly_spend}
""".strip()
CONTACT_EMAIL_ADDED = "New email {email} added to contact."
CONTACT_CREATED = """
New contact created:
* **Name (or pseudonym)**: {name}
* **Email**: {email}
* **Location**: {location_info}
""".strip()
CONTACT_SIGNED_UP = """
Contact signed up:
* **Email**: {email}
* **Location**: {location_info}
""".strip()
CONTACT_TAG_CREATED = "Contact tagged with the `{name}` tag."
CONTACT_TAG_DELETED = "The tag `{name}` was removed from the contact."
CONVERSATION_ADMIN_ASSIGNED = "{name} assigned to conversation."
CONVERSATION_ADMIN_TEMPLATE = "{admin_name} {action} the conversation."
CONVERSATION_ADMIN_REPLY_TEMPLATE = """
{admin_name} {action} the conversation:
``` quote
{content}
```
""".strip()
CONVERSATION_ADMIN_INITIATED_CONVERSATION = """
{admin_name} initiated a conversation:
``` quote
{content}
```
""".strip()
EVENT_CREATED = "New event **{event_name}** created."
USER_CREATED = """
New user created:
* **Name**: {name}
* **Email**: {email}
""".strip()
class MLStripper(HTMLParser):
    """HTMLParser subclass that accumulates only the text between tags."""

    def __init__(self) -> None:
        self.reset()
        self.strict = False
        self.convert_charrefs = True
        self.fed: List[str] = []

    def handle_data(self, d: str) -> None:
        # Collect every text node; markup events are simply ignored.
        self.fed.append(d)

    def get_data(self) -> str:
        return ''.join(self.fed)


def strip_tags(html: str) -> str:
    """Return *html* with all markup removed, keeping only text content."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
def get_topic_for_contacts(user: Dict[str, Any]) -> str:
    """Build the 'Type: identifier' topic for a contact/user payload."""
    # Fall back from name -> pseudonym -> email for the display identifier.
    display = user.get('name') or user.get('pseudonym') or user.get('email')
    return "{type}: {name}".format(type=user['type'].capitalize(), name=display)
def get_company_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Company events all post to the fixed 'Companies' topic."""
    company = payload['data']['item']
    return ('Companies', COMPANY_CREATED.format(**company))


def get_contact_added_email_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    user = payload['data']['item']
    return (get_topic_for_contacts(user), CONTACT_EMAIL_ADDED.format(email=user['email']))


def get_contact_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    contact = payload['data']['item']
    location = "{city_name}, {region_name}, {country_name}".format(
        **contact['location_data'],
    )
    body = CONTACT_CREATED.format(
        name=contact.get('name') or contact.get('pseudonym'),
        email=contact['email'],
        location_info=location,
    )
    return (get_topic_for_contacts(contact), body)


def get_contact_signed_up_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    contact = payload['data']['item']
    location = "{city_name}, {region_name}, {country_name}".format(
        **contact['location_data'],
    )
    body = CONTACT_SIGNED_UP.format(email=contact['email'], location_info=location)
    return (get_topic_for_contacts(contact), body)


def get_contact_tag_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    item = payload['data']['item']
    return (get_topic_for_contacts(item['contact']), CONTACT_TAG_CREATED.format(**item['tag']))


def get_contact_tag_deleted_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    item = payload['data']['item']
    return (get_topic_for_contacts(item['contact']), CONTACT_TAG_DELETED.format(**item['tag']))
def get_conversation_admin_assigned_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    item = payload['data']['item']
    body = CONVERSATION_ADMIN_ASSIGNED.format(**item['assignee'])
    return (get_topic_for_contacts(item['user']), body)


def get_conversation_admin_message(
    payload: Dict[str, Any],
    action: str,
) -> Tuple[str, str]:
    # *action* is the human-readable verb pre-bound by the dispatch table.
    item = payload['data']['item']
    body = CONVERSATION_ADMIN_TEMPLATE.format(
        admin_name=item['assignee'].get('name'),
        action=action,
    )
    return (get_topic_for_contacts(item['user']), body)


def get_conversation_admin_reply_message(
    payload: Dict[str, Any],
    action: str,
) -> Tuple[str, str]:
    item = payload['data']['item']
    # The quoted content is the first conversation part, with HTML stripped.
    note = item['conversation_parts']['conversation_parts'][0]
    body = CONVERSATION_ADMIN_REPLY_TEMPLATE.format(
        admin_name=item['assignee'].get('name'),
        action=action,
        content=strip_tags(note['body']),
    )
    return (get_topic_for_contacts(item['user']), body)
def get_conversation_admin_single_created_message(
        payload: Dict[str, Any]) -> Tuple[str, str]:
    item = payload['data']['item']
    content = strip_tags(item['conversation_message']['body'])
    body = CONVERSATION_ADMIN_INITIATED_CONVERSATION.format(
        admin_name=item['assignee'].get('name'),
        content=content,
    )
    return (get_topic_for_contacts(item['user']), body)


def get_conversation_user_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    user = payload['data']['item']['user']
    content = strip_tags(payload['data']['item']['conversation_message']['body'])
    body = CONVERSATION_ADMIN_INITIATED_CONVERSATION.format(
        admin_name=user.get('name'),
        content=content,
    )
    return (get_topic_for_contacts(user), body)


def get_conversation_user_replied_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    user = payload['data']['item']['user']
    note = payload['data']['item']['conversation_parts']['conversation_parts'][0]
    body = CONVERSATION_ADMIN_REPLY_TEMPLATE.format(
        admin_name=user.get('name'),
        action='replied to',
        content=strip_tags(note['body']),
    )
    return (get_topic_for_contacts(user), body)
def get_event_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Event notifications all post to the fixed 'Events' topic."""
    return ('Events', EVENT_CREATED.format(**payload['data']['item']))


def get_user_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    user = payload['data']['item']
    return (get_topic_for_contacts(user), USER_CREATED.format(**user))


def get_user_deleted_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    user = payload['data']['item']
    return (get_topic_for_contacts(user), 'User deleted.')


def get_user_email_updated_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    user = payload['data']['item']
    body = 'User\'s email was updated to {}.'.format(user['email'])
    return (get_topic_for_contacts(user), body)


def get_user_tagged_message(
    payload: Dict[str, Any],
    action: str,
) -> Tuple[str, str]:
    # *action* ('added to' / 'removed from') is pre-bound by the dispatch table.
    item = payload['data']['item']
    body = 'The tag `{tag_name}` was {action} the user.'.format(
        tag_name=item['tag']['name'],
        action=action,
    )
    return (get_topic_for_contacts(item['user']), body)


def get_user_unsubscribed_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    user = payload['data']['item']
    return (get_topic_for_contacts(user), 'User unsubscribed from emails.')
# Dispatch table: Intercom event topic -> message-builder callable.
# partial(...) pre-binds the human-readable action wording where one builder
# serves several event types.
EVENT_TO_FUNCTION_MAPPER = {
    'company.created': get_company_created_message,
    'contact.added_email': get_contact_added_email_message,
    'contact.created': get_contact_created_message,
    'contact.signed_up': get_contact_signed_up_message,
    'contact.tag.created': get_contact_tag_created_message,
    'contact.tag.deleted': get_contact_tag_deleted_message,
    'conversation.admin.assigned': get_conversation_admin_assigned_message,
    'conversation.admin.closed': partial(get_conversation_admin_message, action='closed'),
    'conversation.admin.opened': partial(get_conversation_admin_message, action='opened'),
    'conversation.admin.snoozed': partial(get_conversation_admin_message, action='snoozed'),
    'conversation.admin.unsnoozed': partial(get_conversation_admin_message, action='unsnoozed'),
    'conversation.admin.replied': partial(get_conversation_admin_reply_message, action='replied to'),
    'conversation.admin.noted': partial(get_conversation_admin_reply_message, action='added a note to'),
    'conversation.admin.single.created': get_conversation_admin_single_created_message,
    'conversation.user.created': get_conversation_user_created_message,
    'conversation.user.replied': get_conversation_user_replied_message,
    'event.created': get_event_created_message,
    'user.created': get_user_created_message,
    'user.deleted': get_user_deleted_message,
    'user.email.updated': get_user_email_updated_message,
    'user.tag.created': partial(get_user_tagged_message, action='added to'),
    'user.tag.deleted': partial(get_user_tagged_message, action='removed from'),
    'user.unsubscribed': get_user_unsubscribed_message,
    # Note that we do not have a payload for visitor.signed_up
    # but it should be identical to contact.signed_up
    'visitor.signed_up': get_contact_signed_up_message,
}
def get_event_handler(event_type: str) -> Callable[..., Tuple[str, str]]:
    """Look up the message builder for *event_type*; raise for unknown events."""
    handler: Any = EVENT_TO_FUNCTION_MAPPER.get(event_type)
    if handler is not None:
        return handler
    raise UnexpectedWebhookEventType("Intercom", event_type)
@api_key_only_webhook_view('Intercom')
@has_request_variables
def api_intercom_webhook(request: HttpRequest, user_profile: UserProfile,
                         payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
    """Entry point for Intercom webhooks: dispatch on the payload topic."""
    event_type = payload['topic']
    # Intercom's test ping carries no event data; acknowledge and stop.
    if event_type == 'ping':
        return json_success()
    handler = get_event_handler(event_type)
    topic, body = handler(payload)
    check_send_webhook_message(request, user_profile, topic, body)
    return json_success()
| {
"repo_name": "timabbott/zulip",
"path": "zerver/webhooks/intercom/view.py",
"copies": "4",
"size": "10675",
"license": "apache-2.0",
"hash": 799674196904705500,
"line_mean": 35.186440678,
"line_max": 104,
"alpha_frac": 0.6614519906,
"autogenerated": false,
"ratio": 3.4258664955070603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6087318486107062,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from html.parser import HTMLParser
from typing import Any, Callable, Dict, List, Tuple
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
# Message templates for each supported Intercom event type.  These are
# runtime strings filled in via str.format; placeholders must match the
# payload fields supplied by the builder functions below.
COMPANY_CREATED = """
New company **{name}** created:
* **User count**: {user_count}
* **Monthly spending**: {monthly_spend}
""".strip()
CONTACT_EMAIL_ADDED = "New email {email} added to contact."
CONTACT_CREATED = """
New contact created:
* **Name (or pseudonym)**: {name}
* **Email**: {email}
* **Location**: {location_info}
""".strip()
CONTACT_SIGNED_UP = """
Contact signed up:
* **Email**: {email}
* **Location**: {location_info}
""".strip()
CONTACT_TAG_CREATED = "Contact tagged with the `{name}` tag."
CONTACT_TAG_DELETED = "The tag `{name}` was removed from the contact."
CONVERSATION_ADMIN_ASSIGNED = "{name} assigned to conversation."
CONVERSATION_ADMIN_TEMPLATE = "{admin_name} {action} the conversation."
CONVERSATION_ADMIN_REPLY_TEMPLATE = """
{admin_name} {action} the conversation:
``` quote
{content}
```
""".strip()
CONVERSATION_ADMIN_INITIATED_CONVERSATION = """
{admin_name} initiated a conversation:
``` quote
{content}
```
""".strip()
EVENT_CREATED = "New event **{event_name}** created."
USER_CREATED = """
New user created:
* **Name**: {name}
* **Email**: {email}
""".strip()
class MLStripper(HTMLParser):
    """HTMLParser subclass that accumulates only the text between tags."""

    def __init__(self) -> None:
        self.reset()
        self.strict = False
        self.convert_charrefs = True
        self.fed: List[str] = []

    def handle_data(self, d: str) -> None:
        # Collect every text node; markup events are simply ignored.
        self.fed.append(d)

    def get_data(self) -> str:
        return "".join(self.fed)


def strip_tags(html: str) -> str:
    """Return *html* with all markup removed, keeping only text content."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
def get_topic_for_contacts(user: Dict[str, Any]) -> str:
    """Build the 'Type: identifier' topic for a contact/user payload."""
    # Fall back from name -> pseudonym -> email for the display identifier.
    display = user.get("name") or user.get("pseudonym") or user.get("email")
    return "{type}: {name}".format(type=user["type"].capitalize(), name=display)
def get_company_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Company events all post to the fixed 'Companies' topic."""
    company = payload["data"]["item"]
    return ("Companies", COMPANY_CREATED.format(**company))


def get_contact_added_email_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    user = payload["data"]["item"]
    return (get_topic_for_contacts(user), CONTACT_EMAIL_ADDED.format(email=user["email"]))


def get_contact_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    contact = payload["data"]["item"]
    location = "{city_name}, {region_name}, {country_name}".format(
        **contact["location_data"],
    )
    body = CONTACT_CREATED.format(
        name=contact.get("name") or contact.get("pseudonym"),
        email=contact["email"],
        location_info=location,
    )
    return (get_topic_for_contacts(contact), body)


def get_contact_signed_up_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    contact = payload["data"]["item"]
    location = "{city_name}, {region_name}, {country_name}".format(
        **contact["location_data"],
    )
    body = CONTACT_SIGNED_UP.format(email=contact["email"], location_info=location)
    return (get_topic_for_contacts(contact), body)


def get_contact_tag_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    item = payload["data"]["item"]
    return (get_topic_for_contacts(item["contact"]), CONTACT_TAG_CREATED.format(**item["tag"]))


def get_contact_tag_deleted_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    item = payload["data"]["item"]
    return (get_topic_for_contacts(item["contact"]), CONTACT_TAG_DELETED.format(**item["tag"]))
def get_conversation_admin_assigned_message(payload: Dict[str, Any]) -> Tuple[str, str]:
body = CONVERSATION_ADMIN_ASSIGNED.format(**payload["data"]["item"]["assignee"])
user = payload["data"]["item"]["user"]
topic = get_topic_for_contacts(user)
return (topic, body)
def get_conversation_admin_message(
    payload: Dict[str, Any],
    action: str,
) -> Tuple[str, str]:
    """Render an admin state change (closed/opened/snoozed/...) as (topic, body)."""
    item = payload["data"]["item"]
    body = CONVERSATION_ADMIN_TEMPLATE.format(
        admin_name=item["assignee"].get("name"),
        action=action,
    )
    return (get_topic_for_contacts(item["user"]), body)
def get_conversation_admin_reply_message(
    payload: Dict[str, Any],
    action: str,
) -> Tuple[str, str]:
    """Render an admin reply or note on a conversation as a (topic, body) pair."""
    item = payload["data"]["item"]
    latest_part = item["conversation_parts"]["conversation_parts"][0]
    body = CONVERSATION_ADMIN_REPLY_TEMPLATE.format(
        admin_name=item["assignee"].get("name"),
        action=action,
        content=strip_tags(latest_part["body"]),
    )
    return (get_topic_for_contacts(item["user"]), body)
def get_conversation_admin_single_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render an admin-initiated conversation as a (topic, body) pair."""
    item = payload["data"]["item"]
    body = CONVERSATION_ADMIN_INITIATED_CONVERSATION.format(
        admin_name=item["assignee"].get("name"),
        content=strip_tags(item["conversation_message"]["body"]),
    )
    return (get_topic_for_contacts(item["user"]), body)
def get_conversation_user_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a user-initiated conversation as a (topic, body) pair."""
    item = payload["data"]["item"]
    user = item["user"]
    body = CONVERSATION_ADMIN_INITIATED_CONVERSATION.format(
        admin_name=user.get("name"),
        content=strip_tags(item["conversation_message"]["body"]),
    )
    return (get_topic_for_contacts(user), body)
def get_conversation_user_replied_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a user's reply to a conversation as a (topic, body) pair."""
    item = payload["data"]["item"]
    user = item["user"]
    latest_part = item["conversation_parts"]["conversation_parts"][0]
    body = CONVERSATION_ADMIN_REPLY_TEMPLATE.format(
        admin_name=user.get("name"),
        action="replied to",
        content=strip_tags(latest_part["body"]),
    )
    return (get_topic_for_contacts(user), body)
def get_event_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render an event.created event; all events share one "Events" topic."""
    return ("Events", EVENT_CREATED.format(**payload["data"]["item"]))
def get_user_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a user.created event as a (topic, body) pair."""
    user = payload["data"]["item"]
    return (get_topic_for_contacts(user), USER_CREATED.format(**user))
def get_user_deleted_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a user.deleted event as a (topic, body) pair."""
    topic = get_topic_for_contacts(payload["data"]["item"])
    return (topic, "User deleted.")
def get_user_email_updated_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a user.email.updated event as a (topic, body) pair."""
    user = payload["data"]["item"]
    body = "User's email was updated to {}.".format(user["email"])
    return (get_topic_for_contacts(user), body)
def get_user_tagged_message(
    payload: Dict[str, Any],
    action: str,
) -> Tuple[str, str]:
    """Render user.tag.created/deleted; `action` is "added to" or "removed from"."""
    item = payload["data"]["item"]
    body = "The tag `{tag_name}` was {action} the user.".format(
        tag_name=item["tag"]["name"],
        action=action,
    )
    return (get_topic_for_contacts(item["user"]), body)
def get_user_unsubscribed_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a user.unsubscribed event as a (topic, body) pair."""
    topic = get_topic_for_contacts(payload["data"]["item"])
    return (topic, "User unsubscribed from emails.")
# Routes each Intercom webhook "topic" value to the renderer that turns its
# payload into a (Zulip topic, message body) pair.
EVENT_TO_FUNCTION_MAPPER: Dict[str, Callable[[Dict[str, Any]], Tuple[str, str]]] = {
    "company.created": get_company_created_message,
    "contact.added_email": get_contact_added_email_message,
    "contact.created": get_contact_created_message,
    "contact.signed_up": get_contact_signed_up_message,
    "contact.tag.created": get_contact_tag_created_message,
    "contact.tag.deleted": get_contact_tag_deleted_message,
    "conversation.admin.assigned": get_conversation_admin_assigned_message,
    "conversation.admin.closed": partial(get_conversation_admin_message, action="closed"),
    "conversation.admin.opened": partial(get_conversation_admin_message, action="opened"),
    "conversation.admin.snoozed": partial(get_conversation_admin_message, action="snoozed"),
    "conversation.admin.unsnoozed": partial(get_conversation_admin_message, action="unsnoozed"),
    "conversation.admin.replied": partial(
        get_conversation_admin_reply_message, action="replied to"
    ),
    "conversation.admin.noted": partial(
        get_conversation_admin_reply_message, action="added a note to"
    ),
    "conversation.admin.single.created": get_conversation_admin_single_created_message,
    "conversation.user.created": get_conversation_user_created_message,
    "conversation.user.replied": get_conversation_user_replied_message,
    "event.created": get_event_created_message,
    "user.created": get_user_created_message,
    "user.deleted": get_user_deleted_message,
    "user.email.updated": get_user_email_updated_message,
    "user.tag.created": partial(get_user_tagged_message, action="added to"),
    "user.tag.deleted": partial(get_user_tagged_message, action="removed from"),
    "user.unsubscribed": get_user_unsubscribed_message,
    # Note that we do not have a payload for visitor.signed_up
    # but it should be identical to contact.signed_up
    "visitor.signed_up": get_contact_signed_up_message,
}
@webhook_view("Intercom")
@has_request_variables
def api_intercom_webhook(
    request: HttpRequest,
    user_profile: UserProfile,
    payload: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
    """Webhook entry point: dispatch on payload["topic"] and post the message."""
    event_type = payload["topic"]
    if event_type == "ping":
        # Intercom's endpoint-verification ping; acknowledge without posting.
        return json_success()
    handler = EVENT_TO_FUNCTION_MAPPER.get(event_type)
    if handler is None:
        raise UnsupportedWebhookEventType(event_type)
    topic, body = handler(payload)
    check_send_webhook_message(request, user_profile, topic, body)
    return json_success()
| {
"repo_name": "andersk/zulip",
"path": "zerver/webhooks/intercom/view.py",
"copies": "6",
"size": "10616",
"license": "apache-2.0",
"hash": 8922325530907713000,
"line_mean": 31.9689440994,
"line_max": 96,
"alpha_frac": 0.6610776187,
"autogenerated": false,
"ratio": 3.424516129032258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006101142024022495,
"num_lines": 322
} |
from functools import partial
from html.parser import HTMLParser
from typing import Any, Dict, Tuple, Callable, List
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message, \
UnexpectedWebhookEventType
from zerver.models import UserProfile
# Markdown message-body templates, one per supported Intercom event type.
COMPANY_CREATED = """
New company **{name}** created:
* **User count**: {user_count}
* **Monthly spending**: {monthly_spend}
""".strip()
CONTACT_EMAIL_ADDED = "New email {email} added to contact."
CONTACT_CREATED = """
New contact created:
* **Name (or pseudonym)**: {name}
* **Email**: {email}
* **Location**: {location_info}
""".strip()
CONTACT_SIGNED_UP = """
Contact signed up:
* **Email**: {email}
* **Location**: {location_info}
""".strip()
CONTACT_TAG_CREATED = "Contact tagged with the `{name}` tag."
CONTACT_TAG_DELETED = "The tag `{name}` was removed from the contact."
CONVERSATION_ADMIN_ASSIGNED = "{name} assigned to conversation."
CONVERSATION_ADMIN_TEMPLATE = "{admin_name} {action} the conversation."
CONVERSATION_ADMIN_REPLY_TEMPLATE = """
{admin_name} {action} the conversation:
``` quote
{content}
```
""".strip()
CONVERSATION_ADMIN_INITIATED_CONVERSATION = """
{admin_name} initiated a conversation:
``` quote
{content}
```
""".strip()
EVENT_CREATED = "New event **{event_name}** created."
USER_CREATED = """
New user created:
* **Name**: {name}
* **Email**: {email}
""".strip()
class MLStripper(HTMLParser):
    """An HTMLParser that discards all markup and accumulates only text."""

    def __init__(self) -> None:
        self.reset()
        self.strict = False
        self.convert_charrefs = True
        # Text fragments collected by handle_data(), joined by get_data().
        self.fed: List[str] = []

    def handle_data(self, d: str) -> None:
        # Invoked by feed() for each run of text between tags.
        self.fed.append(d)

    def get_data(self) -> str:
        """Return all text seen so far, concatenated."""
        return ''.join(self.fed)
def strip_tags(html: str) -> str:
    """Return *html* with all HTML markup removed, keeping only text."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
def get_topic_for_contacts(user: Dict[str, Any]) -> str:
    """Build a topic like "User: <display name>" from a contact/user payload."""
    display_name = user.get('name') or user.get('pseudonym') or user.get('email')
    return '{}: {}'.format(user['type'].capitalize(), display_name)
def get_company_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a company.created event as a (topic, body) pair."""
    item = payload['data']['item']
    return ('Companies', COMPANY_CREATED.format(**item))
def get_contact_added_email_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a contact.added_email event as a (topic, body) pair."""
    contact = payload['data']['item']
    return (
        get_topic_for_contacts(contact),
        CONTACT_EMAIL_ADDED.format(email=contact['email']),
    )
def get_contact_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a contact.created event as a (topic, body) pair."""
    contact = payload['data']['item']
    location = "{city_name}, {region_name}, {country_name}".format(
        **contact['location_data'])
    body = CONTACT_CREATED.format(
        name=contact.get('name') or contact.get('pseudonym'),
        email=contact['email'],
        location_info=location,
    )
    return (get_topic_for_contacts(contact), body)
def get_contact_signed_up_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a contact.signed_up event as a (topic, body) pair."""
    contact = payload['data']['item']
    location = "{city_name}, {region_name}, {country_name}".format(
        **contact['location_data'])
    body = CONTACT_SIGNED_UP.format(email=contact['email'], location_info=location)
    return (get_topic_for_contacts(contact), body)
def get_contact_tag_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a contact.tag.created event as a (topic, body) pair."""
    item = payload['data']['item']
    body = CONTACT_TAG_CREATED.format(**item['tag'])
    return (get_topic_for_contacts(item['contact']), body)
def get_contact_tag_deleted_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a contact.tag.deleted event as a (topic, body) pair."""
    item = payload['data']['item']
    body = CONTACT_TAG_DELETED.format(**item['tag'])
    return (get_topic_for_contacts(item['contact']), body)
def get_conversation_admin_assigned_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a conversation.admin.assigned event as a (topic, body) pair."""
    item = payload['data']['item']
    body = CONVERSATION_ADMIN_ASSIGNED.format(**item['assignee'])
    return (get_topic_for_contacts(item['user']), body)
def get_conversation_admin_message(
    payload: Dict[str, Any],
    action: str,
) -> Tuple[str, str]:
    """Render an admin state change (closed/opened/snoozed/...) as (topic, body)."""
    item = payload['data']['item']
    body = CONVERSATION_ADMIN_TEMPLATE.format(
        admin_name=item['assignee'].get('name'),
        action=action,
    )
    return (get_topic_for_contacts(item['user']), body)
def get_conversation_admin_reply_message(
    payload: Dict[str, Any],
    action: str,
) -> Tuple[str, str]:
    """Render an admin reply or note on a conversation as a (topic, body) pair."""
    item = payload['data']['item']
    latest_part = item['conversation_parts']['conversation_parts'][0]
    body = CONVERSATION_ADMIN_REPLY_TEMPLATE.format(
        admin_name=item['assignee'].get('name'),
        action=action,
        content=strip_tags(latest_part['body']),
    )
    return (get_topic_for_contacts(item['user']), body)
def get_conversation_admin_single_created_message(
        payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render an admin-initiated conversation as a (topic, body) pair."""
    item = payload['data']['item']
    body = CONVERSATION_ADMIN_INITIATED_CONVERSATION.format(
        admin_name=item['assignee'].get('name'),
        content=strip_tags(item['conversation_message']['body']),
    )
    return (get_topic_for_contacts(item['user']), body)
def get_conversation_user_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a user-initiated conversation as a (topic, body) pair."""
    item = payload['data']['item']
    user = item['user']
    body = CONVERSATION_ADMIN_INITIATED_CONVERSATION.format(
        admin_name=user.get('name'),
        content=strip_tags(item['conversation_message']['body']),
    )
    return (get_topic_for_contacts(user), body)
def get_conversation_user_replied_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a user's reply to a conversation as a (topic, body) pair."""
    item = payload['data']['item']
    user = item['user']
    latest_part = item['conversation_parts']['conversation_parts'][0]
    body = CONVERSATION_ADMIN_REPLY_TEMPLATE.format(
        admin_name=user.get('name'),
        action='replied to',
        content=strip_tags(latest_part['body']),
    )
    return (get_topic_for_contacts(user), body)
def get_event_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render an event.created event; all events share one "Events" topic."""
    return ('Events', EVENT_CREATED.format(**payload['data']['item']))
def get_user_created_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a user.created event as a (topic, body) pair."""
    user = payload['data']['item']
    return (get_topic_for_contacts(user), USER_CREATED.format(**user))
def get_user_deleted_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a user.deleted event as a (topic, body) pair."""
    topic = get_topic_for_contacts(payload['data']['item'])
    return (topic, 'User deleted.')
def get_user_email_updated_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a user.email.updated event as a (topic, body) pair."""
    user = payload['data']['item']
    body = "User's email was updated to {}.".format(user['email'])
    return (get_topic_for_contacts(user), body)
def get_user_tagged_message(
    payload: Dict[str, Any],
    action: str,
) -> Tuple[str, str]:
    """Render user.tag.created/deleted; `action` is 'added to' or 'removed from'."""
    item = payload['data']['item']
    body = 'The tag `{tag_name}` was {action} the user.'.format(
        tag_name=item['tag']['name'],
        action=action,
    )
    return (get_topic_for_contacts(item['user']), body)
def get_user_unsubscribed_message(payload: Dict[str, Any]) -> Tuple[str, str]:
    """Render a user.unsubscribed event as a (topic, body) pair."""
    topic = get_topic_for_contacts(payload['data']['item'])
    return (topic, 'User unsubscribed from emails.')
# Routes each Intercom webhook "topic" value to the renderer that turns its
# payload into a (Zulip topic, message body) pair.
EVENT_TO_FUNCTION_MAPPER = {
    'company.created': get_company_created_message,
    'contact.added_email': get_contact_added_email_message,
    'contact.created': get_contact_created_message,
    'contact.signed_up': get_contact_signed_up_message,
    'contact.tag.created': get_contact_tag_created_message,
    'contact.tag.deleted': get_contact_tag_deleted_message,
    'conversation.admin.assigned': get_conversation_admin_assigned_message,
    'conversation.admin.closed': partial(get_conversation_admin_message, action='closed'),
    'conversation.admin.opened': partial(get_conversation_admin_message, action='opened'),
    'conversation.admin.snoozed': partial(get_conversation_admin_message, action='snoozed'),
    'conversation.admin.unsnoozed': partial(get_conversation_admin_message, action='unsnoozed'),
    'conversation.admin.replied': partial(get_conversation_admin_reply_message, action='replied to'),
    'conversation.admin.noted': partial(get_conversation_admin_reply_message, action='added a note to'),
    'conversation.admin.single.created': get_conversation_admin_single_created_message,
    'conversation.user.created': get_conversation_user_created_message,
    'conversation.user.replied': get_conversation_user_replied_message,
    'event.created': get_event_created_message,
    'user.created': get_user_created_message,
    'user.deleted': get_user_deleted_message,
    'user.email.updated': get_user_email_updated_message,
    'user.tag.created': partial(get_user_tagged_message, action='added to'),
    'user.tag.deleted': partial(get_user_tagged_message, action='removed from'),
    'user.unsubscribed': get_user_unsubscribed_message,
    # Note that we do not have a payload for visitor.signed_up
    # but it should be identical to contact.signed_up
    'visitor.signed_up': get_contact_signed_up_message,
}
def get_event_handler(event_type: str) -> Callable[..., Tuple[str, str]]:
    """Look up the renderer for *event_type*, raising for unsupported events."""
    try:
        return EVENT_TO_FUNCTION_MAPPER[event_type]
    except KeyError:
        raise UnexpectedWebhookEventType('Intercom', event_type)
@api_key_only_webhook_view('Intercom')
@has_request_variables
def api_intercom_webhook(request: HttpRequest, user_profile: UserProfile,
                         payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
    """Webhook entry point: dispatch on payload['topic'] and post the message."""
    event_type = payload['topic']
    if event_type == 'ping':
        # Intercom's endpoint-verification ping; acknowledge without posting.
        return json_success()
    topic, body = get_event_handler(event_type)(payload)
    check_send_webhook_message(request, user_profile, topic, body)
    return json_success()
| {
"repo_name": "rishig/zulip",
"path": "zerver/webhooks/intercom/view.py",
"copies": "2",
"size": "10684",
"license": "apache-2.0",
"hash": -4254489288972237300,
"line_mean": 34.9730639731,
"line_max": 104,
"alpha_frac": 0.6616435792,
"autogenerated": false,
"ratio": 3.429855537720706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5091499116920706,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from http.cookies import SimpleCookie
from http.client import responses
import json
import mimetypes
import os.path
import random
import re
import string
import time
from urllib.parse import parse_qs
import bleach
import redis
ROOT = os.path.dirname(__file__)  # directory containing this module
POOL = redis.ConnectionPool()  # shared Redis connection pool for all requests
RATE_LIMIT_DURATION = 60  # sliding-window length, in seconds
RATE_LIMIT = 10  # max requests allowed per window per client tag
def get_template(name):
    """Open template *name* for binary reading (caller/WSGI server closes it).

    NOTE(review): resolves 'templates/' relative to the process CWD, unlike
    static() which anchors at ROOT — confirm this asymmetry is intentional.
    """
    return open(os.path.join('templates/', name), 'rb')
def make_key(*args):
    """Join key parts into a single colon-delimited Redis key.

    Replaces a lambda assignment (PEP 8 E731) with an equivalent named
    function; callers are unaffected.
    """
    return ':'.join(args)
# Nick handling
def get_nicks(request):
    """Return a mapping of each active nick in the channel to its Redis key."""
    keys = request.conn.keys(make_key(request.channel, '*', 'nick'))
    if not keys:
        return {}
    values = request.conn.mget(keys)
    return {
        nick.decode('utf-8'): key.decode('utf-8')
        for nick, key in zip(values, keys)
    }
def get_nick(request):
    """Return the caller's nick, creating a default from its tag if unset."""
    key = make_key(request.channel, request.tag, 'nick')
    stored = request.conn.get(key)
    if stored is None:
        nick = set_nick(request, request.tag[:8])
    else:
        nick = stored.decode('utf-8')
    # Refresh the 90-second TTL on every access so active users keep the nick.
    request.conn.expire(key, 90)
    return nick
def set_nick(request, name):
    """Sanitize and claim *name* for this client; raise ValueError if taken."""
    name = strip_tags(name)
    if name in get_nicks(request):
        raise ValueError('Nick in use!')
    nick_key = make_key(request.channel, request.tag, 'nick')
    request.conn.set(nick_key, name, ex=90)
    return name
# Topic handling
def set_topic(request, topic):
    """Store *topic* as the channel's current topic."""
    request.conn.set(make_key(request.channel, 'topic'), topic)
def get_topic(request):
    """Return the channel's topic, or '' if no topic has been set.

    The None guard fixes an AttributeError that occurred when the topic key
    did not exist yet (e.g. a fresh channel where nobody has set a topic).
    """
    raw = request.conn.get(make_key(request.channel, 'topic'))
    return raw.decode('utf-8') if raw is not None else ''
# Message handling
def post_message(request, message, mode='message', queue=None, **data):
    """Publish a JSON-encoded [mode, payload] pair to a pub/sub queue.

    Defaults to the channel-wide queue; `sender` defaults to the caller's nick.
    """
    if queue is None:
        queue = make_key(request.channel, 'channel')
    data.setdefault('message', message)
    data.setdefault('sender', get_nick(request))
    # The payload is double-encoded: the outer list carries the event mode.
    request.conn.publish(queue, json.dumps([mode, json.dumps(data)]))
# Remove all HTML tags (no whitelist), keeping only the text content.
strip_tags = partial(bleach.clean, tags=[], strip=True)
def linkify_external(attrs, new=False):
    """bleach.linkify callback: force generated links to open in a new tab."""
    attrs['target'] = '_blank'
    return attrs
# The application!
class Request(object):
    """Wraps a WSGI environ: method, path, cookies, query data, Redis conn."""

    def __init__(self, environ):
        self.environ = environ
        self.method = environ['REQUEST_METHOD']
        self.path = environ.get('PATH_INFO', '/')
        self.cookies = self.parse_cookies()
        self.query_data = self.parse_query_data()
        # Each request borrows a connection from the shared module-level pool.
        self.conn = redis.StrictRedis(connection_pool=POOL)

    def parse_cookies(self):
        """Return the request cookies as a plain {name: value} dict."""
        cookie_data = self.environ.get('HTTP_COOKIE', '')
        cookies = SimpleCookie()
        if cookie_data:
            cookies.load(cookie_data)
        return {key: cookies.get(key).value for key in cookies.keys()}

    def parse_query_data(self):
        """Parse the form body (POST) or query string (GET) into str lists.

        Fix: POST bodies are read as bytes and parse to bytes keys/values,
        while GET query strings are str and parse to str — the old code
        unconditionally called .decode(), which raised on any GET request
        that carried a query string. Decode only when the value is bytes.
        """
        if self.method == 'POST':
            size = int(self.environ.get('CONTENT_LENGTH', 0))
            if not size:
                return {}
            src = parse_qs(self.environ['wsgi.input'].read(size))
        else:
            src = parse_qs(self.environ.get('QUERY_STRING', ''))
        def _text(value):
            return value.decode('utf-8') if isinstance(value, bytes) else value
        return {_text(k): [_text(x) for x in v] for k, v in src.items()}
class Response(object):
    """Minimal WSGI response: status line, headers, cookies, body iterable."""

    def __init__(self, content=None, status=200, content_type=None):
        self.content = content if content else []
        # e.g. "200 OK" — reason phrase looked up from http.client.
        self.status = '{} {}'.format(status, responses[status])
        self.headers = {'Content-Type': content_type if content_type else 'text/html'}
        self.cookies = SimpleCookie()
def application(environ, start_response):
    """WSGI entry point: session tagging, rate limiting, then URL dispatch."""
    request = Request(environ)
    # Session cookie: reuse the client's tag, or mint a random 16-letter one.
    tag = request.cookies.get('chatterbox')
    request.tag = tag or ''.join(random.choice(string.ascii_letters)
                                 for x in range(16))
    # Rate limiting: sliding window kept in a sorted set keyed by the tag.
    key = make_key(request.tag, 'rated')
    now = int(time.time())
    pipe = request.conn.pipeline(transaction=False)
    # Record this hit, bump the key's expiry, drop hits outside the window,
    # then count what remains — all in one round trip.
    pipe.zadd(key, now, now).expireat(key, now + RATE_LIMIT_DURATION)
    pipe.zremrangebyscore(key, '-inf', now - RATE_LIMIT_DURATION)
    size = pipe.zcard(key).execute()[-1]
    if size > RATE_LIMIT:
        response = Response(status=429)
    else:  # Dispatch: first matching pattern wins; default is 404.
        response = Response(status=404)
        for pattern in URLPATTERNS:
            match = re.match(pattern[0], request.path)
            if match:
                response = pattern[1](request, **match.groupdict())
    if not tag:
        # First visit: hand the freshly minted tag back as a cookie.
        response.cookies['chatterbox'] = request.tag
        response.cookies['chatterbox']['path'] = b'/'
    headers = list(response.headers.items()) + [
        ('Set-Cookie', cookie.OutputString())
        for cookie in response.cookies.values()
    ]
    start_response(response.status, headers)
    return response.content
def index(request):
    """Serve the chat page at the site root."""
    return Response(get_template('chat.html'))
def chat(request, channel=None):
    """Channel endpoint: SSE event stream on GET, command dispatch on POST."""
    request.channel = channel
    if request.method == 'GET':
        if 'text/event-stream' not in request.environ['HTTP_ACCEPT']:
            # Plain browser navigation: serve the chat page, not a stream.
            return Response(get_template('chat.html'))
        pubsub = request.conn.pubsub()
        # Listen on the shared channel queue plus this client's private queue.
        pubsub.subscribe([
            make_key(request.channel, 'channel'),
            make_key(request.tag, 'private'),
        ])
        def _iterator():
            # Translate pub/sub messages into Server-Sent Events frames.
            for msg in pubsub.listen():
                if msg['type'] == 'message':
                    mode, data = json.loads(msg['data'].decode('utf-8'))
                    yield 'event: {}\n'.format(mode).encode('utf-8')
                    for line in data.splitlines():
                        yield 'data: {}\n'.format(line).encode('utf-8')
                    yield '\n'.encode('utf-8')
        post_message(request, '{} connected.'.format(get_nick(request)),
                     'join', sender='Notice')
        response = Response(_iterator(), content_type='text/event-stream')
        response.headers['Cache-Control'] = 'no-cache'
    elif request.method == 'POST':
        nick = get_nick(request)
        mode = request.query_data.get('mode', ['message'])[0]
        msg = request.query_data.get('message', [''])[0]
        # Sanitize user input, then auto-link URLs (opening in a new tab).
        msg = bleach.linkify(strip_tags(msg), callbacks=[linkify_external])
        if mode == 'nick' and msg:
            try:
                new_nick = set_nick(request, msg)
            except ValueError:
                post_message(request, 'Nick in use!', 'alert', sender='Notice')
            else:
                post_message(request,
                             '{} is now known as {}'.format(nick, new_nick),
                             mode='nick',
                             sender='Notice')
        elif mode == 'names':
            post_message(request, list(get_nicks(request).keys()), 'names')
        elif mode == 'msg':
            # Private message: deliver to both the target and the sender.
            target = request.query_data['target'][0]
            nicks = get_nicks(request)
            _, target_tag, _ = nicks[target].split(':')
            post_message(request, msg, 'msg', target=target,
                         queue=make_key(target_tag, 'private'))
            post_message(request, msg, 'msg', target=target,
                         queue=make_key(request.tag, 'private'))
        elif mode in ['message', 'action']:
            post_message(request, msg, mode)
        elif mode == 'topic':
            if msg:
                set_topic(request, msg)
            post_message(request, get_topic(request), 'topic')
        else:
            pass
        response = Response()
    else:
        response = Response(status=405)
    return response
def static(request, filename):
    """Serve a file from the static/ directory; 404 when unreadable."""
    path = os.path.join(ROOT, 'static/', filename)
    try:
        fobj = open(path, 'rb')
    except IOError:
        return Response(status=404)
    ctype, _ = mimetypes.guess_type(filename)
    return Response(fobj, content_type=ctype or 'application/octet-stream')
# (regex, view) pairs tried in order by application(); first match wins.
URLPATTERNS = [
    (r'^/$', index, ),
    (r'^/static/(?P<filename>.*)$', static,),
    (r'^/(?P<channel>.+)/$', chat, ),
]
| {
"repo_name": "funkybob/mini_chat",
"path": "chat.py",
"copies": "1",
"size": "7773",
"license": "mit",
"hash": -4048874166445350000,
"line_mean": 29.2451361868,
"line_max": 79,
"alpha_frac": 0.5822719671,
"autogenerated": false,
"ratio": 3.7806420233463034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9860377859262313,
"avg_score": 0.0005072262367982213,
"num_lines": 257
} |
from functools import partial
from httplib import FORBIDDEN
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.formats import date_format
from django.utils.timezone import localtime
from opendebates.forms import TopSubmissionForm
from opendebates.models import TopSubmission
from .factories import (SubmissionFactory, TopSubmissionCategoryFactory, UserFactory,
SiteFactory, DebateFactory)
# Force the reverse() used here in the tests to always use the full
# urlconf, despite whatever machinations have taken place due to the
# DebateMiddleware.
old_reverse = reverse
# Bound copy that always resolves against the full project urlconf.
reverse = partial(old_reverse, urlconf='opendebates.urls')
class TopArchiveTest(TestCase):
    """Tests for archiving top submissions: the TopSubmissionForm snapshot
    behavior, the public archive view, and the moderator-only add view."""

    def setUp(self):
        # One site/debate pair, three archive categories, ten candidate ideas.
        self.site = SiteFactory()
        self.debate = DebateFactory(site=self.site)
        self.url = lambda slug: reverse('top_archive', args=[self.debate.prefix, slug])
        self.mod_url = reverse('moderation_add_to_top_archive',
                               kwargs={'prefix': self.debate.prefix})
        self.categories = [TopSubmissionCategoryFactory()
                           for i in range(3)]
        self.ideas = [SubmissionFactory() for i in range(10)]
    def tearDown(self):
        # django.contrib.sites caches Site objects; reset between tests.
        Site.objects.clear_cache()
    def test_form_copies_submission(self):
        idea = self.ideas[0]
        original_headline = idea.headline
        form = TopSubmissionForm(data={'category': self.categories[0].id,
                                       'submission': idea.id,
                                       'rank': 1},
                                 debate=self.debate)
        archive = form.save()
        id = archive.id
        # The archive gets a copy of the idea's headline, followup,
        # and vote count at the moment of its creation:
        self.assertEqual(archive.headline, original_headline)
        self.assertEqual(archive.followup, idea.followup)
        self.assertEqual(archive.votes, idea.votes)
        self.assertEqual(archive.current_votes, idea.current_votes)
        # These are just copies; if the underlying idea changes
        # because of additional platform use or moderator edits,
        # the archive remains frozen:
        idea.headline = "An entirely new headline"
        idea.followup = "Some totally different followup text"
        idea.votes += 1500
        idea.current_votes += 1500
        idea.save()
        archive = TopSubmission.objects.get(id=id)
        self.assertNotEqual(archive.headline, idea.headline)
        self.assertEqual(archive.headline, original_headline)
        self.assertNotEqual(archive.followup, idea.followup)
        self.assertEqual(archive.votes, idea.votes - 1500)
        self.assertEqual(archive.current_votes, idea.current_votes - 1500)
        # Even if the idea is deleted altogether, the archive remains.
        idea.delete()
        archive = TopSubmission.objects.get(id=id)
        self.assertEqual(archive.submission, None)
        self.assertEqual(archive.headline, original_headline)
    def test_top_archive_view(self):
        self.ideas[0].votes = 1000
        self.ideas[0].save()
        TopSubmissionForm(data={'category': self.categories[0].id,
                                'submission': self.ideas[0].id,
                                'rank': 1},
                          debate=self.debate).save()
        self.ideas[3].votes = 4000
        self.ideas[3].save()
        TopSubmissionForm(data={'category': self.categories[0].id,
                                'submission': self.ideas[3].id,
                                'rank': 2},
                          debate=self.debate).save()
        self.ideas[2].votes = 5000
        self.ideas[2].save()
        TopSubmissionForm(data={'category': self.categories[0].id,
                                'submission': self.ideas[2].id,
                                'rank': 2},
                          debate=self.debate).save()
        # The "Top Questions" view will contain archived submissions
        rsp = self.client.get(self.url(self.categories[0].slug))
        self.assertContains(rsp, self.ideas[0].headline)
        self.assertNotContains(rsp, self.ideas[1].headline)
        # Submissions will appear in rank order, regardless of vote count
        self.assertLess(rsp.content.find(self.ideas[0].headline),
                        rsp.content.find(self.ideas[3].headline))
        # If two submissions have the same rank, the one that was archived
        # earlier in time will appear first
        self.assertLess(rsp.content.find(self.ideas[3].headline),
                        rsp.content.find(self.ideas[2].headline))
    def test_top_archive_view_does_not_link(self):
        """The archive view does not link to its individual entries"""
        TopSubmissionForm(data={'category': self.categories[0].id,
                                'submission': self.ideas[2].id,
                                'rank': 2},
                          debate=self.debate).save()
        rsp = self.client.get(self.url(self.categories[0].slug))
        self.assertNotContains(rsp, self.ideas[2].get_absolute_url())
    def test_top_archive_view_has_metadata_iff_idea_exists(self):
        """
        As long as the original submission still exists, the archive
        will display info on its author, submission date, and category
        """
        self.ideas[2].voter.display_name = 'George Washington'
        self.ideas[2].voter.save()
        self.ideas[2].category.name = 'Environmental Issues'
        self.ideas[2].category.save()
        TopSubmissionForm(data={'category': self.categories[0].id,
                                'submission': self.ideas[2].id,
                                'rank': 2},
                          debate=self.debate).save()
        rsp = self.client.get(self.url(self.categories[0].slug))
        self.assertContains(rsp, self.ideas[2].voter.user_display_name())
        self.assertContains(rsp, self.ideas[2].category.name)
        self.assertContains(rsp, date_format(localtime(self.ideas[2].created_at)))
        self.ideas[2].delete()
        rsp = self.client.get(self.url(self.categories[0].slug))
        self.assertNotContains(rsp, self.ideas[2].voter.user_display_name())
        self.assertNotContains(rsp, self.ideas[2].category.name)
        self.assertNotContains(rsp, date_format(localtime(self.ideas[2].created_at)))
        self.assertContains(rsp, self.ideas[2].headline)
    def test_multiple_archives(self):
        # An idea can appear in multiple archive categories
        TopSubmissionForm(data={'category': self.categories[0].id,
                                'submission': self.ideas[2].id,
                                'rank': 1},
                          debate=self.debate).save()
        TopSubmissionForm(data={'category': self.categories[1].id,
                                'submission': self.ideas[2].id,
                                'rank': 2},
                          debate=self.debate).save()
        rsp0 = self.client.get(self.url(self.categories[0].slug))
        rsp1 = self.client.get(self.url(self.categories[1].slug))
        self.assertContains(rsp0, self.ideas[2].headline)
        self.assertContains(rsp1, self.ideas[2].headline)
        # Each archive category has an entirely independent list
        TopSubmissionForm(data={'category': self.categories[0].id,
                                'submission': self.ideas[3].id,
                                'rank': 2},
                          debate=self.debate).save()
        TopSubmissionForm(data={'category': self.categories[1].id,
                                'submission': self.ideas[4].id,
                                'rank': 1},
                          debate=self.debate).save()
        rsp0 = self.client.get(self.url(self.categories[0].slug))
        rsp1 = self.client.get(self.url(self.categories[1].slug))
        self.assertContains(rsp0, self.ideas[3].headline)
        self.assertContains(rsp1, self.ideas[4].headline)
        self.assertNotContains(rsp1, self.ideas[3].headline)
        self.assertNotContains(rsp0, self.ideas[4].headline)
    def test_idea_once_per_category(self):
        # Archiving the same idea twice in one category is a validation error.
        TopSubmissionForm(data={'category': self.categories[0].id,
                                'submission': self.ideas[0].id,
                                'rank': 1},
                          debate=self.debate).save()
        form = TopSubmissionForm(data={'category': self.categories[0].id,
                                       'submission': self.ideas[0].id,
                                       'rank': 2},
                                 debate=self.debate)
        self.assertFalse(form.is_valid())
        self.assertEquals(
            form.non_field_errors(),
            [u'Top submission with this Category and Submission already exists.'])
    def test_idea_from_different_debate(self):
        # Ideas belonging to another debate cannot be archived here.
        debate = DebateFactory(site=self.site)
        self.ideas[0].category.debate = debate
        self.ideas[0].category.save()
        form = TopSubmissionForm(data={'category': self.categories[0].id,
                                       'submission': self.ideas[0].id,
                                       'rank': 1},
                                 debate=self.debate)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors.get('submission'),
            [u'This submission does not exist or is not in this debate.'])
    def test_moderator_view_only_accessible_to_superusers(self):
        # Anonymous, plain, and staff-only users are all forbidden;
        # only a staff superuser gets a 200.
        self.client.logout()
        rsp = self.client.get(self.mod_url)
        self.assertEqual(rsp.status_code, FORBIDDEN)
        password = 'secretpassword'
        user = UserFactory(password=password,
                           is_staff=False, is_superuser=False)
        self.client.login(username=user.username, password=password)
        rsp = self.client.get(self.mod_url)
        self.assertEqual(rsp.status_code, FORBIDDEN)
        user = UserFactory(password=password,
                           is_staff=True, is_superuser=False)
        self.client.login(username=user.username, password=password)
        rsp = self.client.get(self.mod_url)
        self.assertEqual(rsp.status_code, FORBIDDEN)
        user = UserFactory(password=password,
                           is_staff=True, is_superuser=True)
        self.client.login(username=user.username, password=password)
        rsp = self.client.get(self.mod_url)
        self.assertEqual(rsp.status_code, 200)
    def test_moderator_view(self):
        # A superuser POST archives the idea and redirects to the archive page.
        password = 'secretpassword'
        user = UserFactory(password=password,
                           is_staff=True, is_superuser=True)
        self.client.login(username=user.username, password=password)
        data = {
            'category': self.categories[0].id,
            'submission': self.ideas[0].id,
            'rank': 1,
        }
        rsp = self.client.post(self.mod_url, data=data)
        self.assertRedirects(rsp, self.url(self.categories[0].slug))
        self.assertContains(self.client.get(
            self.url(self.categories[0].slug)),
            self.ideas[0].headline)
| {
"repo_name": "caktus/django-opendebates",
"path": "opendebates/tests/test_top_archive.py",
"copies": "1",
"size": "11275",
"license": "apache-2.0",
"hash": -1235220486513323000,
"line_mean": 43.04296875,
"line_max": 87,
"alpha_frac": 0.5853658537,
"autogenerated": false,
"ratio": 4.107468123861566,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5192833977561566,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.