text stringlengths 0 1.05M | meta dict |
|---|---|
from functools import reduce
from math import factorial
import click
__doc__ = """See https://jtara1.github.io/bernoulli.html
for an updated Bernoulli Trials Calc"""
@click.command()
@click.argument('trials', type=click.INT)
@click.argument('prob_of_success', type=click.FLOAT)
def bernoulli_trials(trials, prob_of_success):
    """Print a table of Bernoulli-trial probabilities.

    For the given number of ``trials`` and per-trial ``prob_of_success``,
    prints P(exactly k successes) for increasing k (stopping once the
    probability drops below 1e-4, after at least k = 0..2), followed by
    cumulative tail probabilities P(successes >= k).
    """
    def binomial_distribution(n, k):
        # NOTE: despite the name this is the binomial *coefficient* C(n, k),
        # computed with float division.
        return factorial(n) / (factorial(k) * factorial(n - k))
    successes = 0
    prob = 1
    probabilities = []
    output = ['trials = {}, success = {}'.format(trials, prob_of_success),
              ''.join(['-'] * 50)]
    prob_of_failure = 1 - prob_of_success
    # Emit P(X = k) rows until they become negligibly small (but always at
    # least k = 0, 1, 2) or k exceeds the number of trials.
    while (prob >= 0.0001 or successes <= 2) and trials >= successes:
        prob = binomial_distribution(trials, successes) \
            * prob_of_success ** successes \
            * prob_of_failure ** (trials - successes)
        output.append("successes = {}, probability = {:.3}"
                      .format(successes, prob))
        successes += 1
        probabilities.append(prob)
    output.append(''.join(['-'] * 50))
    # Cumulative tails: sum of P(X = j) for j >= i.
    # NOTE(review): range stops at len(probabilities) - 1, so the last
    # computed k never gets a ">=" row — confirm whether that is intended.
    for i in range(1, len(probabilities) - 1):
        output.append("successes >= {}, probability = {:.3}"
                      .format(i, reduce(lambda x, y: x + y, probabilities[i:])))
    print('\n'.join(output))
if __name__ == '__main__':
    # click commands run in "standalone mode": they parse sys.argv, print
    # their own output and raise SystemExit, so they never return a value.
    # The old `output = bernoulli_trials(); print(output)` could therefore
    # never print anything but was dead/misleading code.
    bernoulli_trials()
    # bernoulli_trials(10, 0.05)
| {
"repo_name": "jtara1/MiscScripts",
"path": "misc_scripts/bernoulli_trials.py",
"copies": "1",
"size": "1406",
"license": "apache-2.0",
"hash": -4447280046224225300,
"line_mean": 32.4761904762,
"line_max": 80,
"alpha_frac": 0.5889046942,
"autogenerated": false,
"ratio": 3.6330749354005167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4721979629600517,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from math import fmod
from .. import Provider as SsnProvider
def zfix(d):
    """Zero-pad an integer to (at least) two digits.

    Fix: the original returned a str for d < 10 but an int otherwise; this
    always returns a str. The only call sites interpolate the result into an
    f-string, so the rendered output is unchanged.

    :param d: non-negative integer (day/month/year component)
    :return: two-character (or longer) zero-padded string
    """
    return "{:02d}".format(d)
class Provider(SsnProvider):
    """Hungarian (hu_HU) SSN and VAT-id provider."""

    def ssn(self, dob=None, gender=None):
        """
        Generates Hungarian SSN equivalent (személyazonosító szám or, colloquially, személyi szám)

        :param dob: date of birth as a "YYMMDD" string - this determines the checksum regime and is also encoded
            in the személyazonosító szám.
        :type dob: str
        :param gender: gender of the person - "F" for female, M for male.
        :type gender: str
        :return: személyazonosító szám in str format (11 digs)
        :rtype: str
        """
        # Hungarian SSNs consist of 11 decimal characters, of the following
        # schema:
        #
        #       M EEHHNN SSSK
        #       ↑    ↑     ↑  ↑
        #  gender bday  ser check digit
        #
        #
        # The M (gender) character
        # ------------------------
        #
        #  Born <= 1999    Born > 1999
        #  Male  Female    Male  Female
        #   1      2        3      4
        #
        # It also includes information on original citizenship,but this is
        # ignored for the sake of simplicity.
        #
        # Birthday
        # --------
        #
        # Simply encoded as EEHHNN.
        #
        #
        # Serial
        # ------
        #
        # These digits differentiate persons born on the same date.
        #
        #
        # Check digit
        # -----------
        #
        # For those born before 1996:
        #
        # k11 = (1k1 + 2k2 + 3k3... 10k10) mod 11
        #
        # That is, you multiply each digit with its ordinal, add it up and
        # take it mod 11. After 1996:
        #
        # k11 = (10k1 + 9k2 + 8k3... 1k10) mod 11
        #
        if dob:
            # Birth date supplied: parse year/month/day from "YYMMDD".
            E = int(dob[0:2])
            H = int(dob[2:4])
            N = int(dob[4:6])
            if E <= 17:
                # => person born after '99 in all likelihood...
                if gender:
                    if gender.upper() == "F":
                        M = 4
                    elif gender.upper() == "M":
                        M = 3
                    else:
                        raise ValueError("Unknown gender - specify M or F.")
                else:
                    M = self.generator.random_int(3, 4)
            else:
                # => person born before '99.
                if gender:
                    if gender.upper() == "F":
                        M = 2
                    elif gender.upper() == "M":
                        M = 1
                    else:
                        raise ValueError("Unknown gender - specify M or F.")
                else:
                    M = self.generator.random_int(1, 2)
        elif gender:
            # => assume statistically that the person will be born before '99.
            E = self.generator.random_int(17, 99)
            H = self.generator.random_int(1, 12)
            N = self.generator.random_int(1, 30)
            if gender.upper() == "F":
                M = 2
            elif gender.upper() == "M":
                M = 1
            else:
                raise ValueError("Unknown gender - specify M or F")
        else:
            # Neither dob nor gender given: randomize everything (pre-'99).
            M = self.generator.random_int(1, 2)
            E = self.generator.random_int(17, 99)
            H = self.generator.random_int(1, 12)
            N = self.generator.random_int(1, 30)
        H = zfix(H)
        N = zfix(N)
        S = f'{self.generator.random_digit()}{self.generator.random_digit()}{self.generator.random_digit()}'
        # NOTE(review): E is interpolated without zero-padding, so a dob year
        # of 00-09 yields a shorter-than-documented string — confirm upstream.
        vdig = f'{M}{E}{H}{N}{S}'
        # Pick the checksum regime from the two-digit year (see comment above).
        if 17 < E < 97:
            cum = [(k + 1) * int(v) for k, v in enumerate(vdig)]
        else:
            cum = [(10 - k) * int(v) for k, v in enumerate(vdig)]
        # NOTE(review): mod 11 can yield 10, which appends two characters and
        # produces a 12-digit result — verify against real checksum rules.
        K = fmod(reduce(lambda x, y: x + y, cum), 11)
        return vdig + str(int(K))

    vat_id_formats = (
        'HU########',
    )

    def vat_id(self):
        """
        http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
        :return: A random Hungarian VAT ID
        """
        return self.bothify(self.random_element(self.vat_id_formats))
| {
"repo_name": "joke2k/faker",
"path": "faker/providers/ssn/hu_HU/__init__.py",
"copies": "1",
"size": "4300",
"license": "mit",
"hash": 5943084309570857000,
"line_mean": 29.7769784173,
"line_max": 112,
"alpha_frac": 0.4513791491,
"autogenerated": false,
"ratio": 3.6879310344827587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4639310183582759,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from math import frexp
import psutil
import time
import os
# Binary unit prefixes: KiB, MiB, ... (rendered as "KB", "MB", ...).
byte_names = 'KMGTPEZY'


def bytes_for_humans(byte_count: int) -> str:
    """Render a byte count as a short human-readable string in binary units.

    Fix: the original crashed on byte_count == 0 (and negatives), because
    frexp(0) gives exponent 0, so the shift amount became negative
    (``1 << -10`` raises ValueError). Counts below 1024 are now reported
    directly in bytes.

    :param byte_count: number of bytes
    :return: e.g. '512 B', '1 KB', '3 MB' (magnitude truncated, not rounded)
    """
    # Anything below one KiB (including 0 and negatives) has no larger unit.
    if byte_count < 1024:
        return str(byte_count) + ' B'
    # frexp(x) == (m, e) with x = m * 2**e and 0.5 <= m < 1, so the exponent
    # minus one is floor(log2(x)) — taken straight from the float bits.
    power_of_2 = frexp(byte_count)[1] - 1
    binary_multiple = power_of_2 // 10
    # If too big, clamp to the largest known unit.
    if binary_multiple >= len(byte_names):
        binary_multiple = len(byte_names) - 1
    # Truncated magnitude of the most significant multiple of 1024.
    imprecise_magnitude = byte_count // (1 << (binary_multiple * 10))
    return str(imprecise_magnitude) + ' ' \
        + byte_names[binary_multiple - 1] + 'B'
def lower_bound(sequence, bound=0):
    """
    Lazily clamp every data point in *sequence* so that each yielded
    value is greater than or equal to *bound*.
    """
    return (max(point, bound) for point in sequence)
def power_range(start, stop=None, step=2):
    """
    Generates a geometric sequence starting at start and multiplying
    consecutive numbers by step until stop is reached (exclusive).

    With a single argument, behaves like range(): start defaults to 1 and
    the argument becomes stop.

    Fix: input validation used `assert`, which is silently stripped under
    `python -O`; invalid arguments now raise ValueError instead.

    :raises ValueError: unless 0 < start < stop and step > 1
    """
    if stop is None:
        start, stop = 1, start
    if start <= 0 or start >= stop or step <= 1:
        raise ValueError("power_range requires 0 < start < stop and step > 1")
    while start < stop:
        yield start
        start *= step
def time_it(func):
    """
    Decorator: run a function and return the time it took to execute in
    seconds (the wrapped function's own return value is discarded).

    Improvements: uses time.perf_counter() (monotonic, highest available
    resolution) instead of wall-clock time.time(), and functools.wraps to
    preserve the full metadata of *func*, not just __name__.
    """
    from functools import wraps

    @wraps(func)
    def timed_func(*args, **kwargs):
        start_time = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start_time
    return timed_func
def invert_array_of_dicts(array, keys):
    """Transpose a list of row-dicts into a dict of column-lists.

    (Resolves the old "streamline this" TODO.) Every requested key is
    pre-seeded, so an empty *array* now yields ``{key: []}`` for each key
    instead of an empty dict — more robust for downstream indexing.

    :param array: iterable of dicts, each containing every key in *keys*
    :param keys: column names to extract
    :return: dict mapping each key to the list of its values, in row order
    """
    result = {key: [] for key in keys}
    for item in array:
        for key in keys:
            result[key].append(item[key])
    return result
def plot_dict(name_to_data_mapping, *args, **kwargs):
    """Creates a plot of the given data in any order."""
    # NOTE(review): plot_tuple_array's first positional parameter is `axes`
    # and it calls .items() on its own mapping argument, so forwarding
    # name_to_data_mapping.items() as the first positional here looks
    # inconsistent with that signature — verify against actual callers.
    return plot_tuple_array(name_to_data_mapping.items(), *args, **kwargs)
def scale_axes(axes, xscale: float = 1, yscale: float = 1):
    """Resize a matplotlib axes in place by the given width/height factors,
    keeping its lower-left corner fixed."""
    box = axes.get_position()
    new_box = [box.x0, box.y0, box.width * xscale, box.height * yscale]
    axes.set_position(new_box)
def plot_tuple_array(axes, name_to_data_mapping, x_label, y_label,
                     custom_x_label=None, custom_y_label=None, y_mapping=None):
    """Creates a plot of the given data in the order it is given.

    :param axes: matplotlib axes to draw on
    :param name_to_data_mapping: dict of series name -> list of row-dicts,
        where each row-dict contains at least x_label and y_label keys
    :param x_label: key used for x values (and default axis label)
    :param y_label: key used for y values (and default axis label)
    :param custom_x_label: overrides x axis label when not None
    :param custom_y_label: overrides y axis label when not None
    :param y_mapping: optional callable applied to the y series before plotting
    :return: list of the Line2D objects plotted, one per series (sorted by name)
    """
    def plot_inner_arr(name, inverted_array):
        # Columnize the rows, then plot one labelled line for the series.
        data = invert_array_of_dicts(inverted_array, inverted_array[0].keys())
        y_data = data[y_label]
        if y_mapping is not None:
            y_data = list(y_mapping(y_data))
        return axes.plot(data[x_label], y_data, label=name)[0]
    plots = list(map(
        lambda result_tuple: plot_inner_arr(*result_tuple),
        sorted(name_to_data_mapping.items())
    ))
    axes.set_xlabel(custom_x_label if custom_x_label is not None else x_label)
    axes.set_ylabel(custom_y_label if custom_y_label is not None else y_label)
    return plots
def memory_percent():
    """Total memory percentage used by this process plus all of its
    (recursive) child processes, as reported by psutil."""
    me = psutil.Process(os.getpid())
    total = me.memory_percent()
    for child in me.children(recursive=True):
        total += child.memory_percent()
    return total
| {
"repo_name": "JohnStarich/python-pool-performance",
"path": "utils.py",
"copies": "1",
"size": "3481",
"license": "mit",
"hash": -805992299645185500,
"line_mean": 29.2695652174,
"line_max": 79,
"alpha_frac": 0.6176386096,
"autogenerated": false,
"ratio": 3.6260416666666666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9740574686204555,
"avg_score": 0.0006211180124223602,
"num_lines": 115
} |
from functools import reduce
from math import sqrt, log
from random import randint, getrandbits
# from matplotlib import pyplot as plt
from time import time
__author__ = 'Roland'
def product(l):
    """Multiply together all elements of *l*; returns 1 for an empty input."""
    return reduce(lambda acc, factor: acc * factor, l, 1)
def lcm(l):
    """Least common multiple of the numbers in *l*.

    Fix: the old implementation used product(l) // gcd-of-all, an identity
    that only holds for exactly two numbers — e.g. it returned 24 for
    [2, 4, 6] instead of 12. The LCM is now folded pairwise with
    lcm(a, b) = a*b // gcd(a, b), which is correct for any length >= 1.
    """
    from math import gcd as _gcd
    return reduce(lambda a, b: a * b // _gcd(a, b), l)
def gcd(a, b):
    """Greatest common divisor of *a* and *b* via Euclid's algorithm."""
    if a == 0:
        return b
    if b == 0:
        return a
    while a > 0:
        a, b = b % a, a
    return b
def trial_div(n):
    """Smallest non-trivial factor of *n* by trial division up to sqrt(n);
    returns *n* itself when no divisor is found (i.e. n is prime)."""
    if n % 2 == 0:
        return 2
    limit = int(sqrt(n))
    candidate = 3
    while candidate <= limit:
        if n % candidate == 0:
            return candidate
        candidate += 2
    return n
def factor(n, method=trial_div, B=None):
    """Fully factorize *n* by repeatedly pulling out one factor at a time.

    :param method: single-factor routine (trial_div or pollard_p1)
    :param B: smoothness bound for pollard_p1; bumped by 2 after each factor
    :return: list of the extracted factors, in extraction order
    """
    divisors = []
    remaining = n
    while remaining != 1:
        if method == pollard_p1 and B is not None:
            divisor = method(remaining, B)
            B += 2
        else:
            divisor = method(remaining)
        divisors.append(divisor)
        remaining //= divisor
    return divisors
def pollard_p1(n, B = None):
    """Pollard's p-1 factorization: try to find one non-trivial divisor of n.

    :param n: number to factor (returned unchanged when n < 150 or after
        five failed attempts)
    :param B: smoothness bound; randomized from [5, ln(n)] when None
    :return: a non-trivial divisor of n, or n itself on failure
    """
    # Small numbers are not worth the randomized machinery.
    if n < 150:
        return n
    for i in range(5):
        if B is None:
            B = randint(5, int(log(n)))
        # k = lcm(1..B-1): exponent that is a multiple of p-1 for any prime p
        # with B-smooth p-1.
        k = lcm(range(1, B))
        a = randint(2, n - 2)
        a = pow(a, k, n)
        # gcd(a^k - 1, n) exposes such a prime factor p when one exists.
        d = gcd(a - 1, n)
        if d not in (1, n):
            return d
        # Failed attempt: re-randomize the bound on the next iteration.
        B = None
    return n
def test(a):
    """Factor *a* with both methods, printing the factorizations.

    :return: (trial-division seconds, pollard p-1 seconds)
    """
    started = time()
    print(factor(a))
    trial_elapsed = time() - started
    started = time()
    print(factor(a, pollard_p1))
    return trial_elapsed, time() - started
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1 and sys.argv[1] == 'graph':
        # Benchmark/plotting mode is disabled; it needs matplotlib
        # (see the commented import at the top of the file). The original
        # commented-out code timed trial division vs pollard_p1 over random
        # 60-bit integers via test() and plotted both curves.
        pass
    else:
        # Interactive loop: read "<integer> <bound>" pairs until interrupted.
        while True:
            try:
                # Fix: raw_input() is Python 2 only and raises NameError here;
                # the rest of the file targets Python 3 (print() calls,
                # functools.reduce import), so use input().
                x, b = input("Give an integer to factorize and the bound for Pollards p-1 method \n").split()
                print("Trial division: ")
                print(factor(int(x)))
                print("P-1:")
                print(factor(int(x), pollard_p1, int(b)))
            except Exception:
                print("You must give both an integer and a bound")
| {
"repo_name": "rolisz/hw3",
"path": "Crypto/labs/l4/factorization.py",
"copies": "1",
"size": "2500",
"license": "bsd-3-clause",
"hash": 4584868555468568000,
"line_mean": 21.9357798165,
"line_max": 113,
"alpha_frac": 0.4676,
"autogenerated": false,
"ratio": 3.1446540880503147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4112254088050315,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from .models import PublicationStatus
from django_orghierarchy.models import Organization
class UserModelPermissionMixin:
    """Permission mixin for user models

    A mixin class that provides permission check methods
    for user models.
    """

    def is_admin(self, publisher):
        """Check if current user is an admin user of the publisher organization"""
        raise NotImplementedError()

    def is_regular_user(self, publisher):
        """Check if current user is a regular user of the publisher organization"""
        raise NotImplementedError()

    @property
    def admin_organizations(self):
        # Queryset of organizations this user administers (implemented by the
        # concrete user model).
        raise NotImplementedError()

    @property
    def organization_memberships(self):
        # Queryset of organizations this user is a regular member of
        # (implemented by the concrete user model).
        raise NotImplementedError()

    def can_edit_event(self, publisher, publication_status):
        """Check if current user can edit (create, change, modify)
        event with the given publisher and publication_status"""
        # Admins may edit anything; regular users may only touch drafts.
        if self.is_admin(publisher):
            return True
        if self.is_regular_user(publisher) and publication_status == PublicationStatus.DRAFT:
            return True
        return False

    def get_editable_events(self, queryset):
        """Get editable events queryset from given queryset for current user"""
        # distinct is not needed here, as admin_orgs and memberships should not overlap
        return queryset.filter(
            publisher__in=self.get_admin_organizations_and_descendants()
        ) | queryset.filter(
            publication_status=PublicationStatus.DRAFT, publisher__in=self.organization_memberships.all()
        )

    def get_admin_tree_ids(self):
        """Set of MPTT tree ids covering the user's admin organizations."""
        # returns tree ids for all normal admin organizations and their replacements
        admin_queryset = self.admin_organizations.filter(internal_type='normal').select_related('replaced_by')
        admin_tree_ids = admin_queryset.values('tree_id')
        admin_replaced_tree_ids = admin_queryset.filter(replaced_by__isnull=False).values('replaced_by__tree_id')
        return (set(value['tree_id'] for value in admin_tree_ids) |
                set(value['replaced_by__tree_id'] for value in admin_replaced_tree_ids))

    def get_admin_organizations_and_descendants(self):
        """Queryset of every organization the user administers, including all
        descendants and descendants of replacement organizations."""
        # returns admin organizations and their descendants
        if not self.admin_organizations.all():
            return Organization.objects.none()
        # regular admins have rights to all organizations below their level
        admin_orgs = []
        for admin_org in self.admin_organizations.all():
            admin_orgs.append(admin_org.get_descendants(include_self=True))
            if admin_org.replaced_by:
                # admins of replaced organizations have these rights, too!
                admin_orgs.append(admin_org.replaced_by.get_descendants(include_self=True))
        # for multiple admin_orgs, we have to combine the querysets and filter distinct
        return reduce(lambda a, b: a | b, admin_orgs).distinct()
| {
"repo_name": "City-of-Helsinki/linkedevents",
"path": "events/permissions.py",
"copies": "1",
"size": "2993",
"license": "mit",
"hash": 6197366422804372000,
"line_mean": 44.3484848485,
"line_max": 113,
"alpha_frac": 0.6819244905,
"autogenerated": false,
"ratio": 4.493993993993994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5675918484493995,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from mpi4py import MPI
from itertools import islice
from datetime import datetime as time
import logging
from collections import defaultdict
from decomposer import Decomposer
class MapReduce(object):
    """Serial/MPI map-reduce harness.

    Wraps the user-supplied mapper, reducer and optional shuffler in "safe"
    versions that log and swallow exceptions (mapper failures yield None,
    which the safe reducer treats as an identity element).
    """

    def __init__(self, mapper, reducer, communicator=None, subsample=1, shuffler=None, prepartitioned=False ):
        # Raw user callables; "unsafe" because they may raise.
        self.unsafe_mapper = mapper
        self.unsafe_reducer = reducer
        self.unsafe_shuffler = shuffler
        self.subsample = subsample
        self.communicator=communicator
        self.prepartitioned=prepartitioned
        self.logger=logging.getLogger('performance')
        # safe reduce
        def safeReducer(a, b):
            # None marks a failed map result; skip it rather than crash.
            if a is None:
                return b
            if b is None:
                return a
            return self.unsafe_reducer(a,b)
        self.reducer=safeReducer
        # safe map
        def safeMap(arg):
            # Log and swallow mapper failures, returning None instead.
            self.logger.debug("Entered mapper")
            try:
                result= self.unsafe_mapper(arg)
                self.logger.debug("Exiting mapper")
                return result
            except Exception as e:
                self.logger.warn("Problem with map")
                self.logger.warn(str(e))
                return None
        self.mapper=safeMap
        if shuffler:
            def safeShuffler(arg, count):
                # Shuffler maps a key (and rank count) to a destination rank.
                try:
                    return self.unsafe_shuffler(arg, count)
                except Exception as e:
                    self.logger.warn("Problem with shuffle")
                    self.logger.warn(str(e))
                    return None
            self.shuffler=safeShuffler
        else:
            self.shuffler=None

    def execute(self, data):
        """Dispatch to parallel() when an MPI communicator with more than one
        rank is configured; otherwise run serially."""
        if self.communicator and self.communicator.size>1:
            return self.parallel(data)
        else:
            return self.serial(data)

    def serial(self, data):
        """Single-process map-reduce over the (subsampled) data."""
        try:
            count=len(data)
        except AttributeError:
            count=None
        # NOTE(review): `count` is unused below, and len() raises TypeError
        # (not AttributeError) for unsized inputs — confirm intent.
        subsampled_data=Decomposer(data, subsample=self.subsample)
        quantities= map(self.mapper, subsampled_data)
        result = reduce(self.reducer, quantities)
        return result

    def parallel(self, data):
        """Map over this rank's partition, then reduce across ranks via MPI.

        With a shuffler: keys are bucketed per destination rank and one MPI
        reduce runs per rank. Without: everything reduces to rank 0 and the
        result is broadcast.
        """
        perfLogger=logging.getLogger('performance')
        # local map
        if self.prepartitioned:
            partition=Decomposer(data,subsample=self.subsample)
        else:
            partition=Decomposer(data, self.communicator, subsample=self.subsample )
        perfLogger.info("Built iterator")
        quantities=map(self.mapper,partition)
        perfLogger.info("Mapped")
        local_result=reduce(self.reducer, quantities)
        perfLogger.info("Local reduce")
        # reduce under mpi
        def reduce_arrays(x,y,dtype):
            # the signature for the user defined op takes a datatype, which we can ignore
            return self.reducer(x,y)
        reducer_mpi=MPI.Op.Create(reduce_arrays, True)
        perfLogger.debug("Local result: "+str(local_result)[0:60])
        if self.shuffler:
            perfLogger.info("Shuffling")
            shuffled=defaultdict(dict)
            if local_result:
                for key in local_result:
                    shuffled[self.shuffler(key, self.communicator.size)][key]=local_result[key]
            for root in range(self.communicator.size):
                perfLogger.info("Reducing to rank "+str(root))
                temp=self.communicator.reduce(shuffled[root],op=reducer_mpi,root=root)
                if self.communicator.rank==root:
                    result=temp
        else:
            result = self.communicator.reduce(local_result, op=reducer_mpi, root=0)
            result = self.communicator.bcast(result, root=0)
        perfLogger.info("Global reduce")
        reducer_mpi.Free()
        return result
| {
"repo_name": "UCL-dataspring/cluster-code",
"path": "bluclobber/harness/mapreduce.py",
"copies": "1",
"size": "3829",
"license": "mit",
"hash": -6612310923042427000,
"line_mean": 36.5392156863,
"line_max": 111,
"alpha_frac": 0.5917994254,
"autogenerated": false,
"ratio": 4.16195652173913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010231063830671424,
"num_lines": 102
} |
from functools import reduce
from operator import add
from fca.context import Context
from fuzzy.fca.fuzzy_context import FuzzyContext
## Search result -> context
def getContextFromSR(documents, terms, relation, maxKeywords):
    """Build a crisp FCA context from search-result documents.

    Attributes are the union of *terms* and the top *maxKeywords* keywords
    of every document; objects are the document URLs.
    """
    doc_keywords = [[pair[0] for pair in doc['keywords'][:maxKeywords]]
                    for doc in documents]
    all_keywords = sorted(set(terms + reduce(add, doc_keywords, [])))
    sites = _selectColumn('url', documents)
    ids = _selectColumn('id', documents)
    table = _getTable(relation, ids, all_keywords)
    return Context(table, sites, all_keywords)
def getFuzzyContext(documents, terms, keywordsScoreTable, termFrequency):
    """Build a fuzzy FCA context from search-result documents.

    Like getContextFromSR, but all document keywords are used and the
    incidence table holds graded scores instead of booleans.
    """
    doc_keywords = [[pair[0] for pair in doc['keywords']] for doc in documents]
    all_keywords = sorted(set(terms + reduce(add, doc_keywords, [])))
    sites = _selectColumn('url', documents)
    ids = _selectColumn('id', documents)
    table = getFuzzyTable(keywordsScoreTable, ids, all_keywords, termFrequency, terms)
    return FuzzyContext(table, sites, all_keywords)
def getFuzzyTable(keywordsScoreTable, objects, attributes, termFrequency, terms):
    """Build the fuzzy incidence table: one row per object, one column per
    attribute. Query-term columns get termFrequency(attr, obj); keyword
    columns get the object's keyword score (0 when absent)."""
    table = []
    for obj_id in objects:
        scores = keywordsScoreTable[obj_id]
        row = [termFrequency(attr, obj_id) if attr in terms
               else scores.get(attr, 0)
               for attr in attributes]
        table.append(row)
    return table
def _getTable(relation, ids, keywords):
table = []
for id in ids:
line = []
for keyword in keywords:
line.append(relation(keyword, id))
table.append(line)
return table
def _selectColumn(name, table):
return [x[name] for x in table]
## Context -> slf
def context2slf(context):
    """Serialise a crisp context into the .slf lattice file format:
    dimensions, object names, attribute names, then a 0/1 relation matrix
    (one space-suffixed digit per cell)."""
    lines = ['[Lattice]', str(context.height), str(context.width), '[Objects]']
    lines.extend(context.objects)
    lines.append('[Attributes]')
    lines.extend(context.attributes)
    lines.append('[relation]')
    for row in context.table:
        lines.append(''.join('1 ' if cell else '0 ' for cell in row))
    return '\n'.join(lines)
"repo_name": "havrlant/fca-search",
"path": "src/fca_extension/utilities.py",
"copies": "1",
"size": "2154",
"license": "bsd-2-clause",
"hash": -9062217105849639000,
"line_mean": 24.6547619048,
"line_max": 88,
"alpha_frac": 0.7028783658,
"autogenerated": false,
"ratio": 3.2537764350453173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.840020477928162,
"avg_score": 0.2112900043127394,
"num_lines": 84
} |
from functools import reduce
from operator import add
from pygame.math import Vector2 as V2
import pygame as pg, os
from src.display.tkinter_windows import create_menu
from src.core import constants
def init_display():
    """Initialise pygame and open a resizable, centered window sized to a
    fraction of the desktop (60% width, 75% height).

    :return: (screen surface, window dimensions as a Vector2)
    """
    pg.init()
    info = pg.display.Info()
    dims = (int(info.current_w * 0.6), int(info.current_h * 0.75))
    # SDL honours this env var by centering the window on the desktop.
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    pg.display.set_icon(pg.image.load('AtomIcon.png'))
    screen = pg.display.set_mode(dims, pg.RESIZABLE)
    pg.display.set_caption("Physics Simulator 2.0")
    return screen, V2(dims)
def refresh_display(settings_window, screen, bodies, cam):
    """Redraw one frame: background fill, optional wall border, then every
    body as a circle projected through the camera."""
    screen.fill(settings_window.bg_color)  # comment out this line for a fun time ;)
    if settings_window.walls.get():
        pg.draw.rect(screen, (0, 0, 0), pg.Rect(0, 0, *cam.dims), 3)
    for b in bodies:
        # Calculate coordinates and radius adjusted for camera
        x, y = (b.position - cam.position - cam.dims / 2) * cam.scale + cam.dims / 2
        pg.draw.circle(screen, b.color, (int(x), int(y)), int(b.radius * cam.scale), 0)
        # The radius should be calculated in such a way that the camera can be zoomed indefinitely.
        # Currently, the properties of an object can reach a distinct threshold, after which they become invisible.
    pg.display.update()
def update_windows(settings_window):
    """Pump the Tk settings and per-body properties windows and read the
    current simulation parameters back from the settings widgets.

    :return: [G, COR, [time_factor, collision, walls, g_field, gravity_on]],
        falling back to zeros when the settings window is gone/unreadable.
    """
    arr = [0, 0, [0] * 5]
    if settings_window.alive:
        settings_window.update()
        try:
            arr = [settings_window.gravity_slider.get() / 100, settings_window.COR_slider.get(),
                   [settings_window.time_slider.get() / 100,
                    settings_window.collision.get(), settings_window.walls.get(), settings_window.g_field.get(),
                    settings_window.gravity_on.get()]]
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. Widgets can be mid-teardown; keep defaults.
            pass
    # Fix: iterate over a copy — removing from the list being iterated
    # silently skipped the window following each removed one.
    for window in list(settings_window.properties_windows):
        if window.alive:
            window.update()
        else:
            settings_window.properties_windows.remove(window)
    return arr
def handle_mouse(*args):
    """Handle one mouse-button event: left click opens a properties window
    for the clicked body; wheel up/down zooms the camera in/out (inverse
    scaling is applied to the scroll pan speed)."""
    settings_window, camera, event, bodies, dims, G, COR, scroll = args
    if event.button == 1:
        # Screen -> world coordinates through the camera transform.
        pos = camera.position + (pg.mouse.get_pos() - dims / 2) / camera.scale + dims / 2
        for b in bodies:
            if b.click_collision(pos) and b not in [win.body for win in settings_window.properties_windows]:
                if not settings_window.alive:  # Respawn the main window if it is dead
                    settings_window.__init__(bodies, camera, dims, [G, COR])  # This still does not fix all errors
                settings_window.properties_windows.append(
                    create_menu("BodyProperties", bodies, camera, dims, len(settings_window.properties_windows), b))
    elif event.button == 4:
        # Wheel up: zoom in, clamped to 100x.
        camera.scale = min(camera.scale * 1.1, 100)
        scroll.scale /= 1.1
    elif event.button == 5:
        # Wheel down: zoom out, clamped to 0.01x.
        camera.scale = max(camera.scale / 1.1, 0.01)
        scroll.scale *= 1.1
def handle_events(*args):
    """Drain the pygame event queue: window resize, key presses/releases
    (forwarded to scroll and camera), mouse buttons, and quit.

    :return: (done flag, possibly-updated dims, possibly-updated screen)
    """
    settings_window, camera, scroll, done, dims, screen, bodies, G, COR = args
    for event in pg.event.get():
        if event.type == pg.VIDEORESIZE:
            width, height = event.w, event.h
            dims, screen = V2(width, height), pg.display.set_mode((width, height), pg.RESIZABLE)
        elif event.type == pg.KEYDOWN:
            scroll.key(event.key, 1)
            camera.key_down(event.key)
        elif event.type == pg.KEYUP:
            scroll.key(event.key, 0)
            camera.key_up(event.key)
        elif event.type == pg.MOUSEBUTTONDOWN:
            handle_mouse(settings_window, camera, event, bodies, dims, G, COR, scroll)
        # Any QUIT event latches the done flag.
        done |= event.type == pg.QUIT
    return done, dims, screen
def handle_bodies(*args):
    """Advance the simulation one step: collisions (merge or bounce),
    pairwise gravity, uniform field, motion integration, off-screen culling
    and optional wall bounces."""
    G, COR, time_factor, collision, walls, g_field, gravity, scroll, bodies, camera, dims, frame_count, settings_window = args
    for body in bodies:  # Reset previous calculations
        body.acceleration = V2(0, 0)
    # NOTE(review): bodies is mutated (pop/remove) while being iterated below;
    # the `break` after a merge limits the damage but entries can still be
    # skipped in that frame — confirm this is acceptable.
    for b, body in enumerate(bodies):  # Calculate forces and set acceleration, if mutual gravitation is enabled
        # Each unordered pair (b, o) is visited once, o counting down to b+1.
        for o in range(len(bodies) - 1, b, -1):
            if collision and bodies[o].test_collision(body):
                if not COR:  # Only remove second body if collision is perfectly inelastic
                    bodies[o].merge(bodies[b], settings_window.properties_windows)
                    bodies.pop(b)
                    break
                bodies[o].collide(bodies[b], COR)
            if gravity:
                force = body.force_of(bodies[o], G)  # This is a misnomer; `force` is actually acceleration / mass
                # Newton's third law: equal and opposite contributions.
                body.acceleration += bodies[o].mass * force
                bodies[o].acceleration -= body.mass * force
        body.acceleration.y += G / 50 * g_field  # Uniform gravitational field
        body.apply_motion(time_factor)
        body.position += scroll.val
        # Cull far-away bodies, checked only every 100th frame.
        if not frame_count % 100 and body.position.length() > 100000:  # TODO: find a good value from this boundary
            bodies.remove(body)
            for window in settings_window.properties_windows:
                if window.body is body:
                    settings_window.properties_windows.remove(window)
                    window.destroy()
                    break
        if walls:  # Wall collision
            # d: body position in screen space; r: radius in screen space.
            d, r = ((body.position - camera.position) - dims / 2) * camera.scale + dims / 2, body.radius * camera.scale
            for i in 0, 1:
                x = d[i]  # x is the dimension (x,y) currently being tested / edited
                if x <= r or x >= dims[i] - r:
                    body.velocity[i] *= -COR  # Reflect the perpendicular velocity
                    body.position[i] = (2 * (x < r) - 1) * (r - dims[i] / 2) / camera.scale + dims[i] / 2 + \
                                       camera.position[i]  # Place body back into frame
class Scroll:
    """Tracks WASD-driven panning: key state accumulates into a decaying
    pan offset applied to all bodies each frame."""

    def __init__(self):
        # Per-key pressed flags, the WASD key map (left, up, right, down),
        # the current pan offset, and the zoom-dependent pan speed.
        self.down = [0, 0, 0, 0]
        self.map = [pg.K_a, pg.K_w, pg.K_d, pg.K_s]
        self.val = V2(0, 0)
        self.scale = 1

    def key(self, key, down):
        """Record press (down=1) / release (down=0) of a mapped pan key."""
        if key in self.map:
            self.down[self.map.index(key)] = down

    def update_value(self):
        """Integrate currently-held keys into the pan offset with a 5%
        per-frame exponential decay."""
        direction = V2(self.down[:2]) - self.down[2:]
        self.val = (self.val + self.scale * direction) * .95
class Camera:
    """Arrow-key-driven camera: position, velocity, zoom scale, and helpers
    to jump to a body or the system's centre of mass."""

    def __init__(self, dims):
        # map order: right, left, up, down.
        self.position, self.velocity, self.dims, self.scale, self.map = V2(0, 0), V2(0, 0), dims, 1, [pg.K_RIGHT,
                                                                                                      pg.K_LEFT,
                                                                                                      pg.K_UP,
                                                                                                      pg.K_DOWN]

    def key_down(self, key):
        """Start moving in the direction of a pressed arrow key (speed is
        scaled inversely with zoom so on-screen speed is constant)."""
        if key in self.map:
            # Axis picked by whether the key is horizontal (first two) or
            # vertical; sign flipped for left (index 1) and up (index 2).
            self.velocity = V2((3 / self.scale, 0) if key in self.map[:2] else (0, 3 / self.scale)).elementwise() * (
                (self.map.index(key) not in (1, 2)) * 2 - 1)

    def key_up(self, key):
        """Stop motion along the axis of the released arrow key."""
        if key in self.map:
            self.velocity = self.velocity.elementwise() * ((0, 1) if key in self.map[:2] else (1, 0))

    def move_to_com(self, bodies):
        """Centre the view on the mass-weighted centre of the bodies."""
        total_mass = sum(b.mass for b in bodies)
        self.position = reduce(add, (b.position * b.mass for b in bodies)) / total_mass - self.dims / 2

    def move_to_body(self, body):
        """Centre the view on a single body."""
        self.position = body.position - self.dims / 2

    def apply_velocity(self):
        # Advance the camera by its per-frame velocity.
        self.position += self.velocity
def main():
    """Entry point: set up the display, camera, scroll and Tk settings
    window, then run the fixed-rate simulation/render loop until quit."""
    screen, dims = init_display()
    bodies, camera, scroll = [], Camera(dims), Scroll()
    settings_window, clock, done, frame_count = create_menu("Settings", bodies, camera, dims,
                                                            [constants.G, constants.COR]), pg.time.Clock(), False, 0
    while not done:
        # Cap the frame rate, then: camera motion, read settings from Tk,
        # process input events, step the physics, redraw, decay the pan.
        clock.tick(constants.clock_speed)
        frame_count += 1
        camera.apply_velocity()
        G, COR, misc_settings = update_windows(settings_window)
        done, dims, screen = handle_events(settings_window, camera, scroll, done, dims, screen, bodies, G, COR)
        handle_bodies(G, COR, *misc_settings, scroll, bodies, camera, dims, frame_count, settings_window)
        refresh_display(settings_window, screen, bodies, camera)
        scroll.update_value()
    pg.quit()
    if settings_window.alive: settings_window.destroy()


if __name__ == "__main__":
    main()
| {
"repo_name": "rschwa6308/Physics-2.0",
"path": "Physics 2.0.py",
"copies": "1",
"size": "8393",
"license": "mit",
"hash": -6545692374609549000,
"line_mean": 43.4074074074,
"line_max": 126,
"alpha_frac": 0.5721434529,
"autogenerated": false,
"ratio": 3.6270527225583407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46991961754583406,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import add
import numpy as np
import sym
# Real-life example (ion speciation problem in water chemistry)
_ref = np.array([37.252574322668998, 22.321937961124899, 10.9011158998744,
20.190422234652999, 27.8679190043357, 33.933606208922598,
33.552055153126204, 31.440168027241697, 37.999293413509498,
41.071619997204103, -20.619381941508539, 111.68831884983794,
29.210791083803763, 18.901100113049495, 17.18281828459045])
def get_syms_exprs(backend):
    """Build the benchmark system (ion speciation in water chemistry).

    :param backend: a sym backend providing symarray() and exp()
    :return: (syms, exprs) — 28 symbols (x_0..x_13, p_0..p_13) and the 15
        expressions whose evaluation at ones(28) must match _ref
    """
    x = backend.symarray('x', 14)
    p = backend.symarray('p', 14)
    syms = np.concatenate((x, p))
    exp = backend.exp
    # 10 linear conservation relations followed by 5 nonlinear (exp) ones.
    exprs = [
        x[0] + x[1] - x[4] + 36.252574322669,
        x[0] - x[2] + x[3] + 21.3219379611249,
        x[3] + x[5] - x[6] + 9.9011158998744,
        2*x[3] + x[5] - x[7] + 18.190422234653,
        3*x[3] + x[5] - x[8] + 24.8679190043357,
        4*x[3] + x[5] - x[9] + 29.9336062089226,
        -x[10] + 5*x[3] + x[5] + 28.5520551531262,
        2*x[0] + x[11] - 2*x[4] - 2*x[5] + 32.4401680272417,
        3*x[1] - x[12] + x[5] + 34.9992934135095,
        4*x[1] - x[13] + x[5] + 37.0716199972041,
        (
            p[0] - p[1] + 2*p[10] + 2*p[11] - p[12] - 2*p[13] +
            p[2] + 2*p[5] + 2*p[6] + 2*p[7] + 2*p[8] + 2*p[9] -
            exp(x[0]) + exp(x[1]) - 2*exp(x[10]) - 2*exp(x[11]) +
            exp(x[12]) + 2*exp(x[13]) - exp(x[2]) - 2*exp(x[5]) -
            2*exp(x[6]) - 2*exp(x[7]) - 2*exp(x[8]) - 2*exp(x[9])
        ), (
            -p[0] - p[1] - 15*p[10] - 2*p[11] - 3*p[12] - 4*p[13] -
            4*p[2] - 3*p[3] - 2*p[4] - 3*p[6] - 6*p[7] - 9*p[8] -
            12*p[9] + exp(x[0]) + exp(x[1]) + 15*exp(x[10]) +
            2*exp(x[11]) + 3*exp(x[12]) + 4*exp(x[13]) + 4*exp(x[2]) +
            3*exp(x[3]) + 2*exp(x[4]) + 3*exp(x[6]) + 6*exp(x[7]) +
            9*exp(x[8]) + 12*exp(x[9])
        ), (
            -5*p[10] - p[2] - p[3] - p[6] - 2*p[7] - 3*p[8] - 4*p[9] +
            5*exp(x[10]) + exp(x[2]) + exp(x[3]) + exp(x[6]) +
            2*exp(x[7]) + 3*exp(x[8]) + 4*exp(x[9])
        ), (
            -p[1] - 2*p[11] - 3*p[12] - 4*p[13] - p[4] + exp(x[1]) +
            2*exp(x[11]) + 3*exp(x[12]) + 4*exp(x[13]) + exp(x[4])
        ), (
            -p[10] - 2*p[11] - p[12] - p[13] - p[5] - p[6] - p[7] -
            p[8] - p[9] + exp(x[10]) + 2*exp(x[11]) + exp(x[12]) +
            exp(x[13]) + exp(x[5]) + exp(x[6]) + exp(x[7]) +
            exp(x[8]) + exp(x[9])
        )
    ]
    return syms, exprs
class TimeLambdifyInit:
    """ASV benchmark: time constructing a Lambdify callback per backend."""

    params = ['sympy', 'symengine', 'pysym', 'symcxx']

    def time_init(self, name):
        """Build the benchmark system and compile it with the named backend."""
        backend = sym.Backend(name)
        self.syms, self.exprs = get_syms_exprs(backend)
        backend.Lambdify(self.syms, self.exprs)
# All registered sym backends.
backend_names = list(sym.Backend.backends.keys())
n_backends = len(backend_names)
# Parameter grid of (backend name, (use_numba, warm_up)) pairs: every backend
# with numba disabled, plus sympy with numba (cold and warmed-up variants).
_backend_numba = list(zip(backend_names, zip(*[[False]*n_backends]*2))) + [('sympy', (True, False)), ('sympy', (True, True))]
class TimeLambdifyEval:
    """ASV benchmark: time evaluating a compiled Lambdify callback n times,
    verifying each result against the precomputed _ref vector."""

    params = ([1, 100], _backend_numba)
    param_names = ('n', 'backend_numba')

    def setup(self, n, backend_numba):
        """Compile the benchmark system; optionally run once to warm up
        (relevant for numba's JIT compilation)."""
        name, (use_numba, warm_up) = backend_numba
        self.inp = np.ones(28)
        self.backend = sym.Backend(name)
        self.syms, self.exprs = get_syms_exprs(self.backend)
        # use_numba is only meaningful for the sympy backend.
        kwargs = {'use_numba': use_numba} if name == 'sympy' else {}
        self.lmb = self.backend.Lambdify(self.syms, self.exprs, **kwargs)
        self.values = {}
        if warm_up:
            self.time_evaluate(n, backend_numba)

    def time_evaluate(self, n, backend_numba):
        """The timed body: n evaluations, each checked against _ref."""
        name, (use_numba, warm_up) = backend_numba
        for i in range(n):
            res = self.lmb(self.inp)
            if not np.allclose(res, _ref):
                raise ValueError('Incorrect result')
def _mk_long_evaluator(backend, n, **kwargs):
    """Build a many-argument Lambdify callback plus matching input and
    reference output.

    :param backend: a sym backend
    :param n: number of symbols (and rows of the input matrix)
    :return: (callback, (n, n) input array, (n, 3) expected output)
    """
    x = backend.symarray('x', n)
    p, q, r = 17, 42, 13
    # Weighted sum with coefficients p, p+1, ..., p+n-1.
    terms = [i*s for i, s in enumerate(x, p)]
    exprs = [reduce(add, terms), r + x[0], -99]
    callback = backend.Lambdify(x, exprs, **kwargs)
    input_arr = np.arange(q, q + n*n).reshape((n, n))
    # Analytical expectation for each input row.
    ref = np.empty((n, 3))
    coeffs = np.arange(p, p + n)
    for i in range(n):
        ref[i, 0] = coeffs.dot(np.arange(q + n*i, q + n*(i+1)))
        ref[i, 1] = q + n*i + r
    ref[:, 2] = -99
    return callback, input_arr, ref
class TimeLambdifyManyArgs:
    """ASV benchmark: time one evaluation of a callback with many (100-300)
    arguments, verifying against the analytically-computed reference."""

    params = ([100, 200, 300], _backend_numba)
    param_names = ('n', 'backend_numba')

    def setup(self, n, backend_numba):
        """Compile the n-argument system; optionally warm up (numba JIT)."""
        name, (use_numba, warm_up) = backend_numba
        self.backend = sym.Backend(name)
        # use_numba is only meaningful for the sympy backend.
        kwargs = {'use_numba': use_numba} if name == 'sympy' else {}
        self.callback, self.input_arr, self.ref = _mk_long_evaluator(self.backend, n, **kwargs)
        if warm_up:
            self.time_evaluate(n, backend_numba)

    def time_evaluate(self, n, backend_numba):
        """The timed body: a single evaluation, checked against the reference."""
        name, (use_numba, warm_up) = backend_numba
        out = self.callback(self.input_arr)
        if not np.allclose(out, self.ref):
            raise ValueError('Incorrect result')
| {
"repo_name": "bjodah/sym",
"path": "benchmarks/benchmarks/Lambdify.py",
"copies": "1",
"size": "5088",
"license": "bsd-2-clause",
"hash": -104008510235055380,
"line_mean": 36.6888888889,
"line_max": 125,
"alpha_frac": 0.5123820755,
"autogenerated": false,
"ratio": 2.4723032069970845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3484685282497085,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import add
import re
from typing import Any, List, Dict, Set, Tuple, Optional
from werkzeug.datastructures import ImmutableOrderedMultiDict
EMAIL_REGEX = r'^[^@^\s]+@[^@^\.^\s]+(\.[^@^\.^\s]+)+$'
def get_validator(framework, content, answers):
    """
    Retrieves a validator by slug contained in the framework dictionary.

    Falls back to SharedValidator for unknown slugs.

    :param framework: framework dict with at least a 'slug' key
    :param content: declaration content passed to the validator
    :param answers: declaration answers passed to the validator
    :raises ValueError: if framework is None
    """
    if framework is None:
        raise ValueError("a framework dictionary must be provided")
    # (The old `if framework is not None:` re-check after the raise above was
    # redundant — framework is always non-None here.)
    validator_cls = VALIDATORS.get(framework['slug'], SharedValidator)
    return validator_cls(content, answers)
class DeclarationValidator(object):
    """
    Base validator for supplier declaration answers.

    Subclasses customise behaviour via the class attributes below and by
    overriding ``get_required_fields``.
    """

    # Fields whose answers must look like email addresses.
    email_validation_fields: Set[str] = set()
    # (field, length) pairs whose answers must be digit strings of exactly
    # that length.
    number_string_fields: List[Tuple[str, int]] = []
    # Maximum characters allowed in text answers (None = unlimited).
    character_limit: Optional[int] = None
    # Default maximum words for text answers (None = unlimited); questions
    # may override via their 'max_length_in_words' key.
    word_limit: Optional[int] = None
    # Fields that never have to be answered.
    optional_fields: Set[str] = set()

    def __init__(self, content, answers):
        # content: framework content manifest (question metadata);
        # answers: mapping of question id -> supplier's answer.
        self.content = content
        self.answers = answers

    def get_error_messages_for_page(self, section) -> ImmutableOrderedMultiDict:
        """Return only the errors for questions appearing on *section*."""
        all_errors = self.get_error_messages()
        page_ids = section.get_question_ids()
        return ImmutableOrderedMultiDict(filter(lambda err: err[0] in page_ids, all_errors))

    def get_error_messages(self) -> List[Tuple[str, dict]]:
        """Return (question_id, error-details) pairs in manifest order."""
        raw_errors_map = self.errors()
        errors_map = list()
        for question_id in self.all_fields():
            if question_id in raw_errors_map:
                question_number = self.content.get_question(question_id).get('number')
                validation_message = self.get_error_message(question_id, raw_errors_map[question_id])
                errors_map.append((question_id, {
                    'input_name': question_id,
                    'href': self.content.get_question(question_id).get('href') or None,
                    'question': "Question {}".format(question_number)
                    if question_number else self.content.get_question(question_id).get('question'),
                    'message': validation_message,
                }))
        return errors_map

    def get_error_message(self, question_id: str, message_key: str) -> str:
        """Resolve *message_key* to display text, preferring the question's
        own configured validation messages over the generic defaults."""
        for validation in self.content.get_question(question_id).get('validations', []):
            if validation['name'] == message_key:
                return validation['message']  # type: ignore
        default_messages = {
            'answer_required': 'You need to answer this question.',
            'under_character_limit': 'Your answer must be no more than {} characters.'.format(self.character_limit),
            'invalid_format': 'You must enter a valid email address.',
        }
        return default_messages.get(
            message_key, 'There was a problem with the answer to this question')

    def all_fields(self) -> List[str]:
        # Flatten the question ids of every section, in manifest order.
        return reduce(add, (section.get_question_ids() for section in self.content))

    def fields_with_values(self) -> Set[str]:
        # An answer counts as given unless it is None or an empty string
        # (falsy non-string values such as False or 0 still count).
        return set(key for key, value in self.answers.items()
                   if value is not None and (not isinstance(value, str) or len(value) > 0))

    def errors(self) -> Dict[str, str]:
        """Return question id -> error key for every failed check.

        Later updates win for a question with several failures, so
        'answer_required' takes precedence over format/length errors.
        """
        errors_map = {}
        errors_map.update(self.character_limit_errors())
        errors_map.update(self.word_limit_errors())
        errors_map.update(self.formatting_errors(self.answers))
        errors_map.update(self.answer_required_errors())
        return errors_map

    def answer_required_errors(self) -> Dict[str, str]:
        """Flag every required field that has no answer."""
        req_fields = self.get_required_fields()
        filled_fields = self.fields_with_values()
        errors_map = {}
        for field in req_fields - filled_fields:
            errors_map[field] = 'answer_required'
        return errors_map

    def character_limit_errors(self) -> Dict[str, str]:
        """Flag text answers exceeding ``character_limit``."""
        errors_map = {}
        for question_id in self.all_fields():
            if self.content.get_question(question_id).get('type') in ['text', 'textbox_large']:
                answer = self.answers.get(question_id) or ''
                if self.character_limit is not None and len(answer) > self.character_limit:
                    errors_map[question_id] = "under_character_limit"
        return errors_map

    def word_limit_errors(self) -> Dict[str, str]:
        """Flag text answers exceeding their word limit."""
        errors_map = {}
        for question_id in self.all_fields():
            question = self.content.get_question(question_id)
            if question.get('type') in ['text', 'textbox_large']:
                # Get word limit from question content, fall back to class attribute
                word_limit = question.get('max_length_in_words', self.word_limit)
                answer = self.answers.get(question_id) or ''
                if word_limit is not None and len(answer.split()) > word_limit:
                    errors_map[question_id] = "under_word_limit"
        return errors_map

    def formatting_errors(self, answers) -> Dict[str, str]:
        """Flag email and fixed-length-number fields with malformed answers.

        NOTE(review): the ``answers`` parameter is unused; the method reads
        ``self.answers`` — kept for signature compatibility with callers.
        """
        errors_map = {}
        if self.email_validation_fields is not None and len(self.email_validation_fields) > 0:
            for field in self.email_validation_fields:
                if self.answers.get(field) is None or not re.match(EMAIL_REGEX, self.answers.get(field, '')):
                    errors_map[field] = 'invalid_format'
        if self.number_string_fields is not None and len(self.number_string_fields) > 0:
            for field, length in self.number_string_fields:
                if self.answers.get(field) is None or not re.match(
                        r'^\d{{{0}}}$'.format(length), self.answers.get(field, '')
                ):
                    errors_map[field] = 'invalid_format'
        return errors_map

    def get_required_fields(self) -> Set[str]:
        """All fields that must be answered: a subclass-defined
        ``required_fields`` set if present, otherwise every field,
        minus the optional ones."""
        try:
            req_fields = self.required_fields  # type: ignore
        except AttributeError:
            req_fields = set(self.all_fields())
        # Remove optional fields
        if self.optional_fields is not None:
            req_fields -= set(self.optional_fields)
        return req_fields  # type: ignore
class G7Validator(DeclarationValidator):
    """
    Validator for G-Cloud 7.
    """

    optional_fields = {
        "SQ1-1p-i", "SQ1-1p-ii", "SQ1-1p-iii", "SQ1-1p-iv",
        "SQ1-1q-i", "SQ1-1q-ii", "SQ1-1q-iii", "SQ1-1q-iv", "SQ1-1cii", "SQ1-1i-ii",
        "SQ1-1j-i", "SQ1-1j-ii", "SQ4-1c", "SQ3-1k", "SQ1-1i-i"
    }
    email_validation_fields = {'SQ1-1o', 'SQ1-2b'}
    character_limit = 5000

    def get_required_fields(self) -> Set[str]:
        """Add G7's conditionally-required questions to the base set."""
        required = super().get_required_fields()
        answers = self.answers

        # If you answered other to question 19 (trading status)
        if answers.get('SQ1-1ci') == 'other (please specify)':
            required.add('SQ1-1cii')

        # If you answered yes to question 27 (non-UK business registered in EU)
        if answers.get('SQ1-1i-i', False):
            required.add('SQ1-1i-ii')

        # If you answered 'licensed' or 'a member of a relevant organisation'
        # in question 29
        answer_29 = answers.get('SQ1-1j-i', [])
        if answer_29 and len(answer_29) > 0 and (
                'licensed' in answer_29
                or 'a member of a relevant organisation' in answer_29):
            required.add('SQ1-1j-ii')

        # If you answered yes to either question 53 or 54 (tax returns)
        if answers.get('SQ4-1a', False) or answers.get('SQ4-1b', False):
            required.add('SQ4-1c')

        # If you answered Yes to questions 39 - 51 (discretionary exclusion)
        discretionary = (
            'SQ2-2a', 'SQ3-1a', 'SQ3-1b', 'SQ3-1c', 'SQ3-1d', 'SQ3-1e', 'SQ3-1f', 'SQ3-1g',
            'SQ3-1h-i', 'SQ3-1h-ii', 'SQ3-1i-i', 'SQ3-1i-ii', 'SQ3-1j'
        )
        if any(answers.get(field) for field in discretionary):
            required.add('SQ3-1k')

        # If you answered No to question 26 (established in the UK)
        if 'SQ5-2a' in answers and not answers['SQ5-2a']:
            required.update(('SQ1-1i-i', 'SQ1-1j-i'))

        return required
class DOSValidator(DeclarationValidator):
    """
    Validator for Digital Outcomes and Specialists declarations.

    ``dependent_fields`` maps an otherwise-optional question to the list of
    questions which, when any is answered affirmatively, make it required.
    """

    optional_fields = {
        "mitigatingFactors", "mitigatingFactors2", "mitigatingFactors3", "tradingStatusOther",
        "modernSlaveryStatement", "modernSlaveryStatementOptional", "modernSlaveryReportingRequirements",
        # Registered in UK = no
        "appropriateTradeRegisters", "appropriateTradeRegistersNumber",
        "licenceOrMemberRequired", "licenceOrMemberRequiredDetails",
    }
    dependent_fields = {
        # If you responded yes to any of questions 22 to 34
        "mitigatingFactors": [
            'misleadingInformation', 'confidentialInformation', 'influencedContractingAuthority',
            'witheldSupportingDocuments', 'seriousMisrepresentation', 'significantOrPersistentDeficiencies',
            'distortedCompetition', 'conflictOfInterest', 'distortingCompetition', 'graveProfessionalMisconduct',
            'bankrupt', 'environmentalSocialLabourLaw', 'taxEvasion'
        ],
        # If you responded yes to either 36 or 37
        "mitigatingFactors2": [
            "unspentTaxConvictions", "GAAR"
        ],
        # If you responded yes to 50 (Modern Slavery)
        "modernSlaveryStatement": [
            "modernSlaveryTurnover",
        ],
        "modernSlaveryReportingRequirements": [
            "modernSlaveryTurnover"
        ],
    }
    email_validation_fields = {"contactEmailContractNotice", "primaryContactEmail"}
    character_limit = 5000

    def get_required_fields(self) -> Set[str]:
        """Extend the base required fields with conditionally-required ones."""
        req_fields = super(DOSValidator, self).get_required_fields()
        for target_field, fields in self.dependent_fields.items():
            if any(self.answers.get(field) for field in fields):
                req_fields.add(target_field)
        # Describe your trading status
        if self.answers.get('tradingStatus') == "other (please specify)":
            req_fields.add('tradingStatusOther')
        # If your company was not established in the UK
        if self.answers.get('establishedInTheUK') is False:
            req_fields.add('appropriateTradeRegisters')
        # If yes to appropriate trade registers
        if self.answers.get('appropriateTradeRegisters') is True:
            req_fields.add('appropriateTradeRegistersNumber')
            req_fields.add('licenceOrMemberRequired')
        # If not 'none of the above' to licenceOrMemberRequired
        if self.answers.get('licenceOrMemberRequired') in ['licensed', 'a member of a relevant organisation']:
            req_fields.add('licenceOrMemberRequiredDetails')
        # If supplier doesn't meet the Modern Slavery reporting requirements,
        # they don't need to upload a statement but must have a
        # mitigatingFactors3 explanation
        if self.answers.get('modernSlaveryReportingRequirements') is False:
            req_fields.add('mitigatingFactors3')
            # Use discard() rather than remove(): 'modernSlaveryStatement' is
            # only present in req_fields when 'modernSlaveryTurnover' was
            # answered truthily, so remove() could raise KeyError here.
            req_fields.discard('modernSlaveryStatement')
        return req_fields
class SharedValidator(DOSValidator):
    """Validator shared by most frameworks from DOS2/G8 onwards."""

    # From DOS2 and G8 onwards, validate DUNS number length
    number_string_fields = [('dunsNumber', 9)]
    # Word cap applied to free-text answers.
    word_limit = 500
class G12Validator(SharedValidator):
    """G-Cloud 12 validator: adds a cross-question dependency check on the
    two "which lots are you submitting" questions."""

    def errors(self) -> Dict[str, str]:
        errors_map = super().errors()
        q1_negative = "My organisation isn't submitting cloud hosting (lot 1) or cloud software (lot 2) services"
        q2_negative = "My organisation isn't submitting cloud support (lot 3) services"
        answered_q1 = self.answers.get('servicesHaveOrSupportCloudHostingCloudSoftware')
        answered_q2 = self.answers.get('servicesHaveOrSupportCloudSupport')
        # A supplier cannot opt out of every lot at once.
        if answered_q1 == q1_negative and answered_q2 == q2_negative:
            errors_map['servicesHaveOrSupportCloudHostingCloudSoftware'] = 'dependent_question_error'
            errors_map['servicesHaveOrSupportCloudSupport'] = 'dependent_question_error'
        return errors_map
def is_valid_percentage(value: Any) -> bool:
    """
    Return ``True`` if *value* represents a percentage between 0 and 100.

    Design System guidance is that we should allow users to provide answers
    with or without units, so a trailing "%" on string input is ignored.

    :param value: a number, or a string such as ``"50"`` or ``"50%"``
    :return: ``True`` for values in the inclusive range 0-100
    """
    if isinstance(value, str):
        value = value.rstrip('%')
    try:
        number = float(value)
    except (TypeError, ValueError):
        # TypeError covers non-numeric, non-string input such as None,
        # which previously escaped as an unhandled exception.
        return False
    else:
        return 0 <= number <= 100
class DOS5Validator(SharedValidator):
    """Following an accessibility review, a number of questions and answers were changed for DOS 5"""

    # Fields whose answers must parse as 0-100 percentages.
    percentage_fields = ["subcontractingInvoicesPaid"]
    optional_fields = SharedValidator.optional_fields.union({
        "subcontracting30DayPayments",
        "subcontractingInvoicesPaid"}
    )

    def get_required_fields(self) -> Set[str]:
        """Require the subcontracting payment questions when subcontracting."""
        required = super().get_required_fields()
        # as per subcontracting configuration on digitalmarketplace-frameworks
        subcontracting_answers = (
            "as a prime contractor, using third parties (subcontractors) to provide some services",
            "as part of a consortium or special purpose vehicle, using third parties (subcontractors) to provide some "
            "services",
        )
        if self.answers.get("subcontracting") in subcontracting_answers:
            required.update(("subcontracting30DayPayments", "subcontractingInvoicesPaid"))
        return required

    def formatting_errors(self, answers) -> Dict[str, str]:
        """Additionally validate percentage-typed answers."""
        errors = super().formatting_errors(answers)
        for field in self.percentage_fields:
            answer = self.answers.get(field)
            if answer is not None and not is_valid_percentage(answer):
                errors[field] = 'not_a_number'
        return errors
# Framework slug -> validator class; ``get_validator`` falls back to
# SharedValidator for slugs not listed here.
VALIDATORS = {
    "g-cloud-7": G7Validator,
    "g-cloud-8": SharedValidator,
    "digital-outcomes-and-specialists": DOSValidator,
    "digital-outcomes-and-specialists-2": SharedValidator,
    "g-cloud-9": SharedValidator,
    "g-cloud-10": SharedValidator,
    "digital-outcomes-and-specialists-3": SharedValidator,
    "g-cloud-11": SharedValidator,
    "digital-outcomes-and-specialists-4": SharedValidator,
    "g-cloud-12": G12Validator,
    "digital-outcomes-and-specialists-5": DOS5Validator,
}
| {
"repo_name": "alphagov/digitalmarketplace-supplier-frontend",
"path": "app/main/helpers/validation.py",
"copies": "1",
"size": "14230",
"license": "mit",
"hash": 1957619954695075300,
"line_mean": 40.2463768116,
"line_max": 119,
"alpha_frac": 0.628531272,
"autogenerated": false,
"ratio": 3.751647772211969,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.987385555564668,
"avg_score": 0.0012646977130579586,
"num_lines": 345
} |
from functools import reduce
from operator import add
from strategies import condition, do_one, exhaust
from ...core import Add, Expr, sympify
from ...core.logic import _fuzzy_group
from ...core.strategies import flatten, glom, rm_id, sort, unpack
from ...functions import adjoint
from ...utilities import default_sort_key, sift
from ..matrices import MatrixBase, ShapeError
from .matexpr import MatrixExpr, ZeroMatrix
from .transpose import transpose
class MatAdd(MatrixExpr):
    """A Sum of Matrix Expressions

    MatAdd inherits from and operates like Diofant Add

    >>> A = MatrixSymbol('A', 5, 5)
    >>> B = MatrixSymbol('B', 5, 5)
    >>> C = MatrixSymbol('C', 5, 5)
    >>> MatAdd(A, B, C)
    A + B + C
    """

    is_MatAdd = True

    def _eval_is_commutative(self):
        # Commutative iff every summand is (fuzzy three-valued logic,
        # bailing out early on a definite False).
        return _fuzzy_group((a.is_commutative for a in self.args),
                            quick_exit=True)

    def __new__(cls, *args, **kwargs):
        # Sympify all arguments; matrix/shape validation can be skipped by
        # passing check=False.
        args = list(map(sympify, args))
        check = kwargs.get('check', True)
        obj = Expr.__new__(cls, *args)
        if check:
            validate(*args)
        return obj

    @property
    def shape(self):
        # All summands share a shape (enforced by validate()), so the
        # first argument's shape is the sum's shape.
        return self.args[0].shape

    def _entry(self, i, j):
        # Entry (i, j) of a sum is the sum of the entries.
        return Add(*[arg._entry(i, j) for arg in self.args])

    def _eval_transpose(self):
        # Transpose distributes over addition.
        return MatAdd(*[transpose(arg) for arg in self.args]).doit()

    def _eval_adjoint(self):
        # Adjoint distributes over addition.
        return MatAdd(*[adjoint(arg) for arg in self.args]).doit()

    def _eval_trace(self):
        # Trace is linear: tr(A + B) = tr(A) + tr(B).
        from .trace import trace
        return Add(*[trace(arg) for arg in self.args]).doit()

    def doit(self, **kwargs):
        # Canonicalize the sum, optionally recursing into the args first.
        deep = kwargs.get('deep', True)
        if deep:
            args = [arg.doit(**kwargs) for arg in self.args]
        else:
            args = self.args
        return canonicalize(MatAdd(*args))
def validate(*args):
    """Check every argument is a matrix and all shapes agree.

    Raises TypeError for any non-matrix argument and ShapeError when a
    shape differs from the first argument's.
    """
    for arg in args:
        if not arg.is_Matrix:
            raise TypeError('Mix of Matrix and Scalar symbols')
    first = args[0]
    for other in args[1:]:
        if first.shape != other.shape:
            raise ShapeError(f'Matrices {first} and {other} are not aligned')
def factor_of(arg):
    """Return the scalar coefficient of arg's (coeff, matrix) split."""
    coeff, _ = arg.as_coeff_mmul()
    return coeff
def matrix_of(arg):
    """Return the (unpacked) matrix part of arg's (coeff, matrix) split."""
    _, mat = arg.as_coeff_mmul()
    return unpack(mat)
def combine(cnt, mat):
    """Multiply *mat* by *cnt*, skipping the multiplication when cnt == 1."""
    return mat if cnt == 1 else cnt * mat
def merge_explicit(matadd):
    """Merge explicit MatrixBase arguments

    >>> A = MatrixSymbol('A', 2, 2)
    >>> B = eye(2)
    >>> C = Matrix([[1, 2], [3, 4]])
    >>> X = MatAdd(A, B, C)
    >>> pprint(X, use_unicode=False)
    [1 0] [1 2]
    A + [ ] + [ ]
    [0 1] [3 4]
    >>> pprint(merge_explicit(X), use_unicode=False)
    [2 2]
    A + [ ]
    [3 5]
    """
    grouped = sift(matadd.args, lambda arg: isinstance(arg, MatrixBase))
    explicit = grouped[True]
    # Nothing to merge unless at least two explicit matrices are present.
    if len(explicit) <= 1:
        return matadd
    merged = reduce(add, explicit)
    return MatAdd(*(grouped[False] + [merged]))
# Rewrite rules applied (in order) to put a MatAdd into canonical form:
# drop zero summands, unwrap trivial sums, flatten nested sums, collect
# repeated matrices into scalar multiples, fold explicit matrices into a
# single one, and sort the summands deterministically.
rules = (rm_id(lambda x: x == 0 or isinstance(x, ZeroMatrix)),
         unpack,
         flatten,
         glom(matrix_of, factor_of, combine),
         merge_explicit,
         sort(default_sort_key))

# Apply the rules repeatedly until a fixed point, but only on MatAdd nodes.
canonicalize = exhaust(condition(lambda x: isinstance(x, MatAdd),
                                 do_one(rules)))
| {
"repo_name": "skirpichev/omg",
"path": "diofant/matrices/expressions/matadd.py",
"copies": "1",
"size": "3262",
"license": "bsd-3-clause",
"hash": 5121473153398543000,
"line_mean": 25.096,
"line_max": 71,
"alpha_frac": 0.5717351318,
"autogenerated": false,
"ratio": 3.4591728525980914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45309079843980915,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import and_
from operator import or_
from django.db import models
from django.db.models import Q
__all__ = [
'LookupManager',
'LookupManagerMixin',
'OrderedSearchManager',
'OrderedSearchManagerMixin',
'SearchManager',
'SearchManagerMixin',
]
class OrderedSearchManagerMixin(object):
    """
    Mixin that applies a default ordering to search results.

    The ordering fields may be set on the class via ``search_fields_order``
    or passed to ``__init__`` as the ``search_order_fields`` keyword.
    """

    search_fields_order = tuple()

    def __init__(self, *args, **kwargs):
        search_fields_order = kwargs.pop('search_order_fields', None)
        if search_fields_order:
            self.search_fields_order = search_fields_order
        super(OrderedSearchManagerMixin, self).__init__(*args, **kwargs)

    def search(self, queryset=None):
        """
        Return *queryset* (or the manager's default queryset) ordered by
        ``search_fields_order`` when any ordering fields are configured.
        """
        # Compare against None explicitly: the previous
        # ``queryset or self.get_queryset()`` evaluated the queryset's
        # truthiness (hitting the database) and silently replaced a
        # legitimately *empty* queryset with the manager's full default.
        if queryset is None:
            queryset = self.get_queryset()
        if self.search_fields_order:
            queryset = queryset.order_by(*self.search_fields_order)
        return queryset
class OrderedSearchManager(OrderedSearchManagerMixin, models.Manager):
    """Concrete Django manager that orders search results."""

    pass
class LookupManagerMixin(object):
    """
    Mixin providing icontains-based multi-field search.

    The searchable fields may be set on the class via ``search_fields``
    or passed to ``__init__`` as the ``search_fields`` keyword.
    """

    search_fields = tuple()

    def __init__(self, *args, **kwargs):
        search_fields = kwargs.pop('search_fields', None)
        if search_fields:
            self.search_fields = search_fields
        super(LookupManagerMixin, self).__init__(*args, **kwargs)

    def search(self, *args, **kwargs):
        """Search for anything in args and in search_fields

        Produces the queryset of all results where each arg from args
        appears at least once in any of the fields in "search_fields".

        For every word, produces a "factor" of the form:
        Q(field1__icontains=word1) | Q(field2__icontains=word1) | ...

        And reduces factors like:
        factor1 & factor2 & ...
        """
        queryset = kwargs.pop('queryset', None)
        if queryset is None:
            # Only hit get_queryset() when no queryset was supplied; the
            # previous dict-style default evaluated it unconditionally.
            queryset = self.get_queryset()
        # Without search terms there is nothing to filter on, and
        # reduce() over an empty sequence would raise TypeError.
        if not args:
            return queryset
        query_keys = ["%s__icontains" % field for field in self.search_fields]
        query_factors = []
        for word in args:
            query_pairs = [{k: word} for k in query_keys]
            query_objects = [Q(**qp) for qp in query_pairs]
            query_factors.append(reduce(or_, query_objects))
        queryset = queryset.filter(reduce(and_, query_factors))
        return queryset
class LookupManager(LookupManagerMixin, models.Manager):
    """Concrete Django manager exposing icontains multi-field search."""

    pass
class SearchManagerMixin(LookupManagerMixin, OrderedSearchManagerMixin):
    """Combines field lookup with ordered results.

    NOTE(review): both parents define ``search`` with different
    signatures; by MRO, ``LookupManagerMixin.search`` is the one used.
    """

    pass
class SearchManager(SearchManagerMixin, models.Manager):
    """Concrete Django manager combining lookup and ordering mixins."""

    pass
| {
"repo_name": "jleeothon/inquisition",
"path": "inquisition/managers.py",
"copies": "1",
"size": "2489",
"license": "mit",
"hash": 8629245437145161000,
"line_mean": 28.9879518072,
"line_max": 78,
"alpha_frac": 0.6508638007,
"autogenerated": false,
"ratio": 4.1072607260726075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5258124526772607,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import and_, or_
from common.funcfun import lmap
from retrieval.boolean_parser import Node
import shelve
from other.constants import INDEX_FOLDER_NAME, DOCUMENT_INFO_NAME, INFO_FOLDER_NAME, STEMSDICT_NAME,\
KEYWORDSINDOCUMENTS_NAME, SCORES_TABLE
from retrieval.stem import Stem
import math
class Index:
    """Reader for an on-disk inverted index stored with ``shelve``.

    Supports stem lookups, frequency statistics and evaluation of parsed
    boolean queries against the indexed documents.
    """

    def __init__(self, directory, settings, documentsInfo = None):
        self.applySettings(settings)
        try:
            self.info = shelve.open(directory + INFO_FOLDER_NAME + 'info')
        except Exception as err:
            # NOTE(review): on failure ``self.info`` stays unset and later
            # attribute access will raise; the error is only printed here.
            print(err)
        self.directory = directory
        self.documents_info = documentsInfo if documentsInfo else self._get_translation()
        self.total_records = len(self.documents_info)
        # Lazily populated caches, filled on first use.
        self.stemsDict = None
        self.docKeywords = None
        self.allKeywords = None
        self.searchedStem = {}

    def getDocInfo(self, docID):
        """Return the stored info record for document *docID*."""
        return self.documents_info[docID]

    def getAllWords(self):
        """Return all indexed words from the info shelf."""
        return self.info['allwords']

    def applySettings(self, settings):
        # Key prefix length used to choose the shelf file for a stem.
        self.keylen = settings.get('keylen')

    def get_documents(self, parsedQuery):
        """Evaluate a parsed boolean query, returning document info records."""
        documents = self._by_node(parsedQuery)
        return lmap(self._translate, documents)

    def get_stem_info(self, stem):
        """Return the Stem record for *stem*, caching lookups in memory.

        Unknown stems and shelf errors yield an empty Stem object.
        """
        stemObject = self.searchedStem.get(stem, False)
        if stemObject:
            return stemObject
        else:
            prefix = stem[:self.keylen]
            try:
                sh = shelve.open(self.directory + INDEX_FOLDER_NAME + '/' + prefix)
                record = sh[stem]
                stemObject = Stem(record, stem)
                self.searchedStem[stem] = stemObject
            except KeyError:
                # Stem not present in this shelf.
                stemObject = Stem()
            except Exception as err:
                # Shelf could not be opened/read; treat as an unknown stem.
                stemObject = Stem()
            return stemObject

    def getTermCountInDoc(self, term, docID):
        """Return how many times *term* occurs in document *docID*."""
        documents = self.get_stem_info(term).documents
        return documents.get(docID, 0)

    def term_frequency(self, term, documentID, wordsCount):
        """Return *term*'s count in the document, damped by log(wordsCount)."""
        stem = self.get_stem_info(term)
        documents = stem.documents
        freq = documents.get(documentID, 0)
        if wordsCount > 1:
            return freq / math.log(wordsCount)
        else:
            # Degenerate document length: heavily damp the raw count.
            return freq / 1000

    def totalTermFrequency(self, term):
        """Return the total occurrences of *term* across all documents."""
        documents = self.get_stem_info(term).documents
        return sum(x[1] for x in documents.items())

    def contains_term(self, term, documentID):
        """Return True if *term* occurs in document *documentID*."""
        if not self.docKeywords:
            # Lazily load the keyword tables on first use.
            temp = self.info[KEYWORDSINDOCUMENTS_NAME]
            self.docKeywords = temp['inDocuments']
            self.allKeywords = temp['keywords']
        if term in self.allKeywords:
            return term in self.docKeywords[documentID]
        else:
            # wordsCount of e makes the log() damping a no-op above.
            return self.term_frequency(term, documentID, math.e) > 0

    def document_frequency(self, term):
        """Return the number of documents containing *term*."""
        return len(self.get_stem_info(term).documents)

    def getKeywords(self, docID):
        """Return the keywords stored for document *docID*."""
        return self.documents_info[docID]['keywords']

    def stem2word(self, stem, queryStems = {}):
        """Map a stem back to a representative word.

        Looks in *queryStems* first, then the persisted stem dictionary,
        falling back to the stem itself.

        NOTE(review): mutable default argument; harmless here because it
        is only read, never mutated.
        """
        if not self.stemsDict:
            self.stemsDict = self.info[STEMSDICT_NAME]
        return queryStems.get(stem, self.stemsDict.get(stem, stem))

    def getLinks(self):
        """Return the URLs of all indexed documents."""
        return [x['url'] for x in self.info[DOCUMENT_INFO_NAME]]

    def getKeywordsScore(self):
        """Return the persisted keyword score table."""
        return self.info[SCORES_TABLE]

    def _by_node(self, node):
        # Recursively evaluate a boolean query tree into a set of doc ids.
        # NOTE(review): a Node with an unrecognised type falls through and
        # implicitly returns None.
        if isinstance(node, Node):
            if node.type == 'AND':
                return self._by_and_node(node.children)
            if node.type == 'OR':
                return self._by_or_node(node.children)
            if node.type == 'NOT':
                return self._by_not_node(node.children[0])
        else:
            return self._by_word(node)

    def _by_and_node(self, children):
        # Intersection of the children's result sets.
        return reduce(and_, map(self._by_node, children))

    def _by_or_node(self, children):
        # Union of the children's result sets.
        temp = list(map(self._by_node, children))
        return reduce(or_, temp)

    def _by_not_node(self, argument):
        # Complement against the full document-id range.
        allDocuments = set(range(len(self.documents_info)))
        matchedDoc = self._by_node(argument)
        return allDocuments - matchedDoc

    def _by_words(self, words):
        # AND-combine several words and translate ids to info records.
        return lmap(self._translate, reduce(and_, map(self._by_word, words)))

    def _by_word(self, stem):
        # Set of document ids containing the given stem.
        documents = self.get_stem_info(stem)
        return documents.docids

    def _get_translation(self):
        # Document id -> info record table from the info shelf.
        return self.info[DOCUMENT_INFO_NAME]

    def _translate(self, docid):
        # Map a document id to its stored info record.
        tr = self.documents_info[docid]
return tr | {
"repo_name": "havrlant/fca-search",
"path": "src/retrieval/index.py",
"copies": "1",
"size": "3978",
"license": "bsd-2-clause",
"hash": -1280858361503991600,
"line_mean": 27.4214285714,
"line_max": 101,
"alpha_frac": 0.7099044746,
"autogenerated": false,
"ratio": 3.0647149460708785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8456608837731378,
"avg_score": 0.16360211658790016,
"num_lines": 140
} |
from functools import reduce
from operator import getitem
from inspect import getdoc
from werkzeug.exceptions import NotFound
from yggdrasil.record import Record
from . import Page
class Root(Page):
    """Introspection page rendering the application's routing table."""

    def __init__(self, urlmap):
        self.urlmap = urlmap

    def render_rule(self, request, rule):
        """Build a Record describing a single routing rule."""
        info = Record()
        info.endpoint = rule.endpoint
        # Resolve the dotted endpoint path against this page tree; an
        # unresolvable endpoint simply yields no description.
        segments = rule.endpoint.split(".")
        try:
            target = reduce(getitem, segments, self)
        except KeyError:
            target = None
        if rule.methods is not None:
            info.methods = tuple(rule.methods)
        doc = getdoc(target)
        if doc is not None:
            info.description = doc
        return info

    def on_intro(self, request):
        """
        This page shows current routing table with endpoints and descriptions.
        """
        result = Record()
        result.rules = rules = Record()
        for rule in self.urlmap.iter_rules():
            rules[rule.rule] = self.render_rule(request, rule)
        return result
class RootRefs(Root):
    """Routing-table page that additionally includes each rule's URL."""

    def render_rule(self, request, rule):
        result = super().render_rule(request, rule)
        try:
            result.ref = self.build_url(request, rule.endpoint)
        except Exception:
            # Best effort: some endpoints cannot be built into URLs (e.g.
            # rules with unfilled parameters).  The previous bare "except"
            # also swallowed SystemExit and KeyboardInterrupt.
            pass
return result | {
"repo_name": "Evgenus/yggdrasil",
"path": "yggdrasil/app/pages/root.py",
"copies": "1",
"size": "1356",
"license": "mit",
"hash": -106253939665195860,
"line_mean": 24.6037735849,
"line_max": 78,
"alpha_frac": 0.6091445428,
"autogenerated": false,
"ratio": 4.490066225165563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5599210767965563,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import getitem
from typing import Any, Callable, Sequence, Tuple
from basic_utils.seq_helpers import butlast, last
# Public API of this module.  ``filter_keys`` and ``filter_values`` are
# documented (doctested) helpers and are now exported alongside the rest.
__all__ = [
    'filter_keys',
    'filter_values',
    'get_in_dict',
    'get_keys',
    'prune_dict',
    'set_in_dict',
]
def get_keys(d: dict, keys: Sequence[Any], default: Any = None) -> Tuple:
    """
    Returns multiple values for keys in a dictionary

    Empty key values will be None by default

    >>> d = {'x': 24, 'y': 25}
    >>> get_keys(d, ('x', 'y', 'z'))
    (24, 25, None)
    """
    # ``default`` is a plain fallback value, not a callable factory; the
    # previous ``Callable`` annotation was misleading.
    return tuple(d.get(key, default) for key in keys)
def get_in_dict(d: dict, keys: Sequence[str]) -> Any:
    """
    Retrieve nested key from dictionary

    >>> d = {'a': {'b': {'c': 3}}}
    >>> get_in_dict(d, ('a', 'b', 'c'))
    3
    """
    current = d
    for key in keys:
        current = current[key]
    return current
def set_in_dict(d: dict, keys: Sequence[str], value: Any) -> None:
    """
    Sets a value inside a nested dictionary

    >>> d = {'a': {'b': {'c': 3}}}
    >>> set_in_dict(d, ('a', 'b', 'c'), 10)
    >>> d
    {'a': {'b': {'c': 10}}}
    """
    # Walk down to the parent of the final key, then assign in place.
    parent = get_in_dict(d, butlast(keys))
    parent[last(keys)] = value
def prune_dict(d: dict) -> dict:
    """
    Return a new dictionary with all falsy values removed.

    >>> prune_dict({'a': [], 'b': 2, 'c': False})
    {'b': 2}
    """
    return filter_values(d, bool)


def filter_keys(d: dict, predicate: Callable) -> dict:
    """
    Return the subset of *d* whose keys satisfy *predicate*.

    >>> filter_keys({'Lisa': 8, 'Marge': 36}, lambda x: len(x) > 4)
    {'Marge': 36}
    """
    return {key: d[key] for key in d if predicate(key)}


def filter_values(d: dict, predicate: Callable) -> dict:
    """
    Return the subset of *d* whose values satisfy *predicate*.

    >>> d = {'Homer': 39, 'Marge': 36, 'Bart': 10}
    >>> filter_values(d, lambda x: x < 20)
    {'Bart': 10}
    """
    return dict((key, val) for key, val in d.items() if predicate(val))
| {
"repo_name": "Jackevansevo/basic-utils",
"path": "basic_utils/dict_helpers.py",
"copies": "1",
"size": "1903",
"license": "mit",
"hash": 4739013772402712000,
"line_mean": 23.0886075949,
"line_max": 76,
"alpha_frac": 0.5496584341,
"autogenerated": false,
"ratio": 3.0792880258899675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9127302534295408,
"avg_score": 0.0003287851389117212,
"num_lines": 79
} |
from functools import reduce
from operator import getitem
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.wrappers import Request
from werkzeug import routing
from yggdrasil.record import Record
class WebApp:
    """Minimal WSGI application dispatching dotted endpoints from a URL map."""

    def __init__(self, urlmap, namespace):
        self.urlmap = urlmap
        self.namespace = namespace

    def dispatch(self, request):
        """Match the request, resolve its endpoint path and invoke it.

        HTTPExceptions raised while matching or handling are returned as
        the response (werkzeug exceptions are themselves WSGI apps).
        """
        adapter = self.urlmap.bind_to_environ(request.environ)
        try:
            path, values = adapter.match()
        except HTTPException as error:
            return error
        # Walk the dotted endpoint path down the namespace tree.
        handler = reduce(getitem, path.split("."), self.namespace)
        request.urls = adapter
        try:
            return handler(request, **values)
        except HTTPException as error:
            return error

    def wsgi_app(self, environ, start_response):
        """WSGI entry point: build a Request, dispatch, run the response."""
        response = self.dispatch(Request(environ))
        return response(environ, start_response)

    def __call__(self, environ, start_response):
        return self.wsgi_app(environ, start_response)
class Rules(routing.RuleFactory):
    """RuleFactory that flattens other factories into detached rule copies."""

    def __init__(self, *rules):
        self.rules = rules

    def get_rules(self, map):
        for factory in self.rules:
            for bound_rule in factory.get_rules(map):
                # empty() detaches the rule from its original map so it
                # can be re-bound by the caller.
                yield bound_rule.empty()
class Wrapper:
    """Wrap *content* so item lookups stay wrapped and calls pass through
    *method*.

    ``w[name]`` returns a new Wrapper around ``content[name]`` (raising
    KeyError when the stored value is None), and ``w(*a, **kw)`` returns
    ``method(content(*a, **kw))``.
    """

    def __init__(self, method, content):
        self.method = method
        self.content = content

    def __getitem__(self, name):
        inner = self.content[name]
        if inner is None:
            raise KeyError(name)
        return Wrapper(self.method, inner)

    def __call__(self, *args, **kwargs):
        return self.method(self.content(*args, **kwargs))
| {
"repo_name": "Evgenus/yggdrasil",
"path": "yggdrasil/app/__init__.py",
"copies": "1",
"size": "1753",
"license": "mit",
"hash": -3152881796342467000,
"line_mean": 28.7118644068,
"line_max": 62,
"alpha_frac": 0.6240730177,
"autogenerated": false,
"ratio": 4.3606965174129355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5484769535112936,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import iand, ior
from string import punctuation
import django
from django.apps import apps
from django.contrib.sites.managers import CurrentSiteManager as DjangoCSM
from django.core.exceptions import ImproperlyConfigured
from django.db.models import CharField, Manager, Q, TextField
from django.db.models.manager import ManagerDescriptor
from django.db.models.query import QuerySet
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from mezzanine.conf import settings
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import home_slug
if django.VERSION >= (1, 10):

    class ManagerDescriptor(ManagerDescriptor):
        """
        This class exists purely to skip the abstract model check
        in the __get__ method of Django's ManagerDescriptor.
        """

        def __get__(self, instance, cls=None):
            # Managers are class-level attributes; accessing one through an
            # instance is an error, mirroring Django's own behaviour.
            if instance is not None:
                raise AttributeError(
                    "Manager isn't accessible via %s instances" % cls.__name__
                )
            # In ManagerDescriptor.__get__, an exception is raised here
            # if cls is abstract
            if cls._meta.swapped:
                raise AttributeError(
                    "Manager isn't available; "
                    "'%s.%s' has been swapped for '%s'"
                    % (
                        cls._meta.app_label,
                        cls._meta.object_name,
                        cls._meta.swapped,
                    )
                )
            return cls._meta.managers_map[self.manager.name]
class PublishedManager(Manager):
    """
    Provides filter for restricting items returned by status and
    publish date when the given user is not a staff member.
    """

    def published(self, for_user=None):
        """
        For non-staff users, return items with a published status and
        whose publish and expiry dates fall before and after the
        current date when specified.
        """
        from mezzanine.core.models import CONTENT_STATUS_PUBLISHED

        # Staff see everything regardless of status or dates.
        if for_user is not None and for_user.is_staff:
            return self.all()
        already_published = Q(publish_date__lte=now()) | Q(publish_date__isnull=True)
        not_yet_expired = Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True)
        return self.filter(
            already_published,
            not_yet_expired,
            Q(status=CONTENT_STATUS_PUBLISHED),
        )

    def get_by_natural_key(self, slug):
        return self.get(slug=slug)
def search_fields_to_dict(fields):
    """
    In ``SearchableQuerySet`` and ``SearchableManager``, search fields
    can either be a sequence, or a dict of fields mapped to weights.
    This function converts sequences to a dict mapped to even weights,
    so that we're consistently dealing with a dict of fields mapped to
    weights, eg: ("title", "content") -> {"title": 1, "content": 1}

    Always returns a dict: weighted pair sequences such as
    (("title", 5),) are also normalised to {"title": 5} rather than
    passed through verbatim (callers use the result's dict methods).
    """
    if not fields:
        return {}
    try:
        weighted = dict(fields)
        int(list(weighted.values())[0])
    except (TypeError, ValueError):
        # A plain sequence of field names: weight everything evenly.
        return dict(zip(fields, [1] * len(fields)))
    return weighted
class SearchableQuerySet(QuerySet):
"""
QuerySet providing main search functionality for
``SearchableManager``.
"""
def __init__(self, *args, **kwargs):
self._search_ordered = False
self._search_terms = set()
self._search_fields = kwargs.pop("search_fields", {})
super().__init__(*args, **kwargs)
def search(self, query, search_fields=None):
"""
Build a queryset matching words in the given search query,
treating quoted terms as exact phrases and taking into
account + and - symbols as modifiers controlling which terms
to require and exclude.
"""
# ### DETERMINE FIELDS TO SEARCH ###
# Use search_fields arg if given, otherwise use search_fields
# initially configured by the manager class.
if search_fields:
self._search_fields = search_fields_to_dict(search_fields)
if not self._search_fields:
return self.none()
# ### BUILD LIST OF TERMS TO SEARCH FOR ###
# Remove extra spaces, put modifiers inside quoted terms.
terms = (
" ".join(query.split())
.replace("+ ", "+")
.replace('+"', '"+')
.replace("- ", "-")
.replace('-"', '"-')
.split('"')
)
# Strip punctuation other than modifiers from terms and create
# terms list, first from quoted terms and then remaining words.
terms = [
("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
for t in terms[1::2] + "".join(terms[::2]).split()
]
# Remove stop words from terms that aren't quoted or use
# modifiers, since words with these are an explicit part of
# the search query. If doing so ends up with an empty term
# list, then keep the stop words.
terms_no_stopwords = [t for t in terms if t.lower() not in settings.STOP_WORDS]
get_positive_terms = lambda terms: [
t.lower().strip(punctuation) for t in terms if t[0:1] != "-"
]
positive_terms = get_positive_terms(terms_no_stopwords)
if positive_terms:
terms = terms_no_stopwords
else:
positive_terms = get_positive_terms(terms)
# Append positive terms (those without the negative modifier)
# to the internal list for sorting when results are iterated.
if not positive_terms:
return self.none()
else:
self._search_terms.update(positive_terms)
# ### BUILD QUERYSET FILTER ###
# Create the queryset combining each set of terms.
excluded = [
reduce(
iand,
[
~Q(**{"%s__icontains" % f: t[1:]})
for f in self._search_fields.keys()
],
)
for t in terms
if t[0:1] == "-"
]
required = [
reduce(
ior,
[Q(**{"%s__icontains" % f: t[1:]}) for f in self._search_fields.keys()],
)
for t in terms
if t[0:1] == "+"
]
optional = [
reduce(
ior, [Q(**{"%s__icontains" % f: t}) for f in self._search_fields.keys()]
)
for t in terms
if t[0:1] not in "+-"
]
queryset = self
if excluded:
queryset = queryset.filter(reduce(iand, excluded))
if required:
queryset = queryset.filter(reduce(iand, required))
# Optional terms aren't relevant to the filter if there are
# terms that are explicitly required.
elif optional:
queryset = queryset.filter(reduce(ior, optional))
return queryset.distinct()
def _clone(self, *args, **kwargs):
"""
Ensure attributes are copied to subsequent queries.
"""
clone = super()._clone(*args, **kwargs)
clone._search_terms = self._search_terms
clone._search_fields = self._search_fields
clone._search_ordered = self._search_ordered
return clone
def order_by(self, *field_names):
"""
Mark the filter as being ordered if search has occurred.
"""
if not self._search_ordered:
self._search_ordered = len(self._search_terms) > 0
return super().order_by(*field_names)
    def annotate_scores(self):
        """
        If search has occurred and no explicit ordering has occurred,
        decorate each result with ``result_count`` -- the weighted number
        of occurrences of the search terms -- so results can be sorted
        by relevance by the caller.

        In the case of search fields that span model relationships, we
        cannot accurately match occurrences without some very
        complicated traversal code, which we won't attempt. So in this
        case, namely when there are no matches for a result (count=0),
        and search fields contain relationships (double underscores),
        we assume one match for one of the fields, and use the average
        weight of all search fields with relationships.
        """
        results = super().iterator()
        if self._search_terms and not self._search_ordered:
            # Materialize so scores can be attached and re-iterated.
            results = list(results)
            for i, result in enumerate(results):
                count = 0
                related_weights = []
                for (field, weight) in self._search_fields.items():
                    if "__" in field:
                        # Relationship-spanning field: remember its weight
                        # for the fallback average below.
                        related_weights.append(weight)
                    for term in self._search_terms:
                        # getattr returns None for spanning fields, which
                        # don't exist as plain attributes on the result.
                        field_value = getattr(result, field, None)
                        if field_value:
                            count += field_value.lower().count(term) * weight
                if not count and related_weights:
                    # No direct matches: assume one match in a related
                    # field at the average related weight.
                    count = int(sum(related_weights) / len(related_weights))
                if result.publish_date:
                    # Decay the score by age so newer content ranks higher.
                    age = (now() - result.publish_date).total_seconds()
                    if age > 0:
                        count = count / age ** settings.SEARCH_AGE_SCALE_FACTOR
                results[i].result_count = count
            return iter(results)
        return results
class SearchableManager(Manager):
    """
    Manager providing a chainable search queryset.
    Adapted from http://www.djangosnippets.org/snippets/562/
    The ``search`` method supports spanning across models that subclass
    the model being used to search.
    """
    def __init__(self, *args, **kwargs):
        # Optional explicit field -> weight mapping; when empty, fields
        # are discovered lazily in ``get_search_fields``.
        self._search_fields = kwargs.pop("search_fields", {})
        super().__init__(*args, **kwargs)
    def get_search_fields(self):
        """
        Returns the search field names mapped to weights as a dict.
        Used in ``get_queryset`` below to tell ``SearchableQuerySet``
        which search fields to use. Also used by ``DisplayableAdmin``
        to populate Django admin's ``search_fields`` attribute.

        Search fields can be populated via
        ``SearchableManager.__init__``, which then get stored in
        ``SearchableManager._search_fields``, which serves as an
        approach for defining an explicit set of fields to be used.

        Alternatively and more commonly, ``search_fields`` can be
        defined on models themselves. In this case, we look at the
        model and all its base classes, and build up the search
        fields from all of those, so the search fields are implicitly
        built up from the inheritance chain.

        Finally if no search fields have been defined at all, we
        fall back to any fields that are ``CharField`` or ``TextField``
        instances.
        """
        search_fields = self._search_fields.copy()
        if not search_fields:
            # Walk the MRO base-first so subclasses override fields
            # declared on their parents.
            for cls in reversed(self.model.__mro__):
                super_fields = getattr(cls, "search_fields", {})
                search_fields.update(search_fields_to_dict(super_fields))
        if not search_fields:
            # Fallback: every textual field on the model, default weights.
            search_fields = []
            for f in self.model._meta.get_fields():
                if isinstance(f, (CharField, TextField)):
                    search_fields.append(f.name)
            search_fields = search_fields_to_dict(search_fields)
        return search_fields
    def get_queryset(self):
        # Hand the resolved fields to the queryset, which performs the
        # actual filtering and scoring.
        search_fields = self.get_search_fields()
        return SearchableQuerySet(self.model, search_fields=search_fields)
    def contribute_to_class(self, model, name):
        """
        Newer versions of Django explicitly prevent managers being
        accessed from abstract classes, which is behaviour the search
        API has always relied on. Here we reinstate it.
        """
        super().contribute_to_class(model, name)
        setattr(model, name, ManagerDescriptor(self))
    def search(self, *args, **kwargs):
        """
        Proxy to queryset's search method for the manager's model and
        any models that subclass from this manager's model if the
        model is abstract.
        """
        if not settings.SEARCH_MODEL_CHOICES:
            # No choices defined - build a list of leaf models (those
            # without subclasses) that inherit from Displayable.
            models = [m for m in apps.get_models() if issubclass(m, self.model)]
            parents = reduce(ior, [set(m._meta.get_parent_list()) for m in models])
            models = [m for m in models if m not in parents]
        elif getattr(self.model._meta, "abstract", False):
            # When we're combining model subclasses for an abstract
            # model (eg Displayable), we only want to use models that
            # are represented by the ``SEARCH_MODEL_CHOICES`` setting.
            # Now this setting won't contain an exact list of models
            # we should use, since it can define superclass models such
            # as ``Page``, so we check the parent class list of each
            # model when determining whether a model falls within the
            # ``SEARCH_MODEL_CHOICES`` setting.
            search_choices = set()
            models = set()
            parents = set()
            errors = []
            for name in settings.SEARCH_MODEL_CHOICES:
                try:
                    model = apps.get_model(*name.split(".", 1))
                except LookupError:
                    errors.append(name)
                else:
                    search_choices.add(model)
            if errors:
                raise ImproperlyConfigured(
                    "Could not load the model(s) "
                    "%s defined in the 'SEARCH_MODEL_CHOICES' setting."
                    % ", ".join(errors)
                )
            for model in apps.get_models():
                # Model is actually a subclass of what we're
                # searching (eg Displayable)
                is_subclass = issubclass(model, self.model)
                # Model satisfies the search choices list - either
                # there are no search choices, model is directly in
                # search choices, or its parent is.
                this_parents = set(model._meta.get_parent_list())
                in_choices = not search_choices or model in search_choices
                in_choices = in_choices or this_parents & search_choices
                if is_subclass and (in_choices or not search_choices):
                    # Add to models we'll search. Also maintain a parent
                    # set, used below for further refinement of models
                    # list to search.
                    models.add(model)
                    parents.update(this_parents)
            # Strip out any models that are superclasses of models,
            # specifically the Page model which will generally be the
            # superclass for all custom content types, since if we
            # query the Page model as well, we will get duplicate
            # results.
            models -= parents
        else:
            models = [self.model]
        all_results = []
        user = kwargs.pop("for_user", None)
        for model in models:
            try:
                queryset = model.objects.published(for_user=user)
            except AttributeError:
                # Model has no ``published`` manager method; fall back
                # to the plain queryset.
                queryset = model.objects.get_queryset()
            all_results.extend(queryset.search(*args, **kwargs).annotate_scores())
        # Most relevant (highest weighted term count) first.
        return sorted(all_results, key=lambda r: r.result_count, reverse=True)
class CurrentSiteManager(DjangoCSM):
    """
    Extends Django's site manager to first look up site by ID stored in
    the request, the session, then domain for the current request
    (accessible via threadlocals in ``mezzanine.core.request``), the
    environment variable ``MEZZANINE_SITE_ID`` (which can be used by
    management commands with the ``--site`` arg, finally falling back
    to ``settings.SITE_ID`` if none of those match a site.
    """
    use_in_migrations = False
    def __init__(self, field_name=None, *args, **kwargs):
        # Deliberately skips DjangoCSM.__init__ (super(DjangoCSM, self)
        # dispatches past it) so the parent's setup doesn't run here.
        # NOTE(review): the ``self.__field_name`` mangling only lines up
        # with the parent's private attribute because this class shares
        # Django's class name ``CurrentSiteManager`` -- confirm intent.
        super(DjangoCSM, self).__init__(*args, **kwargs)
        self.__field_name = field_name
        self.__is_validated = False
    def get_queryset(self):
        # NOTE(review): ``__is_validated`` is never set to True in this
        # class, so ``_get_field_name`` appears to run on every call
        # unless the parent sets it as a side effect -- verify.
        if not self.__is_validated:
            self._get_field_name()
        # Restrict every query to the site resolved for this request.
        lookup = {self.__field_name + "__id__exact": current_site_id()}
        return super(DjangoCSM, self).get_queryset().filter(**lookup)
class DisplayableManager(CurrentSiteManager, PublishedManager, SearchableManager):
    """
    Manually combines ``CurrentSiteManager``, ``PublishedManager``
    and ``SearchableManager`` for the ``Displayable`` model.
    """
    def url_map(self, for_user=None, **kwargs):
        """
        Returns a dictionary of urls mapped to Displayable subclass
        instances, including a fake homepage instance if none exists.
        Used in ``mezzanine.core.sitemaps``.
        """
        # Stand-in object so the homepage always has an entry, even when
        # no Displayable instance provides one.
        class Home:
            title = _("Home")
        home = Home()
        setattr(home, "get_absolute_url", home_slug)
        items = {home.get_absolute_url(): home}
        for model in apps.get_models():
            if issubclass(model, self.model):
                if hasattr(model.objects, "published"):
                    # Exclude redirect-style slugs that point off-site.
                    for item in (
                        model.objects.published(for_user=for_user)
                        .filter(**kwargs)
                        .exclude(slug__startswith="http://")
                        .exclude(slug__startswith="https://")
                    ):
                        items[item.get_absolute_url()] = item
        return items
| {
"repo_name": "stephenmcd/mezzanine",
"path": "mezzanine/core/managers.py",
"copies": "2",
"size": "17672",
"license": "bsd-2-clause",
"hash": -1636184606120646400,
"line_mean": 38.7123595506,
"line_max": 88,
"alpha_frac": 0.5766183794,
"autogenerated": false,
"ratio": 4.552292632663575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6128911012063576,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import ior
from dal import autocomplete
from django.contrib.auth.decorators import user_passes_test
from django.http.response import JsonResponse
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views.generic import ListView, DetailView
from .forms import SearchForm
from .models import Person, SocialField
@method_decorator(user_passes_test(lambda u: u.is_staff), name='dispatch')
class PersonAutoComplete(autocomplete.Select2QuerySetView):
    """Staff-only DAL autocomplete backing Person pickers in the admin."""
    def get_queryset(self):
        """Return people whose nomina starts with the typed query, sorted."""
        matches = Person.objects.filter(nomina__istartswith=self.q)
        return matches.order_by('nomina')
class NodeEdgeListView(ListView):
    """Provide node-edge json for use """
    model = Person
    def render_to_json_response(self, **kwargs):
        """Contextless rendering of queryset from get_data"""
        return JsonResponse(self.get_data())
    def assign_class(self, person):
        """Assign classes to a group based on fields"""
        # NOTE: This is dumb but very safe that the group will be set in some way
        Y = SocialField.DEFINITE
        # Deliberate cascade of plain ``if``s (no elif): later checks
        # override earlier ones, so the highest applicable rank wins.
        group = 0
        if person.citizen == Y:
            group = 'citizen'
        if person.equestrian == Y:
            group = 'equestrian'
        if person.senatorial == Y:
            group = 'senatorial'
        if person.consular == Y:
            group = 'consular'
        return group
    def get_data(self):
        """Get data for people and their relationships and format for d3.js"""
        people = self.get_queryset()
        # filter out a nodelist in the format d3v4 expects
        # Pliny himself is seeded as the ego node of the network.
        node_edge_dict = {
            'nodes': [{'id': 'Gaius Plinius Secundus', 'group': 'consular'}],
            'links': [],
        }
        # generate nodes
        for person in people:
            node_edge_dict['nodes'].append(
                {'id': person.nomina, 'group': self.assign_class(person)}
            )
            # add all links to pliny based on number of letters
            node_edge_dict['links'].append(
                {
                    'source': 'Gaius Plinius Secundus',
                    'target': person.nomina,
                    'weight': (
                        person.letters_to.count() +
                        person.mentioned_in.count()
                    ),
                }
            )
        # make all Pliny links reciprocal
        reciprocal_links = []
        for link in node_edge_dict['links']:
            reciprocal_links.append({
                'source': link['target'],
                'target': link['source'],
                'weight': link['weight']
            })
        node_edge_dict['links'] += reciprocal_links
        """
        # Pulled for now not needed for ego 1.5 model
        # relationships should be reciprocal if they need to be already
        for relationship in Relationship.objects.all():
            node_edge_dict['links'].append({
                'source': relationship.from_person.nomina,
                'target': relationship.to_person.nomina,
                'weight': 1,
            })
        comumienses = Person.objects.filter(from_comum=True)
        for outer in comumienses:
            for inner in comumienses:
                node_edge_dict['links'].append({
                    'source': outer.nomina,
                    'target': inner.nomina,
                    'weight': 1
                })
        # group all relationships by common source and target if they
        # aren't already
        grouped_edge_list = []
        for outer in node_edge_dict['links']:
            for inner in node_edge_dict['links']:
                if (outer['source'] == inner['source']) and \
                   (outer['target'] == inner['target']):
                    outer['weight'] = outer['weight'] + inner['weight']
                    grouped_edge_list.append(outer)
        node_edge_dict['links'] = [dict(t) for t in set([tuple(d.items())
                                   for d in grouped_edge_list])]
        """
        return node_edge_dict
    def render_to_response(self, context, **kwargs):
        # Always serve JSON regardless of the requested context.
        return self.render_to_json_response()
class SocialClassView(ListView):
    model = Person
    def render_to_json_response(self, **kwargs):
        """Contextless rendering of queryset from get_data"""
        return JsonResponse(self.get_data())
    def get_data(self):
        """Count people per social class, shaped for a Chart.js pie chart."""
        people = self.get_queryset()
        # Optional ?q=<book number> restricts to recipients of letters in
        # that book; non-numeric values are silently ignored.
        book = self.request.GET.get('q', '')
        if book:
            try:
                book = int(book)
                people = people.filter(letters_to__book=book)
            except ValueError:
                pass
        # data for Chart.js
        data = {
            'datasets': [{
                # get people as senatorial, equestrian, and citizen
                # (buckets are made mutually exclusive by the filters)
                'data': [
                    people.filter(senatorial='Y').count(),
                    people.filter(equestrian='Y', senatorial='N').count(),
                    people.filter(citizen='Y',
                                  equestrian='N', senatorial='N').count()
                ],
                'backgroundColor': [
                    'rgb(127, 63, 191)',
                    'rgb(191, 63, 63)',
                    'rgb(63, 191, 191)'
                ],
            }],
            'labels': [
                'Senatorial',
                'Equestrian',
                'Citizen',
            ]
        }
        return data
    def render_to_response(self, context, **kwargs):
        # Always serve JSON regardless of the requested context.
        return self.render_to_json_response()
| {
"repo_name": "bwhicks/PlinyProject",
"path": "backend/prosopography/views.py",
"copies": "1",
"size": "5725",
"license": "mit",
"hash": 8519941288550193000,
"line_mean": 34.5590062112,
"line_max": 81,
"alpha_frac": 0.5322270742,
"autogenerated": false,
"ratio": 4.310993975903615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5343221050103615,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import itemgetter
from pprint import pformat
from six import viewkeys, iteritems
from six.moves import map, zip
from toolz import curry, flip
from .sentinel import sentinel
@curry
def apply(f, *args, **kwargs):
    """Apply a function to arguments.

    Parameters
    ----------
    f : callable
        The function to call.
    *args, **kwargs
        Arguments to feed to the callable.

    Returns
    -------
    a : any
        The result of ``f(*args, **kwargs)``

    Examples
    --------
    >>> from toolz.curried.operator import add, sub
    >>> fs = add(1), sub(1)
    >>> tuple(map(apply, fs, (1, 2)))
    (2, -1)

    Class decorator

    >>> instance = apply
    >>> @instance
    ... class obj:
    ...     def f(self):
    ...         return 'f'
    ...
    >>> obj.f()
    'f'
    >>> issubclass(obj, object)
    Traceback (most recent call last):
    ...
    TypeError: issubclass() arg 1 must be a class
    >>> isinstance(obj, type)
    False

    See Also
    --------
    unpack_apply
    mapply
    """
    return f(*args, **kwargs)
# Alias for use as a class decorator: ``@instance`` replaces a class
# statement with an instance of that class.
instance = apply
def mapall(funcs, seq):
    """
    Map every function in ``funcs`` over ``seq``, concatenating results.

    Parameters
    ----------
    funcs : iterable[function]
        Sequence of functions to map over `seq`.
    seq : iterable
        Sequence over which to map funcs.

    Yields
    ------
    elem : object
        Concatenated result of mapping each ``func`` over ``seq``.

    Examples
    --------
    >>> list(mapall([lambda x: x + 1, lambda x: x - 1], [1, 2, 3]))
    [2, 3, 4, 0, 1, 2]
    """
    for func in funcs:
        yield from (func(elem) for elem in seq)
def same(*values):
    """
    True when every value equals the first one; vacuously True for
    zero or one argument.

    Examples
    --------
    >>> same(1, 1, 1, 1)
    True
    >>> same(1, 2, 1)
    False
    >>> same()
    True
    """
    if len(values) < 2:
        return True
    reference = values[0]
    return all(candidate == reference for candidate in values[1:])
def _format_unequal_keys(dicts):
return pformat([sorted(d.keys()) for d in dicts])
def dzip_exact(*dicts):
    """
    Zip dictionaries that all share exactly the same keys.

    Parameters
    ----------
    *dicts : iterable[dict]
        A sequence of dicts all sharing the same keys.

    Returns
    -------
    zipped : dict
        A dict whose keys are the union of all keys in *dicts, and whose
        values are tuples of length len(dicts) containing the result of
        looking up each key in each dict.

    Raises
    ------
    ValueError
        If dicts don't all have the same keys.

    Examples
    --------
    >>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4})
    >>> result == {'a': (1, 3), 'b': (2, 4)}
    True
    """
    if not same(*map(viewkeys, dicts)):
        raise ValueError(
            "dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts)
        )
    zipped = {}
    for key in dicts[0]:
        zipped[key] = tuple(d[key] for d in dicts)
    return zipped
def _gen_unzip(it, elem_len):
"""Helper for unzip which checks the lengths of each element in it.
Parameters
----------
it : iterable[tuple]
An iterable of tuples. ``unzip`` should map ensure that these are
already tuples.
elem_len : int or None
The expected element length. If this is None it is infered from the
length of the first element.
Yields
------
elem : tuple
Each element of ``it``.
Raises
------
ValueError
Raised when the lengths do not match the ``elem_len``.
"""
elem = next(it)
first_elem_len = len(elem)
if elem_len is not None and elem_len != first_elem_len:
raise ValueError(
'element at index 0 was length %d, expected %d' % (
first_elem_len,
elem_len,
)
)
else:
elem_len = first_elem_len
yield elem
for n, elem in enumerate(it, 1):
if len(elem) != elem_len:
raise ValueError(
'element at index %d was length %d, expected %d' % (
n,
len(elem),
elem_len,
),
)
yield elem
def unzip(seq, elem_len=None):
    """Unzip a length n sequence of length m sequences into m separate
    length n sequences.

    Parameters
    ----------
    seq : iterable[iterable]
        The sequence to unzip.
    elem_len : int, optional
        The expected length of each element of ``seq``. If not provided
        this will be inferred from the length of the first element of
        ``seq``. This can be used to ensure that code like
        ``a, b = unzip(seq)`` does not fail even when ``seq`` is empty.

    Returns
    -------
    seqs : iterable[iterable]
        The new sequences pulled out of the first iterable.

    Raises
    ------
    ValueError
        Raised when ``seq`` is empty and ``elem_len`` is not provided.
        Raised when elements of ``seq`` do not match the given
        ``elem_len`` or the length of the first element of ``seq``.

    Examples
    --------
    >>> seq = [('a', 1), ('b', 2), ('c', 3)]
    >>> cs, ns = unzip(seq)
    >>> cs
    ('a', 'b', 'c')
    >>> ns
    (1, 2, 3)

    Notes
    -----
    This function will force ``seq`` to completion.
    """
    transposed = tuple(zip(*_gen_unzip(map(tuple, seq), elem_len)))
    if transposed:
        return transposed
    # Empty input: only valid when the caller told us how many columns
    # to produce.
    if elem_len is None:
        raise ValueError("cannot unzip empty sequence without 'elem_len'")
    return ((),) * elem_len
# Module-private sentinel distinguishing "no argument given" from ``None``.
_no_default = sentinel('_no_default')
def getattrs(value, attrs, default=_no_default):
    """
    Chained ``getattr``: look each name in ``attrs`` up on the result of
    the previous lookup, starting from ``value``.

    If ``default`` is supplied, return it if any of the attribute lookups
    fail.

    Parameters
    ----------
    value : object
        Root of the lookup chain.
    attrs : iterable[str]
        Sequence of attributes to look up.
    default : object, optional
        Value to return if any of the lookups fail.

    Returns
    -------
    result : object
        Result of the lookup sequence.

    Examples
    --------
    >>> class EmptyObject(object):
    ...     pass
    ...
    >>> obj = EmptyObject()
    >>> obj.foo = EmptyObject()
    >>> obj.foo.bar = "value"
    >>> getattrs(obj, ('foo', 'bar'))
    'value'
    >>> getattrs(obj, ('foo', 'buzz'), 'default')
    'default'
    """
    for attr in attrs:
        try:
            value = getattr(value, attr)
        except AttributeError:
            if default is _no_default:
                raise
            return default
    return value
@curry
def set_attribute(name, value):
    """
    Decorator factory that sets attribute ``name`` to ``value`` on the
    decorated function without otherwise changing its behavior.

    Examples
    --------
    >>> @set_attribute('__name__', 'foo')
    ... def bar():
    ...     return 3
    ...
    >>> bar()
    3
    >>> bar.__name__
    'foo'
    """
    def decorator(func):
        setattr(func, name, value)
        return func
    return decorator
# Partial applications of ``set_attribute`` used as decorators for setting
# the ``__name__`` and ``__doc__`` properties of a decorated function,
# e.g. ``@with_name('foo')``.
with_name = set_attribute('__name__')
with_doc = set_attribute('__doc__')
def foldr(f, seq, default=_no_default):
    """Fold a function over a sequence with right associativity.

    Parameters
    ----------
    f : callable[any, any]
        The function to reduce the sequence with.
        The first argument will be the element of the sequence; the second
        argument will be the accumulator.
    seq : iterable[any]
        The sequence to reduce.
    default : any, optional
        The starting value to reduce with. If not provided, the sequence
        cannot be empty, and the last value of the sequence will be used.

    Returns
    -------
    folded : any
        The folded value.

    Notes
    -----
    With an arbitrary function, ``foldr`` expands like so:

    .. code-block:: python

       foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))

    For a more in depth discussion of left and right folds, see:
    `https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_

    .. note::

       For performance reasons it is best to pass a strict (non-lazy)
       sequence, for example, a list.

    See Also
    --------
    :func:`functools.reduce`
    :func:`sum`
    """
    # A right fold is a left fold over the reversed sequence with the
    # argument order of ``f`` flipped.
    if default is _no_default:
        return reduce(flip(f), reversed(seq))
    return reduce(flip(f), reversed(seq), default)
def invert(d):
    """
    Invert a dictionary into a dictionary of sets.

    >>> invert({'a': 1, 'b': 2, 'c': 1})  # doctest: +SKIP
    {1: {'a', 'c'}, 2: {'b'}}
    """
    inverted = {}
    for key, value in iteritems(d):
        inverted.setdefault(value, set()).add(key)
    return inverted
def keysorted(d):
    """Get the items from a dict, sorted by key.

    Example
    -------
    >>> keysorted({'c': 1, 'b': 2, 'a': 3})
    [('a', 3), ('b', 2), ('c', 1)]
    """
    pairs = list(iteritems(d))
    pairs.sort(key=itemgetter(0))
    return pairs
| {
"repo_name": "quantopian/zipline",
"path": "zipline/utils/functional.py",
"copies": "1",
"size": "10260",
"license": "apache-2.0",
"hash": -5660203899356712000,
"line_mean": 23.4285714286,
"line_max": 79,
"alpha_frac": 0.5478557505,
"autogenerated": false,
"ratio": 3.9085714285714284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49564271790714287,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import accumulate, combinations_with_replacement
from operator import mul
def lcg():
    """Linear Congruential Generator.

    Iterates t = (615949*t + 797807) mod 2**20 from t = 0 and yields
    t - 2**19, i.e. pseudo-random values in [-2**19, 2**19).
    """
    # Was ``reduce(mul, (2 for _ in range(20)))`` -- a needlessly
    # convoluted spelling of 2**20.
    modulus = 2 ** 20
    t = 0
    while True:
        t = (615949 * t + 797807) % modulus
        yield t - modulus // 2
def triangle_positions(num_rows):
    """Yield every (row, position_in_row) cell of the triangle, row-major."""
    for row in range(num_rows):
        yield from ((row, pos) for pos in range(row + 1))
def build_triangle(num_rows):
    """Fill a triangle row by row with consecutive LCG pseudo-random values."""
    rows = [[] for _ in range(num_rows)]
    generator = lcg()
    for row, _pos in triangle_positions(num_rows):
        rows[row].append(next(generator))
    return rows
def build_triangle_eg():
    """Return the small worked-example triangle from the problem statement."""
    rows = (
        (15,),
        (-14, -7),
        (20, -13, -5),
        (-3, 8, 23, -26),
        (1, -4, -5, -18, 5),
        (-16, 31, 2, 9, 28, 3),
    )
    return [list(row) for row in rows]
def build_acc(seq):
    """
    Prefix sums of ``seq``, e.g.:
    [a, b, c] => [a, a+b, a+b+c]

    Replaces the hand-rolled accumulator loop with the standard-library
    ``itertools.accumulate`` (added to this module's imports).
    """
    return list(accumulate(seq))
def sum_subsequence(acc, i, j):
    """
    Sum of the subsequence from index i to j (inclusive, i <= j), given
    ``acc``, the prefix-sum accumulation of the underlying sequence.
    """
    lower = 0 if i == 0 else acc[i - 1]
    return acc[j] - lower
NUM_ROWS = 1000
triangle = build_triangle_eg()
# NOTE(review): the example triangle above is immediately overwritten --
# looks like leftover debugging; kept as-is.
triangle = build_triangle(NUM_ROWS)
# One prefix-sum list per row allows O(1) row-segment sums below.
acc = [build_acc(row) for row in triangle]
min_s = float("inf")
# Brute force: every apex (row, pos) grows a sub-triangle downwards,
# extending its total one row at a time via prefix-sum lookups.
for row, pos_in_row in triangle_positions(NUM_ROWS):
    #print(row, pos_in_row)
    s = 0
    for height in range(NUM_ROWS - row):
        s += sum_subsequence(acc[row + height], pos_in_row, pos_in_row + height)
        #print("", height, s)
    min_s = min(min_s, s)
print("MIN_S:", min_s)
| {
"repo_name": "peterstace/project-euler",
"path": "OLD_PY_CODE/project_euler_old_old/150/150.py",
"copies": "1",
"size": "1967",
"license": "unlicense",
"hash": 7294832828322204000,
"line_mean": 22.987804878,
"line_max": 80,
"alpha_frac": 0.5566853076,
"autogenerated": false,
"ratio": 3.1123417721518987,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9088765037861151,
"avg_score": 0.01605240837814965,
"num_lines": 82
} |
from functools import reduce
from operator import mul
from quantecon import cartesian
import numpy as np
from numpy import zeros
from typing import TypeVar, Generic, Dict
# Type variables parameterizing ProductGrid's two component grids.
T = TypeVar("T")
S = TypeVar("S")
def prod(l):
    # Product of the elements of ``l``.  The 1.0 seed keeps the result a
    # float and makes the empty product 1.0, matching reduce(mul, l, 1.0).
    result = 1.0
    for factor in l:
        result = result * factor
    return result
from dolo.numeric.misc import mlinspace
class Grid:
    """Common interface shared by all grid variants below; concrete
    subclasses populate ``self.__nodes__`` with a 2d array of nodes."""

    def __mul__(self, rgrid):
        """``g1 * g2`` concatenates two grids via :func:`cat_grids`."""
        return cat_grids(self, rgrid)

    @property
    def nodes(self):
        """Array of grid nodes, one row per node."""
        return self.__nodes__

    @property
    def n_nodes(self):
        """Number of nodes in the grid."""
        return self.nodes.shape[0]

    def node(self, i):
        """Return the i-th node as a 1d array."""
        return self.nodes[i, :]
class ProductGrid(Grid, Generic[T, S]):
    """Pair of grids addressable by name through ``pg['name']``."""

    def __init__(self, g1: T, g2: S, names=None):
        self.grids = [g1, g2]
        self.names = names

    def __getitem__(self, v):
        """Look up a component grid by its name."""
        position = self.names.index(v)
        return self.grids[position]

    def __repr__(self):
        # e.g. "GridA × GridB"
        return " × ".join(repr(component) for component in self.grids)
class EmptyGrid(Grid):
    """Grid with no nodes; behaves as the neutral element for ``+``."""

    type = "empty"

    @property
    def nodes(self):
        return None

    @property
    def n_nodes(self):
        return 0

    def node(self, i):
        # There are no nodes to return.
        return None

    def __add__(self, g):
        # empty + g == g
        return g
class PointGrid(Grid):
    """Degenerate grid consisting of a single point."""

    type = "point"

    def __init__(self, point):
        self.point = np.array(point)

    @property
    def nodes(self):
        # Mirrors the original behaviour: the point is stored on
        # ``self.point`` and not exposed through the nodes interface.
        return None

    @property
    def n_nodes(self):
        return 1

    def node(self, i):
        return None
class UnstructuredGrid(Grid):
    """Grid defined by an arbitrary collection of nodes (one per row)."""

    type = "unstructured"

    def __init__(self, nodes):
        points = np.array(nodes, dtype=float)
        self.min = points.min(axis=0)
        self.max = points.max(axis=0)
        self.__nodes__ = points
        self.d = len(self.min)  # dimension of the ambient space
class CartesianGrid(Grid):
    # Abstract marker base for the cartesian grid variants below.
    # NOTE(review): ``cat_grids`` and the __main__ demo call
    # ``CartesianGrid(min, max, n)``, but no such __init__ exists here --
    # looks broken; confirm against upstream history.
    pass
class UniformCartesianGrid(CartesianGrid):
    """Regular grid on the box [min, max] with ``n`` points per dimension
    (20 per dimension when ``n`` is not given)."""

    type = "UniformCartesian"

    def __init__(self, min, max, n=[]):
        # NOTE: the mutable default is never mutated, so it is safe;
        # kept for interface compatibility.
        self.d = len(min)
        # this should be a tuple
        self.min = np.array(min, dtype=float)
        self.max = np.array(max, dtype=float)
        if len(n) == 0:
            # Bug fix: was ``np.zeros(n, dtype=int) + 20`` which, with the
            # default ``n=[]``, builds a 0-d array instead of one entry
            # per dimension.
            self.n = np.zeros(self.d, dtype=int) + 20
        else:
            self.n = np.array(n, dtype=int)
        # this should be done only on request.
        self.__nodes__ = mlinspace(self.min, self.max, self.n)

    def __add__(self, g):
        """Concatenate two uniform cartesian grids dimension-wise."""
        if not isinstance(g, UniformCartesianGrid):
            raise Exception("Not implemented.")
        n = np.array(tuple(self.n) + tuple(g.n))
        # Bug fix: min/max previously concatenated ``self``'s bounds with
        # themselves instead of appending ``g``'s bounds.
        min = np.array(tuple(self.min) + tuple(g.min))
        max = np.array(tuple(self.max) + tuple(g.max))
        return UniformCartesianGrid(min, max, n)

    def __numba_repr__(self):
        """(min, max, n) triple per dimension, for numba interop."""
        return tuple([(self.min[i], self.max[i], self.n[i]) for i in range(self.d)])
class NonUniformCartesianGrid(CartesianGrid):
    """Cartesian product grid whose 1d node lists may be unevenly spaced."""

    type = "NonUniformCartesian"

    def __init__(self, list_of_nodes):
        axes = [np.array(axis) for axis in list_of_nodes]
        self.min = [min(axis) for axis in axes]
        self.max = [max(axis) for axis in axes]
        self.n = np.array([len(axis) for axis in axes])
        # The full tensor product is computed eagerly; could be deferred.
        self.__nodes__ = cartesian(axes)
        self.list_of_nodes = axes  # per-dimension node arrays

    def __add__(self, g):
        """Concatenate the per-dimension node lists of two grids."""
        if not isinstance(g, NonUniformCartesianGrid):
            raise Exception("Not implemented.")
        return NonUniformCartesianGrid(self.list_of_nodes + g.list_of_nodes)

    def __numba_repr__(self):
        # Fresh arrays, one per dimension, for numba interop.
        return tuple([np.array(axis) for axis in self.list_of_nodes])
class SmolyakGrid(Grid):
    # Sparse (Smolyak) grid on the box [min, max]; construction is
    # delegated to the ``interpolation`` package, imported lazily so it
    # is only a dependency when this class is actually used.
    type = "Smolyak"
    def __init__(self, min, max, mu=2):
        from interpolation.smolyak import SmolyakGrid as ISmolyakGrid
        min = np.array(min)
        max = np.array(max)
        self.min = min
        self.max = max
        self.mu = mu  # Smolyak accuracy/level parameter
        d = len(min)
        sg = ISmolyakGrid(d, mu, lb=min, ub=max)
        self.sg = sg
        self.d = d
        self.__nodes__ = sg.grid
def cat_grids(grid_1, grid_2):
    """Concatenate two grids dimension-wise (implements ``Grid.__mul__``).

    An ``EmptyGrid`` on either side acts as a neutral element.
    """
    if isinstance(grid_1, EmptyGrid):
        return grid_2
    # Robustness: make the empty grid neutral on the right as well
    # (previously fell through to the exception).
    if isinstance(grid_2, EmptyGrid):
        return grid_1
    if isinstance(grid_1, CartesianGrid) and isinstance(grid_2, CartesianGrid):
        min = np.concatenate([grid_1.min, grid_2.min])
        max = np.concatenate([grid_1.max, grid_2.max])
        n = np.concatenate([grid_1.n, grid_2.n])
        # Bug fix: the bare ``CartesianGrid`` base has no __init__ taking
        # (min, max, n), so the previous call raised TypeError; construct
        # the concrete uniform variant instead.
        return UniformCartesianGrid(min, max, n)
    else:
        raise Exception("Not Implemented.")
# compat
def node(grid, i):
    """Free-function alias for ``grid.node(i)`` (backward compatibility)."""
    return grid.node(i)


def nodes(grid):
    """Free-function alias for ``grid.nodes``."""
    return grid.nodes


def n_nodes(grid):
    """Free-function alias for ``grid.n_nodes``."""
    return grid.n_nodes
if __name__ == "__main__":
    # Smoke-test demo exercising each grid type and the compat helpers.
    print("Cartsian Grid")
    # NOTE(review): ``CartesianGrid`` has no __init__ accepting
    # (min, max, n) in this module -- this demo line looks like it
    # predates the Uniform/NonUniform split; confirm before relying on it.
    grid = CartesianGrid([0.1, 0.3], [9, 0.4], [50, 10])
    print(grid.nodes)
    print(nodes(grid))
    print("UnstructuredGrid")
    ugrid = UnstructuredGrid([[0.1, 0.3], [9, 0.4], [50, 10]])
    print(nodes(ugrid))
    print(node(ugrid, 0))
    print(n_nodes(ugrid))
    print("Non Uniform CartesianGrid")
    ugrid = NonUniformCartesianGrid([[0.1, 0.3], [9, 0.4], [50, 10]])
    print(nodes(ugrid))
    print(node(ugrid, 0))
    print(n_nodes(ugrid))
    print("Smolyak Grid")
    sg = SmolyakGrid([0.1, 0.2], [1.0, 2.0], 2)
    print(nodes(sg))
    print(node(sg, 1))
    print(n_nodes(sg))
| {
"repo_name": "EconForge/dolo",
"path": "dolo/numeric/grids.py",
"copies": "1",
"size": "5197",
"license": "bsd-2-clause",
"hash": 7666026254038726000,
"line_mean": 21.3004291845,
"line_max": 84,
"alpha_frac": 0.577752117,
"autogenerated": false,
"ratio": 3.1936078672403196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42713599842403194,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import mul
import time
start_time = time.time()  # wall-clock start for the timing printout at the end
n = 100000  # chains are abandoned once a sum-of-divisors value exceeds this bound
def factorization(n):
    """
    Generate the prime factorization of n as pairs (p, k), where prime p
    appears k times in the factorization.
    """
    divisor = 1
    while divisor * divisor < n:
        divisor += 1
        count = 0
        while n % divisor == 0:
            count += 1
            n //= divisor
        if count:
            yield divisor, count
    # Whatever remains (> 1) is itself prime.
    if n != 1:
        yield n, 1
def divisor_gen(n):
    """
    Generate all divisors of a number (including the number itself) by
    stepping through every combination of prime-factor exponents like an
    odometer.
    """
    factors = list(factorization(n))
    nfactors = len(factors)
    f = [0] * nfactors
    while True:
        yield reduce(mul, [factors[x][0]**f[x] for x in range(nfactors)], 1)
        # Bug fix: for n == 1 there are no factors, and the odometer loop
        # below would raise IndexError on f[0]; the single divisor 1 has
        # already been yielded, so stop here.
        if not nfactors:
            return
        i = 0
        while True:
            f[i] += 1
            if f[i] <= factors[i][1]:
                break
            f[i] = 0
            i += 1
            if i >= nfactors:
                return
def check(n):
    """Find the longest amicable chain with all members <= n and return
    its smallest member (Project Euler 95)."""
    # divs[k] caches the sum of proper divisors of k (0 == not computed).
    divs = [0 for i in range(1000001)]
    max_set = set()
    len_max_set = 0
    for number in range(6,n+1):
        if not divs[number]:
            # divisor_gen includes the number itself, so subtract it to
            # get the sum of *proper* divisors.
            divs[number] = sum(divisor_gen(number)) - number
        SOD = divs[number]
        numbers = set()
        while True:
            if SOD == 1 or SOD > n:
                # Chain dies (reaches 1) or escapes the bound: discard.
                numbers = set()
                break
            elif SOD in numbers:
                # Found a cycle; only count it if it loops back through
                # the starting number (a genuine chain containing it).
                if number in numbers:
                    if len(numbers) > len_max_set:
                        max_set = numbers.copy()
                        len_max_set = len(max_set)
                    elif len(numbers) == len_max_set:
                        # Tie: merge, so min() below sees every candidate.
                        max_set |= numbers
                break
            else:
                numbers |= {SOD}
                if not divs[SOD]:
                    divs[SOD] = sum(divisor_gen(SOD)) - SOD
                SOD = divs[SOD]
    return min(max_set)
print (check(n))  # smallest member of the longest amicable chain found
print("--- %s seconds ---" % (time.time() - start_time))
| {
"repo_name": "GodMode-On/Tasks-from-different-sites",
"path": "Hackerrank/Project Euler #95: Amicable chains.py",
"copies": "1",
"size": "1969",
"license": "unlicense",
"hash": 4910177555205934000,
"line_mean": 24.5714285714,
"line_max": 76,
"alpha_frac": 0.4520060945,
"autogenerated": false,
"ratio": 3.7080979284369113,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4660104022936911,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import mul
__author__ = 'anuvrat'
'''
Exceeds time limit
'''
class Memoize(object):
    """Callable wrapper caching results of ``f`` keyed by positional args."""
    def __init__(self, f):
        self.f = f
        self.cache = {}
    def __call__(self, *args):
        # EAFP: hit the cache first, compute and store on a miss.
        try:
            return self.cache[args]
        except KeyError:
            result = self.cache[args] = self.f(*args)
            return result
def primes_from_2_to(n):
    """Sieve returning a *list* of primes using the 6k±1 wheel.

    Index i of the sieve represents the candidate 3*i + 1 | 1.
    NOTE(review): with a sieve of size ``n`` the candidates extend to
    roughly 3n, but the marking loop only runs to ~sqrt(n)/3 -- so
    entries near the top of the returned list may not be fully sieved;
    callers appear to rely only on the lower range. Confirm bounds.
    """
    sieve = [True] * n
    for i in range(1, int((n ** 0.5) / 3) + 1):
        if sieve[i]:
            # k is the prime represented by index i (3i+1 rounded up to odd).
            k = 3 * i + 1 | 1
            sieve[int(k * k / 3)::2 * k] = [False] * len(sieve[int(k * k / 3)::2 * k])
            sieve[int(k * (k - 2 * (i & 1) + 4) / 3)::2 * k] = [False] * len(sieve[int(k * (k - 2 * (i & 1) + 4) / 3)::2 * k])
    indexes = [i for i, x in enumerate(sieve) if x][1:]
    new_indexes = [3 * x + 1 | 1 for x in indexes]
    primes_found = [2, 3]
    primes_found.extend(new_indexes)
    return primes_found
# NOTE(review): despite the name, this is a *list*, so the `in` checks
# below scan it linearly.
primes_set = primes_from_2_to(600000)
# Small primes below 1000 (2 plus the odd entries found above).
primes = (2, ) + tuple(n for n in range(3, 1000, 2) if n in primes_set)
@Memoize
def get_prime_factors(n):
    """Return {prime: exponent} for ``n`` by trial division over the
    precomputed prime table.

    NOTE(review): a prime factor larger than the table's range would be
    missed -- callers are expected to stay within it.
    """
    factors = {}
    for checker in primes_set:
        if n <= 1:
            # Fully factored: skipping the rest of the prime table is a
            # large speedup and changes no results.
            break
        count = 0
        while n > 1 and n % checker == 0:
            count += 1
            n //= checker
        if count > 0:
            factors[checker] = count
    return factors
@Memoize
def get_sum_divisors_for_prime(prime, occurrences):
    """Sum of divisors of ``prime ** occurrences``:
    1 + p + p**2 + ... + p**k = (p**(k+1) - 1) / (p - 1).

    BUG FIX: the original used float ``/``, which silently loses
    precision once the numerator exceeds 2**53.  The division is always
    exact, so integer ``//`` gives the correct value for any size.
    """
    return (prime ** (occurrences + 1) - 1) // (prime - 1)
if __name__ == '__main__':
    # One "sum of proper divisors" query per test case.
    for _ in range(int(input().strip())):
        num = int(input().strip())
        if num == 1:
            # 1 has no proper divisors.
            print(0)
            continue
        factorisation = get_prime_factors(num)
        # sigma(n) is multiplicative: multiply per-prime divisor sums,
        # then subtract n itself to keep only the proper divisors.
        divisor_sum = 1
        for prime_factor in factorisation.keys():
            divisor_sum = divisor_sum * get_sum_divisors_for_prime(
                prime_factor, factorisation.get(prime_factor))
        print(int(divisor_sum - num))
| {
"repo_name": "anuvrat/spoj",
"path": "problems/tutorial/000074_divsum.py",
"copies": "1",
"size": "1779",
"license": "mit",
"hash": -6486141029699924000,
"line_mean": 24.0563380282,
"line_max": 126,
"alpha_frac": 0.5182686903,
"autogenerated": false,
"ratio": 2.906862745098039,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3925131435398039,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import mul
from lasagne.layers import Layer
from lasagne.layers import Conv2DLayer
from lasagne.layers import MaxPool2DLayer
import numpy as np
from tabulate import tabulate
# Start with the portable layer implementations; the optional GPU
# backends below are appended only when their imports succeed.
convlayers = [Conv2DLayer]
maxpoollayers = [MaxPool2DLayer]
try:
    from lasagne.layers.cuda_convnet import Conv2DCCLayer
    from lasagne.layers.cuda_convnet import MaxPool2DCCLayer
    convlayers.append(Conv2DCCLayer)
    maxpoollayers.append(MaxPool2DCCLayer)
except ImportError:
    # cuda_convnet backend unavailable -- skip its layer classes.
    pass
try:
    from lasagne.layers.dnn import Conv2DDNNLayer
    from lasagne.layers.dnn import MaxPool2DDNNLayer
    convlayers.append(Conv2DDNNLayer)
    maxpoollayers.append(MaxPool2DDNNLayer)
except ImportError:
    # cuDNN backend unavailable -- skip its layer classes.
    pass
class ansi:
    """ANSI terminal escape codes used to colour layer names in the
    summary table (ENDC resets the colour)."""
    BLUE = '\033[94m'
    CYAN = '\033[36m'
    GREEN = '\033[32m'
    MAGENTA = '\033[35m'
    RED = '\033[31m'
    ENDC = '\033[0m'
def is_conv2d(layers):
    """Return True if ``layers`` is, or contains, a 2D convolution layer.

    Accepts either a single lasagne ``Layer`` or an iterable of layers.
    The type tuple is built once (the original rebuilt it per element)
    and ``any`` is fed a lazy generator instead of a throwaway list.
    """
    conv_types = tuple(convlayers)
    if isinstance(layers, Layer):
        return isinstance(layers, conv_types)
    return any(isinstance(layer, conv_types) for layer in layers)
def is_maxpool2d(layers):
    """Return True if ``layers`` is, or contains, a 2D max-pool layer.

    Accepts either a single lasagne ``Layer`` or an iterable of layers.
    The type tuple is built once (the original rebuilt it per element)
    and ``any`` is fed a lazy generator instead of a throwaway list.
    """
    pool_types = tuple(maxpoollayers)
    if isinstance(layers, Layer):
        return isinstance(layers, pool_types)
    return any(isinstance(layer, pool_types) for layer in layers)
def get_real_filter(layers, img_size):
    """Get the real filter sizes of each layer involved in
    convoluation. See Xudong Cao:
    https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code
    This does not yet take into consideration feature pooling,
    padding, striding and similar gimmicks.

    Returns an (n_layers, 2) array; non-conv layers and everything after
    them are reported as the full image size.
    """
    real_filter = np.zeros((len(layers), 2))
    conv_mode = True
    # Product of pool sizes seen so far: each max-pool scales how many
    # input pixels one unit of the next conv filter spans.
    expon = np.ones((1, 2))
    for i, layer in enumerate(layers[1:]):
        j = i + 1
        if not conv_mode:
            real_filter[j] = img_size
            continue
        if is_conv2d(layer):
            # The original had a first-conv-layer flag here whose two
            # branches computed exactly the same value; the flag was
            # dead weight and is removed.
            real_filter[j] = np.array(layer.filter_size) * expon
        elif is_maxpool2d(layer):
            real_filter[j] = real_filter[i]
            expon *= np.array(layer.pool_size)
        else:
            conv_mode = False
            real_filter[j] = img_size
    real_filter[0] = img_size
    return real_filter
def get_receptive_field(layers, img_size):
    """Get the real filter sizes of each layer involved in
    convoluation. See Xudong Cao:
    https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code
    This does not yet take into consideration feature pooling,
    padding, striding and similar gimmicks.
    """
    receptive_field = np.zeros((len(layers), 2))
    conv_mode = True
    first_conv_layer = True
    # Product of pool sizes seen so far: each max-pool multiplies the
    # number of input pixels covered by one step of a later conv filter.
    expon = np.ones((1, 2))
    for i, layer in enumerate(layers[1:]):
        j = i + 1
        if not conv_mode:
            # Past the first non-conv/non-pool layer every unit is
            # reported as seeing the whole image.
            receptive_field[j] = img_size
            continue
        if is_conv2d(layer):
            if not first_conv_layer:
                # Grow the previous field by (filter - 1) input pixels,
                # scaled by the accumulated pooling factor.
                last_field = receptive_field[i]
                new_field = (last_field + expon *
                             (np.array(layer.filter_size) - 1))
                receptive_field[j] = new_field
            else:
                # The first conv layer sees exactly its own filter.
                receptive_field[j] = layer.filter_size
                first_conv_layer = False
        elif is_maxpool2d(layer):
            # Pooling leaves the field unchanged here; its effect shows
            # up through ``expon`` in subsequent conv layers.
            receptive_field[j] = receptive_field[i]
            expon *= np.array(layer.pool_size)
        else:
            conv_mode = False
            receptive_field[j] = img_size
    receptive_field[0] = img_size
    return receptive_field
def get_conv_infos(net, min_capacity=100. / 6, detailed=False):
    """Return a tabulate-formatted summary of the network's layers:
    output shapes, unit totals, capacity and image coverage, with layer
    names colour-coded when coverage exceeds 100% (cyan), capacity falls
    below ``min_capacity`` (magenta), or both (red).
    """
    CYA = ansi.CYAN
    END = ansi.ENDC
    MAG = ansi.MAGENTA
    RED = ansi.RED
    # BUG FIX: on Python 3 ``dict.values()`` is a view and supports
    # neither indexing nor slicing; materialise it so ``layers[0]`` here
    # and ``layers[1:]`` in the helpers work.
    layers = list(net.layers_.values())
    # assume that first layer is input layer
    img_size = layers[0].output_shape[2:]
    header = ['name', 'size', 'total', 'cap.Y', 'cap.X',
              'cov.Y', 'cov.X']
    if detailed:
        header += ['filter Y', 'filter X', 'field Y', 'field X']
    shapes = [layer.output_shape[1:] for layer in layers]
    totals = [str(reduce(mul, shape)) for shape in shapes]
    shapes = ['x'.join(map(str, shape)) for shape in shapes]
    shapes = np.array(shapes).reshape(-1, 1)
    totals = np.array(totals).reshape(-1, 1)
    real_filters = get_real_filter(layers, img_size)
    receptive_fields = get_receptive_field(layers, img_size)
    capacity = 100. * real_filters / receptive_fields
    # Input row divides by zero above; neutralise the non-finite cells.
    capacity[np.logical_not(np.isfinite(capacity))] = 1
    img_coverage = 100. * receptive_fields / img_size
    layer_names = [layer.name if layer.name
                   else str(layer).rsplit('.')[-1].split(' ')[0]
                   for layer in layers]
    colored_names = []
    for name, (covy, covx), (capy, capx) in zip(
            layer_names, img_coverage, capacity):
        if (
                ((covy > 100) or (covx > 100)) and
                ((capy < min_capacity) or (capx < min_capacity))
        ):
            name = "{}{}{}".format(RED, name, END)
        elif (covy > 100) or (covx > 100):
            name = "{}{}{}".format(CYA, name, END)
        elif (capy < min_capacity) or (capx < min_capacity):
            name = "{}{}{}".format(MAG, name, END)
        colored_names.append(name)
    colored_names = np.array(colored_names).reshape(-1, 1)
    table = np.hstack((colored_names, shapes, totals, capacity, img_coverage))
    if detailed:
        table = np.hstack((table, real_filters.astype(int),
                           receptive_fields.astype(int)))
    return tabulate(table, header, floatfmt='.2f')
| {
"repo_name": "scottlittle/nolearn",
"path": "nolearn/lasagne/util.py",
"copies": "5",
"size": "5836",
"license": "mit",
"hash": -1914668629933426400,
"line_mean": 32.1590909091,
"line_max": 98,
"alpha_frac": 0.6038382454,
"autogenerated": false,
"ratio": 3.4168618266978923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6520700072097892,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import mul, rshift
from ..concrete.list import List
from ..utils.decorators import annotate
from .monoid import mappend
from .pure import identity
__all__ = ('fmap', 'unit', 'lift', 'multiapply', 'multibind',
'cons', 'mcons', 'sequence', 'mapM')
@annotate(type="Functor f => (a -> b) -> f a -> f b")
def fmap(f, functor):
    """Functional form of fmap: apply ``f`` inside ``functor`` by
    delegating to the functor's own ``fmap`` method.
    """
    mapped = functor.fmap(f)
    return mapped
@annotate(type="Applicative f => a -> f a")
def unit(v, applicative):
    """Wrap ``v`` in the minimum context needed to form an Applicative,
    delegating to the applicative's own ``unit``.
    """
    wrapped = applicative.unit(v)
    return wrapped
@annotate(type="Applicative f => f (a -> b) -> [f a] -> f b")
def multiapply(initial, *args):
    """String many applicative applications together.

    Designed for a curried function that gradually builds until it has
    enough arguments to call itself.  Equivalent to folding ``*`` over
    ``args`` starting from ``initial``.
    """
    applied = initial
    for arg in args:
        applied = applied * arg
    return applied
@annotate(type="Applicative f => (a -> b) -> a -> f -> f b")
def lift(f, v, applicative):
    """Apply ``f`` to ``v`` and put the result into the applicative
    context via :func:`unit`.
    """
    result = f(v)
    return unit(result, applicative)
@annotate(type="Monad m => m a -> [a -> m a] -> m a")
def multibind(monad, *binds):
    """Compose many monadic binds left to right.

    >>> add_two = lambda x: Just(x+2)
    >>> multibind(Just(2), *repeat(add_two, 3))
    ... Just 8
    """
    bound = monad
    for step in binds:
        bound = bound >> step
    return bound
@annotate(type="a -> [a] -> [a]")
def cons(x, xs):
    """Prepend ``x`` to a pynads.List of existing values.

    .. code-block:: Python

        from pynads import List
        from pynads.funcs import cons

        cons(1, List(2,3))                 # List(1,2,3)
        cons(1, cons(2, cons(3, List())))  # List(1,2,3)
    """
    head = List(x)
    return mappend(head, xs)
@annotate(type="Monad m => m a -> m [a] -> m [a]")
def mcons(p, q):
    """Prepend a monadic value to a pynads.List of values inside a monad.

    .. code-block:: python

        from pynads import Just
        from pynads.funcs import mcons

        mcons(Just(1), Just(List(2,3)))    # Just(List(1,2,3))
        mcons(Nothing, Just(List(1,2,3)))  # Nothing
    """
    # Named closures instead of nested lambdas; same bind chain.
    def with_head(x):
        def with_tail(xs):
            return p.unit(cons(x, xs))
        return q >> with_tail
    return p >> with_head
@annotate(type="Monad m => [m a] -> m [a]")
def sequence(*monads):
    """Folds a list of monads into a monad containing a list of the
    monadic values.

    Requires at least one monad: the first one supplies the ``unit``
    used to seed the fold, so ``sequence()`` raises ``IndexError``.

    .. code-block:: python

        justs = List(*[Just(x) for x in range(5)])
        sequence(*justs)
        # In:  List(Just 0, Just 1, Just 2, Just 3, Just 4)
        # Out: Just List(0, 1, 2, 3, 4)
    """
    # Fold right-to-left so the result keeps the original order.
    return reduce(lambda q, p: mcons(p, q),
                  reversed(monads),
                  monads[0].unit(List()))
@annotate(type="Monad m => (a -> m b) -> [a] -> m [b]")
def mapM(func, *xs):
    """Map a monadic function over an iterable of plain values, lifting
    each into a monadic context, then use :func:`sequence` to convert
    the iterable of monads into a monad containing a pynads.List.

    BUG FIX: the type annotation string read "Moand" instead of
    "Monad"; since it is stored as runtime metadata by ``annotate``,
    the typo is corrected here.
    """
    return sequence(*map(func, xs))
| {
"repo_name": "justanr/pynads",
"path": "pynads/funcs/lifted.py",
"copies": "1",
"size": "3140",
"license": "mit",
"hash": 2326675607620457500,
"line_mean": 28.0740740741,
"line_max": 77,
"alpha_frac": 0.5961783439,
"autogenerated": false,
"ratio": 3.250517598343685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9346695942243686,
"avg_score": 0,
"num_lines": 108
} |
from functools import reduce
from operator import not_
from typing import Any, Callable, Iterator, Optional
# Public API of this module.
__all__ = [
    'comp',
    'complement',
    'compose',
    'dec',
    'even',
    'identity',
    'inc',
    'natural_nums',
    'odd',
]
# Module-level marker object; presumably intended as a default-argument
# sentinel for identity checks -- not referenced in this file's visible
# code, confirm before removing.
sentinel = object()
def natural_nums(start: int = 0, end: Optional[int] = None) -> Iterator[int]:
    """
    Yields a lazy sequence of natural numbers

    >>> from itertools import islice
    >>> list(islice(natural_nums(5), 3))
    [5, 6, 7]

    If ``end`` is given the sequence stops before it, like ``range``.
    BUG FIX: the original checked ``start == end`` only *after*
    incrementing, so it looped forever whenever ``end <= start``
    (e.g. ``natural_nums(5, 3)``); this version yields nothing in that
    case.  The annotation is also corrected to ``Optional[int]``.
    """
    while end is None or start < end:
        yield start
        start += 1
def identity(x: Any) -> Any:
    """
    Return the argument unchanged

    >>> x = (10, 20)
    >>> identity(x)
    (10, 20)
    """
    return x
def comp(*funcs: Callable) -> Callable:
    """
    Compose ``funcs`` into a single function, applied right to left:
    ``comp(f, g)(x)`` is equivalent to ``f(g(x))``.  With no arguments
    the result behaves as the identity function.
    """
    def composed(value: Any) -> Any:
        for fn in reversed(funcs):
            value = fn(value)
        return value
    return composed
def complement(fn: Callable) -> Callable:
    """
    Takes a function fn and returns a function that takes the same arguments
    as fn with the opposite truth value.

    >>> not_five = complement(lambda x: x == 5)
    >>> not_five(6)
    True

    BUG FIX: the original returned ``comp(not_, fn)``, which only ever
    forwarded a single positional argument, contradicting the docstring;
    this version forwards ``*args``/``**kwargs`` so multi-argument
    functions work too.
    """
    def negated(*args: Any, **kwargs: Any) -> bool:
        return not fn(*args, **kwargs)
    return negated
def inc(n: int) -> int:
    """
    Return the successor of ``n``

    >>> inc(10)
    11
    """
    successor = n + 1
    return successor
def dec(n: int) -> int:
    """
    Return the predecessor of ``n``

    >>> dec(5)
    4
    """
    predecessor = n - 1
    return predecessor
def even(n: int) -> bool:
    """
    Return True when ``n`` is divisible by two

    >>> even(2)
    True
    """
    return not n % 2
def odd(n: int) -> bool:
    """
    Return True when ``n`` is not divisible by two

    >>> odd(3)
    True
    """
    return n % 2 != 0
# Define some common aliases
compose = comp  # ``compose`` is a more explicit alias for :func:`comp`
| {
"repo_name": "Jackevansevo/basic-utils",
"path": "basic_utils/primitives.py",
"copies": "1",
"size": "1743",
"license": "mit",
"hash": -521563974577361400,
"line_mean": 15.1388888889,
"line_max": 76,
"alpha_frac": 0.53643144,
"autogenerated": false,
"ratio": 3.486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9519537921481481,
"avg_score": 0.0005787037037037037,
"num_lines": 108
} |
from functools import reduce
from operator import or_, add
from typing import List, Union, Iterable, TypeVar
from django.contrib.postgres.search import TrigramSimilarity
from django.db import models
from django.db.models import Q, F, When, Case, Value, BooleanField
from django.shortcuts import _get_queryset
# generic type.
# See code example here to clarify why we are needed generic type:
# https://docs.python.org/3/library/typing.html#generics
QuerySetType = TypeVar('QuerySetType')
def search(term: str, model_type: Union[models.Model, models.Manager, QuerySetType],
           lookups: list, ordering=None) -> QuerySetType:
    """
    Return search results based on a given model

    OR-combines each lookup string in ``lookups`` (e.g.
    ``'name__icontains'``) against the stripped ``term``, restricted to
    rows with ``page__is_active=True``.  Rows whose ``name`` starts with
    the term are ranked first; remaining order falls back to
    ``ordering`` or ``('name',)``.
    """
    def _get_Q(lookup):
        # One Q object per lookup, e.g. Q(name__icontains=term).
        return Q(**{lookup: term})

    term = term.strip()
    query_set = _get_queryset(model_type)
    # OR all lookups into a single filter expression.
    query = reduce(or_, map(_get_Q, lookups))
    return (
        query_set.filter(query, page__is_active=True)
        .annotate(
            # True when the name begins with the term; used in order_by
            # below to float exact-prefix matches to the top.
            is_name_start_by_term=Case(When(
                name__istartswith=term, then=Value(True)), default=Value(False),
                output_field=BooleanField())
        )
        .order_by(F('is_name_start_by_term').desc(), *ordering or ('name', ))
    )
def trigram_search(query: str, queryset, fields: List[str]):
    """
    Trigram similarity search. https://goo.gl/8QkFGj

    Annotates each row with the summed trigram similarity of ``query``
    against every field in ``fields`` and orders best matches first.
    """
    query = query.strip()
    similarities = [TrigramSimilarity(field, query) for field in fields]
    combined = reduce(add, similarities)
    return queryset.annotate(similarity=combined).order_by('-similarity')
def get_field_type(model, field):
    """
    Determine a type name of Django field.

    ``field`` may traverse relations with ``__`` (e.g. ``product__name``
    or ``order__product__name``): every prefix hops to the related model
    before the final field's internal type is read.  The original code
    unpacked exactly two parts and raised ``ValueError`` on deeper
    lookups; this version generalises while behaving identically for
    zero or one ``__``.

    @todo #stb-537:30m Create a class instead this function.
     See there for details: https://github.com/fidals/refarm-site/pull/315/files#r270500698
    """
    *relations, field = field.split('__')
    for relation_name in relations:
        # Hop across the relation to the related model.
        model = model._meta.get_field(relation_name).related_model
    return (
        model._meta
        .get_field(field)
        .get_internal_type()
    )
class Search:
    """Search configuration for one model: splits its lookup fields into
    text fields (searched via trigram similarity) and everything else
    (searched via plain lookups)."""
    def __init__(
        self, name, qs, fields,
        template_fields=None,
        min_similarity=0.3,
        redirect_field=None
    ):
        """
        :param name: used as variable in templates. Example: "category"
        :param qs: queryset of model to be searched for
        :param fields: list of query lookups
        :param template_fields: list fields for django templates
        :param min_similarity: used to trigram similarity search
        :param redirect_field: when client search for this field, the result is
        redirected to custom page
        """
        self.name = name
        self.fields = fields
        self.template_fields = template_fields
        self.qs = qs
        self.min_similarity = min_similarity
        self.redirect_field = redirect_field
        # Partition lookups by column type once, up front.
        self.trigram_fields = []
        self.decimal_fields = []
        for field in fields:
            type_ = get_field_type(self.qs.model, field)
            if type_ in ['CharField', 'TextField']:
                # Trigram similarity supports only these two entity types
                self.trigram_fields.append(field)
            else:
                self.decimal_fields.append(field)
    def search(self, term: str):
        """Run the search: trigram for text terms, plain lookups for
        decimal terms, preferring trigram hits when both apply."""
        def _trigram_search(query):
            """Just a shortcut for trigram_search function call."""
            return trigram_search(query, self.qs, self.trigram_fields).filter(
                similarity__gt=self.min_similarity
            )
        if not term.isdecimal():
            return _trigram_search(term)
        if not self.trigram_fields:
            return search(term, self.qs, self.decimal_fields)
        elif not self.decimal_fields:
            return _trigram_search(term)
        else:
            # Decimal term with both field kinds: prefer trigram results,
            # fall back to plain lookups, else None.
            trigram_data = _trigram_search(term)
            default_data = search(term, self.qs, self.decimal_fields)
            return trigram_data if trigram_data else default_data if default_data else None
    def search_by_redirect_field(self, term: str):
        """Exact match on the redirect field; only for numeric terms."""
        if not self.redirect_field:
            return
        return (
            self.qs.filter(**{self.redirect_field: term}).first()
            if term.isdecimal() else None
        )
class Limit:
    """Caps the total number of items handed out across repeated
    ``limit_data`` calls against a single overall budget."""

    def __init__(self, limit):
        self.limit = limit
        self.size = 0  # number of found elements

    def limit_data(self, data: Iterable) -> list:
        """Return as many leading items of ``data`` as the remaining
        budget allows (possibly none); ``None`` counts as empty."""
        items = data or []
        remaining = self.limit - self.size
        if remaining <= 0:
            return []
        taken = items[:remaining]
        self.size += len(taken)
        return taken
| {
"repo_name": "fidals/refarm-site",
"path": "search/search.py",
"copies": "1",
"size": "4742",
"license": "mit",
"hash": -8247908616544271000,
"line_mean": 31.2585034014,
"line_max": 91,
"alpha_frac": 0.6140868832,
"autogenerated": false,
"ratio": 3.941812136325852,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003119014648264029,
"num_lines": 147
} |
from functools import reduce
from operator import or_
import random
import sc2
from sc2 import Race, Difficulty
from sc2.constants import *
from sc2.player import Bot, Computer
from sc2.data import race_townhalls
import enum
class BroodlordBot(sc2.BotAI):
    """One-base Zerg bot: builds a drone/queen economy, techs Spawning
    Pool -> Lair -> Infestation Pit + Spire -> Hive -> Greater Spire,
    then attacks with zerglings, corruptors and broodlords."""
    def select_target(self):
        # Prefer a known enemy structure; otherwise head for the enemy main.
        if self.known_enemy_structures.exists:
            return random.choice(self.known_enemy_structures).position
        return self.enemy_start_locations[0]
    async def on_step(self, iteration):
        larvae = self.units(LARVA)
        forces = self.units(ZERGLING) | self.units(CORRUPTOR) | self.units(BROODLORD)
        # Attack-move the whole army once a few broodlords exist;
        # re-issued every 50 iterations to keep units pushing.
        if self.units(BROODLORD).amount > 2 and iteration % 50 == 0:
            for unit in forces:
                await self.do(unit.attack(self.select_target()))
        # Supply-block prevention: train an overlord and do nothing else
        # this step.
        if self.supply_left < 2:
            if self.can_afford(OVERLORD) and larvae.exists:
                await self.do(larvae.random.train(OVERLORD))
            return
        # End-game production: alternate corruptors and broodlord morphs.
        if self.units(GREATERSPIRE).ready.exists:
            corruptors = self.units(CORRUPTOR)
            # build half-and-half corruptors and broodlords
            if corruptors.exists and corruptors.amount > self.units(BROODLORD).amount:
                if self.can_afford(BROODLORD):
                    await self.do(corruptors.random.train(BROODLORD))
            elif self.can_afford(CORRUPTOR) and larvae.exists:
                await self.do(larvae.random.train(CORRUPTOR))
            return
        # Base lost: all-in attack with every unit left.
        if not self.townhalls.exists:
            for unit in self.units(DRONE) | self.units(QUEEN) | forces:
                await self.do(unit.attack(self.enemy_start_locations[0]))
            return
        else:
            hq = self.townhalls.first
        # Queen larva inject whenever the ability is available.
        for queen in self.units(QUEEN).idle:
            abilities = await self.get_available_abilities(queen)
            if AbilityId.EFFECT_INJECTLARVA in abilities:
                await self.do(queen(EFFECT_INJECTLARVA, hq))
        # Tech tree, one rung per condition below.
        if not (self.units(SPAWNINGPOOL).exists or self.already_pending(SPAWNINGPOOL)):
            if self.can_afford(SPAWNINGPOOL):
                await self.build(SPAWNINGPOOL, near=hq)
        if self.units(SPAWNINGPOOL).ready.exists:
            if not self.units(LAIR).exists and not self.units(HIVE).exists and hq.noqueue:
                if self.can_afford(LAIR):
                    await self.do(hq.build(LAIR))
        if self.units(LAIR).ready.exists:
            if not (self.units(INFESTATIONPIT).exists or self.already_pending(INFESTATIONPIT)):
                if self.can_afford(INFESTATIONPIT):
                    await self.build(INFESTATIONPIT, near=hq)
            if not (self.units(SPIRE).exists or self.already_pending(SPIRE)):
                if self.can_afford(SPIRE):
                    await self.build(SPIRE, near=hq)
        if self.units(INFESTATIONPIT).ready.exists and not self.units(HIVE).exists and hq.noqueue:
            if self.can_afford(HIVE):
                await self.do(hq.build(HIVE))
        if self.units(HIVE).ready.exists:
            spires = self.units(SPIRE).ready
            if spires.exists:
                spire = spires.random
                if self.can_afford(GREATERSPIRE) and spire.noqueue:
                    await self.do(spire.build(GREATERSPIRE))
        # Two gas geysers for the tech- and air-heavy composition.
        if self.units(EXTRACTOR).amount < 2 and not self.already_pending(EXTRACTOR):
            if self.can_afford(EXTRACTOR):
                drone = self.workers.random
                target = self.state.vespene_geyser.closest_to(drone.position)
                err = await self.do(drone.build(EXTRACTOR, target))
        # Saturate minerals first; the early return keeps larvae for drones.
        if hq.assigned_harvesters < hq.ideal_harvesters:
            if self.can_afford(DRONE) and larvae.exists:
                larva = larvae.random
                await self.do(larva.train(DRONE))
                return
        # Keep each extractor staffed with nearby workers.
        for a in self.units(EXTRACTOR):
            if a.assigned_harvesters < a.ideal_harvesters:
                w = self.workers.closer_than(20, a)
                if w.exists:
                    await self.do(w.random.gather(a))
        # A single queen for larva injects.
        if self.units(SPAWNINGPOOL).ready.exists:
            if not self.units(QUEEN).exists and hq.is_ready and hq.noqueue:
                if self.can_afford(QUEEN):
                    await self.do(hq.train(QUEEN))
        # Dump excess minerals into zerglings.
        if self.units(ZERGLING).amount < 40 and self.minerals > 1000:
            if larvae.exists and self.can_afford(ZERGLING):
                await self.do(larvae.random.train(ZERGLING))
def main():
    """Launch the Zerg bot against a medium Terran AI on Catalyst and
    save the replay as ZvT.SC2Replay."""
    players = [
        Bot(Race.Zerg, BroodlordBot()),
        Computer(Race.Terran, Difficulty.Medium),
    ]
    sc2.run_game(sc2.maps.get("(2)CatalystLE"), players,
                 realtime=False, save_replay_as="ZvT.SC2Replay")
if __name__ == '__main__':
    # Run the example game only when executed as a script.
    main()
| {
"repo_name": "Dentosal/python-sc2",
"path": "examples/zerg/onebase_broodlord.py",
"copies": "1",
"size": "4717",
"license": "mit",
"hash": 2135451834562277600,
"line_mean": 38.9745762712,
"line_max": 98,
"alpha_frac": 0.6039855841,
"autogenerated": false,
"ratio": 3.420594633792603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9514818115149405,
"avg_score": 0.0019524205486396939,
"num_lines": 118
} |
from functools import reduce
from operator import or_
from chamber.shortcuts import get_object_or_none
from django.utils.translation import ugettext
from django.db.models.query import QuerySet
from django.db.models import Q
from django.db.models.expressions import OrderBy
from django.utils.translation import ugettext
from .forms import RESTValidationError
from .exception import RESTException
from .utils.compatibility import get_last_parent_pk_field_name
from .utils.helpers import ModelIterableIteratorHelper
from .response import HeadersResponse
def _get_attr(obj, attr):
if '__' in attr:
rel_obj, rel_attr = attr.split('__')
return _get_attr(getattr(obj, rel_obj), rel_attr)
else:
return getattr(obj, attr)
class BasePaginator:
    """Interface for REST paginators; subclasses implement get_response."""
    def get_response(self, qs, request):
        # Subclasses return a HeadersResponse wrapping one page of ``qs``.
        raise NotImplementedError
class OffsetBasedPaginator(BasePaginator):
    """Offset/limit paginator for lists and querysets.

    Reads ``offset`` and ``base`` (page size) from the request's REST
    context and emits X-Total / X-Next-Offset / X-Prev-Offset headers.
    """

    def __init__(self, max_offset=pow(2, 63) - 1, max_base=100, default_base=20):
        self.max_offset = max_offset
        self.max_base = max_base
        self.default_base = default_base

    def get_response(self, qs, request):
        """Slice one page out of ``qs`` and wrap it with paging headers."""
        base = self._get_base(qs, request)
        total = self._get_total(qs, request)
        offset = self._get_offset(qs, request)
        # Fetch one extra row so next-page detection works without a total.
        page = qs[offset:(offset + base + 1)]
        next_offset = self._get_next_offset(page, offset, base, total)
        prev_offset = self._get_prev_offset(page, offset, base, total)
        return HeadersResponse(
            ModelIterableIteratorHelper(page[:base], page.model),
            self.get_headers(total, next_offset, prev_offset)
        )

    def _get_offset(self, qs, request):
        """Parse and validate the ``offset`` query parameter."""
        raw_offset = request._rest_context.get('offset', '0')
        if not raw_offset.isdigit():
            raise RESTException(ugettext('Offset must be natural number'))
        offset = int(raw_offset)
        if offset > self.max_offset:
            raise RESTException(ugettext('Offset must be lower or equal to {}').format(self.max_offset))
        return offset

    def _get_base(self, qs, request):
        """Parse and validate the ``base`` (page size) query parameter."""
        raw_base = request._rest_context.get('base')
        if not raw_base:
            return self.default_base
        if not raw_base.isdigit():
            raise RESTException(ugettext('Base must be natural number or empty'))
        base = int(raw_base)
        if base > self.max_base:
            raise RESTException(ugettext('Base must lower or equal to {}').format(self.max_base))
        return base

    def _get_total(self, qs, request):
        """Total row count; uses SQL COUNT for querysets."""
        return qs.count() if isinstance(qs, QuerySet) else len(qs)

    def _get_next_offset(self, qs, offset, base, total):
        """Offset of the next page, or None when this is the last page."""
        if total:
            return offset + base if base and offset + base < total else None
        # No total available: rely on the extra probe row in get_response.
        return offset + base if len(qs) > base else None

    def _get_prev_offset(self, qs, offset, base, total):
        """Offset of the previous page, or None on the first page."""
        return None if offset == 0 or not base else max(offset - base, 0)

    def get_headers(self, total, next_offset, prev_offset):
        """Paging header dict with None-valued entries dropped."""
        headers = {
            'X-Total': total,
            'X-Next-Offset': next_offset,
            'X-Prev-Offset': prev_offset,
        }
        return {name: value for name, value in headers.items() if value is not None}
class OffsetBasedPaginatorWithoutTotal(OffsetBasedPaginator):
    """Offset paginator that skips the COUNT query entirely."""
    def _get_total(self, qs, request):
        # None makes get_headers drop X-Total and switches next-offset
        # detection to the extra-row probe in _get_next_offset.
        return None
class CursorBasedModelIterableIteratorHelper(ModelIterableIteratorHelper):
    """Iterable page plus ``next``, the cursor of the following page
    (None when there is no further page)."""
    def __init__(self, iterable, model, next):
        super().__init__(iterable, model)
        self.next = next
class CursorBasedPaginator(BasePaginator):
def __init__(self, max_base=100, default_base=20):
self.max_base = max_base
self.default_base = default_base
def get_response(self, qs, request):
base = self._get_base(request)
cursor = self._get_cursor(request)
ordering = self._get_ordering(request, qs)
cursor_based_model_iterable = self._get_paged_qs(qs, ordering, cursor, base)
return HeadersResponse(
cursor_based_model_iterable,
self.get_headers(cursor_based_model_iterable.next)
)
def _get_page_filter_kwargs(self, current_row, ordering):
ordering = list(ordering)
args_or = []
while ordering:
base_order_field_name = ordering.pop()
is_reverse = base_order_field_name.startswith('-')
base_order_field_name = self._get_field_name(base_order_field_name)
base_order_filtered_value = _get_attr(current_row, base_order_field_name)
if base_order_filtered_value is None:
if is_reverse:
filter_lookup = Q(**{'{}__isnull'.format(base_order_field_name): False})
else:
# skip this filter
continue
else:
if is_reverse:
filter_lookup = Q(
**{'{}__lt'.format(base_order_field_name): base_order_filtered_value}
)
else:
filter_lookup = Q(
**{'{}__gt'.format(base_order_field_name): base_order_filtered_value}
) | Q(
**{'{}__isnull'.format(base_order_field_name): True}
)
args_or.append(
Q(
filter_lookup,
Q(**{
self._get_field_name(order): _get_attr(
current_row, self._get_field_name(order)
) for order in ordering
})
)
)
return reduce(or_, args_or)
def _get_page(self, qs, base):
results = list(qs[:base + 1])
page = list(results[:base])
next_cursor = self._get_position_from_instance(page[-1]) if len(results) > len(page) else None
return CursorBasedModelIterableIteratorHelper(page, qs.model, next=next_cursor)
@property
def _get_paged_qs(self, qs, ordering, cursor, base):
qs = qs.order_by(*ordering)
if cursor:
current_row = get_object_or_none(qs, pk=cursor)
if current_row:
qs = qs.filter(self._get_page_filter_kwargs(current_row))
else:
raise RESTException(RESTValidationError(
ugettext('Cursor object was not found'),
code=ERROR_CODE.PAGINATION)
)
return self._get_page(qs, base)
def _get_base(self, request):
base = request._rest_context.get('base')
if not base:
return self.default_base
elif base.isdigit():
base_int = int(base)
if base_int > self.max_base:
raise RESTException(ugettext('Base must lower or equal to {}').format(self.max_base))
else:
return base_int
else:
raise RESTException(ugettext('Base must be natural number or empty'))
def _get_cursor(self, request):
return request._rest_context.get('cursor')
def _get_ordering(self, request, qs):
pk_field_name = get_last_parent_pk_field_name(qs.model)
query_ordering = list(qs.query.order_by) or list(qs.model._meta.ordering)
ordering = []
for order_lookup in query_ordering:
if isinstance(order_lookup, OrderBy):
ordering.append(
'-' + order_lookup.expression.name if order_lookup.descending else order_lookup.expression.name
)
else:
ordering.append(order_lookup)
if self._pk_field_name not in ordering:
ordering.append(pk_field_name)
return ordering
def _get_position_from_instance(self, instance):
pk_field_name = get_last_parent_pk_field_name(instance.__class__)
if isinstance(instance, dict):
attr = instance[pk_field_name]
else:
attr = getattr(instance, pk_field_name)
return str(attr)
def _get_field_name(self, order_lookup):
return order_lookup[1:] if order_lookup.startswith('-') else order_lookup
def get_headers(self, next_cursor):
return {
k: v for k, v in {
'X-Next-Cursor': next_cursor,
}.items() if v is not None
} | {
"repo_name": "druids/django-pyston",
"path": "pyston/paginator.py",
"copies": "1",
"size": "8554",
"license": "bsd-3-clause",
"hash": 3477494264881346000,
"line_mean": 33.6356275304,
"line_max": 115,
"alpha_frac": 0.5713116671,
"autogenerated": false,
"ratio": 4.063657957244655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5134969624344655,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import or_
from django.db.models import Q
from django.conf import settings
from django.contrib.auth.models import User
from django.http import JsonResponse
from cities_light.models import City, Country, Region
from dal import autocomplete
from pytz import country_timezones
from mozillians.common.templatetags.helpers import get_object_or_none
from mozillians.groups.models import GroupMembership
from mozillians.phonebook.forms import get_timezones_list
from mozillians.users.models import IdpProfile, UserProfile
class BaseProfileAdminAutocomplete(autocomplete.Select2QuerySetView):
    """Base class for django-autocomplete-light."""
    def get_queryset(self):
        """Base queryset used only in admin.
        Return all the users who have completed their profile registration.
        """
        # Admin-only endpoint: hide everything from non-staff requesters.
        if not self.request.user.is_staff:
            return UserProfile.objects.none()
        qs = UserProfile.objects.complete()
        # Saved on the instance so subclasses can re-apply it after they
        # narrow the queryset further.
        self.q_base_filter = (Q(full_name__icontains=self.q)
                              | Q(user__email__icontains=self.q)
                              | Q(user__username__icontains=self.q))
        if self.q:
            qs = qs.filter(self.q_base_filter)
        return qs
class UsersAdminAutocomplete(autocomplete.Select2QuerySetView):
    """Base class for django-autocomplete-light."""
    def get_queryset(self):
        """Base queryset used only in admin.
        Return all the users who have completed their profile registration.
        """
        if not self.request.user.is_staff:
            return User.objects.none()
        # NOTE(review): despite the docstring, this returns every User,
        # not only completed registrations -- confirm which is intended.
        qs = User.objects.all()
        self.q_base_filter = (Q(userprofile__full_name__icontains=self.q)
                              | Q(email__icontains=self.q)
                              | Q(username__icontains=self.q))
        if self.q:
            qs = qs.filter(self.q_base_filter)
        return qs
class VoucherAutocomplete(BaseProfileAdminAutocomplete):
    def get_queryset(self):
        """Augment base queryset by returning only users who can vouch."""
        qs = super(VoucherAutocomplete, self).get_queryset().filter(can_vouch=True)
        # q_base_filter was built by the parent get_queryset() call above.
        if self.q:
            qs = qs.filter(self.q_base_filter)
        return qs
class VouchedAutocomplete(BaseProfileAdminAutocomplete):
    def get_queryset(self):
        """Augment base queryset by returning only vouched users."""
        qs = super(VouchedAutocomplete, self).get_queryset().vouched()
        # q_base_filter was built by the parent get_queryset() call above.
        if self.q:
            qs = qs.filter(self.q_base_filter)
        return qs
class CuratorsAutocomplete(autocomplete.Select2QuerySetView):
    """Autocomplete over vouched profiles; requester must be vouched."""
    def get_queryset(self):
        """Augment base queryset by returning only vouched users."""
        # Allow only vouched users to perform this query.
        if not self.request.user.userprofile.is_vouched:
            return UserProfile.objects.none()
        qs = UserProfile.objects.vouched()
        if self.q:
            qs = qs.filter(Q(full_name__icontains=self.q)
                           | Q(user__email__icontains=self.q)
                           | Q(user__username__icontains=self.q))
        return qs
def get_autocomplete_location_query(qs, q):
    """Return qs if ``istartswith`` filter exists, else fallback to ``icontains``.

    Prefix matches are preferred; only when none exist does the search
    widen to substring matches.
    """
    prefix_matches = qs.filter(name__istartswith=q)
    if prefix_matches.exists():
        return prefix_matches
    return qs.filter(name__icontains=q)
class StaffProfilesAutocomplete(autocomplete.Select2QuerySetView):
    """Autocomplete over staff profiles (primary login email in one of
    the AUTO_VOUCH_DOMAINS); requester must be vouched."""
    def get_results(self, context):
        """Modify the text in the results of the group invitation form."""
        results = []
        for result in context['object_list']:
            pk = self.get_result_value(result)
            if not pk:
                continue
            profile = UserProfile.objects.get(pk=pk)
            idp = get_object_or_none(IdpProfile, profile=profile, primary=True)
            text = self.get_result_label(result)
            # Append the email used for login in the autocomplete text
            if idp:
                text += ' ({0})'.format(idp.email)
            item = {
                'id': pk,
                'text': text
            }
            results.append(item)
        return results
    def get_queryset(self):
        """Return profiles whose login email matches a staff domain."""
        if not self.request.user.userprofile.is_vouched:
            return UserProfile.objects.none()
        queries = []
        # Query staff profiles
        for domain in settings.AUTO_VOUCH_DOMAINS:
            pks = IdpProfile.objects.filter(
                email__endswith='@' + domain).values_list('profile__pk', flat=True)
            queries.append(Q(pk__in=pks))
        # OR together one pk-membership filter per staff domain.
        query = reduce(or_, queries)
        qs = UserProfile.objects.filter(query).distinct()
        if self.q:
            qs = qs.filter(Q(full_name__icontains=self.q)
                           | Q(user__email__icontains=self.q)
                           | Q(user__username__icontains=self.q))
        return qs
class AccessGroupInvitationAutocomplete(StaffProfilesAutocomplete):
    """Staff profiles plus NDA/NDA-staff group members."""
    def get_queryset(self):
        staff_qs = super(AccessGroupInvitationAutocomplete, self).get_queryset()
        staff_ids = staff_qs.values_list('pk', flat=True)
        # Query NDA memberships
        nda_members_ids = (
            GroupMembership.objects.filter(Q(group__name=settings.NDA_GROUP)
                                           | Q(group__name=settings.NDA_STAFF_GROUP))
            .filter(status=GroupMembership.MEMBER).distinct()
            .values_list('userprofile__pk', flat=True)
        )
        # Union of staff and NDA-member profiles.
        query = Q(pk__in=staff_ids) | Q(pk__in=nda_members_ids)
        qs = UserProfile.objects.filter(query).distinct()
        if self.q:
            qs = qs.filter(Q(full_name__icontains=self.q)
                           | Q(user__email__icontains=self.q)
                           | Q(user__username__icontains=self.q))
        return qs
class NDAGroupInvitationAutocomplete(StaffProfilesAutocomplete):
    """Staff profiles plus profiles whose primary login provider is
    GitHub, Firefox Accounts, Google or LDAP."""
    def get_queryset(self):
        staff_qs = super(NDAGroupInvitationAutocomplete, self).get_queryset()
        staff_ids = staff_qs.values_list('pk', flat=True)
        # Primary logins via the listed providers -- presumably the ones
        # that can enforce MFA; confirm the product requirement.
        mfa_idps_query = (IdpProfile.objects.filter(primary=True)
                          .filter(Q(type=IdpProfile.PROVIDER_GITHUB)
                                  | Q(type=IdpProfile.PROVIDER_FIREFOX_ACCOUNTS)
                                  | Q(type=IdpProfile.PROVIDER_GOOGLE)
                                  | Q(type=IdpProfile.PROVIDER_LDAP)))
        mfa_idps_pks = mfa_idps_query.values_list('profile__id', flat=True)
        qs = UserProfile.objects.filter(Q(pk__in=mfa_idps_pks) | Q(pk__in=staff_ids))
        if self.q:
            qs = qs.filter(Q(full_name__icontains=self.q)
                           | Q(user__email__icontains=self.q)
                           | Q(user__username__icontains=self.q))
        return qs
class CountryAutocomplete(autocomplete.Select2QuerySetView):
    """Country autocomplete backed by the cities_light Country table."""

    def get_queryset(self):
        """Country queryset from cities_light."""
        # Anonymous users may not enumerate countries.
        if not self.request.user.is_authenticated():
            return Country.objects.none()
        countries = Country.objects.all()
        if not self.q:
            return countries
        return get_autocomplete_location_query(countries, self.q)
class RegionAutocomplete(autocomplete.Select2QuerySetView):
    """Region autocomplete backed by cities_light, narrowed by forwarded country."""

    def get_queryset(self):
        """Region queryset from cities_light."""
        if not self.request.user.is_authenticated():
            return Region.objects.none()

        regions = Region.objects.all()
        country_id = self.forwarded.get('country')
        if country_id:
            regions = regions.filter(country=Country.objects.get(id=country_id))
        if self.q:
            return get_autocomplete_location_query(regions, self.q)
        return regions
class CityAutocomplete(autocomplete.Select2QuerySetView):
    """City autocomplete backed by cities_light, narrowed by forwarded country/region."""

    def get_queryset(self):
        """City queryset from cities_light."""
        if not self.request.user.is_authenticated():
            return City.objects.none()

        cities = City.objects.all()
        country_id = self.forwarded.get('country')
        region_id = self.forwarded.get('region')
        if country_id:
            cities = cities.filter(country=Country.objects.get(id=country_id))
        if region_id:
            # A region implies its country, so filter on both for consistency.
            region = Region.objects.get(id=region_id)
            cities = cities.filter(region=region, country=region.country)
        if self.q:
            return get_autocomplete_location_query(cities, self.q)
        return cities
class TimeZoneAutocomplete(autocomplete.Select2ListView):
    """Timezone autocomplete, narrowed by any forwarded location fields."""

    def get_list(self):
        """Timezone list provided from pytz."""
        if not self.request.user.is_authenticated():
            return []
        return get_timezones_list()

    def get(self, request, *args, **kwargs):
        """Serve timezone choices, preferring zones of the forwarded location."""
        results = self.get_list()

        # The most specific forwarded value wins: city, then region,
        # then country.
        country_code = None
        city_id = self.forwarded.get('city')
        region_id = self.forwarded.get('region')
        country_id = self.forwarded.get('country')
        if city_id:
            country_code = City.objects.get(id=city_id).country.code2
        elif region_id:
            country_code = Region.objects.get(id=region_id).country.code2
        elif country_id:
            country_code = Country.objects.get(id=country_id).code2

        if country_code:
            results = country_timezones(country_code)

        if self.q:
            needle = self.q.lower()
            results = [tz for tz in results if needle in tz.lower()]

        return JsonResponse({
            'results': [{'id': tz, 'text': tz} for tz in results]
        })
| {
"repo_name": "mozilla/mozillians",
"path": "mozillians/users/views.py",
"copies": "2",
"size": "9979",
"license": "bsd-3-clause",
"hash": -1560303883583743200,
"line_mean": 32.7128378378,
"line_max": 98,
"alpha_frac": 0.5949493937,
"autogenerated": false,
"ratio": 4.0614570614570615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5656406455157061,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import or_
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.html import escape
from django.views import View
from tagging.models import Tag
from account.models import User
from account.permissions import is_admin_or_root
from blog.models import Blog
from contest.models import Contest
from problem.models import Problem
def query_user(kw):
    """Search active users by username substring; requires at least 3 chars."""
    results = []
    if kw and len(kw) >= 3:
        matches = (User.objects
                   .filter(username__icontains=kw, is_active=True)
                   .exclude(username__icontains='#')
                   .all().only('username')[:5])
        results = [dict(title=escape(user.username),
                        url=reverse('profile', kwargs=dict(pk=user.pk)))
                   for user in matches]
    return dict(name='User', results=results)
def get_problem_q_object(kw, all=False, managing=None):
    """Build a Q filter matching problems against the search keyword.

    Matches title/alias substrings (keyword length >= 2) and exact pk for
    numeric keywords. Unless ``all`` is True, results are restricted to
    visible problems; if ``managing`` is given, problems managed by that
    user also qualify. Returns None when the keyword yields no usable
    conditions. (``all`` shadows the builtin, but the name is part of the
    public signature and is kept for compatibility.)
    """
    if not kw:
        return None

    q_list = []
    if len(kw) >= 2:
        q_list.append(Q(title__icontains=kw))
        q_list.append(Q(alias__icontains=kw))
    if kw.isdigit():
        q_list.append(Q(pk__exact=kw))
    if not q_list:
        return None

    q = reduce(or_, q_list)

    priv_list = []
    if not all:
        priv_list.append(Q(visible=True))
    if managing:
        priv_list.append(Q(managers=managing))
    # BUG FIX: the original reduced priv_list unconditionally, which raised
    # TypeError (reduce of empty sequence) when all=True and no managing
    # user was supplied -- the admin search path. Only apply the privacy
    # restriction when there is one.
    if priv_list:
        q &= reduce(or_, priv_list)
    return q
def sorted_query(problems, kw):
    """Rank problems by keyword relevance and return the top five.

    Exact pk match scores 100, exact alias 50, exact title 30; ties keep
    their original order (sorted() is stable).
    """
    def relevance(problem):
        score = 0.0
        if str(problem.pk) == kw:
            score += 100
        if problem.alias == kw:
            score += 50
        if problem.title == kw:
            score += 30
        return score

    ranked = sorted(list(problems), key=relevance, reverse=True)
    return ranked[:5]
def query_problem(kw, all=False):
    """Return up to five matching problems as search-result dicts."""
    results = []
    q = get_problem_q_object(kw, all)
    if q:
        candidates = (Problem.objects
                      .defer("description", "input", "output", "hint")
                      .filter(q).distinct().all())
        results = [dict(title=escape(problem.title),
                        url=reverse('problem:detail', kwargs=dict(pk=problem.pk)))
                   for problem in sorted_query(candidates, kw)]
    return dict(name='Problem', results=results)
def query_blog(kw):
    """Return up to five visible blog posts whose title contains the keyword."""
    if not kw:
        return dict(name='Blog', results=[])
    hits = [dict(title=escape(blog.title),
                 url=reverse('blog:detail', kwargs={"pk": blog.pk}))
            for blog in Blog.objects.filter(title__icontains=kw, visible=True).all()[:5]]
    return dict(name='Blog', results=hits)
def query_contest(kw):
    """Return up to five public contests whose title contains the keyword."""
    if not kw:
        return dict(name='Contest', results=[])
    # access_level > 0 excludes private contests.
    hits = [dict(title=escape(contest.title),
                 url=reverse('contest:dashboard', kwargs={"cid": contest.pk}))
            for contest in Contest.objects.filter(title__icontains=kw,
                                                  access_level__gt=0).all()[:5]]
    return dict(name='Contest', results=hits)
def query_tag(kw):
    """Return up to five tags whose name contains the keyword."""
    if not kw:
        return dict(name='Tag', results=[])
    hits = [dict(title=escape(tag.name),
                 url=reverse('problem:list') + '?tag=%s' % tag.name)
            for tag in Tag.objects.filter(name__icontains=kw).all()[:5]]
    return dict(name='Tag', results=hits)
class SearchAPI(View):
    """Aggregate search endpoint: users, problems, tags, blogs and contests."""

    def get(self, request):
        kw = request.GET.get('kw')
        results = dict()
        if not kw:
            return JsonResponse(dict(results=results))

        results['user'] = query_user(kw)
        results['problem'] = query_problem(kw, all=is_admin_or_root(request.user))
        results['tag'] = query_tag(kw)
        results['blog'] = query_blog(kw)
        results['contest'] = query_contest(kw)
        action = {
            "url": reverse('search') + '?q=%s' % kw,
            "text": "View all results"
        }
        return JsonResponse(dict(results=results, action=action))
class SearchUserAPI(View):
    """JSON endpoint returning up to five users matching a username fragment."""

    def get(self, request):
        keyword = request.GET.get('kw')
        matches = []
        if keyword:
            contest_id = request.GET.get('contest', '')
            if contest_id.isdigit():
                # Restrict the search to a contest's participants.
                contest = get_object_or_404(Contest, pk=contest_id)
                candidates = contest.participants.filter(username__icontains=keyword)
            else:
                candidates = User.objects.filter(username__icontains=keyword,
                                                 is_active=True)
            matches = [dict(name=user.username, value=user.pk)
                       for user in candidates.only('username', 'pk')[:5]]
        return JsonResponse(dict(success=True, results=matches))
class SearchProblemAPI(View):
    """JSON endpoint returning up to five problems matching the keyword."""

    def get(self, request):
        kw = request.GET.get('kw')
        managing = request.user if request.GET.get('managing') else None
        q = get_problem_q_object(kw, is_admin_or_root(request.user), managing)

        matches = []
        if q:
            candidates = Problem.objects.filter(q).distinct().all()
            matches = [dict(name=str(problem), value=problem.pk)
                       for problem in sorted_query(candidates, kw)]
        return JsonResponse(dict(success=True, results=matches))
| {
"repo_name": "ultmaster/eoj3",
"path": "home/search_api.py",
"copies": "1",
"size": "4625",
"license": "mit",
"hash": 1188256850368374500,
"line_mean": 32.5144927536,
"line_max": 111,
"alpha_frac": 0.6534054054,
"autogenerated": false,
"ratio": 3.370991253644315,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45243966590443147,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from operator import xor
from docutils import nodes
from semantic_version import Version as StrictVersion, Spec
import six
class Version(StrictVersion):
    """
    Version subclass toggling ``partial=True`` by default.
    """

    def __init__(self, version_string, partial=True):
        # Identical to semantic_version's Version except that partial parsing
        # (e.g. accepting "1.2" with no patch component) is on by default.
        super(Version, self).__init__(version_string, partial)
# Issue type list (keys) + color values
# Values are hex RGB colors used when rendering each issue-type label.
ISSUE_TYPES = {"bug": "A04040", "feature": "40A056", "support": "4070A0"}
class Issue(nodes.Element):
    """A single changelog issue (bug/feature/support) node."""

    # Technically, we just need number, but heck, you never know...
    _cmp_keys = ("type", "number", "backported", "major")

    @property
    def type(self):
        # Stored under "type_" to avoid clashing with the builtin name.
        return self["type_"]

    @property
    def is_featurelike(self):
        # Major bugs are treated like features; feature/support issues are
        # feature-like unless explicitly backported.
        if self.type == "bug":
            return self.major
        else:
            return not self.backported

    @property
    def is_buglike(self):
        # Strict complement of is_featurelike.
        return not self.is_featurelike

    @property
    def backported(self):
        # Defaults to False when the attribute was never set.
        return self.get("backported", False)

    @property
    def major(self):
        # Defaults to False when the attribute was never set.
        return self.get("major", False)

    @property
    def number(self):
        # Issue tracker number, or None for unnumbered issues.
        return self.get("number", None)

    @property
    def spec(self):
        # Optional explicit version Spec limiting which lines this applies to.
        return self.get("spec", None)

    def __eq__(self, other):
        # Equality considers only _cmp_keys; other node attributes are ignored.
        for attr in self._cmp_keys:
            if getattr(self, attr, None) != getattr(other, attr, None):
                return False
        return True

    def __hash__(self):
        # XOR-fold the comparison-key hashes so equal issues hash alike.
        return reduce(xor, [hash(getattr(self, x)) for x in self._cmp_keys])

    def minor_releases(self, manager):
        """
        Return all minor release line labels found in ``manager``.
        """
        # TODO: yea deffo need a real object for 'manager', heh. E.g. we do a
        # very similar test for "do you have any actual releases yet?"
        # elsewhere. (This may be fodder for changing how we roll up
        # pre-major-release features though...?)
        return [
            key
            for key, value in six.iteritems(manager)
            if any(x for x in value if not x.startswith("unreleased"))
        ]

    def default_spec(self, manager):
        """
        Given the current release-lines structure, return a default Spec.
        Specifics:
        * For feature-like issues, only the highest major release is used, so
          given a ``manager`` with top level keys of ``[1, 2]``, this would
          return ``Spec(">=2")``.
        * When ``releases_always_forwardport_features`` is ``True``, that
          behavior is nullified, and this function always returns the empty
          ``Spec`` (which matches any and all versions/lines).
        * For bugfix-like issues, we only consider major release families which
          have actual releases already.
        * Thus the core difference here is that features are 'consumed' by
          upcoming major releases, and bugfixes are not.
        * When the ``unstable_prehistory`` setting is ``True``, the default
          spec starts at the oldest non-zero release line. (Otherwise, issues
          posted after prehistory ends would try being added to the 0.x part of
          the tree, which makes no sense in unstable-prehistory mode.)
        """
        # TODO: I feel like this + the surrounding bits in add_to_manager()
        # could be consolidated & simplified...
        specstr = ""
        # Make sure truly-default spec skips 0.x if prehistory was unstable.
        stable_families = manager.stable_families
        if manager.config.releases_unstable_prehistory and stable_families:
            specstr = ">={}".format(min(stable_families))
        if self.is_featurelike:
            # TODO: if app->config-><releases_always_forwardport_features or
            # w/e
            if True:
                specstr = ">={}".format(max(manager.keys()))
        else:
            # Can only meaningfully limit to minor release buckets if they
            # actually exist yet.
            buckets = self.minor_releases(manager)
            if buckets:
                specstr = ">={}".format(max(buckets))
        return Spec(specstr) if specstr else Spec()

    def add_to_manager(self, manager):
        """
        Given a 'manager' structure, add self to one or more of its 'buckets'.
        """
        # Derive version spec allowing us to filter against major/minor buckets
        spec = self.spec or self.default_spec(manager)
        # Only look in appropriate major version/family; if self is an issue
        # declared as living in e.g. >=2, this means we don't even bother
        # looking in the 1.x family.
        families = [Version(str(x)) for x in manager]
        versions = list(spec.filter(families))
        for version in versions:
            family = version.major
            # Within each family, we further limit which bugfix lines match up
            # to what self cares about (ignoring 'unreleased' until later)
            candidates = [
                Version(x)
                for x in manager[family]
                if not x.startswith("unreleased")
            ]
            # Select matching release lines (& stringify)
            buckets = []
            bugfix_buckets = [str(x) for x in spec.filter(candidates)]
            # Add back in unreleased_* as appropriate
            # TODO: probably leverage Issue subclasses for this eventually?
            if self.is_buglike:
                buckets.extend(bugfix_buckets)
                # Don't put into JUST unreleased_bugfix; it implies that this
                # major release/family hasn't actually seen any releases yet
                # and only exists for features to go into.
                if bugfix_buckets:
                    buckets.append("unreleased_bugfix")
            # Obtain list of minor releases to check for "haven't had ANY
            # releases yet" corner case, in which case ALL issues get thrown in
            # unreleased_feature for the first release to consume.
            # NOTE: assumes first release is a minor or major one,
            # but...really? why would your first release be a bugfix one??
            no_releases = not self.minor_releases(manager)
            if self.is_featurelike or self.backported or no_releases:
                buckets.append("unreleased_feature")
            # Now that we know which buckets are appropriate, add ourself to
            # all of them. TODO: or just...do it above...instead...
            for bucket in buckets:
                manager[family][bucket].append(self)

    def __repr__(self):
        # Render e.g. "<bug #123 (backported)>"; flag shows at most one of
        # backported / major / explicit spec, in that priority order.
        flag = ""
        if self.backported:
            flag = "backported"
        elif self.major:
            flag = "major"
        elif self.spec:
            flag = self.spec
        if flag:
            flag = " ({})".format(flag)
        return "<{issue.type} #{issue.number}{flag}>".format(
            issue=self, flag=flag
        )
class Release(nodes.Element):
    """A release node exposing convenience accessors for its version number."""

    @property
    def number(self):
        """Full version string, e.g. ``'1.2.3'``."""
        return self["number"]

    @property
    def minor(self):
        """Minor release line, e.g. ``'1.2'`` for ``'1.2.3'``."""
        # TODO: use Version
        parts = self.number.split(".")
        return ".".join(parts[:-1])

    @property
    def family(self):
        """Major version component as an int."""
        # TODO: use Version.major
        # TODO: and probs just rename to .major, 'family' is dumb tbh
        return int(self.number.partition(".")[0])

    def __repr__(self):
        return "<release {}>".format(self.number)
| {
"repo_name": "bitprophet/releases",
"path": "releases/models.py",
"copies": "1",
"size": "7419",
"license": "bsd-2-clause",
"hash": -5430787993442334000,
"line_mean": 35.7277227723,
"line_max": 79,
"alpha_frac": 0.5888933819,
"autogenerated": false,
"ratio": 4.30586186883343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5394755250733431,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from optparse import make_option
from dateutil.relativedelta import relativedelta
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils import timezone
from timepiece import utils
from timepiece.entries.models import Entry
class Command(BaseCommand):
    """
    Management command to check entries for overlapping times.
    Use ./manage.py check_entries --help for more details
    """
    # boiler plate for console programs using optparse
    args = '[<first or last name>] [<first or last name>] ...'
    help = ("Check the database for time entries that overlap.\n"
            "Use --help for options.")
    option_list = BaseCommand.option_list + (
        make_option('--thisweek',
                    action='store_true',
                    dest='week',
                    default=False,
                    help='Show entries from this week only'),
        make_option('--thismonth',
                    action='store_true',
                    dest='month',
                    default=False,
                    help='Show entries from this month only'),
        make_option('-y', '--thisyear',
                    action='store_true',
                    dest='year',
                    default=False,
                    help='Show entries from this year only'),
        make_option('-a', '--all', '--forever',
                    action='store_true',
                    dest='all',
                    default=False,
                    help='Show entries from all recorded history'),
        make_option('-d', '--days',
                    dest='days',
                    type='int',
                    default=0,
                    help='Show entries for the last n days only'),
    )

    def usage(self, subcommand):
        """Return the usage string displayed by --help."""
        usage = "python manage.py check_entries {} [options]\n\n{}".format(
            self.args, self.help)
        return usage

    def handle(self, *args, **kwargs):
        """Entry point: resolve users and window, then report overlap totals."""
        verbosity = kwargs.get('verbosity', 1)
        start = self.find_start(**kwargs)
        users = self.find_users(*args)
        self.show_init(start, *args, **kwargs)
        all_entries = self.find_entries(users, start, *args, **kwargs)
        all_overlaps = self.check_all(all_entries, *args, **kwargs)
        if verbosity >= 1:
            self.stdout.write('Total overlapping entries: %d' % all_overlaps)

    def check_all(self, all_entries, *args, **kwargs):
        """
        Go through lists of entries, find overlaps among each, return the total
        """
        all_overlaps = 0
        # BUG FIX: ``all_entries`` is a generator; the previous code called
        # ``all_entries.next()``, which only exists on Python 2 iterators and
        # raises AttributeError on Python 3. Plain iteration works on both
        # and removes the manual StopIteration handling.
        for user_entries in all_entries:
            all_overlaps += self.check_entry(user_entries, *args, **kwargs)
        return all_overlaps

    def check_entry(self, entries, *args, **kwargs):
        """
        With a list of entries, check each entry against every other
        """
        verbosity = kwargs.get('verbosity', 1)
        user_total_overlaps = 0
        user = ''
        for index_a, entry_a in enumerate(entries):
            # Show the name the first time through
            if index_a == 0:
                if args and verbosity >= 1 or verbosity >= 2:
                    self.show_name(entry_a.user)
                user = entry_a.user
            # NOTE(review): the inner range starts at index_a, so each entry
            # is also compared with itself; assumes Entry.check_overlap()
            # returns False for an identical entry -- confirm.
            for index_b in range(index_a, len(entries)):
                entry_b = entries[index_b]
                if entry_a.check_overlap(entry_b):
                    user_total_overlaps += 1
                    self.show_overlap(entry_a, entry_b, verbosity=verbosity)
        if user_total_overlaps and user and verbosity >= 1:
            overlap_data = {
                'first': user.first_name,
                'last': user.last_name,
                'total': user_total_overlaps,
            }
            self.stdout.write('Total overlapping entries for user ' +
                              '%(first)s %(last)s: %(total)d' % overlap_data)
        return user_total_overlaps

    def find_start(self, **kwargs):
        """
        Determine the starting point of the query using CLI keyword arguments
        """
        week = kwargs.get('week', False)
        month = kwargs.get('month', False)
        year = kwargs.get('year', False)
        days = kwargs.get('days', 0)
        # If no flags are True, set to the beginning of last billing window
        # to assure we catch all recent violations
        start = timezone.now() - relativedelta(months=1, day=1)
        # Set the start date based on arguments provided through options
        if week:
            start = utils.get_week_start()
        if month:
            start = timezone.now() - relativedelta(day=1)
        if year:
            start = timezone.now() - relativedelta(day=1, month=1)
        if days:
            start = timezone.now() - relativedelta(days=days)
        # Truncate to midnight so the window covers whole days.
        start -= relativedelta(hour=0, minute=0, second=0, microsecond=0)
        return start

    def find_users(self, *args):
        """
        Returns the users to search given names as args.
        Return all users if there are no args provided.
        """
        if args:
            # OR together a case-insensitive first/last-name match per arg.
            names = reduce(lambda query, arg: query |
                (Q(first_name__icontains=arg) | Q(last_name__icontains=arg)),
                args, Q())  # noqa
            users = User.objects.filter(names)
        # If no args given, check every user
        else:
            users = User.objects.all()
        # Display errors if no user was found
        if not users.count() and args:
            if len(args) == 1:
                raise CommandError('No user was found with the name %s' % args[0])
            else:
                arg_list = ', '.join(args)
                raise CommandError('No users found with the names: %s' % arg_list)
        return users

    def find_entries(self, users, start, *args, **kwargs):
        """
        Find all entries for all users, from a given starting point.
        If no starting point is provided, all entries are returned.

        Yields one queryset of entries per user.
        """
        forever = kwargs.get('all', False)
        for user in users:
            if forever:
                entries = Entry.objects.filter(user=user).order_by('start_time')
            else:
                entries = Entry.objects.filter(
                    user=user, start_time__gte=start).order_by(
                    'start_time')
            yield entries

    # output methods
    def show_init(self, start, *args, **kwargs):
        """Print which time window is being checked."""
        forever = kwargs.get('all', False)
        verbosity = kwargs.get('verbosity', 1)
        if forever:
            if verbosity >= 1:
                self.stdout.write(
                    'Checking overlaps from the beginning of time')
        else:
            if verbosity >= 1:
                self.stdout.write(
                    'Checking overlap starting on: ' + start.strftime('%m/%d/%Y'))

    def show_name(self, user):
        """Print the name of the user currently being checked."""
        self.stdout.write('Checking %s %s...' % (user.first_name, user.last_name))

    def show_overlap(self, entry_a, entry_b=None, **kwargs):
        """Print a human-readable description of one overlapping pair."""
        def make_output_data(entry):
            # Flatten the fields needed by the message templates below.
            return {
                'first_name': entry.user.first_name,
                'last_name': entry.user.last_name,
                'entry': entry.id,
                'start': entry.start_time,
                'end': entry.end_time,
                'project': entry.project
            }
        data_a = make_output_data(entry_a)
        if entry_b:
            data_b = make_output_data(entry_b)
            output = ('Entry %(entry)d for %(first_name)s %(last_name)s from '
                      '%(start)s to %(end)s on %(project)s overlaps ' % data_a +
                      'entry %(entry)d from %(start)s to %(end)s on '
                      '%(project)s.' % data_b)
        else:
            output = ('Entry %(entry)d for %(first_name)s %(last_name)s from '
                      '%(start)s to %(end)s on %(project)s overlaps '
                      'with another entry.' % data_a)
        if kwargs.get('verbosity', 1):
            self.stdout.write(output)
| {
"repo_name": "caktus/django-timepiece",
"path": "timepiece/management/commands/check_entries.py",
"copies": "1",
"size": "8286",
"license": "mit",
"hash": -1305716946856635000,
"line_mean": 38.4571428571,
"line_max": 82,
"alpha_frac": 0.5300506879,
"autogenerated": false,
"ratio": 4.361052631578947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5391103319478947,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from optparse import OptionParser, make_option
from dateutil.relativedelta import relativedelta
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils import timezone
from timepiece import utils
from timepiece.entries.models import Entry
class Command(BaseCommand):
    """
    Management command to check entries for overlapping times.
    Use ./manage.py check_entries --help for more details
    """
    # boiler plate for console programs using optparse
    args = '<user\'s first or last name or user.id> <user\'s first...>...'
    help = """Check the database for entries that overlap.
    Use --help for options"""
    parser = OptionParser()
    parser.usage += """
./manage.py check_entries [<first or last name1> <name2>...<name n>] [OPTIONS]
For options type:
./manage.py check_entries --help
"""

    def make_options(self, *args, **kwargs):
        """
        Define the arguments that can be used with this command
        """
        return (
            # Jenkins arguments to ignore
            make_option('--pep8-exclude',
                        dest='ignore_pep8',
                        type='str',
                        default='',
                        help='Jenkins only'),
            make_option('--coverage-exclude',
                        dest='ignore_coverage',
                        type='str',
                        default='',
                        help='Jenkins only'),
            make_option('--thisweek',
                        action='store_true',
                        dest='week',
                        default=False,
                        help='Show entries from this week only'),
            make_option('--thismonth',
                        action='store_true',
                        dest='month',
                        default=False,
                        help='Show entries from this month only'),
            make_option('-y', '--thisyear',
                        action='store_true',
                        dest='year',
                        default=False,
                        help='Show entries from this year only'),
            make_option('-a', '--all', '--forever',
                        action='store_true',
                        dest='all',
                        default=False,
                        help='Show entries from all recorded history'),
            make_option('-d', '--days',
                        dest='days',
                        type='int',
                        default=0,
                        help='Show entries for the last n days only'),
        )

    # NOTE(review): these three statements execute at class-definition
    # (import) time and parse sys.argv through a class-level OptionParser;
    # this module-level side effect is preserved for compatibility, but it
    # makes the module unsafe to import outside manage.py -- confirm.
    option_list = BaseCommand.option_list + make_options(*args)
    parser.add_options(option_list)
    (options, args) = parser.parse_args()

    def handle(self, *args, **kwargs):
        """
        main()
        """
        verbosity = kwargs.get('verbosity', 1)
        start = self.find_start(**kwargs)
        users = self.find_users(*args)
        self.show_init(start, *args, **kwargs)
        all_entries = self.find_entries(users, start, *args, **kwargs)
        all_overlaps = self.check_all(all_entries, *args, **kwargs)
        if verbosity >= 1:
            self.stdout.write('Total overlapping entries: %d' % all_overlaps)

    def check_all(self, all_entries, *args, **kwargs):
        """
        Go through lists of entries, find overlaps among each, return the total
        """
        all_overlaps = 0
        # BUG FIX: ``all_entries`` is a generator; the previous code called
        # ``all_entries.next()``, which only exists on Python 2 iterators and
        # raises AttributeError on Python 3. Plain iteration works on both
        # and removes the manual StopIteration handling.
        for user_entries in all_entries:
            all_overlaps += self.check_entry(user_entries, *args, **kwargs)
        return all_overlaps

    def check_entry(self, entries, *args, **kwargs):
        """
        With a list of entries, check each entry against every other
        """
        verbosity = kwargs.get('verbosity', 1)
        user_total_overlaps = 0
        user = ''
        for index_a, entry_a in enumerate(entries):
            # Show the name the first time through
            if index_a == 0:
                if args and verbosity >= 1 or verbosity >= 2:
                    self.show_name(entry_a.user)
                user = entry_a.user
            # NOTE(review): the inner range starts at index_a, so each entry
            # is also compared with itself; assumes Entry.check_overlap()
            # returns False for an identical entry -- confirm.
            for index_b in range(index_a, len(entries)):
                entry_b = entries[index_b]
                if entry_a.check_overlap(entry_b):
                    user_total_overlaps += 1
                    self.show_overlap(entry_a, entry_b, verbosity=verbosity)
        if user_total_overlaps and user and verbosity >= 1:
            overlap_data = {
                'first': user.first_name,
                'last': user.last_name,
                'total': user_total_overlaps,
            }
            self.stdout.write('Total overlapping entries for user ' +
                              '%(first)s %(last)s: %(total)d' % overlap_data)
        return user_total_overlaps

    def find_start(self, **kwargs):
        """
        Determine the starting point of the query using CLI keyword arguments
        """
        week = kwargs.get('week', False)
        month = kwargs.get('month', False)
        year = kwargs.get('year', False)
        days = kwargs.get('days', 0)
        # If no flags are True, set to the beginning of last billing window
        # to assure we catch all recent violations
        start = timezone.now() - relativedelta(months=1, day=1)
        # Set the start date based on arguments provided through options
        if week:
            start = utils.get_week_start()
        if month:
            start = timezone.now() - relativedelta(day=1)
        if year:
            start = timezone.now() - relativedelta(day=1, month=1)
        if days:
            start = timezone.now() - relativedelta(days=days)
        # Truncate to midnight so the window covers whole days.
        start -= relativedelta(hour=0, minute=0, second=0, microsecond=0)
        return start

    def find_users(self, *args):
        """
        Returns the users to search given names as args.
        Return all users if there are no args provided.
        """
        if args:
            # OR together a case-insensitive first/last-name match per arg.
            names = reduce(lambda query, arg: query |
                (Q(first_name__icontains=arg) | Q(last_name__icontains=arg)),
                args, Q())  # noqa
            users = User.objects.filter(names)
        # If no args given, check every user
        else:
            users = User.objects.all()
        # Display errors if no user was found
        if not users.count() and args:
            if len(args) == 1:
                raise CommandError('No user was found with the name %s' % args[0])
            else:
                arg_list = ', '.join(args)
                raise CommandError('No users found with the names: %s' % arg_list)
        return users

    def find_entries(self, users, start, *args, **kwargs):
        """
        Find all entries for all users, from a given starting point.
        If no starting point is provided, all entries are returned.

        Yields one queryset of entries per user.
        """
        forever = kwargs.get('all', False)
        for user in users:
            if forever:
                entries = Entry.objects.filter(user=user).order_by('start_time')
            else:
                entries = Entry.objects.filter(
                    user=user, start_time__gte=start).order_by(
                    'start_time')
            yield entries

    # output methods
    def show_init(self, start, *args, **kwargs):
        """Print which time window is being checked."""
        forever = kwargs.get('all', False)
        verbosity = kwargs.get('verbosity', 1)
        if forever:
            if verbosity >= 1:
                self.stdout.write(
                    'Checking overlaps from the beginning of time')
        else:
            if verbosity >= 1:
                self.stdout.write(
                    'Checking overlap starting on: ' + start.strftime('%m/%d/%Y'))

    def show_name(self, user):
        """Print the name of the user currently being checked."""
        self.stdout.write('Checking %s %s...' % (user.first_name, user.last_name))

    def show_overlap(self, entry_a, entry_b=None, **kwargs):
        """Print a human-readable description of one overlapping pair."""
        def make_output_data(entry):
            # Flatten the fields needed by the message templates below.
            return {
                'first_name': entry.user.first_name,
                'last_name': entry.user.last_name,
                'entry': entry.id,
                'start': entry.start_time,
                'end': entry.end_time,
                'project': entry.project
            }
        data_a = make_output_data(entry_a)
        if entry_b:
            data_b = make_output_data(entry_b)
            output = ('Entry %(entry)d for %(first_name)s %(last_name)s from '
                      '%(start)s to %(end)s on %(project)s overlaps ' % data_a +
                      'entry %(entry)d from %(start)s to %(end)s on '
                      '%(project)s.' % data_b)
        else:
            output = ('Entry %(entry)d for %(first_name)s %(last_name)s from '
                      '%(start)s to %(end)s on %(project)s overlaps '
                      'with another entry.' % data_a)
        if kwargs.get('verbosity', 1):
            self.stdout.write(output)
| {
"repo_name": "BocuStudio/django-timepiece",
"path": "timepiece/management/commands/check_entries.py",
"copies": "2",
"size": "9169",
"license": "mit",
"hash": 8698935303657345000,
"line_mean": 38.3519313305,
"line_max": 82,
"alpha_frac": 0.5172865089,
"autogenerated": false,
"ratio": 4.457462323772484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00031280742106879275,
"num_lines": 233
} |
from functools import reduce
from os import path
from collections import Mapping, MutableSequence
import keyword
import logging
import yaml
import numpy as np
from pkg_resources import resource_filename
from .. import geometry as geom
from .validator import Validator
class FrozenJSON(object):
    """A facade for navigating a JSON-like object
    using attribute notation. Based on FrozenJSON from 'Fluent Python'
    """

    @classmethod
    def from_yaml(cls, path_to_file):
        """Build a FrozenJSON from a YAML file, remembering the file path."""
        # load config file
        with open(path_to_file) as f:
            # NOTE(review): yaml.load without an explicit Loader can run
            # arbitrary constructors on untrusted input (and is deprecated in
            # newer PyYAML); consider yaml.safe_load if the file may be
            # untrusted.
            mapping = yaml.load(f)

        obj = cls(mapping)
        # save path for reference, helps debugging
        obj._path_to_file = path_to_file

        logger = logging.getLogger(__name__)
        logger.debug('Loaded from file: {}'.format(obj._path_to_file))

        return obj

    def __new__(cls, arg):
        # Mappings wrap as FrozenJSON, sequences wrap element-wise, and any
        # other value is returned untouched.
        if isinstance(arg, Mapping):
            return super(FrozenJSON, cls).__new__(cls)
        elif isinstance(arg, MutableSequence):
            return [cls(item) for item in arg]
        else:
            return arg

    def __init__(self, mapping):
        self._logger = logging.getLogger(__name__)
        self._logger.debug('Loaded with params: {}'.format(mapping))

        self._path_to_file = None
        self._data = {}
        for key, value in mapping.items():
            # Python keywords ('class', 'if', ...) get a trailing underscore
            # so they stay reachable via attribute access.
            if keyword.iskeyword(key):
                key += '_'
            self._data[key] = value

    def __getattr__(self, name):
        # Prefer real dict attributes (keys, items, ...); otherwise treat the
        # name as a key and wrap the value recursively.
        if hasattr(self._data, name):
            return getattr(self._data, name)
        else:
            return FrozenJSON(self._data[name])

    def __dir__(self):
        return self._data.keys()

    def __getitem__(self, key):
        # BUG FIX: use a membership test rather than comparing the stored
        # value with None -- the previous check wrongly reported keys whose
        # value is legitimately None as missing. Also fixed the missing
        # spaces around the path in the error message.
        if key not in self._data:
            raise ValueError('No value was set in Config {} for key "{}", '
                             'available keys are: {}'
                             .format(self._path_to_file, key,
                                     self._data.keys()))
        return self._data[key]

    def __repr__(self):
        if self._path_to_file:
            return ('YASS config file loaded from: {}'
                    .format(self._path_to_file))
        else:
            return 'YASS config file loaded with: {}'.format(self._data)
class Config(FrozenJSON):
    """
    A configuration object for the package, it is a read-only FrozenJSON that
    inits from a yaml file with some caching capabilities to avoid
    redundant and common computations
    Notes
    -----
    After initialization, attributes cannot be changed
    """

    def __init__(self, mapping):
        # Validate the raw mapping against the bundled schema before freezing.
        mapping = self._validate(mapping)
        super(Config, self).__init__(mapping)
        self._logger = logging.getLogger(__name__)
        # init the rest of the parameters, these parameters are used
        # throughout the pipeline so we compute them once to avoid redundant
        # computations
        # GEOMETRY PARAMETERS
        path_to_geom = path.join(self.data.root_folder, self.data.geometry)
        self._set_param('geom', geom.parse(path_to_geom, self.recordings.n_channels))
        neighChannels = geom.find_channel_neighbors(self.geom,
                                                    self.recordings.spatial_radius)
        self._set_param('neighChannels', neighChannels)
        channelGroups = geom.make_channel_groups(self.recordings.n_channels,
                                                 self.neighChannels,
                                                 self.geom)
        self._set_param('channelGroups', channelGroups)
        self._logger.debug('Geometry parameters. Geom: {}, neighChannels: '
                           '{}, channelGroups {}'
                           .format(self.geom, self.neighChannels,
                                   self.channelGroups))
        # FIXME: REMOVE BATCH RELATED BELOW.
        # THIS IS NOW DONE IN BATCH PROCESSOR
        # BUFFER/SPIKE SIZE PARAMETERS
        # compute spikeSize which is the number of observations for half
        # the waveform
        self._set_param('spikeSize',
                        int(np.round(self.recordings.spike_size_ms*self.recordings.sampling_rate/(2*1000))))
        self._set_param('scaleToSave', 100)
        self._set_param('BUFF', self.spikeSize*4)
        self._set_param('templatesMaxShift', int(self.recordings.sampling_rate/1000))
        self._set_param('stdFactor', 4)
        # 'size' = samples per channel, derived from the raw file size.
        # NOTE(review): assumes the recording file is a flat interleaved
        # binary of n_channels * dtype-sized samples -- confirm.
        file_size = path.getsize(path.join(self.data.root_folder, self.data.recordings))
        # seems unused...
        self._set_param('size', int(file_size/(sizeof(self.recordings.dtype)*self.recordings.n_channels)))
        # BATCH PARAMETERS
        self._set_param('dsize', sizeof(self.recordings.dtype))
        # Largest number of per-channel samples that fits in max_memory.
        batch_size = int(np.floor(self.resources.max_memory/(self.recordings.n_channels*self.dsize)))
        if batch_size > self.size:
            # Whole recording fits in one batch.
            self._set_param('nBatches', 1)
            self._set_param('batch_size', self.size)
            self._set_param('residual', 0)
            self._set_param('nPortion', 1)
        else:
            nBatches = int(np.ceil(float(self.size)/batch_size))
            self._set_param('nBatches', nBatches)
            self._set_param('batch_size', batch_size)
            # Leftover samples in the final, shorter batch.
            self._set_param('residual', self.size % batch_size)
            self._set_param('nPortion', np.ceil(self.preprocess.templates_partial_data*self.nBatches))
        self._logger.debug('Computed params: spikeSize: {}, scaleToSave: {}, '
                           'BUFF: {}, templatesMaxShift: {}, stdFactor: {}, '
                           'size: {}, dsize: {}, nBatches: {}, batch_size: {}'
                           ', residual: {}, nPortion: {}'
                           .format(self.spikeSize, self.scaleToSave,
                                   self.BUFF, self.templatesMaxShift,
                                   self.stdFactor, self.size,
                                   self.dsize, self.nBatches,
                                   self.batch_size, self.residual,
                                   self.nPortion))

    def __setattr__(self, name, value):
        # Read-only facade: only private (underscore) attributes may be set
        # after construction.
        if not name.startswith('_'):
            raise AttributeError('Cannot set values once the object has '
                                 'been initialized')
        else:
            self.__dict__[name] = value

    def _set_param(self, name, value):
        """
        Internal setattr method to set new parameters, only used to fill the
        parameters that need to be computed *right after* initialization
        """
        self._data[name] = value

    def _validate(self, mapping):
        """Validate values in the input dictionary
        """
        # Schema ships with the package under assets/config/validator.yaml.
        path_to_validator = resource_filename('yass',
                                              'assets/config/validator.yaml')
        with open(path_to_validator) as f:
            validator_content = yaml.load(f)
        validator = Validator(mapping, **validator_content)
        mapping = validator.validate()
        return mapping

    def _pretty_iterator(self, it):
        # NOTE(review): equivalent to ', '.join(it) for string iterables.
        return reduce(lambda x, y: x+', '+y, it)
def sizeof(dtype):
    """Return the number of bytes used by one sample of ``dtype``.

    Only the dtype names used for recordings are supported
    ('int16', 'uint16', 'single', 'double'); an unknown name raises
    KeyError, matching the behavior callers rely on.
    """
    bytes_per_sample = {'int16': 2,
                        'uint16': 2,
                        'single': 4,
                        'double': 8}
    return bytes_per_sample[dtype]
| {
"repo_name": "jinhyunglee/yass",
"path": "src/yass/config/config.py",
"copies": "1",
"size": "7292",
"license": "apache-2.0",
"hash": -760875185414757900,
"line_mean": 34.3980582524,
"line_max": 108,
"alpha_frac": 0.5555403182,
"autogenerated": false,
"ratio": 4.264327485380117,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004098509546906021,
"num_lines": 206
} |
from functools import reduce
from os import system
from os.path import exists
import requests
from xlrd import open_workbook
from django.conf import settings
from django.db.models import Q
from django.db.utils import IntegrityError
from django.core.management.base import BaseCommand, CommandError
from hospitals.models import Hospital, HospitalLocation, HospitalType
class Command(BaseCommand):
    """Ingest the latest Bulstrad PDF list of supported hospitals.

    Pipeline: optionally download the PDF and convert it to XLSX via the
    pdftables.com API, then parse hospital types, locations and
    hospitals from the spreadsheet and upsert them into the database.
    """
    help = 'Ingests the latest PDF list with supported hospitals'
    API_KEY = settings.PDF_TABLES_API_KEY
    DOWNLOAD_URL = 'http://www.bulstradlife.bg/uploads/%D0%9E%D0%9A_BLVIG_Spisak_lechebni_zavedenia.pdf'
    # pdftables.com conversion endpoint (PDF in, XLSX out).
    CONVERT_URL = 'https://pdftables.com/api?key={0}&format=xlsx'.format(API_KEY)
    PDF_FILENAME = 'bulstrad.pdf'
    XLSX_FILENAME = 'bulstrad.xlsx'
    # Streaming download chunk size, in bytes.
    CHUNK_SIZE = 4096
    def add_arguments(self, parser):
        # --download=true forces a fresh PDF download + conversion.
        parser.add_argument('--download', default=False, type=bool)
    def handle(self, **options):
        """Entry point: ensure a local XLSX exists, then ingest it."""
        if options.get('download') is True:
            self.__download_file(self.DOWNLOAD_URL, self.PDF_FILENAME)
            self.__download_converted_xlsx()
        if not exists(self.XLSX_FILENAME):
            command_msg = 'No local file found. Download one by invoking \
the command with `--download=true`'
            raise CommandError(command_msg)
        # Types first, since hospitals reference them by name.
        type_data = self.__extract_types()
        self.__populate_types(type_data)
        self.__create_missing_types()
        data = self.__get_xlsx_data()
        self.__populate_hospitals_and_locations(data)
    def __populate_types(self, data):
        # Create one HospitalType per extracted name, skipping duplicates.
        for type_name in data:
            try:
                hospital_type = self.__populate_type(type_name)
                self.stdout.write('Created {0}'.format(hospital_type))
            except IntegrityError:
                print('Duplicate record skipped')
    def __create_missing_types(self):
        # Type abbreviations referenced by hospitals but absent from the
        # PDF's own types table.
        for name in ('АИПСИМП', 'СБАЛК', 'ААГПСМП', 'МОБАЛ'):
            try:
                HospitalType.objects.create(name=name)
            except IntegrityError:
                print('Duplicate record skipped')
    def __populate_hospitals_and_locations(self, data):
        # ``data`` is six parallel column lists; zip turns them into rows.
        for location, name, address, out_of_hospital_help, hospital_help, laboratory_help in zip(*data):
            try:
                hospital_type = self.__find_type(name)
                location = self.__populate_location(location)
                hospital = self.__populate_hospital(name, location, address,
                                                    self.__is_checked(out_of_hospital_help),
                                                    self.__is_checked(hospital_help),
                                                    self.__is_checked(laboratory_help),
                                                    hospital_type)
                self.stdout.write('Created {0}'.format(hospital))
            except IntegrityError:
                print('Duplicate record skipped')
    def __find_type(self, name):
        """Best-effort match of a hospital ``name`` to a HospitalType.

        ORs together a ``name__contains`` query per word of ``name`` and
        returns the first matching type, or None when nothing matches.
        """
        if not name:
            return None
        condition = reduce(lambda x, y: x | y, [Q(name__contains=word) for word in name.split()])
        types = HospitalType.objects.filter(condition)
        return types[0] if len(types) > 0 else None
    def __is_checked(self, field):
        # An 'X' cell in the spreadsheet marks a supported service.
        return field == 'X'
    def __download_file(self, url, filename, **kwargs):
        # Stream ``url`` to ``filename`` in CHUNK_SIZE pieces.
        r = requests.get(url, stream=True)
        with open(filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=self.CHUNK_SIZE):
                if chunk:
                    f.write(chunk)
                    f.flush()
    def __download_converted_xlsx(self):
        # it's weird, but it's faster than
        # POSTing with requests and properly piping data
        system('curl -F f=@{0} "{1}" > {2}'.format(self.PDF_FILENAME,
                                                   self.CONVERT_URL,
                                                   self.XLSX_FILENAME))
    def __extract_types(self):
        """Pull hospital-type names from the first sheet's header area."""
        workbook = open_workbook(self.XLSX_FILENAME)
        sheet = workbook.sheet_by_index(0)
        # Hospital Types:
        # rows 2..10 to pass the header and blank entries
        types = sheet.col_values(2, 2, 10)
        types += sheet.col_values(3, 2, 7)
        # handle the messed up (reverse) descriptions and names
        types_names = sheet.col_values(4, 6, 9)
        types_descriptions = sheet.col_values(3, 7, 10)
        for type_name, type_description in zip(types_names, types_descriptions):
            types.append('{0}{1}'.format(type_name, type_description))
        # handle two-line descriptions: merge each pair into one entry
        types[0] = '{0} {1}'.format(types[0], types.pop(1))
        types[1] = '{0} {1}'.format(types[1], types.pop(2))
        types[3] = '{0} {1}'.format(types[3], types.pop(4))
        return types
    def __extract_first_sheet(self, sheet):
        # rows 13..-1: skip the header rows at the top and the trailing
        # non-data row. NOTE(review): the old comment said "-4" but the
        # code slices to -1 -- verify against the actual spreadsheet.
        locations = sheet.col_values(1, 13, -1)
        names = sheet.col_values(2, 13, -1)
        addresses = sheet.col_values(3, 13, -1)
        out_of_hospital_helps = sheet.col_values(4, 13, -1)
        hospital_helps = sheet.col_values(5, 13, -1)
        laboratory_helps = sheet.col_values(6, 13, -1)
        return [locations, names, addresses,
                out_of_hospital_helps, hospital_helps, laboratory_helps]
    def __extract_middle_sheet(self, sheet):
        # rows 3..-1 to pass the first non-locations entries
        # and to remove the last non-locations entry
        locations = sheet.col_values(1, 3, -1)
        names = sheet.col_values(2, 3, -1)
        addresses = sheet.col_values(3, 3, -1)
        out_of_hospital_helps = sheet.col_values(4, 3, -1)
        hospital_helps = sheet.col_values(5, 3, -1)
        laboratory_helps = sheet.col_values(6, 3, -1)
        return [locations, names, addresses,
                out_of_hospital_helps, hospital_helps, laboratory_helps]
    def __extract_shifted_middle_sheet(self, sheet):
        # Same as __extract_middle_sheet but the data starts one column
        # to the left (columns 0..5 instead of 1..6).
        locations = sheet.col_values(0, 3, -1)
        names = sheet.col_values(1, 3, -1)
        addresses = sheet.col_values(2, 3, -1)
        out_of_hospital_helps = sheet.col_values(3, 3, -1)
        hospital_helps = sheet.col_values(4, 3, -1)
        laboratory_helps = sheet.col_values(5, 3, -1)
        return [locations, names, addresses,
                out_of_hospital_helps, hospital_helps, laboratory_helps]
    def __extract_last_sheet(self, sheet):
        # rows 3..-4: skip header rows and the 4 trailing non-data rows.
        # NOTE(review): the old comment said "-1" but the code slices to
        # -4 -- verify against the actual spreadsheet.
        locations = sheet.col_values(1, 3, -4)
        names = sheet.col_values(2, 3, -4)
        addresses = sheet.col_values(3, 3, -4)
        out_of_hospital_helps = sheet.col_values(4, 3, -4)
        hospital_helps = sheet.col_values(5, 3, -4)
        laboratory_helps = sheet.col_values(6, 3, -4)
        return [locations, names, addresses,
                out_of_hospital_helps, hospital_helps, laboratory_helps]
    def __get_xlsx_data(self):
        """Concatenate the column data of every sheet in the workbook."""
        workbook = open_workbook(self.XLSX_FILENAME)
        data = self.__extract_first_sheet(workbook.sheet_by_index(0))
        # Sheets 1-2 use the normal middle-sheet layout.
        for i in range(1, 3):
            sheet = workbook.sheet_by_index(i)
            new_data = self.__extract_middle_sheet(sheet)
            data = self.__flatten_data(data, new_data)
        last_sheet_index = workbook.nsheets - 1
        # Sheets 3..last-1 are shifted one column left.
        for i in range(3, last_sheet_index):
            sheet = workbook.sheet_by_index(i)
            new_data = self.__extract_shifted_middle_sheet(sheet)
            data = self.__flatten_data(data, new_data)
        last_sheet = workbook.sheet_by_index(last_sheet_index)
        new_data = self.__extract_last_sheet(last_sheet)
        return self.__flatten_data(data, new_data)
    def __flatten_data(self, accumulated_data, new_data):
        # Extend each accumulated column with the matching new column.
        for i, (old_column, new_column) in enumerate(zip(accumulated_data, new_data)):
            old_column.extend(new_column)
            accumulated_data[i] = old_column
        return accumulated_data
    def __populate_hospital(self, name, location, address, out_of_hospital_help,
                            hospital_help, laboratory_help, hospital_type):
        # get_or_create keeps re-runs idempotent per unique hospital row.
        return Hospital.objects.get_or_create(name=name,
                                              location=location,
                                              address=address,
                                              out_of_hospital_help=out_of_hospital_help,
                                              hospital_help=hospital_help,
                                              laboratory_help=laboratory_help,
                                              type=hospital_type)[0]
    def __populate_location(self, name):
        return HospitalLocation.objects.get_or_create(name=name)[0]
    def __populate_type(self, name):
        return HospitalType.objects.get_or_create(name=name)[0]
| {
"repo_name": "syndbg/django-bulstrad",
"path": "hospitals/management/commands/ingest.py",
"copies": "1",
"size": "9093",
"license": "mit",
"hash": -4684034911744370000,
"line_mean": 41.7783018868,
"line_max": 104,
"alpha_frac": 0.576469291,
"autogenerated": false,
"ratio": 3.5803395183576785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46568088093576787,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from pprint import pformat
from six import viewkeys
from six.moves import map, zip
from toolz import curry, flip
from .sentinel import sentinel
@curry
def apply(f, *args, **kwargs):
    """Invoke ``f`` with the supplied positional and keyword arguments.

    Parameters
    ----------
    f : callable
        The callable to invoke.
    *args, **kwargs
        Arguments forwarded to ``f``.

    Returns
    -------
    a : any
        The result of ``f(*args, **kwargs)``.

    Examples
    --------
    >>> from toolz.curried.operator import add, sub
    >>> fs = add(1), sub(1)
    >>> tuple(map(apply, fs, (1, 2)))
    (2, -1)

    Because a class statement is itself a call of the metaclass, this
    also works as a class decorator that replaces the class with a
    (no-argument constructed) instance of it -- see ``instance`` below.

    See Also
    --------
    unpack_apply
    mapply
    """
    return f(*args, **kwargs)
# Alias so ``apply`` reads naturally as a class decorator:
# ``@instance`` replaces the decorated class with an instance of it.
instance = apply
def mapall(funcs, seq):
    """Map each function in ``funcs`` over ``seq``, concatenating results.

    Parameters
    ----------
    funcs : iterable[function]
        Functions to map, in order.
    seq : iterable
        Sequence each function is mapped over.

    Yields
    ------
    elem : object
        Results of ``funcs[0]`` over ``seq``, then ``funcs[1]``, etc.

    Example
    -------
    >>> list(mapall([lambda x: x + 1, lambda x: x - 1], [1, 2, 3]))
    [2, 3, 4, 0, 1, 2]
    """
    for transform in funcs:
        for result in map(transform, seq):
            yield result
def same(*values):
    """Return True when every argument equals the first.

    Vacuously True for an empty argument list.

    Example
    -------
    >>> same(1, 1, 1, 1)
    True
    >>> same(1, 2, 1)
    False
    >>> same()
    True
    """
    if not values:
        return True
    benchmark = values[0]
    return all(other == benchmark for other in values[1:])
def _format_unequal_keys(dicts):
    """Pretty-print the sorted key list of each dict, for error messages."""
    key_lists = [sorted(d.keys()) for d in dicts]
    return pformat(key_lists)
def dzip_exact(*dicts):
    """Zip several dicts that all share exactly the same key set.

    Parameters
    ----------
    *dicts : iterable[dict]
        Dicts sharing identical keys.

    Returns
    -------
    zipped : dict
        Maps each key to a len(dicts)-tuple of the corresponding values
        from every input dict.

    Raises
    ------
    ValueError
        If the key sets are not all identical.

    Example
    -------
    >>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4})
    >>> result == {'a': (1, 3), 'b': (2, 4)}
    True
    """
    if not same(*map(viewkeys, dicts)):
        raise ValueError(
            "dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts)
        )
    first = dicts[0]
    return {key: tuple(d[key] for d in dicts) for key in first}
def _gen_unzip(it, elem_len):
    """Yield the tuples of ``it``, checking each has length ``elem_len``.

    Parameters
    ----------
    it : iterable[tuple]
        Iterator of tuples (``unzip`` ensures they are tuples already).
    elem_len : int or None
        Expected tuple length; when None it is inferred from the first
        element.

    Yields
    ------
    elem : tuple
        Each element of ``it``.

    Raises
    ------
    ValueError
        When any element's length does not match ``elem_len``.
    """
    first = next(it)
    inferred_len = len(first)
    if elem_len is not None and elem_len != inferred_len:
        raise ValueError(
            'element at index 0 was length %d, expected %d' % (
                inferred_len,
                elem_len,
            )
        )
    elem_len = inferred_len
    yield first
    for index, elem in enumerate(it, 1):
        if len(elem) != elem_len:
            raise ValueError(
                'element at index %d was length %d, expected %d' % (
                    index,
                    len(elem),
                    elem_len,
                )
            )
        yield elem
def unzip(seq, elem_len=None):
    """Transpose a length-n sequence of length-m sequences into m
    length-n tuples.

    Parameters
    ----------
    seq : iterable[iterable]
        The sequence to unzip; it is consumed entirely.
    elem_len : int, optional
        Expected length of each inner sequence. When omitted it is
        inferred from the first element, in which case ``seq`` must be
        non-empty.

    Returns
    -------
    seqs : iterable[iterable]
        The transposed tuples.

    Raises
    ------
    ValueError
        When ``seq`` is empty and ``elem_len`` is not given, or when an
        inner sequence's length disagrees with ``elem_len`` / the first
        element's length.

    Examples
    --------
    >>> cs, ns = unzip([('a', 1), ('b', 2), ('c', 3)])
    >>> cs
    ('a', 'b', 'c')
    >>> ns
    (1, 2, 3)
    >>> cs, ns = unzip([], elem_len=2)
    >>> cs == ns == ()
    True
    """
    transposed = tuple(zip(*_gen_unzip(map(tuple, seq), elem_len)))
    if transposed:
        return transposed
    # Empty input: we can only produce the right number of empty tuples
    # if the caller told us how many to make.
    if elem_len is None:
        raise ValueError("cannot unzip empty sequence without 'elem_len'")
    return ((),) * elem_len
# Module-private marker meaning "no default supplied", used by
# ``getattrs`` and ``foldr`` below to distinguish an omitted default
# from an explicit None.
_no_default = sentinel('_no_default')
def getattrs(value, attrs, default=_no_default):
    """Chained ``getattr``: look up each name in ``attrs`` in turn.

    Parameters
    ----------
    value : object
        Root of the lookup chain.
    attrs : iterable[str]
        Attribute names to resolve, left to right.
    default : object, optional
        Returned if any lookup fails; when omitted the AttributeError
        propagates.

    Returns
    -------
    result : object
        The final resolved attribute (or ``default``).

    Example
    -------
    >>> class EmptyObject(object):
    ...     pass
    ...
    >>> obj = EmptyObject()
    >>> obj.foo = EmptyObject()
    >>> obj.foo.bar = "value"
    >>> getattrs(obj, ('foo', 'bar'))
    'value'
    >>> getattrs(obj, ('foo', 'buzz'), 'default')
    'default'
    """
    result = value
    for attr in attrs:
        try:
            result = getattr(result, attr)
        except AttributeError:
            if default is _no_default:
                raise
            return default
    return result
@curry
def set_attribute(name, value):
    """Decorator factory that sets attribute ``name`` to ``value`` on
    the wrapped function without otherwise changing its behavior.

    Usage
    -----
    >>> @set_attribute('__name__', 'foo')
    ... def bar():
    ...     return 3
    ...
    >>> bar()
    3
    >>> bar.__name__
    'foo'
    """
    def decorator(func):
        setattr(func, name, value)
        return func
    return decorator
# Ready-made decorators that override the __name__ / __doc__ of a
# decorated function, e.g. ``@with_name('foo')``.
# Example:
with_name = set_attribute('__name__')
with_doc = set_attribute('__doc__')
def foldr(f, seq, default=_no_default):
    """Right-associative fold of ``f`` over ``seq``.

    Expands as ``f(seq[0], f(seq[1], ... f(seq[-1], default)))``.

    Parameters
    ----------
    f : callable[any, any]
        Combining function; receives (element, accumulator).
    seq : iterable[any]
        Sequence to fold. Must support ``reversed``, so prefer a strict
        (non-lazy) sequence such as a list.
    default : any, optional
        Seed value. When omitted, the last element of ``seq`` seeds the
        fold and ``seq`` must be non-empty.

    Returns
    -------
    folded : any
        The folded value.

    Notes
    -----
    Implemented as a left fold over the reversed sequence with the
    argument order of ``f`` flipped; see
    `https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_ for
    the foldr/foldl distinction.

    See Also
    --------
    :func:`functools.reduce`
    :func:`sum`
    """
    if default is _no_default:
        return reduce(flip(f), reversed(seq))
    return reduce(flip(f), reversed(seq), default)
| {
"repo_name": "florentchandelier/zipline",
"path": "zipline/utils/functional.py",
"copies": "2",
"size": "9650",
"license": "apache-2.0",
"hash": -8733192931981522000,
"line_mean": 23.6173469388,
"line_max": 79,
"alpha_frac": 0.552746114,
"autogenerated": false,
"ratio": 3.958162428219852,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 392
} |
from functools import reduce
from pprint import pformat
from six import viewkeys
from six.moves import map, zip
from toolz import curry, flip
@curry
def apply(f, *args, **kwargs):
    """Invoke ``f`` with the supplied positional and keyword arguments.

    Parameters
    ----------
    f : callable
        The callable to invoke.
    *args, **kwargs
        Arguments forwarded to ``f``.

    Returns
    -------
    a : any
        The result of ``f(*args, **kwargs)``.

    Examples
    --------
    >>> from toolz.curried.operator import add, sub
    >>> fs = add(1), sub(1)
    >>> tuple(map(apply, fs, (1, 2)))
    (2, -1)

    Because a class statement is itself a call of the metaclass, this
    also works as a class decorator that replaces the class with a
    (no-argument constructed) instance of it -- see ``instance`` below.

    See Also
    --------
    unpack_apply
    mapply
    """
    return f(*args, **kwargs)
# Alias so ``apply`` reads naturally as a class decorator:
# ``@instance`` replaces the decorated class with an instance of it.
instance = apply
from zipline.utils.sentinel import sentinel
def mapall(funcs, seq):
    """Map each function in ``funcs`` over ``seq``, concatenating results.

    Parameters
    ----------
    funcs : iterable[function]
        Functions to map, in order.
    seq : iterable
        Sequence each function is mapped over.

    Yields
    ------
    elem : object
        Results of ``funcs[0]`` over ``seq``, then ``funcs[1]``, etc.

    Example
    -------
    >>> list(mapall([lambda x: x + 1, lambda x: x - 1], [1, 2, 3]))
    [2, 3, 4, 0, 1, 2]
    """
    for transform in funcs:
        for result in map(transform, seq):
            yield result
def same(*values):
    """Return True when every argument equals the first.

    Vacuously True for an empty argument list.

    Example
    -------
    >>> same(1, 1, 1, 1)
    True
    >>> same(1, 2, 1)
    False
    >>> same()
    True
    """
    if not values:
        return True
    benchmark = values[0]
    return all(other == benchmark for other in values[1:])
def _format_unequal_keys(dicts):
    """Pretty-print the sorted key list of each dict, for error messages."""
    key_lists = [sorted(d.keys()) for d in dicts]
    return pformat(key_lists)
def dzip_exact(*dicts):
    """Zip several dicts that all share exactly the same key set.

    Parameters
    ----------
    *dicts : iterable[dict]
        Dicts sharing identical keys.

    Returns
    -------
    zipped : dict
        Maps each key to a len(dicts)-tuple of the corresponding values
        from every input dict.

    Raises
    ------
    ValueError
        If the key sets are not all identical.

    Example
    -------
    >>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4})
    >>> result == {'a': (1, 3), 'b': (2, 4)}
    True
    """
    if not same(*map(viewkeys, dicts)):
        raise ValueError(
            "dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts)
        )
    first = dicts[0]
    return {key: tuple(d[key] for d in dicts) for key in first}
def _gen_unzip(it, elem_len):
    """Yield the tuples of ``it``, checking each has length ``elem_len``.

    Parameters
    ----------
    it : iterable[tuple]
        Iterator of tuples (``unzip`` ensures they are tuples already).
    elem_len : int or None
        Expected tuple length; when None it is inferred from the first
        element.

    Yields
    ------
    elem : tuple
        Each element of ``it``.

    Raises
    ------
    ValueError
        When any element's length does not match ``elem_len``.
    """
    first = next(it)
    inferred_len = len(first)
    if elem_len is not None and elem_len != inferred_len:
        raise ValueError(
            'element at index 0 was length %d, expected %d' % (
                inferred_len,
                elem_len,
            )
        )
    elem_len = inferred_len
    yield first
    for index, elem in enumerate(it, 1):
        if len(elem) != elem_len:
            raise ValueError(
                'element at index %d was length %d, expected %d' % (
                    index,
                    len(elem),
                    elem_len,
                )
            )
        yield elem
def unzip(seq, elem_len=None):
    """Transpose a length-n sequence of length-m sequences into m
    length-n tuples.

    Parameters
    ----------
    seq : iterable[iterable]
        The sequence to unzip; it is consumed entirely.
    elem_len : int, optional
        Expected length of each inner sequence. When omitted it is
        inferred from the first element, in which case ``seq`` must be
        non-empty.

    Returns
    -------
    seqs : iterable[iterable]
        The transposed tuples.

    Raises
    ------
    ValueError
        When ``seq`` is empty and ``elem_len`` is not given, or when an
        inner sequence's length disagrees with ``elem_len`` / the first
        element's length.

    Examples
    --------
    >>> cs, ns = unzip([('a', 1), ('b', 2), ('c', 3)])
    >>> cs
    ('a', 'b', 'c')
    >>> ns
    (1, 2, 3)
    >>> cs, ns = unzip([], elem_len=2)
    >>> cs == ns == ()
    True
    """
    transposed = tuple(zip(*_gen_unzip(map(tuple, seq), elem_len)))
    if transposed:
        return transposed
    # Empty input: we can only produce the right number of empty tuples
    # if the caller told us how many to make.
    if elem_len is None:
        raise ValueError("cannot unzip empty sequence without 'elem_len'")
    return ((),) * elem_len
# Module-private marker meaning "no default supplied", used by
# ``getattrs`` and ``foldr`` below to distinguish an omitted default
# from an explicit None.
_no_default = sentinel('_no_default')
def getattrs(value, attrs, default=_no_default):
    """Chained ``getattr``: look up each name in ``attrs`` in turn.

    Parameters
    ----------
    value : object
        Root of the lookup chain.
    attrs : iterable[str]
        Attribute names to resolve, left to right.
    default : object, optional
        Returned if any lookup fails; when omitted the AttributeError
        propagates.

    Returns
    -------
    result : object
        The final resolved attribute (or ``default``).

    Example
    -------
    >>> class EmptyObject(object):
    ...     pass
    ...
    >>> obj = EmptyObject()
    >>> obj.foo = EmptyObject()
    >>> obj.foo.bar = "value"
    >>> getattrs(obj, ('foo', 'bar'))
    'value'
    >>> getattrs(obj, ('foo', 'buzz'), 'default')
    'default'
    """
    result = value
    for attr in attrs:
        try:
            result = getattr(result, attr)
        except AttributeError:
            if default is _no_default:
                raise
            return default
    return result
@curry
def set_attribute(name, value):
    """Decorator factory that sets attribute ``name`` to ``value`` on
    the wrapped function without otherwise changing its behavior.

    Usage
    -----
    >>> @set_attribute('__name__', 'foo')
    ... def bar():
    ...     return 3
    ...
    >>> bar()
    3
    >>> bar.__name__
    'foo'
    """
    def decorator(func):
        setattr(func, name, value)
        return func
    return decorator
# Ready-made decorators that override the __name__ / __doc__ of a
# decorated function, e.g. ``@with_name('foo')``.
# Example:
with_name = set_attribute('__name__')
with_doc = set_attribute('__doc__')
def foldr(f, seq, default=_no_default):
    """Right-associative fold of ``f`` over ``seq``.

    Expands as ``f(seq[0], f(seq[1], ... f(seq[-1], default)))``.

    Parameters
    ----------
    f : callable[any, any]
        Combining function; receives (element, accumulator).
    seq : iterable[any]
        Sequence to fold. Must support ``reversed``, so prefer a strict
        (non-lazy) sequence such as a list.
    default : any, optional
        Seed value. When omitted, the last element of ``seq`` seeds the
        fold and ``seq`` must be non-empty.

    Returns
    -------
    folded : any
        The folded value.

    Notes
    -----
    Implemented as a left fold over the reversed sequence with the
    argument order of ``f`` flipped; see
    `https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_ for
    the foldr/foldl distinction.

    See Also
    --------
    :func:`functools.reduce`
    :func:`sum`
    """
    if default is _no_default:
        return reduce(flip(f), reversed(seq))
    return reduce(flip(f), reversed(seq), default)
| {
"repo_name": "magne-max/zipline-ja",
"path": "zipline/utils/functional.py",
"copies": "1",
"size": "9663",
"license": "apache-2.0",
"hash": 4142193595594846000,
"line_mean": 23.6505102041,
"line_max": 79,
"alpha_frac": 0.5532443341,
"autogenerated": false,
"ratio": 3.957002457002457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00005797773654916512,
"num_lines": 392
} |
from functools import reduce
from pymongo import MongoClient
import osm_address
__author__ = 'tmshv'
"""
Populate documents with OSM_ID by matching document address & osm_features.tsv
"""
# Connect to the local MongoDB instance and load the OSM
# street -> house-numbers mapping once, up front.
mongo = MongoClient()
db = mongo['k2']
osm_dict = osm_address.get_streets_dict()
def find_osm_id(doc):
    """Look up *doc*'s 'address' ("street, number") in ``osm_dict``.

    Returns the placeholder string 'WOW' when both the street and the
    house number match, otherwise prints the near-miss (street found,
    number not) and returns None.

    NOTE(review): 'WOW' looks like leftover debug output rather than a
    real OSM id -- confirm the intended return value.
    """
    street, n = doc['address'].split(',')
    street = street.lower()
    n = n.strip()
    if street not in osm_dict:
        return None
    osm_nums = osm_dict[street]
    if n in osm_nums:
        return 'WOW'
    # Street matched but the house number did not; log it for debugging.
    print(street, n, osm_nums)
    return None
# Process documents that do not yet have an OSM id.
targets = db.objects.find({'osm_id': {'$exists': False}})
print('Founded %d records with no OSM_ID' % targets.count())
# Only the first 100 matches are processed per run.
targets = list(targets)[:100]
# print('Updating')
# print(osm_dict.keys())
# print('22-я линия В.О.' in osm_dict.keys())
for doc in targets:
    osm_id = find_osm_id(doc)
    if osm_id is not None:
        print(doc['name'])
# print('Done')
| {
"repo_name": "tmshv/k2",
"path": "Tools/db_populate_with_osm_id.py",
"copies": "1",
"size": "1210",
"license": "mit",
"hash": -999569642497826600,
"line_mean": 18.7049180328,
"line_max": 78,
"alpha_frac": 0.5765391015,
"autogenerated": false,
"ratio": 2.9826302729528535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40591693744528534,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from pymongo import MongoClient
import re
__author__ = 'tmshv'
"""
Set the 'function' field of objects by matching their 'name'
against a list of regex rules.
"""
mongo = MongoClient()
db = mongo['k2']
# Each rule is a (query, update) pair: every document matching ``query``
# gets ``update`` applied via $set. Order matters -- the first rule tags
# everything without a function as 'unknown', and most later rules only
# refine documents still marked 'unknown'.
rules = [
    [{'function': {'$exists': False}}, {'function': 'unknown'}],
    [{'function': 'unknown', 'name': re.compile('[Дд]етский сад')}, {'function': 'social'}],
    [{'function': 'unknown', 'name': re.compile('школа')}, {'function': 'social'}],
    [{'function': 'unknown', 'name': re.compile('^Школа')}, {'function': 'social'}],
    [{'function': 'unknown', 'name': re.compile('барачная больница')}, {'function': 'social'}],
    [{'function': 'unknown', 'name': re.compile('^.бщежитие|бщежитие$')}, {'function': 'living'}],
    [{'function': 'unknown', 'name': re.compile('[Кк]отельная')}, {'function': 'service'}],
    [{'function': 'unknown', 'name': re.compile('[Бб]ольница')}, {'function': 'social'}],
    [{'function': 'unknown', 'name': re.compile('[Сс]тадион')}, {'function': 'social'}],
    [{'function': 'unknown', 'name': re.compile('[Ии]нститут')}, {'function': 'social'}],
    [{'function': 'unknown', 'name': re.compile('[Уу]ниверситет')}, {'function': 'social'}],
    [{'function': 'unknown', 'name': re.compile('^[Зз]авод')}, {'function': 'production'}],
    [{'function': 'unknown', 'name': re.compile('[Фф]абрика')}, {'function': 'production'}],
    [{'function': 'unknown', 'name': re.compile('[Дд]ворец')}, {'function': 'public'}],
    [{'function': 'unknown', 'name': re.compile('подстан')}, {'function': 'service'}],
    [{'function': 'unknown', 'name': re.compile('училище')}, {'function': 'social'}],
    [{'function': 'unknown', 'name': re.compile('[Сс]клад')}, {'function': 'service'}],
    [{'name': "Пожарная часть на заводе \"Красный Треугольник\""}, {'function': 'service'}],
    [{'name': "Ленполиграфмаш. Заводоуправление - Бизнес-центр \"Карповка\""}, {'function': 'public'}],
    [{'name': "Дворец культуры им. В. И. Ленина завода \"Большевик\""}, {'function': 'public'}],
    [{'name': "Школа . Флигель - Магазин автозапчастей \"ABS-Сервис\""}, {'function': 'service'}],
    [{'name': 'Общежитие - Здание общественного назначения'}, {'function': 'admin'}],
    [{'name': "Электростанция \"Уткина заводь\" - ГРЭС \"Красный Октябрь\" - ТЭЦ-5"}, {'function': 'service'}],
    [{'name': "Водонапорная башня гидролизного завода"}, {'function': 'service'}],
]
# Apply each rule in order.
for rule in rules:
    query, update = rule
    db.objects.update_many(query, {'$set': update})
    # m = db.objects.find(query)
    # print(rule)
    # print(m.count())
    # print()
print('Done')
| {
"repo_name": "tmshv/k2",
"path": "Tools/db_set_functions_by_name.py",
"copies": "1",
"size": "2997",
"license": "mit",
"hash": -7833306507029489000,
"line_mean": 44.8928571429,
"line_max": 111,
"alpha_frac": 0.5887159533,
"autogenerated": false,
"ratio": 2.4735322425409048,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3562248195840905,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from pymongo import MongoClient
__author__ = 'tmshv'
"""
Correct 'address' field
"""
mongo = MongoClient()
db = mongo['k2']
BSON_ARRAY = 4
fix_rules_street = [
['Обводного набережная канала', 'Набережная Обводного канала'],
['проспект Большой ПС', 'Большой проспект П.С.'],
['проспект Большой ВО', 'Большой проспект В.О.'],
['5-я линия ВО', '5-я линия В.О.'],
['6-я линия ВО', '6-я линия В.О.'],
['8-я линия ВО', '8-я линия В.О.'],
['13-я линия ВО', '13-я линия В.О.'],
['14-я линия ВО', '14-я линия В.О.'],
['17-я линия ВО', '17-я линия В.О.'],
['18-я линия ВО', '18-я линия В.О.'],
['20-я линия ВО', '20-я линия В.О.'],
['21-я линия ВО', '21-я линия В.О.'],
['22-я линия ВО', '22-я линия В.О.'],
['23-я линия ВО', '23-я линия В.О.'],
['25-я линия ВО', '25-я линия В.О.'],
['27-я линия ВО', '27-я линия В.О.'],
['Косая линия ВО', 'Косая линия В.О.'],
]
def fix(doc):
    """Normalize the street part of doc['address'] via fix_rules_street
    and lowercase the house-number part. Mutates and returns *doc*.
    """
    street, n = doc['address'].split(',')
    # Apply every rule in order, exactly like the original reduce:
    # a replacement is visible to the comparisons of later rules.
    for wrong, correct in fix_rules_street:
        if street == wrong:
            street = correct
    doc['address'] = '{street}, {n}'.format(street=street, n=n.lower())
    return doc
# Normalize the address of every document and write it back.
targets = db.objects.find({})
print('Founded %d records with "address"' % targets.count())
print('Updating')
for doc in map(fix, targets):
    db.objects.update({'_id': doc['_id']}, doc)
print('Done') | {
"repo_name": "tmshv/k2",
"path": "Tools/db_fix_address.py",
"copies": "1",
"size": "1769",
"license": "mit",
"hash": -86851717822465780,
"line_mean": 24.8181818182,
"line_max": 108,
"alpha_frac": 0.5757575758,
"autogenerated": false,
"ratio": 1.780426599749059,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7848357140199088,
"avg_score": 0.0015654070699942261,
"num_lines": 55
} |
from functools import reduce
from pymongo import MongoClient
__author__ = 'tmshv'
"""
Correct 'years' field.
Transform '1911-1951, 1953' -> [[1911, 1951], [1953]]
"""
def periods(raw):
    """Parse a raw period string into years and year ranges.

    Transform '1911-1951, 1953' -> [(1911, 1951), 1953]
    (the old docstring claimed nested lists, but ranges are tuples and
    single years are bare ints, matching what the code has always done).

    :param raw: string like '1911-1951, 1953'; any non-string input
        (e.g. None or an already-converted list) yields [].
    :return: list of int years and (start, end) int tuples.
    """
    def parse_period(period):
        # 'YYYY-YYYY' -> (start, end); 'YYYY' -> bare int.
        # int() tolerates the surrounding whitespace left by split(',').
        if '-' in period:
            return tuple(map(int, period.split('-')))
        return int(period)

    if not isinstance(raw, str):
        return []
    return [parse_period(p) for p in raw.split(',')]
# Connection to the local MongoDB instance; documents live in the 'k2' database.
mongo = MongoClient()
db = mongo['k2']
# BSON type codes (see the BSON spec). BSON_STRING appears in the commented-out
# $type query below; BSON_ARRAY is unused in the visible code.
BSON_ARRAY = 4
BSON_STRING = 2
def fix(doc):
    # Replace the stringly-typed first element of 'years' with the parsed
    # period list, e.g. ['1911-1951, 1953'] -> [(1911, 1951), 1953].
    # Mutates `doc` in place and returns it for use with map().
    doc['years'] = periods(doc['years'][0])
    return doc
# Select documents whose years[0] is still a raw string. A JavaScript $where
# predicate is used because the element (not the whole field) must be tested;
# the simpler $type query on the field is kept here for reference.
# targets = db.objects.find({'years': {'$type': BSON_STRING}})
targets = db.objects.find({'$where': 'typeof this.years[0] === "string"'})
print('Founded %d records with stringed "years" field' % targets.count())
print('Updating')
# NOTE(review): cursor.count() and collection.update() are deprecated in newer
# pymongo (count_documents / update_one) — works with the pymongo this was
# written against.
for doc in map(fix, targets):
    # print(doc['years'])
    db.objects.update({'_id': doc['_id']}, doc)
print('Done')
| {
"repo_name": "tmshv/k2",
"path": "Tools/db_fix_years.py",
"copies": "1",
"size": "1123",
"license": "mit",
"hash": -9209754699105101000,
"line_mean": 20.1886792453,
"line_max": 74,
"alpha_frac": 0.589492431,
"autogenerated": false,
"ratio": 3.110803324099723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4200295755099723,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from pyquantum.qgate import QGate, X, I, P0, P1
import numpy as np
# Used by QuantumCircuit to handle gates controlled by gates on a position
# lower than index: the wrapped gate acts on qubit `index` and fires only
# when all qubits 0..index-1 are |1> (multi-controlled gate).
class ControlledQGate(QGate):
    # Creates a new quantum gate on a given index of some column of a
    # quantum circuit. `index` is the target row; all rows above it control.
    def __init__(self, index, gate):
        self.index = index
        self.gate = gate
        self.matrix = self.__gen_matrix().matrix
        self.name = "C" + str(gate)

    # Private function used to generate the gate matrix representation
    def __gen_matrix(self):
        def aux(n):
            if n == 1:
                # Base case, one control: control |0> -> identity on target,
                # control |1> -> apply the wrapped gate.
                return P0.tensor(I) + P1.tensor(self.gate)
            # Recursive case: control |0> leaves the remaining n qubits
            # untouched; control |1> defers to the (n-1)-control construction.
            p = QGate.tensor_of_gates([P0] + [I for _ in range(n)])
            return p + P1.tensor(aux(n-1))
        return aux(self.index)
# Quantum circuit simulation
class QuantumCircuit:
    # Initializes a quantum circuit that works on n qubits and consists of
    # a defined number of steps (columns). Initializes it with the Pauli I
    # gate on every position. circuit[i][j] holds the gate at column (time
    # step) i, row (qubit) j.
    def __init__(self, nqubits, steps):
        self.nqubits = nqubits
        self.circuit = np.array([
            [I for j in range(nqubits)] for i in range(steps)])

    # Concatenates the circuit with another one that works on the same number
    # of qubits; the other circuit's columns are appended after this one's.
    def concat(self, other):
        if self.nqubits != other.nqubits:
            raise ValueError("Both circuits must work on the same number of qubits")
        new = QuantumCircuit(self.nqubits, len(self.circuit) + len(other.circuit))
        new.circuit = np.concatenate((self.circuit, other.circuit), axis=0)
        return new

    # Adds a gate on position i (column), j (row). Fails if there is some
    # controlled gate on the same column on some row with index greater than j,
    # because rows above a controlled gate act as its controls.
    def add_gate(self, i, j, gate):
        if self.__has_controlled_gate(self.circuit[i, j+1:]):
            raise ValueError("Cannot apply a gate to a qubit that is controlling")
        self.circuit[i, j] = gate

    # Adds a controlled gate on column i, row j that is controlled by all the
    # previous qubits on the same column; those rows must still hold I.
    def add_controlled_gate(self, i, j, gate):
        if np.any(list(map(lambda g: g != I, self.circuit[i, :j]))):
            raise ValueError("Cannot add a controlled gate where the previous gates are not I")
        self.circuit[i, j] = ControlledQGate(j, gate)

    # Private method that checks if there is a controllled gate on a list of
    # gates
    def __has_controlled_gate(self, gates):
        return np.any(list(map(lambda g: isinstance(g, ControlledQGate), gates)))

    # Converts the circuit from an ordered array of gates to a single gate:
    # each column is tensored into one gate, then the columns are composed
    # right-to-left (matrix product order is the reverse of time order).
    def asgate(self):
        def column_gate(col):
            if self.__has_controlled_gate(col):
                # A ControlledQGate's matrix already spans rows 0..i, so the
                # tensor starts at the controlled gate itself.
                i = next(i for i,g in enumerate(col) if isinstance(g, ControlledQGate))
                return QGate.tensor_of_gates(col[i:])
            return QGate.tensor_of_gates(col)
        gates = map(column_gate, self.circuit)
        return reduce(np.dot, reversed(list(gates)))

    # ASCII rendering: one wire per qubit, 'o' marks a control point, '|'
    # draws the vertical control line between rows.
    def __str__(self):
        gates_by_row = np.transpose(self.circuit)
        result = ''
        for r, row in enumerate(gates_by_row):
            result += '---'
            for c, gate in enumerate(row):
                if self.__has_controlled_gate(self.circuit[c, r+1:]):
                    result += 'o---'
                else:
                    # Controlled gates render without their leading 'C'.
                    g = str(gate)[1:] if isinstance(gate, ControlledQGate) else str(gate)
                    result += g + '-'*(4 - len(g)) if gate != I else '----'
            if r != len(gates_by_row) - 1:
                result += '\n---'
                for c, gate in enumerate(row):
                    if self.__has_controlled_gate(self.circuit[c, r+1:]):
                        result += '|---'
                    else:
                        result += '----'
            result += '\n'
        return result

    __repr__ = __str__
# Private function that creates a CNOT gate circuit: X on qubit 1,
# controlled by qubit 0.
def __gen_cnot():
    circ = QuantumCircuit(2, 1)
    circ.add_controlled_gate(0, 1, X)
    return circ

# Private function that creates a TOFFOLI gate circuit: X on qubit 2,
# controlled by qubits 0 and 1.
def __gen_toffoli():
    circ = QuantumCircuit(3, 1)
    circ.add_controlled_gate(0, 2, X)
    return circ

# CNOT gate circuit
CNOT = __gen_cnot()
# TOFFOLI gate circuit
TOFFOLI = __gen_toffoli()
| {
"repo_name": "miguelfrde/pyquantum",
"path": "pyquantum/qcircuit.py",
"copies": "1",
"size": "4393",
"license": "mit",
"hash": 7779948624816765000,
"line_mean": 36.8706896552,
"line_max": 95,
"alpha_frac": 0.5920783064,
"autogenerated": false,
"ratio": 3.6008196721311476,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46928979785311475,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from ..Qt import QtGui, QtCore, isQObjectAlive
from ..GraphicsScene import GraphicsScene
from ..Point import Point
from .. import functions as fn
import weakref
import operator
from ..util.lru_cache import LRUCache
class GraphicsItem(object):
    """
    **Bases:** :class:`object`
    Abstract class providing useful methods to GraphicsObject and GraphicsWidget.
    (This is required because we cannot have multiple inheritance with QObject subclasses.)
    A note about Qt's GraphicsView framework:
    The GraphicsView system places a lot of emphasis on the notion that the graphics within the scene should be device independent--you should be able to take the same graphics and display them on screens of different resolutions, printers, export to SVG, etc. This is nice in principle, but causes me a lot of headache in practice. It means that I have to circumvent all the device-independent expectations any time I want to operate in pixel coordinates rather than arbitrary scene coordinates. A lot of the code in GraphicsItem is devoted to this task--keeping track of view widgets and device transforms, computing the size and shape of a pixel in local item coordinates, etc. Note that in item coordinates, a pixel does not have to be square or even rectangular, so just asking how to increase a bounding rect by 2px can be a rather complex task.
    """
    # Class-wide cache of pixelVectors() results, keyed on the scale/shear
    # part of the device transform (shared across all instances).
    _pixelVectorGlobalCache = LRUCache(100, 70)

    def __init__(self, register=True):
        # Discover which QGraphicsItem subclass this mixin is combined with,
        # and remember it on the class so base-class methods can be called.
        if not hasattr(self, '_qtBaseClass'):
            for b in self.__class__.__bases__:
                if issubclass(b, QtGui.QGraphicsItem):
                    self.__class__._qtBaseClass = b
                    break
        if not hasattr(self, '_qtBaseClass'):
            raise Exception('Could not determine Qt base class for GraphicsItem: %s' % str(self))
        self._pixelVectorCache = [None, None]
        self._viewWidget = None
        self._viewBox = None
        self._connectedView = None
        self._exportOpts = False   ## If False, not currently exporting. Otherwise, contains dict of export options.
        if register:
            GraphicsScene.registerObject(self)  ## workaround for pyqt bug in graphicsscene.items()

    def getViewWidget(self):
        """
        Return the view widget for this item.
        If the scene has multiple views, only the first view is returned.
        The return value is cached; clear the cached value with forgetViewWidget().
        If the view has been deleted by Qt, return None.
        """
        if self._viewWidget is None:
            scene = self.scene()
            if scene is None:
                return None
            views = scene.views()
            if len(views) < 1:
                return None
            self._viewWidget = weakref.ref(self.scene().views()[0])
        v = self._viewWidget()
        if v is not None and not isQObjectAlive(v):
            return None
        return v

    def forgetViewWidget(self):
        # Drop the cached view-widget weakref; it will be re-resolved lazily.
        self._viewWidget = None

    def getViewBox(self):
        """
        Return the first ViewBox or GraphicsView which bounds this item's visible space.
        If this item is not contained within a ViewBox, then the GraphicsView is returned.
        If the item is contained inside nested ViewBoxes, then the inner-most ViewBox is returned.
        The result is cached; clear the cache with forgetViewBox()
        """
        if self._viewBox is None:
            p = self
            while True:
                try:
                    p = p.parentItem()
                except RuntimeError:  ## sometimes happens as items are being removed from a scene and collected.
                    return None
                if p is None:
                    vb = self.getViewWidget()
                    if vb is None:
                        return None
                    else:
                        self._viewBox = weakref.ref(vb)
                        break
                if hasattr(p, 'implements') and p.implements('ViewBox'):
                    self._viewBox = weakref.ref(p)
                    break
        return self._viewBox()  ## If we made it this far, _viewBox is definitely not None

    def forgetViewBox(self):
        # Drop the cached ViewBox weakref; it will be re-resolved lazily.
        self._viewBox = None

    def deviceTransform(self, viewportTransform=None):
        """
        Return the transform that converts local item coordinates to device coordinates (usually pixels).
        Extends deviceTransform to automatically determine the viewportTransform.
        """
        if self._exportOpts is not False and 'painter' in self._exportOpts:  ## currently exporting; device transform may be different.
            return self._exportOpts['painter'].deviceTransform() * self.sceneTransform()
        if viewportTransform is None:
            view = self.getViewWidget()
            if view is None:
                return None
            viewportTransform = view.viewportTransform()
        dt = self._qtBaseClass.deviceTransform(self, viewportTransform)
        #xmag = abs(dt.m11())+abs(dt.m12())
        #ymag = abs(dt.m21())+abs(dt.m22())
        #if xmag * ymag == 0:
        if dt.determinant() == 0:  ## occurs when deviceTransform is invalid because widget has not been displayed
            return None
        else:
            return dt

    def viewTransform(self):
        """Return the transform that maps from local coordinates to the item's ViewBox coordinates
        If there is no ViewBox, return the scene transform.
        Returns None if the item does not have a view."""
        view = self.getViewBox()
        if view is None:
            return None
        if hasattr(view, 'implements') and view.implements('ViewBox'):
            tr = self.itemTransform(view.innerSceneItem())
            if isinstance(tr, tuple):
                tr = tr[0]   ## difference between pyside and pyqt
            return tr
        else:
            return self.sceneTransform()
            #return self.deviceTransform(view.viewportTransform())

    def getBoundingParents(self):
        """Return a list of parents to this item that have child clipping enabled."""
        p = self
        parents = []
        while True:
            p = p.parentItem()
            if p is None:
                break
            if p.flags() & self.ItemClipsChildrenToShape:
                parents.append(p)
        return parents

    def viewRect(self):
        """Return the visible bounds of this item's ViewBox or GraphicsWidget,
        in the local coordinate system of the item."""
        view = self.getViewBox()
        if view is None:
            return None
        bounds = self.mapRectFromView(view.viewRect())
        if bounds is None:
            return None
        bounds = bounds.normalized()
        ## nah.
        #for p in self.getBoundingParents():
            #bounds &= self.mapRectFromScene(p.sceneBoundingRect())
        return bounds

    def pixelVectors(self, direction=None):
        """Return vectors in local coordinates representing the width and height of a view pixel.
        If direction is specified, then return vectors parallel and orthogonal to it.
        Return (None, None) if pixel size is not yet defined (usually because the item has not yet been displayed)
        or if pixel size is below floating-point precision limit.
        """
        ## This is an expensive function that gets called very frequently.
        ## We have two levels of cache to try speeding things up.
        dt = self.deviceTransform()
        if dt is None:
            return None, None
        ## Ignore translation. If the translation is much larger than the scale
        ## (such as when looking at unix timestamps), we can get floating-point errors.
        dt.setMatrix(dt.m11(), dt.m12(), 0, dt.m21(), dt.m22(), 0, 0, 0, 1)
        ## check local cache
        if direction is None and dt == self._pixelVectorCache[0]:
            return tuple(map(Point, self._pixelVectorCache[1]))  ## return a *copy*
        ## check global cache
        #key = (dt.m11(), dt.m21(), dt.m31(), dt.m12(), dt.m22(), dt.m32(), dt.m31(), dt.m32())
        key = (dt.m11(), dt.m21(), dt.m12(), dt.m22())
        pv = self._pixelVectorGlobalCache.get(key, None)
        if direction is None and pv is not None:
            self._pixelVectorCache = [dt, pv]
            return tuple(map(Point,pv))  ## return a *copy*
        if direction is None:
            direction = QtCore.QPointF(1, 0)
        if direction.manhattanLength() == 0:
            raise Exception("Cannot compute pixel length for 0-length vector.")
        ## attempt to re-scale direction vector to fit within the precision of the coordinate system
        ## Here's the problem: we need to map the vector 'direction' from the item to the device, via transform 'dt'.
        ## In some extreme cases, this mapping can fail unless the length of 'direction' is cleverly chosen.
        ## Example:
        ## dt = [ 1, 0, 2
        ##        0, 2, 1e20
        ##        0, 0, 1 ]
        ## Then we map the origin (0,0) and direction (0,1) and get:
        ## o' = 2,1e20
        ## d' = 2,1e20  <-- should be 1e20+2, but this can't be represented with a 32-bit float
        ##
        ## |o' - d'|  == 0    <-- this is the problem.
        ## Perhaps the easiest solution is to exclude the transformation column from dt. Does this cause any other problems?
        #if direction.x() == 0:
            #r = abs(dt.m32())/(abs(dt.m12()) + abs(dt.m22()))
            ##r = 1.0/(abs(dt.m12()) + abs(dt.m22()))
        #elif direction.y() == 0:
            #r = abs(dt.m31())/(abs(dt.m11()) + abs(dt.m21()))
            ##r = 1.0/(abs(dt.m11()) + abs(dt.m21()))
        #else:
            #r = ((abs(dt.m32())/(abs(dt.m12()) + abs(dt.m22()))) * (abs(dt.m31())/(abs(dt.m11()) + abs(dt.m21()))))**0.5
        #if r == 0:
            #r = 1.  ## shouldn't need to do this; probably means the math above is wrong?
        #directionr = direction * r
        directionr = direction
        ## map direction vector onto device
        #viewDir = Point(dt.map(directionr) - dt.map(Point(0,0)))
        #mdirection = dt.map(directionr)
        dirLine = QtCore.QLineF(QtCore.QPointF(0,0), directionr)
        viewDir = dt.map(dirLine)
        if viewDir.length() == 0:
            return None, None  ##  pixel size cannot be represented on this scale
        ## get unit vector and orthogonal vector (length of pixel)
        #orthoDir = Point(viewDir[1], -viewDir[0])  ## orthogonal to line in pixel-space
        try:
            normView = viewDir.unitVector()
            #normView = viewDir.norm()  ## direction of one pixel orthogonal to line
            normOrtho = normView.normalVector()
            #normOrtho = orthoDir.norm()
        except:
            raise Exception("Invalid direction %s" %directionr)
        ## map back to item
        dti = fn.invertQTransform(dt)
        #pv = Point(dti.map(normView)-dti.map(Point(0,0))), Point(dti.map(normOrtho)-dti.map(Point(0,0)))
        pv = Point(dti.map(normView).p2()), Point(dti.map(normOrtho).p2())
        self._pixelVectorCache[1] = pv
        self._pixelVectorCache[0] = dt
        self._pixelVectorGlobalCache[key] = pv
        return self._pixelVectorCache[1]

    def pixelLength(self, direction, ortho=False):
        """Return the length of one pixel in the direction indicated (in local coordinates)
        If ortho=True, then return the length of one pixel orthogonal to the direction indicated.
        Return None if pixel size is not yet defined (usually because the item has not yet been displayed).
        """
        normV, orthoV = self.pixelVectors(direction)
        # NOTE(review): '== None' relies on Point's equality operator; 'is None'
        # would be the safer identity test here.
        if normV == None or orthoV == None:
            return None
        if ortho:
            return orthoV.length()
        return normV.length()

    def pixelSize(self):
        ## deprecated
        v = self.pixelVectors()
        if v == (None, None):
            return None, None
        return (v[0].x()**2+v[0].y()**2)**0.5, (v[1].x()**2+v[1].y()**2)**0.5

    def pixelWidth(self):
        ## deprecated
        vt = self.deviceTransform()
        if vt is None:
            return 0
        vt = fn.invertQTransform(vt)
        return vt.map(QtCore.QLineF(0, 0, 1, 0)).length()

    def pixelHeight(self):
        ## deprecated
        vt = self.deviceTransform()
        if vt is None:
            return 0
        vt = fn.invertQTransform(vt)
        return vt.map(QtCore.QLineF(0, 0, 0, 1)).length()
        #return Point(vt.map(QtCore.QPointF(0, 1))-vt.map(QtCore.QPointF(0, 0))).length()

    def mapToDevice(self, obj):
        """
        Return *obj* mapped from local coordinates to device coordinates (pixels).
        If there is no device mapping available, return None.
        """
        vt = self.deviceTransform()
        if vt is None:
            return None
        return vt.map(obj)

    def mapFromDevice(self, obj):
        """
        Return *obj* mapped from device coordinates (pixels) to local coordinates.
        If there is no device mapping available, return None.
        """
        vt = self.deviceTransform()
        if vt is None:
            return None
        if isinstance(obj, QtCore.QPoint):
            obj = QtCore.QPointF(obj)
        vt = fn.invertQTransform(vt)
        return vt.map(obj)

    def mapRectToDevice(self, rect):
        """
        Return *rect* mapped from local coordinates to device coordinates (pixels).
        If there is no device mapping available, return None.
        """
        vt = self.deviceTransform()
        if vt is None:
            return None
        return vt.mapRect(rect)

    def mapRectFromDevice(self, rect):
        """
        Return *rect* mapped from device coordinates (pixels) to local coordinates.
        If there is no device mapping available, return None.
        """
        vt = self.deviceTransform()
        if vt is None:
            return None
        vt = fn.invertQTransform(vt)
        return vt.mapRect(rect)

    def mapToView(self, obj):
        # Map *obj* from local to ViewBox coordinates (None if no view).
        vt = self.viewTransform()
        if vt is None:
            return None
        return vt.map(obj)

    def mapRectToView(self, obj):
        # Map a rect from local to ViewBox coordinates (None if no view).
        vt = self.viewTransform()
        if vt is None:
            return None
        return vt.mapRect(obj)

    def mapFromView(self, obj):
        # Map *obj* from ViewBox to local coordinates (None if no view).
        vt = self.viewTransform()
        if vt is None:
            return None
        vt = fn.invertQTransform(vt)
        return vt.map(obj)

    def mapRectFromView(self, obj):
        # Map a rect from ViewBox to local coordinates (None if no view).
        vt = self.viewTransform()
        if vt is None:
            return None
        vt = fn.invertQTransform(vt)
        return vt.mapRect(obj)

    def pos(self):
        # Like QGraphicsItem.pos(), but returns a pyqtgraph Point.
        return Point(self._qtBaseClass.pos(self))

    def viewPos(self):
        # This item's position expressed in its view's coordinate system.
        return self.mapToView(self.mapFromParent(self.pos()))

    def parentItem(self):
        ## PyQt bug -- some items are returned incorrectly.
        return GraphicsScene.translateGraphicsItem(self._qtBaseClass.parentItem(self))

    def setParentItem(self, parent):
        ## Workaround for Qt bug: https://bugreports.qt-project.org/browse/QTBUG-18616
        if parent is not None:
            pscene = parent.scene()
            if pscene is not None and self.scene() is not pscene:
                pscene.addItem(self)
        return self._qtBaseClass.setParentItem(self, parent)

    def childItems(self):
        ## PyQt bug -- some child items are returned incorrectly.
        return list(map(GraphicsScene.translateGraphicsItem, self._qtBaseClass.childItems(self)))

    def sceneTransform(self):
        ## Qt bug: do no allow access to sceneTransform() until
        ## the item has a scene.
        if self.scene() is None:
            return self.transform()
        else:
            return self._qtBaseClass.sceneTransform(self)

    def transformAngle(self, relativeItem=None):
        """Return the rotation produced by this item's transform (this assumes there is no shear in the transform)
        If relativeItem is given, then the angle is determined relative to that item.
        """
        if relativeItem is None:
            relativeItem = self.parentItem()
        tr = self.itemTransform(relativeItem)
        if isinstance(tr, tuple):  ## difference between pyside and pyqt
            tr = tr[0]
        #vec = tr.map(Point(1,0)) - tr.map(Point(0,0))
        vec = tr.map(QtCore.QLineF(0,0,1,0))
        #return Point(vec).angle(Point(1,0))
        return vec.angleTo(QtCore.QLineF(vec.p1(), vec.p1()+QtCore.QPointF(1,0)))

    #def itemChange(self, change, value):
        #ret = self._qtBaseClass.itemChange(self, change, value)
        #if change == self.ItemParentHasChanged or change == self.ItemSceneHasChanged:
            #print "Item scene changed:", self
            #self.setChildScene(self)  ## This is bizarre.
        #return ret

    #def setChildScene(self, ch):
        #scene = self.scene()
        #for ch2 in ch.childItems():
            #if ch2.scene() is not scene:
                #print "item", ch2, "has different scene:", ch2.scene(), scene
                #scene.addItem(ch2)
                #QtGui.QApplication.processEvents()
                #print "   --> ", ch2.scene()
            #self.setChildScene(ch2)

    def parentChanged(self):
        """Called when the item's parent has changed.
        This method handles connecting / disconnecting from ViewBox signals
        to make sure viewRangeChanged works properly. It should generally be
        extended, not overridden."""
        self._updateView()

    def _updateView(self):
        ## called to see whether this item has a new view to connect to
        ## NOTE: This is called from GraphicsObject.itemChange or GraphicsWidget.itemChange.

        ## It is possible this item has moved to a different ViewBox or widget;
        ## clear out previously determined references to these.
        self.forgetViewBox()
        self.forgetViewWidget()

        ## check for this item's current viewbox or view widget
        view = self.getViewBox()
        #if view is None:
            ##print "  no view"
            #return

        oldView = None
        if self._connectedView is not None:
            oldView = self._connectedView()

        if view is oldView:
            #print "  already have view", view
            return

        ## disconnect from previous view
        if oldView is not None:
            for signal, slot in [('sigRangeChanged', self.viewRangeChanged),
                                 ('sigDeviceRangeChanged', self.viewRangeChanged),
                                 ('sigTransformChanged', self.viewTransformChanged),
                                 ('sigDeviceTransformChanged', self.viewTransformChanged)]:
                try:
                    getattr(oldView, signal).disconnect(slot)
                except (TypeError, AttributeError, RuntimeError):
                    # TypeError and RuntimeError are from pyqt and pyside, respectively
                    pass
            self._connectedView = None

        ## connect to new view
        if view is not None:
            #print "connect:", self, view
            if hasattr(view, 'sigDeviceRangeChanged'):
                # connect signals from GraphicsView
                view.sigDeviceRangeChanged.connect(self.viewRangeChanged)
                view.sigDeviceTransformChanged.connect(self.viewTransformChanged)
            else:
                # connect signals from ViewBox
                view.sigRangeChanged.connect(self.viewRangeChanged)
                view.sigTransformChanged.connect(self.viewTransformChanged)
            self._connectedView = weakref.ref(view)
            self.viewRangeChanged()
            self.viewTransformChanged()

        ## inform children that their view might have changed
        self._replaceView(oldView)

        self.viewChanged(view, oldView)

    def viewChanged(self, view, oldView):
        """Called when this item's view has changed
        (ie, the item has been added to or removed from a ViewBox)"""
        pass

    def _replaceView(self, oldView, item=None):
        # Recursively ask descendants that were attached to oldView to
        # re-resolve their view.
        if item is None:
            item = self
        for child in item.childItems():
            if isinstance(child, GraphicsItem):
                if child.getViewBox() is oldView:
                    child._updateView()
                    #self._replaceView(oldView, child)
            else:
                self._replaceView(oldView, child)

    def viewRangeChanged(self):
        """
        Called whenever the view coordinates of the ViewBox containing this item have changed.
        """
        pass

    def viewTransformChanged(self):
        """
        Called whenever the transformation matrix of the view has changed.
        (eg, the view range has changed or the view was resized)
        """
        pass

    #def prepareGeometryChange(self):
        #self._qtBaseClass.prepareGeometryChange(self)
        #self.informViewBoundsChanged()

    def informViewBoundsChanged(self):
        """
        Inform this item's container ViewBox that the bounds of this item have changed.
        This is used by ViewBox to react if auto-range is enabled.
        """
        view = self.getViewBox()
        if view is not None and hasattr(view, 'implements') and view.implements('ViewBox'):
            view.itemBoundsChanged(self)  ## inform view so it can update its range if it wants

    def childrenShape(self):
        """Return the union of the shapes of all descendants of this item in local coordinates."""
        # NOTE(review): allChildItems() is called twice here; `childs` is unused.
        childs = self.allChildItems()
        shapes = [self.mapFromItem(c, c.shape()) for c in self.allChildItems()]
        return reduce(operator.add, shapes)

    def allChildItems(self, root=None):
        """Return list of the entire item tree descending from this item."""
        if root is None:
            root = self
        tree = []
        for ch in root.childItems():
            tree.append(ch)
            tree.extend(self.allChildItems(ch))
        return tree

    def setExportMode(self, export, opts=None):
        """
        This method is called by exporters to inform items that they are being drawn for export
        with a specific set of options. Items access these via self._exportOptions.
        When exporting is complete, _exportOptions is set to False.
        """
        if opts is None:
            opts = {}
        if export:
            self._exportOpts = opts
            #if 'antialias' not in opts:
                #self._exportOpts['antialias'] = True
        else:
            self._exportOpts = False

    #def update(self):
        #self._qtBaseClass.update(self)
        #print "Update:", self

    def getContextMenus(self, event):
        # Items that define getMenu() contribute it to the scene's context menu.
        return [self.getMenu()] if hasattr(self, "getMenu") else []
| {
"repo_name": "campagnola/acq4",
"path": "acq4/pyqtgraph/graphicsItems/GraphicsItem.py",
"copies": "3",
"size": "23411",
"license": "mit",
"hash": -1551532586221088000,
"line_mean": 39.1560891938,
"line_max": 851,
"alpha_frac": 0.5837426851,
"autogenerated": false,
"ratio": 4.32415958625785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.640790227135785,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from random import random, shuffle
class cwr2gb_Negotiator:
    # Constructor - Note that you can add other fields here; the only
    # required fields are self.preferences and self.offer
    def __init__(self):
        self.preferences = []             # our preference ordering (best first)
        self.offer = []                   # last offer we made / evaluated
        self.iter_limit = 0               # max rounds for this negotiation
        self.count = 0                    # how many make_offer calls we've seen
        self.nego = ""                    # "A" if we open the negotiation, "B" otherwise
        self.result = (True, 0, 0, 0)     # last negotiation result tuple
        self.util = 0                     # opponent's last reported utility

    # initialize(self : BaseNegotiator, preferences : list(String), iter_limit : Int)
    # Performs per-round initialization - takes in a list of items, ordered by the item's
    # preferability for this negotiator
    # You can do other work here, but still need to store the preferences
    def initialize(self, preferences, iter_limit):
        self.preferences = preferences
        self.iter_limit = iter_limit

    # make_offer(self : BaseNegotiator, offer : list(String)) --> list(String)
    # Given the opposing negotiator's last offer (represented as an ordered list),
    # return a new offer. If you wish to accept an offer & end negotiations, return the same offer
    # Note: Store a copy of whatever offer you make in self.offer at the end of this method.
    def make_offer(self, offer):
        #Counter for rounds and determining which negotiator you are
        if self.count > 0:
            self.count = self.count + 1
        # First call: a None offer means we open (negotiator "A"); otherwise
        # we respond second ("B"). Either way, open with our full preferences.
        # NOTE(review): '== None' should be 'is None'.
        if offer == None and self.count == 0:
            self.nego = "A"
            self.count = self.count + 1
            self.offer = self.preferences[:]
            return self.offer
        elif self.count == 0:
            self.nego = "B"
            self.count = self.count + 1
            self.offer = self.preferences[:]
            return self.offer
        #Scenario when my utility is greater than theirs
        #Or Scenario when their utility is negative and mine is positive
        # self.offer is set to the opponent's offer so utility() scores it.
        self.offer = offer
        if self.util < 0 and self.utility() > 0:
            self.offer = offer[:]
            return offer
        if self.util < self.utility():
            self.offer = offer[:]
            return offer
        #Scenario for Round 10 and Negotiator B
        if self.count == 10 and self.nego == "B":
            self.offer = self.preferences[:]
            return self.offer
        #Scenario for Negotiator A Last Round
        if self.count == 11:
            self.offer = self.preferences[:]
            return self.offer
        #Scenario for everything else, random generator to throw off other robots
        # Shuffle random orderings until one scores between the opponent's
        # reported utility and our best achievable score.
        # NOTE(review): when count == 10 and nego == "A", none of the branches
        # above or below fire, so this method falls through and returns None.
        if self.count != 11 and self.count != 10:
            self.util2 = self.utility()           # score of the candidate offer
            self.offer = self.preferences[:]
            self.max = self.utility()             # our best achievable score
            while self.util2 < self.util and self.util < self.max:
                ordering = self.preferences[:]
                shuffle(ordering)
                self.offer = ordering[:]
                self.util2 = self.utility()
            return self.offer

    # utility(self : BaseNegotiator) --> Float
    # Return the utility given by the last offer - Do not modify this method.
    def utility(self):
        total = len(self.preferences)
        return reduce(lambda points, item: points + ((total / (self.offer.index(item) + 1)) - abs(self.offer.index(item) - self.preferences.index(item))), self.offer, 0)

    # receive_utility(self : BaseNegotiator, utility : Float)
    # Store the utility the other negotiator received from their last offer
    def receive_utility(self, utility):
        self.util = utility
        return self.util

    # receive_results(self : BaseNegotiator, results : (Boolean, Float, Float, Int))
    # Store the results of the last series of negotiation (points won, success, etc.)
    def receive_results(self, results):
        self.result = results
        return self.result
| {
"repo_name": "cwr2gb/HW3-A.I.",
"path": "cwr2gb.py",
"copies": "1",
"size": "3819",
"license": "mit",
"hash": -2964479585382716000,
"line_mean": 41.4333333333,
"line_max": 169,
"alpha_frac": 0.609321812,
"autogenerated": false,
"ratio": 4.252783964365256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5362105776365256,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from random import random, shuffle
class first_Negotiator:
    """Baseline negotiator: accept the standing offer whenever it scores
    better for us than the utility the opponent reported for it; otherwise
    counter with our own full preference ordering."""

    def __init__(self):
        # Fields required by the negotiation framework.
        self.preferences = []
        self.offer = []
        self.iter_limit = 0
        self.count = 0
        self.nego = ""
        self.result = (True, 0, 0, 0)
        self.util = 0

    def initialize(self, preferences, iter_limit):
        """Per-round setup: store our preference ordering and the round cap."""
        self.preferences = preferences
        self.iter_limit = iter_limit

    def make_offer(self, offer):
        """Return our next offer given the opponent's last one.

        If the opponent's reported utility is below the score our current
        offer earns us, accept their offer; otherwise restate our preferences.
        A copy of whatever we return is stored in self.offer.
        """
        # Evaluate before touching self.offer: utility() scores the *previous*
        # offer, exactly as the original ordering of operations did.
        accept = self.util < self.utility()
        self.offer = offer[:] if accept else self.preferences[:]
        return self.offer

    # utility(self : BaseNegotiator) --> Float
    # Return the utility given by the last offer - framework-defined formula.
    def utility(self):
        total = len(self.preferences)
        score = 0
        for item in self.offer:
            pos = self.offer.index(item)
            score += (total / (pos + 1)) - abs(pos - self.preferences.index(item))
        return score

    def receive_utility(self, utility):
        """Record the utility the opponent reported for their last offer."""
        self.util = utility
        return utility

    def receive_results(self, results):
        """Record the (success, pts_a, pts_b, rounds) outcome tuple."""
        self.result = results
        return results
"repo_name": "cwr2gb/HW3-A.I.",
"path": "firstnegotiator.py",
"copies": "1",
"size": "2292",
"license": "mit",
"hash": 4841623173041521000,
"line_mean": 43.9607843137,
"line_max": 169,
"alpha_frac": 0.6518324607,
"autogenerated": false,
"ratio": 4.182481751824818,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006916694290571833,
"num_lines": 51
} |
from functools import reduce
from random import sample
from django.core.cache import cache
from django.db.models import Q
from django.db.models.aggregates import Count
from kolibri.content import models, serializers
from kolibri.content.content_db_router import get_active_content_database
from kolibri.logger.models import ContentSessionLog, ContentSummaryLog
from le_utils.constants import content_kinds
from rest_framework import filters, pagination, viewsets
from .utils.search import fuzz
def _join_with_logical_operator(lst, operator):
op = ") {operator} (".format(operator=operator)
return "(({items}))".format(items=op.join(lst))
class ChannelMetadataCacheViewSet(viewsets.ModelViewSet):
    # DRF viewset exposing the locally cached channel metadata records.
    serializer_class = serializers.ChannelMetadataCacheSerializer

    def get_queryset(self):
        # All cached channels; no per-request filtering at this level.
        return models.ChannelMetadataCache.objects.all()
class ContentNodeFilter(filters.FilterSet):
    """FilterSet for ContentNode adding search and recommendation filters.

    Each MethodFilter maps a query parameter to the correspondingly named
    ``filter_<name>`` method on this class (or to the explicit ``action``).
    """
    search = filters.django_filters.MethodFilter(action='title_description_filter')
    recommendations_for = filters.django_filters.MethodFilter()
    next_steps = filters.django_filters.MethodFilter()
    popular = filters.django_filters.MethodFilter()
    resume = filters.django_filters.MethodFilter()
    kind = filters.django_filters.MethodFilter()
    class Meta:
        model = models.ContentNode
        # NOTE(review): next_steps/popular/resume/kind are declared above but
        # missing from this list — confirm whether they should be exposed here.
        fields = ['parent', 'search', 'prerequisite_for', 'has_prerequisite', 'related', 'recommendations_for']
    def title_description_filter(self, queryset, value):
        """Find nodes whose title or description contains the search keywords.

        Falls back to a fuzzy (metaphone-based) match on stemmed_metaphone
        when no node contains the string directly; keywords need not be
        adjacent in the fuzzy path.
        """
        exact_match = queryset.filter(Q(parent__isnull=False), Q(title__icontains=value) | Q(description__icontains=value))
        if exact_match:
            return exact_match
        # No exact match: fuzzy search on the stemmed_metaphone field, which
        # covers both title and description. fuzz() presumably yields one or
        # more phonetic tokens per word — TODO confirm.
        fuzzed_tokens = [fuzz(word) for word in value.split()]
        # OR across each word's tokens, AND across words.
        token_queries = [reduce(lambda x, y: x | y, [Q(stemmed_metaphone__contains=token) for token in tokens]) for tokens in fuzzed_tokens]
        return queryset.filter(
            Q(parent__isnull=False),
            reduce(lambda x, y: x & y, token_queries))
    def filter_recommendations_for(self, queryset, value):
        """
        Recommend items that are similar to this piece of content.
        """
        recc_node = queryset.get(pk=value)
        descendants = recc_node.get_descendants(include_self=False).exclude(kind__in=['topic', ''])
        siblings = recc_node.get_siblings(include_self=False).exclude(kind__in=['topic', ''])
        data = descendants | siblings  # concatenates different querysets
        return data
    def filter_next_steps(self, queryset, value):
        """
        Recommend uncompleted content, content that has user completed content as a prerequisite.
        :param queryset: all content nodes for this channel
        :param value: id of currently logged in user, or none if user is anonymous
        :return: uncompleted content nodes, or empty queryset if user is anonymous
        """
        # if user is anonymous, don't return any nodes
        if not value:
            return queryset.none()
        tables = [
            '"{summarylog_table}" AS "complete_log"',
            '"{summarylog_table}" AS "incomplete_log"',
            '"{content_table}" AS "complete_node"',
            '"{content_table}" AS "incomplete_node"',
        ]
        table_names = {
            "summarylog_table": ContentSummaryLog._meta.db_table,
            "content_table": models.ContentNode._meta.db_table,
        }
        # aliases for sql table names
        sql_tables_and_aliases = [table.format(**table_names) for table in tables]
        # NOTE(review): 'value' is interpolated directly into raw SQL below.
        # If it is not validated as an integer upstream, this is a SQL
        # injection vector — prefer .raw(sql, params). TODO confirm.
        # where conditions joined by ANDs
        where_statements = ["NOT (incomplete_log.progress < 1 AND incomplete_log.content_id = incomplete_node.content_id)",
                            "complete_log.user_id = {user_id}".format(user_id=value),
                            "incomplete_log.user_id = {user_id}".format(user_id=value),
                            "complete_log.progress = 1",
                            "complete_node.rght = incomplete_node.lft - 1",
                            "complete_log.content_id = complete_node.content_id"]
        # custom SQL query to get uncompleted content based on mptt algorithm:
        # "complete_node.rght = incomplete_node.lft - 1" selects the node
        # immediately following a completed node in tree order.
        next_steps_recommendations = "SELECT incomplete_node.* FROM {tables} WHERE {where}".format(
            tables=", ".join(sql_tables_and_aliases),
            where=_join_with_logical_operator(where_statements, "AND")
        )
        return models.ContentNode.objects.raw(next_steps_recommendations)
    def filter_popular(self, queryset, value):
        """
        Recommend content that is popular with all users.
        :param queryset: all content nodes for this channel
        :param value: id of currently logged in user, or none if user is anonymous
        :return: 10 most popular content nodes
        """
        if ContentSessionLog.objects.count() < 50:
            # return 25 random content nodes if not enough session logs
            pks = queryset.values_list('pk', flat=True).exclude(kind__in=['topic', ''])
            count = min(pks.count(), 25)
            return queryset.filter(pk__in=sample(list(pks), count))
        cache_key = 'popular_for_{}'.format(get_active_content_database())
        if cache.get(cache_key):
            return cache.get(cache_key)
        # get the most accessed content nodes
        content_counts_sorted = ContentSessionLog.objects \
            .filter(channel_id=get_active_content_database()) \
            .values_list('content_id', flat=True) \
            .annotate(Count('content_id')) \
            .order_by('-content_id__count')
        most_popular = queryset.filter(content_id__in=list(content_counts_sorted[:10]))
        # cache the popular results queryset for 10 minutes, for efficiency
        cache.set(cache_key, most_popular, 60 * 10)
        return most_popular
    def filter_resume(self, queryset, value):
        """
        Recommend content that the user has recently engaged with, but not finished.
        :param queryset: all content nodes for this channel
        :param value: id of currently logged in user, or none if user is anonymous
        :return: 10 most recently viewed content nodes
        """
        # if user is anonymous, return no nodes
        if not value:
            return queryset.none()
        # get the most recently viewed, but not finished, content nodes
        content_ids = ContentSummaryLog.objects \
            .filter(user=value, channel_id=get_active_content_database()) \
            .exclude(progress=1) \
            .order_by('end_timestamp') \
            .values_list('content_id', flat=True) \
            .distinct()
        resume = queryset.filter(content_id__in=list(content_ids[:10]))
        return resume
    def filter_kind(self, queryset, value):
        """
        Show only content of a given kind.
        :param queryset: all content nodes for this channel
        :param value: 'content' for everything except topics, or one of the content kind constants
        :return: content nodes of the given kind
        """
        if value == 'content':
            return queryset.exclude(kind=content_kinds.TOPIC).order_by("lft")
        return queryset.filter(kind=value).order_by("lft")
class OptionalPageNumberPagination(pagination.PageNumberPagination):
    """
    Pagination class that allows for page number-style pagination, when requested.
    To activate, the `page_size` argument must be set. For example, to request the first 20 records:
    `?page_size=20&page=1`
    """
    # No default page size: pagination is off unless the client requests it.
    page_size = None
    page_size_query_param = "page_size"
class ContentNodeViewset(viewsets.ModelViewSet):
    """REST viewset for content nodes with filtering (ContentNodeFilter)
    and optional page-number pagination."""
    serializer_class = serializers.ContentNodeSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    filter_class = ContentNodeFilter
    pagination_class = OptionalPageNumberPagination
    def get_queryset(self):
        """All content nodes; filtering is applied by the filter backend."""
        return models.ContentNode.objects.all()
class FileViewset(viewsets.ModelViewSet):
    """REST viewset for content files, with optional page-number pagination."""
    serializer_class = serializers.FileSerializer
    pagination_class = OptionalPageNumberPagination
    def get_queryset(self):
        """Return every File row; no per-request filtering."""
        return models.File.objects.all()
| {
"repo_name": "jayoshih/kolibri",
"path": "kolibri/content/api.py",
"copies": "1",
"size": "8360",
"license": "mit",
"hash": 2838202764874714600,
"line_mean": 41.8717948718,
"line_max": 140,
"alpha_frac": 0.6557416268,
"autogenerated": false,
"ratio": 4.17373939091363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.532948101771363,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from random import sample
from django.db.models import Q
from kolibri.content import models, serializers
from rest_framework import filters, pagination, viewsets
from .utils.search import fuzz
class ChannelMetadataCacheViewSet(viewsets.ModelViewSet):
    """REST viewset exposing cached channel metadata (full CRUD via ModelViewSet)."""
    serializer_class = serializers.ChannelMetadataCacheSerializer
    def get_queryset(self):
        """Return every ChannelMetadataCache row; no per-request filtering."""
        return models.ChannelMetadataCache.objects.all()
class ContentNodeFilter(filters.FilterSet):
    """FilterSet for ContentNode adding search and recommendation filters.

    Each MethodFilter maps a query parameter to the correspondingly named
    ``filter_<name>`` method on this class (or to the explicit ``action``).
    """
    search = filters.django_filters.MethodFilter(action='title_description_filter')
    recommendations_for = filters.django_filters.MethodFilter()
    recommendations = filters.django_filters.MethodFilter()
    class Meta:
        model = models.ContentNode
        fields = ['parent', 'search', 'prerequisite_for', 'has_prerequisite', 'related', 'recommendations_for', 'recommendations']
    def title_description_filter(self, queryset, value):
        """Find nodes whose title or description contains the search keywords.

        Falls back to a fuzzy (metaphone-based) match on stemmed_metaphone
        when no node contains the string directly; keywords need not be
        adjacent in the fuzzy path.
        """
        exact_match = queryset.filter(Q(parent__isnull=False), Q(title__icontains=value) | Q(description__icontains=value))
        if exact_match:
            return exact_match
        # No exact match: fuzzy search on the stemmed_metaphone field, which
        # covers both title and description. fuzz() presumably yields one or
        # more phonetic tokens per word — TODO confirm.
        fuzzed_tokens = [fuzz(word) for word in value.split()]
        # OR across each word's tokens, AND across words.
        token_queries = [reduce(lambda x, y: x | y, [Q(stemmed_metaphone__contains=token) for token in tokens]) for tokens in fuzzed_tokens]
        return queryset.filter(
            Q(parent__isnull=False),
            reduce(lambda x, y: x & y, token_queries))
    def filter_recommendations_for(self, queryset, value):
        """Recommend items similar to the node with pk=value: its non-topic
        descendants and siblings."""
        recc_node = queryset.get(pk=value)
        descendants = recc_node.get_descendants(include_self=False).exclude(kind__in=['topic', ''])
        siblings = recc_node.get_siblings(include_self=False).exclude(kind__in=['topic', ''])
        data = descendants | siblings  # concatenates different querysets
        return data
    def filter_recommendations(self, queryset, value):
        """Return up to 25 random non-topic content nodes."""
        # return 25 random content nodes
        pks = queryset.values_list('pk', flat=True).exclude(kind__in=['topic', ''])
        count = min(pks.count(), 25)
        return queryset.filter(pk__in=sample(list(pks), count))
class OptionalPageNumberPagination(pagination.PageNumberPagination):
    """
    Pagination class that allows for page number-style pagination, when requested.
    To activate, the `page_size` argument must be set. For example, to request the first 20 records:
    `?page_size=20&page=1`
    """
    # No default page size: pagination is off unless the client requests it.
    page_size = None
    page_size_query_param = "page_size"
class ContentNodeViewset(viewsets.ModelViewSet):
    """REST viewset for content nodes with filtering (ContentNodeFilter)
    and optional page-number pagination."""
    serializer_class = serializers.ContentNodeSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    filter_class = ContentNodeFilter
    pagination_class = OptionalPageNumberPagination
    def get_queryset(self):
        """All content nodes; filtering is applied by the filter backend."""
        return models.ContentNode.objects.all()
class FileViewset(viewsets.ModelViewSet):
    """REST viewset for content files, with optional page-number pagination."""
    serializer_class = serializers.FileSerializer
    pagination_class = OptionalPageNumberPagination
    def get_queryset(self):
        """Return every File row; no per-request filtering."""
        return models.File.objects.all()
| {
"repo_name": "66eli77/kolibri",
"path": "kolibri/content/api.py",
"copies": "2",
"size": "3267",
"license": "mit",
"hash": -3450518716482969600,
"line_mean": 40.3544303797,
"line_max": 140,
"alpha_frac": 0.7101316192,
"autogenerated": false,
"ratio": 4.063432835820896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5773564455020895,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from rlib.unroll import unrolling_iterable
from som.interp_type import is_ast_interpreter, is_bytecode_interpreter
"""Captures the known primitives at load time of this module, i.e., at compile
time with RPython.
"""
# NOTE(review): this string sits below the imports, so Python does not treat
# it as the module docstring; it is a no-op expression kept for readers.
# Sanity-check constant: number of *_primitives.py files _setup_primitives()
# is expected to discover on disk.
EXPECTED_NUMBER_OF_PRIMITIVE_FILES = 13
class PrimitivesNotFound(Exception):
    """Raised when no primitives container is registered for a class name."""
def _is_primitives_class(entry_pair):
    "NOT_RPYTHON"
    # Accepts a (name, object) pair from inspect.getmembers and decides
    # whether it is a concrete, public Primitives subclass.
    from som.primitives.primitives import Primitives
    import inspect
    entry_name, entry = entry_pair
    if not inspect.isclass(entry):
        return False
    if not issubclass(entry, Primitives) or entry is Primitives:
        return False
    if entry is None or entry_name is None:
        return False
    return not entry_name.startswith("_")
def _setup_primitives():
    "NOT_RPYTHON"
    # Discovers every *_primitives.py module in the ast/ or bc/ subpackage
    # next to this file (depending on the active interpreter flavour) and
    # returns [(class-name-prefix, Primitives subclass), ...], where the
    # prefix is the class name with the trailing "Primitives" removed
    # (e.g. "IntegerPrimitives" -> "Integer").
    from importlib import import_module
    import inspect
    from glob import glob
    base_package = "som.primitives."
    if is_ast_interpreter():
        base_package += "ast."
        interp_dir = "ast"
    else:
        assert is_bytecode_interpreter()
        base_package += "bc."
        interp_dir = "bc"
    # Derive the on-disk directory from this module's own path.
    directory = (
        __file__.replace("known.pyc", "").replace("known.py", "") + interp_dir + "/"
    )
    files = glob(directory + "*_primitives.py")
    module_names = [f.replace(directory, "").replace(".py", "") for f in files]
    mods = [import_module(base_package + mod) for mod in module_names]
    all_members = [inspect.getmembers(mod) for mod in mods]
    all_members = reduce(lambda all, each: all + each, all_members)
    all_prims = filter(_is_primitives_class, all_members)
    prim_pairs = [
        (prim_name[: prim_name.find("Primitives")], cls)
        for (prim_name, cls) in all_prims
    ]
    # Hard exit on a count mismatch: a missing primitives file would only
    # surface later as obscure runtime lookup failures.
    if EXPECTED_NUMBER_OF_PRIMITIVE_FILES != len(prim_pairs):
        print("")
        print("SOM PRIMITIVE DISCOVERY: following primitives found:")
        for name, _clazz in prim_pairs:
            print(" - %s" % name)
        print(
            "Expected number of primitive files: %d, found %d"
            % (EXPECTED_NUMBER_OF_PRIMITIVE_FILES, len(prim_pairs))
        )
        print("ERROR: did not find the expected number of primitive files!")
        import sys
        sys.exit(1)
    return prim_pairs
# Computed once at module load (i.e. at RPython compile time) and wrapped in
# unrolling_iterable so the loop in primitives_for_class is specialized.
_primitives = unrolling_iterable(_setup_primitives())
def primitives_for_class(cls):
    """Look up the Primitives container registered for the given SOM class.

    Raises PrimitivesNotFound when no entry matches the class name.
    """
    wanted = cls.get_name().get_embedded_string()
    for class_name, primitives in _primitives:
        if class_name == wanted:
            return primitives
    raise PrimitivesNotFound
| {
"repo_name": "SOM-st/PySOM",
"path": "src/som/primitives/known.py",
"copies": "2",
"size": "2509",
"license": "mit",
"hash": 6195368320483205000,
"line_mean": 26.8777777778,
"line_max": 84,
"alpha_frac": 0.6301315265,
"autogenerated": false,
"ratio": 3.695139911634757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000130718954248366,
"num_lines": 90
} |
from functools import reduce
from SDWLE.agents.trade.util import memoized
class FakeCard:
    """Lightweight stand-in for a minion, copying only combat-relevant stats."""
    def __init__(self, card):
        self.health = card.health
        self.attack = card.base_attack
        self.base_attack = card.base_attack
        # Mirror 'taunt' only when the source object actually carries it.
        try:
            self.taunt = card.taunt
        except AttributeError:
            pass
class Trade:
    """One attack by one of our minions into an opposing minion, scored by
    the change in board value it produces."""
    def __init__(self, player, my_minion, opp_minion):
        self.player = player
        self.my_minion = my_minion
        self.opp_minion = opp_minion
    @memoized
    def after_attack(self):
        """Post-combat snapshots (FakeCard) of both minions."""
        res = {}
        res["my_minion"] = self.after_damage(self.my_minion, self.opp_minion)
        res["opp_minion"] = self.after_damage(self.opp_minion, self.my_minion)
        return res
    def after_damage(self, target, attacker):
        """Copy of target with attacker's damage applied.

        NOTE(review): relies on attacker.calculate_attack(), which FakeCard
        does not define — assumes real minion objects here; confirm.
        """
        res = FakeCard(target)
        res.health -= attacker.calculate_attack()
        return res
    def start_value(self):
        """Our-minus-their minion value before the attack."""
        me = self.minion_value(self.my_minion)
        opp = self.minion_value(self.opp_minion)
        return me - opp
    def end_value(self):
        """Our-minus-their minion value after the attack resolves."""
        me = self.minion_value(self.after_attack()['my_minion'])
        opp = self.minion_value(self.after_attack()['opp_minion'])
        return me - opp
    @memoized
    def value(self):
        """Net gain from making the trade; +1.0 bonus for killing and surviving."""
        res = self.end_value() - self.start_value()
        if self.after_attack()['my_minion'].health > 0 and \
                self.after_attack()['opp_minion'].health <= 0:
            res += 1.0
        return round(res, 2)
    def minion_desc(self, minion):
        # "Name attack/health" summary for __str__.
        return "{} {}/{}".format(minion.try_name(), minion.base_attack,
                                 minion.health)
    def __str__(self):
        s = "Trade {} for {} Value {}"
        return s.format(self.minion_desc(self.my_minion),
                        self.minion_desc(self.opp_minion),
                        self.value())
    def minion_value(self, minion):
        """Heuristic worth of a minion; 0 once dead.

        NOTE(review): reads minion.taunt directly — FakeCard only has that
        attribute when the source minion did; confirm it cannot be missing
        by the time this is called.
        """
        if minion.health <= 0:
            return 0
        res = (minion.base_attack + 0.5) * minion.health ** 1.5
        if minion.taunt:
            res += 0.5
        return res ** 0.4
    def is_opp_dead(self):
        return self.after_attack()['opp_minion'].health <= 0
    def needs_sequence(self):
        """Minion-vs-minion trades interact, so ordering matters."""
        return True
class TradeSequence:
    """An ordered series of trades, used to search for the best ordering.

    Holds the trades already committed (``past_trades``) plus a Trades
    object describing the board after them; a sequence's score is the value
    of its past trades plus the best achievable future value.
    """
    def __init__(self, current_trades_obj, past_trades=None):
        # BUG FIX: the default used to be a shared mutable list ([]), so all
        # instances built without past_trades aliased the same object; use a
        # None sentinel so each instance gets its own fresh list.
        self.past_trades = [] if past_trades is None else past_trades
        self.current_trades_obj = current_trades_obj
    def after_next_trade(self, next_trade):
        """Return a new TradeSequence for the state after next_trade runs."""
        past_trades = [t for t in self.past_trades]
        past_trades.append(next_trade)
        to = self.current_trades_obj
        trades_obj = Trades(to.player, to.attack_minions,
                            to.opp_minions, to.opp_hero.copy(to.player))
        # The attacker is spent; the target disappears only if it died.
        trades_obj.attack_minions.remove(next_trade.my_minion)
        if next_trade.is_opp_dead():
            trades_obj.opp_minions.remove(next_trade.opp_minion)
        res = TradeSequence(trades_obj, past_trades)
        return res
    def has_lethal(self):
        return self.current_trades_obj.has_lethal()
    def past_trade_value(self):
        """Summed value of committed trades (large sentinel when lethal)."""
        if self.has_lethal():
            return 99999999
        else:
            return reduce(lambda s, t: s + t.value(), self.past_trades, 0.0)
    @memoized
    def future_trade_value(self):
        """Best value still obtainable from the remaining attackers.

        Recurses over orderings only while opposing taunts force targeting;
        otherwise just takes the single best next trade. Depth-limited to
        two committed trades.
        """
        if self.has_lethal():
            return 9999999999
        if len(self.current_trades_obj.attack_minions) == 0:
            return 0.0
        # Depth limit: stop projecting after two committed trades.
        if len(self.past_trades) > 1:
            return 0
        next_trades = self.current_trades_obj.trades()
        if len(next_trades) == 0:
            return 0.0
        # Effectively unreachable guard kept from the original as a
        # safety valve against pathological candidate counts.
        if len(next_trades) > 1000000:
            return 0.0
        if self.current_trades_obj.opp_has_taunt():
            best_value = -99999999999.0
            for next_trade in next_trades:
                next_seq = self.after_next_trade(next_trade)
                full = next_trade.value() + next_seq.future_trade_value()
                if full > best_value:
                    best_value = full
            return best_value
        else:
            return next_trades[0].value()
    @memoized
    def trade_value(self):
        """Total score: committed value plus projected future value."""
        return self.past_trade_value() + self.future_trade_value()
class FaceTrade(Trade):
    """A direct attack at the opposing hero instead of a minion."""
    def value(self):
        # Lethal face damage dominates every other option.
        if self.is_lethal():
            return 9999999
        return self.my_minion.base_attack * 0.2
    def __str__(self):
        attacker = self.minion_desc(self.my_minion)
        return "Face {} Value {}".format(attacker, self.value())
    def is_lethal(self):
        # Here opp_minion is actually the opposing hero.
        return self.opp_minion.health <= self.my_minion.base_attack
    def needs_sequence(self):
        return False
class Trades:
    """All candidate trades for one turn: our ready attackers against the
    opponent's minions and hero."""
    def __init__(self, player, attack_minions, opp_minions, opp_hero):
        self.player = player
        # [0:99999] takes a (practically complete) shallow copy so later
        # remove() calls do not mutate the caller's lists.
        self.attack_minions = attack_minions[0:99999]
        self.opp_minions = opp_minions[0:99999]
        self.opp_hero = opp_hero
    def opp_has_taunt(self):
        """True when any opposing minion has taunt (forces targeting)."""
        for minion in self.opp_minions:
            if minion.taunt:
                return True
        return False
    def total_attack(self):
        """Sum of base attack across all ready attackers."""
        return reduce(lambda s, i: s + i.base_attack, self.attack_minions, 0)
    @memoized
    def has_lethal(self):
        """Can we kill the opposing hero outright this turn?"""
        return not self.opp_has_taunt() and \
            self.total_attack() >= self.opp_hero.health
    @memoized
    def trade_value(self, trade):
        """Score one trade, looking ahead only when ordering matters."""
        if not trade.needs_sequence() or len(self.attack_minions) <= 1:
            return trade.value()
        seq = TradeSequence(self).after_next_trade(trade)
        return seq.trade_value()
    @memoized
    def trades(self):
        """All candidate trades, best first.

        Against taunts, large candidate sets are pruned before the expensive
        sequence-aware sort.
        """
        res = []
        me = self.attack_minions
        opp = self.targetable_minions(self.opp_minions)
        if not self.has_lethal():
            for my_minion in me:
                for opp_minion in opp:
                    trade = Trade(self.player, my_minion, opp_minion)
                    res.append(trade)
        if not self.opp_has_taunt():
            for my_minion in me:
                trade = FaceTrade(self.player, my_minion, self.opp_hero)
                res.append(trade)
        if self.opp_has_taunt():
            # NOTE(review): sorted() is ascending, so [0:4]/[0:3] keeps the
            # LOWEST-valued trades before the final reverse() — confirm this
            # pruning is intended and should not be [-4:]/[-3:].
            if len(res) >= 12:
                res = sorted(res, key=lambda t: t.value())[0:4]
            elif len(res) >= 8:
                res = sorted(res, key=lambda t: t.value())[0:3]
            else:
                res = sorted(res, key=self.trade_value)
        else:
            res = sorted(res, key=lambda t: t.value())
        res.reverse()
        return res
    def targetable_minions(self, all):
        """Taunt minions when any exist, otherwise every opposing minion."""
        taunt = [m for m in filter(lambda m: m.taunt, all)]
        if len(taunt) > 0:
            return taunt
        else:
            return all
    def __str__(self):
        res = ["TRADES:"]
        for t in self.trades():
            s = t.__str__()
            s += " Root Value: {}".format(self.trade_value(t))
            res.append(s)
        return str.join("\n", res)
class TradeMixin:
    """Mixin giving an agent the list of worthwhile trades this turn."""
    def trades(self, player):
        options = Trades(player, self.attack_minions(player),
                         player.opponent.minions, player.opponent.hero)
        # Keep only trades that are not clearly losing (value > -1).
        return [trade for trade in options.trades() if trade.value() > -1]
class AttackMixin:
    """Mixin that repeatedly executes the best available trade."""
    def attack_once(self, player):
        # Execute only the single highest-ranked trade, if any.
        trades = self.trades(player)
        if len(trades) > 0:
            self.current_trade = trades[0]
            self.current_trade.my_minion.attack()
    def attack(self, player):
        # Recurses until no profitable trades remain; each attack_once
        # changes board state, so trades() is re-evaluated every pass.
        if len(self.trades(player)) > 0:
            self.attack_once(player)
            self.attack(player)
    def attack_minions(self, player):
        """Our minions (and potentially the hero) still able to attack."""
        res = [minion
               for minion
               in filter(lambda minion: minion.can_attack(), player.minions)]
        # NOTE(review): 'and False' makes this branch unreachable — hero
        # attacks are disabled; confirm this is intentional.
        if player.hero.can_attack() and False:
            res.append(player.hero)
        return res
| {
"repo_name": "jomyhuang/sdwle",
"path": "SDWLE/agents/trade/trade.py",
"copies": "1",
"size": "7723",
"license": "mit",
"hash": -4452775827157405000,
"line_mean": 28.7038461538,
"line_max": 78,
"alpha_frac": 0.5527644698,
"autogenerated": false,
"ratio": 3.4945701357466064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9547334605546607,
"avg_score": 0,
"num_lines": 260
} |
import collections
import collections.abc
import os
import tarfile
from functools import reduce

from stango.views import file_from_tar, static_file
FilespecBase = collections.namedtuple('Filespec', 'path view kwargs')
class Filespec(FilespecBase):
    """One output file: a relative path plus the view callable rendering it.

    Immutable (namedtuple-based); all validation happens in __new__.
    """
    def __new__(cls, path, view, kwargs={}):
        # NOTE: the shared {} default is only safe while callers never
        # mutate .kwargs — the dict is stored, not copied.
        if not isinstance(path, str):
            raise TypeError('path must be a str, not %r' % path)
        if path.startswith('/'):
            raise ValueError('%r: path must not start with /' % path)
        # BUG FIX: isinstance(view, collections.Callable) breaks on
        # Python 3.10+, where the ABC aliases were removed from the
        # collections namespace; the callable() builtin is the portable test.
        if not callable(view):
            raise TypeError('%r: view must be callable' % path)
        if not isinstance(kwargs, dict):
            raise TypeError('%r: kwargs must be a dict' % path)
        return super(Filespec, cls).__new__(cls, path, view, kwargs)
    def isdir(self):
        """True when this spec denotes a directory ('' or a trailing '/')."""
        return not self.path or self.path.endswith('/')
    def realpath(self, index_file):
        """Concrete file path: path itself, or path + index_file for dirs.

        Raises ValueError for a directory path without an index_file.
        """
        if not self.isdir():
            return self.path
        elif not index_file:
            raise ValueError('Directory path and no index_file: %r' %
                             self.path)
        return os.path.join(self.path, index_file)
class Files(collections.abc.MutableSequence):
    """A mutable sequence of Filespec entries.

    Accepts Filespec instances, (path, view[, kwargs]) tuples, or iterables
    of either; tuples are normalized to Filespec on insertion.
    """
    def __init__(self, *args):
        self._data = []
        for arg in args:
            if isinstance(arg, tuple):
                self.append(arg)
            # BUG FIX: collections.Iterable was removed in Python 3.10;
            # use the collections.abc home of the ABC instead.
            elif isinstance(arg, collections.abc.Iterable):
                for item in arg:
                    self.append(item)
            else:
                self.append(arg)
    def _verify(self, arg):
        """Normalize arg to a Filespec, raising TypeError on bad shapes."""
        if isinstance(arg, Filespec):
            return arg
        elif isinstance(arg, tuple):
            if len(arg) < 2 or len(arg) > 3:
                raise TypeError('expected a tuple of the form (path, view[, kwargs])')
            if len(arg) == 2:
                path, view = arg
                kwargs = {}
            else:
                path, view, kwargs = arg
            return Filespec(path, view, kwargs)
        else:
            raise TypeError('expected a Filespec object or tuple, got %r' % arg)
    def __len__(self):
        return len(self._data)
    def __getitem__(self, index):
        return self._data[index]
    def __setitem__(self, index, value):
        self._data[index] = self._verify(value)
    def __delitem__(self, index):
        del self._data[index]
    def insert(self, index, value):
        self._data.insert(index, self._verify(value))
    def __eq__(self, other):
        # Element-wise comparison; Filespec inherits tuple equality.
        if len(self) != len(other):
            return False
        for a, b in zip(self, other):
            if a != b:
                return False
        return True
    def add_prefix(self, prefix):
        """Return a new Files with prefix prepended to every path."""
        return Files((prefix + f.path, f.view, f.kwargs) for f in self)
def _served_path(basepath, filename, strip):
if strip > 0:
parts = filename.split('/')[strip:]
if not parts:
return ''
served_name = os.path.join(*parts)
else:
served_name = filename
return os.path.join(basepath, served_name)
def files_from_tar(basepath, tarname, strip=0):
    """Build a Files list serving every regular member of a tar archive.

    The tar handle is deliberately kept open: each view reads its member
    lazily via the stored {'tar', 'member'} kwargs.
    """
    tar = tarfile.open(tarname, 'r')
    specs = Files()
    for member in tar.getmembers():
        if not member.isfile():
            continue
        served = _served_path(basepath, member.name, strip)
        if not served:
            continue
        specs.append((
            served,
            file_from_tar,
            {'tar': tar, 'member': member.name}
        ))
    return specs
def files_from_dir(basepath, dir_, strip=0):
    """Build a Files list serving every file found under a directory tree."""
    specs = Files()
    for dirpath, _dirnames, filenames in os.walk(dir_):
        for name in filenames:
            full = os.path.join(dirpath, name)
            specs.append((
                _served_path(basepath, full, strip),
                static_file,
                {'path': full}
            ))
    return specs
| {
"repo_name": "akheron/stango",
"path": "stango/files.py",
"copies": "1",
"size": "3894",
"license": "mit",
"hash": 7348075467001293000,
"line_mean": 27.4233576642,
"line_max": 86,
"alpha_frac": 0.5469953775,
"autogenerated": false,
"ratio": 4.146964856230032,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00041732310379008925,
"num_lines": 137
} |
from functools import reduce
from sys import intern
__author__ = 'Aaron Hosford'
__all__ = [
'Property',
'Category',
]
class Property:
    """An interned, totally-ordered property name.

    Names are interned so equality/ordering can use identity fast-paths and
    hashing can use id() of the shared string object.
    """
    def __init__(self, name):
        if isinstance(name, Property):
            # Copy-construction: share the interned name and cached hash.
            self._name = name._name
            self._hash = name._hash
        else:
            self._name = intern(name)
            self._hash = id(self._name)
    def __str__(self):
        return self._name
    def __repr__(self):
        return type(self).__name__ + "(" + repr(self._name) + ")"
    def __hash__(self):
        return self._hash
    def __eq__(self, other):
        if not isinstance(other, Property):
            return NotImplemented
        # Interning makes identity equivalent to string equality.
        return self._name is other._name
    def __ne__(self, other):
        if not isinstance(other, Property):
            return NotImplemented
        return self._name is not other._name
    def __le__(self, other):
        if not isinstance(other, Property):
            return NotImplemented
        return self._name is other._name or self._name < other._name
    def __ge__(self, other):
        if not isinstance(other, Property):
            return NotImplemented
        return self._name is other._name or self._name > other._name
    def __lt__(self, other):
        if not isinstance(other, Property):
            return NotImplemented
        return self._name is not other._name and self._name < other._name
    def __gt__(self, other):
        if not isinstance(other, Property):
            return NotImplemented
        return self._name is not other._name and self._name > other._name
    @property
    def name(self):
        return self._name
class Category:
    """Represents a category of classification for a parse tree or parse
    tree node: an interned name plus positive and negative Property sets."""
    def __init__(self, name, positive_properties=None,
                 negative_properties=None):
        if not isinstance(name, str):
            raise TypeError(name, str)
        self._name = intern(name)
        self._positive_properties = (
            frozenset([Property(prop) for prop in positive_properties])
            if positive_properties
            else frozenset()
        )
        self._negative_properties = (
            frozenset([Property(prop) for prop in negative_properties])
            if negative_properties
            else frozenset()
        )
        if self._positive_properties & self._negative_properties:
            raise ValueError("Property is both positive and negative.")
        # This works because we intern the name & properties beforehand:
        self._hash = (
            id(self._name) ^
            reduce(
                lambda a, b: a ^ hash(b),
                self._positive_properties,
                0) ^
            reduce(
                lambda a, b: a ^ -hash(b),
                self._negative_properties,
                0
            )
        )
    @property
    def name(self):
        return self._name
    @property
    def positive_properties(self):
        return self._positive_properties
    @property
    def negative_properties(self):
        return self._negative_properties
    def has_properties(self, *properties):
        """True when every given property is in the positive set."""
        for prop in properties:
            if not isinstance(prop, Property):
                prop = Property(prop)
            if prop not in self._positive_properties:
                return False
        return True
    def lacks_properties(self, *properties):
        """True when none of the given properties is in the positive set."""
        for prop in properties:
            if not isinstance(prop, Property):
                prop = Property(prop)
            if prop in self._positive_properties:
                return False
        return True
    def to_str(self, simplify=True):
        """Render as 'Name(pos,...)' — negatives shown as '-prop' only when
        simplify is False."""
        result = self._name
        properties = []
        if self._positive_properties:
            properties = sorted(
                [str(prop) for prop in self._positive_properties])
        if not simplify and self._negative_properties:
            properties.extend(
                sorted('-' + str(prop)
                       for prop in self._negative_properties)
            )
        if properties:
            result += '(' + ','.join(properties) + ')'
        return result
    def __str__(self):
        return self.to_str()
    def __repr__(self):
        return type(self).__name__ + "(" + repr(self._name) + ", " + repr(
            sorted(self._positive_properties)) + ", " + repr(
            sorted(self._negative_properties)) + ")"
    def __hash__(self):
        return self._hash
    def __eq__(self, other):
        if not isinstance(other, Category):
            return NotImplemented
        # We can use "is" instead of "==" for names because we intern them
        # all ahead of time.
        return self is other or (
            self._hash == other._hash and
            self._name is other._name and
            self._positive_properties == other._positive_properties and
            self._negative_properties == other._negative_properties
        )
    def __ne__(self, other):
        return not (self == other)
    def _order_key(self):
        """Sort key reproducing the category ordering: name first, then
        property-set sizes, then sorted positive, then sorted negative
        properties. Lexicographic tuple comparison of these keys is exactly
        the comparison chain the duplicated __le__/__lt__ used to spell out.
        """
        return (
            self._name,
            len(self._positive_properties),
            len(self._negative_properties),
            sorted(self._positive_properties),
            sorted(self._negative_properties),
        )
    def __le__(self, other):
        if not isinstance(other, Category):
            return NotImplemented
        return self._order_key() <= other._order_key()
    def __lt__(self, other):
        if not isinstance(other, Category):
            return NotImplemented
        return self._order_key() < other._order_key()
    def __ge__(self, other):
        if not isinstance(other, Category):
            return NotImplemented
        return other <= self
    def __gt__(self, other):
        if not isinstance(other, Category):
            return NotImplemented
        return not (self <= other)
    def __contains__(self, other):
        """other in self: self's constraints are satisfied by other."""
        if not isinstance(other, Category):
            return NotImplemented
        # They must have the same name, and all the properties that apply
        # to this category must apply to the other category. We can use
        # "is" instead of "==" because we intern the names ahead of time.
        return self is other or (
            (self._name is other._name or self.is_wildcard()) and
            self._positive_properties <= other._positive_properties and
            not self._negative_properties & other._positive_properties
        )
    def is_wildcard(self):
        """The '_' category matches any name in __contains__."""
        return self._name == "_"
    def promote_properties(self, positive, negative):
        """New Category with extra properties, existing opposites winning.

        NOTE(review): plain strings passed here never compare equal to the
        stored Property objects, so the set subtractions only take effect
        when Property instances are supplied — confirm callers do so.
        """
        return type(self)(
            self._name,
            (self._positive_properties |
             (frozenset(positive) - self._negative_properties)),
            (self._negative_properties |
             (frozenset(negative) - self._positive_properties))
        )
| {
"repo_name": "hosford42/pyramids",
"path": "pyramids/categorization.py",
"copies": "1",
"size": "8391",
"license": "mit",
"hash": 6518556732465800000,
"line_mean": 32.0354330709,
"line_max": 74,
"alpha_frac": 0.555714456,
"autogenerated": false,
"ratio": 4.729988726042841,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5785703182042842,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from sys import intern
__author__ = 'Aaron Hosford'
__all__ = [
'Tokenizer',
'StandardTokenizer',
'tokenize',
'TokenSequence',
]
class Tokenizer:
    """Abstract base class for tokenizers."""
    def tokenize(self, text):
        """Yield (token, start, end) triples for the given text."""
        raise NotImplementedError()
class StandardTokenizer(Tokenizer):
    """Tokenizer grouping runs of word characters (alphanumerics plus
    apostrophe), splitting trailing contractions ("'s", "'ll", ...) into
    separate tokens, and splitting am/pm suffixes off numeric times.

    tokenize() yields (token, start_offset, end_offset) triples.
    """
    def __init__(self, discard_spaces=True):
        # When True, whitespace-only tokens are not yielded.
        self._discard_spaces = bool(discard_spaces)
        self.contractions = ("'", "'m", "'re", "'s", "'ve", "'d", "'ll")
    @property
    def discard_spaces(self):
        return self._discard_spaces
    @staticmethod
    def is_word_char(char):
        # Apostrophe counts as a word character so contractions stay
        # attached until the split logic in tokenize() handles them.
        return char.isalnum() or char == "'"
    def tokenize(self, text):
        token = ''
        start = 0
        end = 0
        for char in text:
            # Extend the current token while the character "matches" it:
            # same character repeated, or both are word characters.
            if (not token or
                    token[-1] == char or
                    (self.is_word_char(token[-1]) and
                     self.is_word_char(char))):
                token += char
            else:
                if not self.discard_spaces or token.strip():
                    if token.endswith(self.contractions):
                        split = token.split("'")
                        # NOTE(review): when this condition is false (e.g.
                        # the token is exactly "'s"), nothing is yielded for
                        # the token at all — confirm dropping it is intended.
                        if len(split) > 1 and (
                                len(split) != 2 or split[0]):
                            yield (
                                "'".join(split[:-1]),
                                start,
                                end - len(split[-1])
                            )
                            yield "'" + split[-1], end - len(split[-1]), end
                    elif (token[-2:].lower() in ('am', 'pm') and
                            token[:-2].isdigit()):
                        yield token[:-2], start, end - 2
                        yield token[-2:], end - 2, end
                    elif (token[-1:].lower() in ('a', 'p') and
                            token[:-1].isdigit()):
                        yield token[:-1], start, end - 1
                        yield token[-1:], end - 1, end
                    else:
                        yield token, start, end
                token = char
                start = end
            end += 1
        # Flush the final token. NOTE(review): this duplicates the loop body
        # above but hard-codes the contraction tuple and uses a weaker
        # condition (len(split) > 1 only) — the two paths can tokenize the
        # same final token differently; confirm the divergence is intended.
        if token and (not self.discard_spaces or token.strip()):
            if token.endswith(
                    ("'", "'m", "'re", "'s", "'ve", "'d", "'ll")):
                split = token.split("'")
                if len(split) > 1:
                    yield "'".join(split[:-1]), start, end - len(split[-1])
                    yield "'" + split[-1], end - len(split[-1]), end
            elif (token[-2:].lower() in ('am', 'pm') and
                    token[:-2].isdigit()):
                yield token[:-2], start, end - 2
                yield token[-2:], end - 2, end
            elif token[-1:].lower() in ('a', 'p') and token[:-1].isdigit():
                yield token[:-1], start, end - 1
                yield token[-1:], end - 1, end
            else:
                yield token, start, end
# Module-level default tokenizer shared by the convenience function below.
_tokenizer = StandardTokenizer()
def tokenize(text):
    """Tokenize text with the module's default StandardTokenizer."""
    return _tokenizer.tokenize(text)
class TokenSequence:
    """An immutable sequence of interned tokens with their source spans.

    Built from (token, start, end) triples; iterating or indexing yields
    tokens only, while .spans preserves the offsets.
    """
    def __init__(self, tokens):
        token_list = []
        span_list = []
        for text, begin, finish in tokens:
            if not isinstance(text, str):
                raise TypeError(text, str)
            if not isinstance(begin, int):
                raise TypeError(begin, int)
            if not isinstance(finish, int):
                raise TypeError(finish, int)
            token_list.append(intern(text))
            span_list.append((begin, finish))
        self._tokens = tuple(token_list)
        self._spans = tuple(span_list)
        # Interned tokens are hashed by identity (id), spans by value.
        hash_value = 0
        for tok in self._tokens:
            hash_value ^= id(tok)
        for span in self._spans:
            hash_value ^= hash(span)
        self._hash = hash_value
    def __str__(self):
        return ' '.join(self._tokens)
    def __repr__(self):
        return "{}({!r})".format(type(self).__name__, self._tokens)
    def __hash__(self):
        return self._hash
    def __eq__(self, other):
        if not isinstance(other, TokenSequence):
            return NotImplemented
        if self is other:
            return True
        return (self._hash == other._hash and
                self._tokens == other._tokens and
                self._spans == other._spans)
    def __ne__(self, other):
        if not isinstance(other, TokenSequence):
            return NotImplemented
        return not (self == other)
    def __le__(self, other):
        # Order by length first, then tokens, then spans.
        if not isinstance(other, TokenSequence):
            return NotImplemented
        if len(self._tokens) != len(other._tokens):
            return len(self._tokens) < len(other._tokens)
        if self._tokens != other._tokens:
            return self._tokens < other._tokens
        return self._spans <= other._spans
    def __gt__(self, other):
        if not isinstance(other, TokenSequence):
            return NotImplemented
        return not (self <= other)
    def __ge__(self, other):
        if not isinstance(other, TokenSequence):
            return NotImplemented
        return other <= self
    def __lt__(self, other):
        if not isinstance(other, TokenSequence):
            return NotImplemented
        return not (self >= other)
    def __getitem__(self, index):
        return self._tokens[index]
    def __len__(self):
        return len(self._tokens)
    def __iter__(self):
        return iter(self._tokens)
    @property
    def tokens(self):
        return self._tokens
    @property
    def spans(self):
        return self._spans
| {
"repo_name": "hosford42/pyramids",
"path": "pyramids/tokenization.py",
"copies": "1",
"size": "5603",
"license": "mit",
"hash": 4946316912973830000,
"line_mean": 30.4775280899,
"line_max": 75,
"alpha_frac": 0.4709976798,
"autogenerated": false,
"ratio": 4.453895071542131,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5424892751342131,
"avg_score": null,
"num_lines": null
} |
# from functools import reduce
from time import time
def get_dict_squares(n):
    """Map every integer in [0, n] to the sum of the squares of its digits.

    Built bottom-up: for i >= 10 the answer reuses the already-computed
    entries for i // 10 and for the last digit (i % 10), which keeps the
    whole table O(n).
    """
    squares = {digit: digit * digit for digit in range(10)}
    for i in range(10, n + 1):
        squares[i] = squares[i // 10] + squares[i % 10]
    return squares
def sum_squares(n):
    """Return the sum of the squares of the decimal digits of *n*."""
    return sum(int(ch) ** 2 for ch in str(n))
def chain(n):
    """Follow the digit-square-sum chain from *n* until it settles at 1 or 89.

    Repeatedly replaces n with sum_squares(n); the loop assumes every
    starting value eventually reaches one of those two fixed points
    (the Project Euler 92 invariant).

    Bug fix: the loop condition used identity tests (`n is not 89`), which
    only works by accident thanks to CPython's small-integer caching and is
    a SyntaxWarning on modern Python; value equality is the correct test.
    """
    while n != 89 and n != 1:
        n = sum_squares(n)
    return n
def get_dict_chain(n):
    """Resolve the chain endpoint (1 or 89) for every reachable digit-square sum.

    sum_squares(n) is the largest sum any number up to *n* can produce when
    *n* is of the all-nines form this script uses, so resolving 1..sum_squares(n)
    covers every intermediate value.
    """
    largest_sum = sum_squares(n)
    return {value: chain(value) for value in range(1, largest_sum + 1)}
def get_dict_frequency(dictionary):
    """Count how often each value occurs among *dictionary*'s values.

    Returns a dict covering every integer from 0 up to the largest value,
    zero-filled for values that never occur.
    """
    top = max(dictionary.values())
    counts = {value: 0 for value in range(top + 1)}
    for value in dictionary.values():
        counts[value] += 1
    return counts
if __name__ == "__main__":
    # Project Euler 92: count how many starting numbers up to N end their
    # digit-square-sum chain at 1 versus at 89.
    start = time()
    N = 9999999
    dict_chain = get_dict_chain(N)
    dict_squares = get_dict_squares(N)
    dict_freq = get_dict_frequency(dict_squares)
    limit = max(dict_freq.keys())
    d1 = d89 = 0
    # Each number first collapses to its digit-square sum, so it suffices to
    # resolve the chain for those small sums and weight them by frequency.
    for i in range(1, limit+1):
        if dict_chain[i] == 1:
            d1 += dict_freq[i]
        else:
            d89 += dict_freq[i]
    print('d1:', d1, 'd89:', d89, 'T:', d1+d89)
    print(time() - start, 's')
| {
"repo_name": "floppp/programming_challenges",
"path": "project_euler/051-100/92.py",
"copies": "1",
"size": "2442",
"license": "mit",
"hash": -3752411034492624400,
"line_mean": 25.6373626374,
"line_max": 66,
"alpha_frac": 0.5688943894,
"autogenerated": false,
"ratio": 2.8023121387283236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38712065281283237,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from typing import Iterable
import dask
import dask.array
from dask.delayed import Delayed
from rechunker.types import (
MultiStagePipeline,
ParallelPipelines,
PipelineExecutor,
Stage,
)
class DaskPipelineExecutor(PipelineExecutor[Delayed]):
    """An execution engine based on dask.

    Supports zarr and dask arrays as inputs. Outputs must be zarr arrays.

    Execution plans for DaskExecutors are dask.delayed objects.
    """

    def pipelines_to_plan(self, pipelines: ParallelPipelines) -> Delayed:
        # Combine all pipelines into one Delayed that runs them together.
        return _make_pipelines(pipelines)

    def execute_plan(self, plan: Delayed, **kwargs):
        # kwargs are forwarded to Delayed.compute (e.g. scheduler choice).
        return plan.compute(**kwargs)
def _make_pipelines(pipelines: ParallelPipelines) -> Delayed:
    """Build one Delayed whose computation depends on every pipeline."""
    pipelines_delayed = [_make_pipeline(pipeline) for pipeline in pipelines]
    return _merge(*pipelines_delayed)
def _make_pipeline(pipeline: MultiStagePipeline) -> Delayed:
    """Chain a pipeline's stages so each stage waits on the previous one."""
    stages_delayed = [_make_stage(stage) for stage in pipeline]
    # Left fold: stage i becomes an upstream dependency of stage i+1.
    d = reduce(_add_upstream, stages_delayed)
    return d
def _make_stage(stage: Stage) -> Delayed:
    """Turn one Stage into a Delayed.

    A stage without map_args becomes a single delayed call. A mapped stage
    becomes one task per argument plus a no-op barrier task that depends on
    all of them, so downstream consumers see a single output key.
    """
    if stage.map_args is None:
        return dask.delayed(stage.func)()
    else:
        # Deterministic task names derived from the function identity.
        name = stage.func.__name__ + "-" + dask.base.tokenize(stage.func)
        dsk = {(name, i): (stage.func, arg) for i, arg in enumerate(stage.map_args)}
        # create a barrier
        top_key = "stage-" + dask.base.tokenize(stage.func, stage.map_args)

        def merge_all(*args):
            # this function is dependent on its arguments but doesn't actually do anything
            return None

        # The barrier task lists every per-argument key as an input.
        dsk.update({top_key: (merge_all, *list(dsk))})
        return Delayed(top_key, dsk)
def _merge_task(*args):
    # No-op whose only purpose is to depend on all merged graphs' keys.
    pass
def _merge(*args: Iterable[Delayed]) -> Delayed:
    """Combine several Delayeds into one that depends on all of them."""
    name = "merge-" + dask.base.tokenize(*args)
    # mypy doesn't like arg.key
    keys = [getattr(arg, "key") for arg in args]
    new_task = (_merge_task, *keys)
    # mypy doesn't like arg.dask
    graph = dask.base.merge(
        *[dask.utils.ensure_dict(getattr(arg, "dask")) for arg in args]
    )
    graph[name] = new_task
    d = Delayed(name, graph)
    return d
def _add_upstream(first: Delayed, second: Delayed):
    """Make every task in *second*'s top layer depend on *first*.

    Each top-layer task is rewrapped as ((lambda a, b: a), task, first.key),
    so first must complete before the task runs while its result is
    returned unchanged.
    """
    upstream_key = first.key
    dsk = second.dask
    top_layer = _get_top_layer(dsk)
    new_top_layer = {}
    for key, value in top_layer.items():
        new_top_layer[key] = ((lambda a, b: a), value, upstream_key)
    dsk_new = dask.base.merge(
        dask.utils.ensure_dict(first.dask), dask.utils.ensure_dict(dsk), new_top_layer
    )
    return Delayed(second.key, dsk_new)
def _get_top_layer(dsk):
    """Return the first layer of *dsk* (the one produced last by _make_stage)."""
    if hasattr(dsk, "layers"):
        # this is a HighLevelGraph
        top_layer_key = list(dsk.layers)[0]
        top_layer = dsk.layers[top_layer_key]
    else:
        # could this go wrong? Relies on dict insertion order and on every
        # key in the layer sharing the first key's "name-" prefix.
        first_key = next(iter(dsk))
        first_task = first_key[0].split("-")[0]
        top_layer = {k: v for k, v in dsk.items() if k[0].startswith(first_task + "-")}
    return top_layer
| {
"repo_name": "pangeo-data/rechunker",
"path": "rechunker/executors/dask.py",
"copies": "1",
"size": "3001",
"license": "mit",
"hash": 3786606171733185500,
"line_mean": 28.1359223301,
"line_max": 90,
"alpha_frac": 0.6437854049,
"autogenerated": false,
"ratio": 3.2619565217391306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9403527771002793,
"avg_score": 0.00044283112726760696,
"num_lines": 103
} |
from functools import reduce
from typing import TYPE_CHECKING, Dict, List, Sequence, Tuple, Union
from django.db.models import CharField, Q, Value
from django.db.models.functions import Concat
if TYPE_CHECKING:
from django.db.models import QuerySet
class SearchQuerySetMixin:
    """Mixin to add a search method to a custom QuerySet."""

    def search(self, query: str) -> "QuerySet":
        """Search over the fields defined in the model.

        Plain fields come from ``model.SEARCH_FIELDS``. Combined fields
        (``model.SEARCH_COMBINED_FIELDS``, name -> sequence of fields) are
        first annotated as space-joined Concat expressions. Every field is
        then OR-combined with an ``icontains`` test against *query*, and
        the filtered queryset is returned distinct. An empty query returns
        the queryset unchanged; no configured fields returns none().
        """
        if not query:
            return self  # Ignore the search if it's an empty string
        try:
            fields: List[
                Union[Tuple[str, str], str]
            ] = self.model.SEARCH_FIELDS  # type: ignore
        except AttributeError:
            fields = []
        try:
            combined_fields: Dict[str, Sequence] = self.model.SEARCH_COMBINED_FIELDS  # type: ignore
        except AttributeError:
            combined_fields = {}
        conditions: List = []
        queryset: "QuerySet" = self
        if combined_fields:
            annotations = {}
            for name, combined_field in combined_fields.items():
                # Interleave each source field with a literal space so the
                # concatenated value is searchable as natural text.
                concat = []
                for item in combined_field:
                    concat += [item, Value(" ")]
                # (A leftover debug print(concat) was removed here.)
                annotations[name] = Concat(*concat, output_field=CharField())
            queryset = self.annotate(**annotations)  # type: ignore
        conditions += [
            Q(**{f"{field}__icontains": query})
            for field in fields + list(combined_fields.keys())
        ]
        if conditions:
            return queryset.filter(reduce(lambda x, y: x | y, conditions)).distinct()
        return self.none()  # type: ignore
| {
"repo_name": "marcosgabarda/django-belt",
"path": "belt/managers.py",
"copies": "1",
"size": "1700",
"license": "mit",
"hash": -4878625203462672000,
"line_mean": 36.7777777778,
"line_max": 100,
"alpha_frac": 0.5782352941,
"autogenerated": false,
"ratio": 4.485488126649076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00047841993501675754,
"num_lines": 45
} |
from functools import reduce
from urllib.parse import urlparse
import time
def timeit(method):
    """Decorator that prints the wall-clock duration of each call.

    Improvement: functools.wraps now preserves the wrapped function's
    metadata (__name__, __doc__), which the original wrapper lost.
    """
    from functools import wraps

    @wraps(method)
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        # Same report format as before: elapsed seconds, function, args.
        print('Time: %2.2f sec, function: %r, args: (%r, %r).' % (te - ts, method.__name__, args, kw))
        return result
    return timed
def is_link(string):
    """Return True when *string* is an http(s) URL with both a host and a path."""
    parts = urlparse(string)
    return parts.scheme in ('http', 'https') and parts.netloc != '' and parts.path != ''
def is_dbpedia_link(prop):
    """Return True when *prop* points into the dbpedia.org namespace."""
    return prop.find('http://dbpedia.org/') != -1
def extract_link_entity(string):
    """For an http(s) URL with a path, return its last path segment with
    underscores turned into spaces; return None for anything else."""
    parts = urlparse(string)
    if parts.scheme not in ('http', 'https') or parts.netloc == '' or parts.path == '':
        return None
    tail = parts.path.rsplit('/', 1)[-1]
    return tail.replace('_', ' ')
def unique_values(seq) -> list:
    """Return the items of *seq* with duplicates dropped, keeping the order
    of first occurrence (items must be hashable)."""
    # dict preserves insertion order (Python 3.7+), so fromkeys dedupes
    # while keeping first-seen order.
    return list(dict.fromkeys(seq))
def argmax(values: list) -> int:
    """Index of the largest element (the first one, on ties)."""
    return values.index(max(values))
def multi_replace(string: str, replace_tuples: tuple) -> str:
    """Apply each replacement tuple (arguments to str.replace) to *string*,
    left to right."""
    result = string
    for replacement in replace_tuples:
        result = result.replace(*replacement)
    return result
| {
"repo_name": "max-andr/deepanswer",
"path": "src/utils.py",
"copies": "1",
"size": "1204",
"license": "apache-2.0",
"hash": -8963181303209944000,
"line_mean": 24.6170212766,
"line_max": 102,
"alpha_frac": 0.5938538206,
"autogenerated": false,
"ratio": 3.2717391304347827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43655929510347824,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from utils import convert_kwarg, format_expression, random_expression, random_tens_int
import random
from sympy import *
def addition(digits=2, values=2):
    """Build a random addition problem.

    Returns a (problem_text, answer_text) pair, e.g. ('12 + 34', '46').

    digits -- digits per operand (coerced to int via convert_kwarg)
    values -- number of operands to add
    """
    digits = convert_kwarg(digits, int)
    values = convert_kwarg(values, int)
    # One sympy symbol per operand (_0, _1, ...), summed symbolically.
    sym_vars = [''.join(['_', str(i)]) for i in range(0, values)]
    sym_list = list(symbols(' '.join(sym_vars)))
    expression = sum(sym_list)
    sym_values = []
    # Substitute a random value (random_tens_int, from utils) per symbol;
    # sympy evaluates the sum as substitutions complete.
    for v in sym_list:
        new_val = random_tens_int(digits)
        sym_values.append(str(new_val))
        expression = expression.subs(v, new_val)
    return ' + '.join(sym_values), str(expression)
#def addition_word_problems():
# pass
def subtraction(digits=2, values=2):
    """Build a random subtraction problem.

    Returns a (problem_text, answer_text) pair, e.g. ('52 - 17', '35').

    digits -- digits per operand (coerced to int via convert_kwarg)
    values -- number of operands subtracted left-to-right
    """
    digits = convert_kwarg(digits, int)
    values = convert_kwarg(values, int)
    # One sympy symbol per operand; fold left-to-right as _0 - _1 - ...
    sym_vars = [''.join(['_', str(i)]) for i in range(0, values)]
    sym_list = list(symbols(' '.join(sym_vars)))
    expression = reduce(lambda x, y: x - y, sym_list)
    sym_values = []
    for v in sym_list:
        new_val = random_tens_int(digits)
        sym_values.append(str(new_val))
        expression = expression.subs(v, new_val)
    return ' - '.join(sym_values), str(expression)
# def subtraction_word_problem():
# pass
def division(dividend_digits=3, divisor_digits=1, rounding=2):
    """Build a random division problem.

    Returns (problem_text, solutions); *solutions* holds two accepted
    answers: the quotient (as an int string when it divides evenly, else
    rounded to *rounding* places) and a 'quotient r remainder' form, e.g.
    ('123 / 4', ['30.75', '30r3']).

    dividend_digits -- digits in the dividend (coerced to int)
    divisor_digits  -- digits in the divisor (coerced to int)
    rounding        -- decimal places kept for non-integer quotients
    """
    dividend_digits = convert_kwarg(dividend_digits, int)
    divisor_digits = convert_kwarg(divisor_digits, int)
    rounding = convert_kwarg(rounding, int)
    dividend = random_tens_int(dividend_digits)
    divisor = random_tens_int(divisor_digits)
    problem_text = ' / '.join([str(dividend), str(divisor)])
    solutions = []
    # Solution #1: the quotient itself.
    float_solution = dividend / divisor
    # Bug fix: the old test was `float_solution % 10 == 0`, which only
    # recognised multiples of TEN as whole numbers; any whole quotient
    # should be rendered as an int.
    if float_solution.is_integer():
        solutions.append(str(int(float_solution)))
    else:
        solutions.append(str(round(float_solution, rounding)))
    # Solution #2: integer quotient with remainder, solved symbolically.
    r = symbols('r')
    remainder = solve(Eq((dividend - r) / divisor, int(float_solution)))[0]
    solutions.append('r'.join([str(int(float_solution)), str(remainder)]))
    return problem_text, solutions
# def division_word_problem():
# pass
def multiplication(digits=2, values=2):
    """Build a random multiplication problem.

    Returns a (problem_text, answer_text) pair, e.g. ('12 * 34', '408').

    digits -- digits per operand (coerced to int via convert_kwarg)
    values -- number of operands to multiply
    """
    digits = convert_kwarg(digits, int)
    values = convert_kwarg(values, int)
    # One sympy symbol per operand; fold into the product _0 * _1 * ...
    sym_vars = [''.join(['_', str(i)]) for i in range(0, values)]
    sym_list = list(symbols(' '.join(sym_vars)))
    expression = reduce(lambda x, y: x * y, sym_list)
    sym_values = []
    for v in sym_list:
        new_val = random_tens_int(digits)
        sym_values.append(str(new_val))
        expression = expression.subs(v, new_val)
    return ' * '.join(sym_values), str(expression)
# def multiplication_word_problem():
# pass
def order_of_operations(depth=2, rounding=2):
    """Build a random multi-operator expression problem.

    Returns (problem_text, answer_text); the answer is the expression's
    numeric value rounded to *rounding* decimal places.

    depth -- how many times the expression is randomly extended
    """
    depth = convert_kwarg(depth, int)
    expression = random_expression()
    # Grow the expression tree by extending it on a randomly chosen side.
    for t in range(0, depth-1):
        exp_type = random.choice(['left', 'right', 'center_left', 'center_right'])
        if exp_type == 'left':
            expression = random_expression(expression)
        elif exp_type == 'right':
            expression = random_expression(None, expression)
        elif exp_type == 'center_left':
            expression = random_expression(expression, random_expression())
        elif exp_type == 'center_right':
            expression = random_expression(random_expression(), expression)
    return format_expression(expression), str(round(expression.evalf(), rounding))
# def order_of_operations_ii():
# pass
# def order_of_operations_with_absolute_values():
# pass
| {
"repo_name": "tutorgen/problem_generator",
"path": "problems/algebra_1/arithmetic.py",
"copies": "1",
"size": "3519",
"license": "bsd-3-clause",
"hash": -1254942571230841300,
"line_mean": 29.6,
"line_max": 82,
"alpha_frac": 0.6362603012,
"autogenerated": false,
"ratio": 3.439882697947214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9562877753943144,
"avg_score": 0.0026530490408140806,
"num_lines": 115
} |
from functools import reduce
from utils import convert_kwarg, format_expression, random_expression, random_tens_int
import random
from sympy import *
# def solving_equations_with_the_distributive_property_and_by_combining_like_terms():
# pass
# def solving_two_step_equations_simple_values():
# pass
# def work_word_problems():
# pass
# def solving_equations_with_the_distributive_property_first_step():
# pass
def solving_two_step_equations(depth=2, rounding=2):
    """Build a random 'solve for x' equation and its solution.

    Returns (question, solutions) where *solutions* holds the exact
    (formatted sympy) answer and a decimal form rounded to *rounding*
    places.

    depth -- how many times the x-expression is randomly extended
    """
    depth = convert_kwarg(depth, int)
    expression = symbols('x')
    # Grow a random expression around x, same scheme as order_of_operations.
    for t in range(0, depth-1):
        exp_type = random.choice(['left', 'right', 'center_left', 'center_right'])
        if exp_type == 'left':
            expression = random_expression(expression)
        elif exp_type == 'right':
            expression = random_expression(None, expression)
        elif exp_type == 'center_left':
            expression = random_expression(expression, random_expression())
        elif exp_type == 'center_right':
            expression = random_expression(random_expression(), expression)
    expression = Eq(expression, random_tens_int(2))
    solution = solve(expression)
    if not solution:
        # This random draw had no solution (e.g. x cancelled out): retry.
        return solving_two_step_equations(depth, rounding)
    solution = solution[0]
    solution_1 = format_expression(solution)
    solution_2 = str(round(solution.evalf(), rounding))
    return "Solve for x: " + format_expression(expression), [solution_1, solution_2]
# def work_word_problems_find_an_individual_time():
# pass
# def solving_two_step_equations_variable_on_the_right():
# pass
# def solving_equations_with_absolute_values_i():
# pass
# def solving_equations_with_inequalities_and_absolute_values():
# pass
# def solving_equations_with_the_distributive_property():
# pass
# def solving_equations_by_combining_like_terms_i():
# pass
# def direct_and_indirect_variation_squared_word_problems():
# pass
# def solving_rational_expressions_i():
# pass
# def rate_word_problems_ii():
# pass
# def solving_with_variables_on_both_sides():
# pass
# def solving_an_equation_word_problem():
# pass
# def solving_equations_with_absolute_values_ii():
# pass
# def solving_equations_by_combining_like_terms_ii():
# pass
# def solving_equations_with_inequalities():
# pass
# def solving_for_a_variable_in_terms_of_other_variables_more_advanced():
# pass
# def solving_two_step_equations_simple_values_no_negatives():
# pass
# def rate_word_problems_iii():
# pass
# def direct_and_indirect_variation_word_problems():
# pass
# def solving_multi_step_equation_word_problem():
# pass
# def solving_for_a_variable_in_terms_of_other_variables():
# pass
# def solving_rational_expressions_ii():
# pass
# def rate_word_problems_i():
# pass
# def solving_with_variables_on_both_sides_1_digit():
# pass
| {
"repo_name": "tutorgen/problem_generator",
"path": "problems/algebra_1/solving_multi_step_equations.py",
"copies": "1",
"size": "2931",
"license": "bsd-3-clause",
"hash": -64056196011978580,
"line_mean": 20.0863309353,
"line_max": 86,
"alpha_frac": 0.6680313886,
"autogenerated": false,
"ratio": 3.2458471760797343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9412190253222765,
"avg_score": 0.0003376622913938467,
"num_lines": 139
} |
from functools import reduce
from utils import convert_kwarg
import random
from sympy import *
# def dividing_polynomials_iii():
# pass
def polynomials_in_standard_form():
    """Generate a 'write in standard form' cubic-polynomial problem.

    Returns (question, solution): the solution lists the four random terms
    in descending-degree order; the question shows the same terms shuffled.
    """
    suffixes = ('x^3', 'x^2', 'x', '')
    terms = ['{}{}'.format(random.randint(0, 10), suffix) for suffix in suffixes]
    solution = ' + '.join(terms)
    random.shuffle(terms)
    question = "Write the polynomial in standard form: " + ' + '.join(terms)
    return question, solution
# def graphing_a_quadratic_inequality():
# pass
# def dividing_polynomials_ii():
# pass
# def adding_and_subtracting_polynomials_with_common_denominators():
# pass
# def distributing_with_polynomials_word_problem():
# pass
# def distributing_with_polynomials():
# pass
# def dividing_polynomials_by_one_term():
# pass
# def multiplying_functions():
# pass
# def graphing_a_quadratic_by_finding_the_vertex_and_the_x_intercepts():
# pass
# def adding_and_subtracting_functions():
# pass
# def multiplying_binomials():
# pass
# def graphing_a_quadratic_by_using_an_x_y_table():
# pass
# def simplifying_polynomials():
# pass
# def dividing_functions():
# pass
# def multiplying_functions_without_a_leading_coefficient():
# pass
# def multiplying_polynomials():
# pass
# def adding_and_subtracting_polynomials():
# pass
| {
"repo_name": "tutorgen/problem_generator",
"path": "problems/algebra_1/polynomials.py",
"copies": "1",
"size": "1447",
"license": "bsd-3-clause",
"hash": -8884343033216442000,
"line_mean": 15.8255813953,
"line_max": 76,
"alpha_frac": 0.6447823082,
"autogenerated": false,
"ratio": 3.091880341880342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42366626500803417,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from xpath import dsl as x
import capybara
from capybara.compat import str_
from capybara.helpers import desc
from capybara.selector.filter_set import add_filter_set, remove_filter_set, filter_sets
from capybara.selector.selector import add_selector, remove_selector, selectors
from capybara.utils import isregex
__all__ = ["add_filter_set", "add_selector", "filter_sets", "remove_filter_set", "remove_selector",
"selectors"]
# Trivial selectors: "css" and "xpath" pass the locator through verbatim.
with add_selector("css") as s:
    s.css = lambda css: css

with add_selector("xpath") as s:
    s.xpath = lambda xpath: xpath

# "id" matches any element whose id attribute equals the locator.
with add_selector("id") as s:
    @s.xpath
    def xpath(id):
        return x.descendant()[x.attr("id") == id]
# Filters shared by the form-field selectors (checkbox, field, select, ...):
# node filters test found elements; expression filters narrow the XPath.
with add_filter_set("field") as fs:
    @fs.node_filter("checked", boolean=True)
    def checked(node, value):
        return not node.checked ^ value

    @fs.node_filter("disabled", boolean=True, default=False, skip_if="all")
    def disabled(node, value):
        return not node.disabled ^ value

    @fs.node_filter("id")
    def id(node, value):
        return node["id"] == value

    @fs.expression_filter("name")
    def name(expr, value):
        return expr[x.attr("name") == value]

    @fs.expression_filter("placeholder")
    def placeholder(expr, value):
        return expr[x.attr("placeholder") == value]

    @fs.node_filter("readonly", boolean=True)
    def readonly(node, value):
        return not node.readonly ^ value

    @fs.node_filter("unchecked", boolean=True)
    def unchecked(node, value):
        return node.checked ^ value

    @fs.describe
    def describe(options):
        # Build the human-readable suffix used in failure descriptions.
        description, states = "", []
        if options.get("checked") or options.get("unchecked") is False:
            states.append("checked")
        if options.get("unchecked") or options.get("checked") is False:
            states.append("not checked")
        if options.get("disabled") is True:
            states.append("disabled")
        if states:
            description += " that is {}".format(" and ".join(states))
        return description
# "button": <button> elements plus <input> buttons (submit/reset/image/
# button), located by id/value/title, visible text, or an image's alt text.
with add_selector("button") as s:
    @s.xpath
    def xpath(locator):
        input_button_expr = x.descendant("input")[
            x.attr("type").one_of("submit", "reset", "image", "button")]
        button_expr = x.descendant("button")
        image_button_expr = x.descendant("input")[x.attr("type").equals("image")]

        if locator:
            attr_matchers = (
                x.attr("id").equals(locator) |
                x.attr("value").is_(locator) |
                x.attr("title").is_(locator))
            image_attr_matchers = x.attr("alt").is_(locator)

            # aria-label matching is opt-in via the global capybara setting.
            if capybara.enable_aria_label:
                attr_matchers |= x.attr("aria-label").is_(locator)
                image_attr_matchers |= x.attr("aria-label").is_(locator)

            input_button_expr = input_button_expr[attr_matchers]
            button_expr = button_expr[
                attr_matchers |
                x.string.n.is_(locator) |
                x.descendant("img")[x.attr("alt").is_(locator)]]
            image_button_expr = image_button_expr[image_attr_matchers]

        return input_button_expr + button_expr + image_button_expr

    @s.node_filter("disabled", boolean=True, default=False, skip_if="all")
    def disabled(node, value):
        return not node.disabled ^ value

    @s.describe
    def describe(options):
        description = ""
        if options.get("disabled") is True:
            description += " that is disabled"
        return description
# "checkbox": <input type="checkbox">, located like any form field.
with add_selector("checkbox") as s:
    @s.xpath
    def xpath(locator):
        expr = x.descendant("input")[x.attr("type").equals("checkbox")]
        expr = _locate_field(expr, locator)
        return expr

    s.filter_set("field")
# "field": any input/select/textarea except hidden/image/submit inputs.
with add_selector("field") as s:
    @s.xpath
    def xpath(locator):
        expr = x.descendant("input", "select", "textarea")[
            ~x.attr("type").one_of("hidden", "image", "submit")]
        expr = _locate_field(expr, locator)
        return expr

    s.filter_set("field")

    @s.expression_filter("field_type")
    def field_type(expr, value):
        # "select"/"textarea" are tag names; anything else is an input type.
        if value in ["select", "textarea"]:
            return expr.axis("self", value)
        else:
            return expr[x.attr("type").equals(value)]

    @s.node_filter("value")
    def value(node, value):
        # Accept either an exact string or a compiled regex.
        if isregex(value):
            return bool(value.search(node.value))
        else:
            return node.value == value

    @s.describe
    def describe(options):
        description = ""
        if options.get("value"):
            description += " with value {}".format(desc(options["value"]))
        return description
# "fieldset": matched by id or by its <legend> text.
with add_selector("fieldset") as s:
    @s.xpath
    def xpath(locator):
        expr = x.descendant("fieldset")
        if locator:
            expr = expr[
                x.attr("id").equals(locator) |
                x.child("legend")[x.string.n.is_(locator)]]
        return expr
# "file_field": <input type="file">, located like any form field.
with add_selector("file_field") as s:
    s.label = "file field"

    @s.xpath
    def xpath(locator):
        expr = x.descendant("input")[x.attr("type").equals("file")]
        expr = _locate_field(expr, locator)
        return expr

    s.filter_set("field")
# "fillable_field": text-like inputs and textareas that accept typed input.
with add_selector("fillable_field") as s:
    s.label = "field"

    @s.xpath
    def xpath(locator):
        expr = x.descendant("input", "textarea")[
            ~x.attr("type").one_of("checkbox", "file", "hidden", "image", "radio", "submit")]
        expr = _locate_field(expr, locator)
        return expr

    s.filter_set("field")

    @s.node_filter("value")
    def value(node, value):
        # Accept either an exact string or a compiled regex.
        if isregex(value):
            return bool(value.search(node.value))
        else:
            return node.value == value

    @s.describe
    def describe(options):
        description = ""
        if options.get("value"):
            description += " with value {}".format(desc(options["value"]))
        return description
# "frame": <frame> or <iframe>, matched by id or name.
with add_selector("frame") as s:
    @s.xpath
    def xpath(locator):
        expr = x.descendant("frame") + x.descendant("iframe")
        if locator:
            expr = expr[x.attr("id").equals(locator) | x.attr("name").equals(locator)]
        return expr

    @s.expression_filter("name")
    def name(expr, value):
        return expr[x.attr("name").equals(value)]
# "label": matched by text or id; the "field" filter ties a label to its
# control via the for= attribute or by direct nesting.
with add_selector("label") as s:
    @s.xpath
    def xpath(locator):
        expr = x.descendant("label")
        if locator:
            expr = expr[x.string.n.is_(str_(locator)) | x.attr("id").equals(str_(locator))]
        return expr

    @s.node_filter("field")
    def field(node, field_or_value):
        from capybara.node.element import Element
        if isinstance(field_or_value, Element):
            if field_or_value["id"] and field_or_value["id"] == node["for"]:
                return True
            else:
                # No for=/id link: accept labels that wrap the field element.
                return node.base in field_or_value._find_xpath("./ancestor::label[1]")
        else:
            return node["for"] == str_(field_or_value)

    @s.describe
    def describe(options):
        description = ""
        if options.get("field"):
            description += " for {}".format(options["field"])
        return description
# "link": <a href=...>, matched by id/title/text or a nested image's alt;
# the href filter accepts an exact string (checked via XPath) or a regex.
with add_selector("link") as s:
    @s.xpath
    def xpath(locator):
        expr = x.descendant("a")[x.attr("href")]
        if locator:
            attr_matchers = (
                x.attr("id").equals(locator) |
                x.attr("title").is_(locator) |
                x.string.n.is_(locator))

            if capybara.enable_aria_label:
                attr_matchers |= x.attr("aria-label").is_(locator)

            expr = expr[
                attr_matchers |
                x.descendant("img")[x.attr("alt").is_(locator)]]
        return expr

    @s.node_filter("href")
    def href(node, href):
        if isregex(href):
            return bool(href.search(node["href"]))
        else:
            # For href element attributes, Selenium returns the full URL that would
            # be visited rather than the raw value in the source. So we use XPath.
            query = x.axis("self")[x.attr("href") == str_(href)]
            return node.has_selector("xpath", query)

    @s.describe
    def describe(options):
        description = ""
        if options.get("href"):
            description += " with href {}".format(desc(options["href"]))
        return description
# "link_or_button": union of the link and button selectors; anchor tags
# are never treated as disabled.
with add_selector("link_or_button") as s:
    s.label = "link or button"

    @s.xpath
    def xpath(locator):
        return selectors["link"](locator) + selectors["button"](locator)

    @s.node_filter("disabled", boolean=True, default=False, skip_if="all")
    def disabled(node, value):
        return (
            node.tag_name == "a" or
            not node.disabled ^ value)

    @s.describe
    def describe(options):
        description = ""
        if options.get("disabled") is True:
            description += " that is disabled"
        return description
# "option": <option>, matched by its visible text.
with add_selector("option") as s:
    @s.xpath
    def xpath(locator):
        expr = x.descendant("option")
        if locator:
            expr = expr[x.string.n.is_(locator)]
        return expr
# "radio_button": <input type="radio">, located like any form field.
with add_selector("radio_button") as s:
    s.label = "radio button"

    @s.xpath
    def xpath(locator):
        expr = x.descendant("input")[x.attr("type").equals("radio")]
        expr = _locate_field(expr, locator)
        return expr

    s.filter_set("field")
# "select": <select> boxes, with filters for the multiple attribute, the
# exact option set, the selected options, and a with_options expression
# filter requiring each listed option to be present.
with add_selector("select") as s:
    s.label = "select box"

    @s.xpath
    def xpath(locator):
        expr = x.descendant("select")
        expr = _locate_field(expr, locator)
        return expr

    s.filter_set("field")

    @s.node_filter("multiple", boolean=True)
    def multiple(node, value):
        return not node.multiple ^ value

    @s.node_filter("options")
    def options(node, options):
        # Hidden selects still expose option text via all_text.
        if node.visible:
            actual = [n.text for n in node.find_all("xpath", ".//option")]
        else:
            actual = [n.all_text for n in node.find_all("xpath", ".//option", visible=False)]
        # Order-insensitive comparison of the full option set.
        return sorted(options) == sorted(actual)

    @s.node_filter("selected")
    def selected(node, selected):
        if not isinstance(selected, list):
            selected = [selected]
        actual = [
            n.all_text
            for n in node.find_all("xpath", ".//option", visible=False)
            if n.selected]
        return sorted(selected) == sorted(actual)

    @s.expression_filter("with_options")
    def with_options(expr, options):
        # Require a matching <option> descendant for every listed option.
        return reduce(lambda xpath, option: xpath[selectors["option"](option)], options, expr)

    @s.describe
    def describe(options):
        description = ""
        if options.get("multiple") is True:
            description += " with the multiple attribute"
        if options.get("multiple") is False:
            description += " without the multiple attribute"
        if options.get("options"):
            description += " with options {}".format(desc(options["options"]))
        if options.get("selected"):
            description += " with {} selected".format(desc(options["selected"]))
        if options.get("with_options"):
            description += " with at least options {}".format(desc(options["with_options"]))
        return description
# "table": matched by id or by its caption text.
with add_selector("table") as s:
    @s.xpath
    def xpath(locator):
        expr = x.descendant("table")
        if locator:
            expr = expr[
                x.attr("id").equals(locator) |
                x.descendant("caption").is_(locator)]
        return expr
def _locate_field(field_expr, locator):
    """Narrow *field_expr* to fields identified by *locator*.

    A field matches when the locator equals its id, name, or placeholder,
    equals the for= target of a label with that text, or (when enabled)
    its aria-label; fields nested inside a <label> with that text also
    match via the appended descendant expression.
    """
    expr = field_expr
    if locator:
        attr_matchers = (
            x.attr("id").equals(locator) |
            x.attr("name").equals(locator) |
            x.attr("placeholder").equals(locator) |
            x.attr("id").equals(x.anywhere("label")[x.string.n.is_(locator)].attr("for")))

        if capybara.enable_aria_label:
            attr_matchers |= x.attr("aria-label").is_(locator)

        expr = expr[attr_matchers]
    expr += x.descendant("label")[x.string.n.is_(locator)].descendant(field_expr)
    return expr
| {
"repo_name": "elliterate/capybara.py",
"path": "capybara/selector/__init__.py",
"copies": "1",
"size": "12150",
"license": "mit",
"hash": 8793891696528201000,
"line_mean": 29.604534005,
"line_max": 99,
"alpha_frac": 0.5741563786,
"autogenerated": false,
"ratio": 3.8473717542748576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9919401080221967,
"avg_score": 0.00042541053057813165,
"num_lines": 397
} |
from functools import reduce
import abc
class Matchable:
    """Interface for objects that casematch() can destructure.

    NOTE(review): the class has no ABCMeta metaclass, so @abstractmethod is
    not enforced at instantiation time; the explicit raises below supply
    the runtime errors instead.
    """

    @abc.abstractmethod
    def decompose(self):
        """Return the matchable decomposition of this object."""
        raise UnimplementedException('decompose unimplemented in class %s' % self.__class__)

    @classmethod
    @abc.abstractmethod
    def pattern(cls, *args):
        """Build a pattern that matches instances of *cls*."""
        raise UnimplementedException('pattern unimplemented in class %s' % cls)
class PureMatchable(Matchable):
    """Matchable whose decomposition is captured from its constructor args."""

    def __new__(typ, *args, **kwargs):
        obj = super().__new__(typ)
        # NOTE(review): __init__ is invoked here AND again by Python after
        # __new__ returns an instance of typ, so subclass __init__ runs twice.
        obj.__init__(*args, **kwargs)
        # Install a per-instance decompose closure over the original args;
        # it shadows the class-level method below.
        obj.decompose = lambda: ((obj.__class__,) + tuple(args))
        return obj

    def decompose(self):
        # Only reachable via the per-instance attribute installed in
        # __new__, which shadows this method on every instance.
        return self.decompose()  # Looks stupid! But any instance of
        # PureMatchable will have an object field of decompose

    @classmethod
    def pattern(cls, *args):
        """Pattern tuple (cls, *args) matching instances built with *args*."""
        return (cls,) + args
class PatternException(Exception):
    """Raised for malformed patterns and for match() failures."""
    pass
class UnimplementedException(Exception):
    """Raised when a Matchable subclass leaves an abstract hook undefined."""
    pass
class Guard:
    """Wraps a predicate that must hold (called with the case's bindings
    as keyword arguments) for the case to fire."""
    def __init__(self, guard):
        self.guard = guard
class Or:
    """Wraps an alternative pattern for the same case."""
    def __init__(self, pattern):
        self.pattern = pattern
class As:
    """Matches *pattern* and additionally binds the whole target to *bind*."""
    def __init__(self, bind, pattern):
        self.bind = bind
        self.pattern = pattern
class Literal:
    """Forces equality comparison instead of pattern interpretation
    (e.g. so a string is not treated as a variable)."""
    def __init__(self, lit):
        self.lit = lit
class PairList:
    """Pattern for a non-empty list: *head* matches the first element and
    *tail* matches the remaining list."""
    def __init__(self, head, tail):
        self.head = head
        self.tail = tail
class EmptyList:
    """Pattern matching exactly the empty list."""
    pass
def match(target, *cases, name=None):
    """Match *target* against cases; run and return the first matching action.

    Each case is (pattern, *extras, action) where extras are Or alternatives
    and/or Guard predicates. Actions and guards receive the pattern's
    variable bindings as keyword arguments. Raises PatternException when no
    case matches; *name* labels the failing matcher in the message.
    """
    for pattern, *rest, action in cases:
        validate(rest, action)
        # The primary pattern plus any Or alternatives for this case.
        patterns = [pattern] + [p.pattern for p in rest if isinstance(p, Or)]
        guards = [g.guard for g in rest if isinstance(g, Guard)]
        for pattern in patterns:
            maps = casematch(target, pattern)
            # casematch returns False on failure; an empty dict is a match.
            if maps is not False and all(guard(**maps) for guard in guards):
                return action(**maps)
    raise PatternException('No pattern matches %s%s' % (str(target),
                           ('' if (name is None) else (' in %s' % name))))
def validate(rest, action):
    """Raise PatternException unless *action* is callable and every extra
    case element is an Or alternative or a Guard predicate."""
    well_formed = callable(action) and all(
        isinstance(r, (Or, Guard)) for r in rest)
    if not well_formed:
        raise PatternException('Malformed pattern')
def merge_maps(m1, m2):
    """Combine two binding maps.

    False (a failed match) propagates; a key bound to different values in
    the two maps makes the merge fail (return False); otherwise the union
    of the bindings is returned as a new dict.
    """
    if m1 is False or m2 is False:
        return False
    if any(m1[key] != m2[key] for key in m1.keys() & m2.keys()):
        return False
    merged = dict(m1)
    merged.update(m2)
    return merged
def casematch(target, pattern):
    """Try to match *target* against *pattern*.

    Returns a dict of variable bindings on success (possibly empty) or the
    sentinel False on failure. An empty dict IS a successful match, so
    callers must compare against False with `is`, as match() does.
    """
    if pattern == '_':
        # Wildcard: matches anything, binds nothing.
        return {}
    elif isinstance(pattern, str):
        # A bare string is a pattern variable: bind it to the target.
        return {pattern: target}
    elif isinstance(target, Literal):
        # Unwrap a Literal target before matching.
        return casematch(target.lit, pattern)
    elif isinstance(pattern, Literal) and pattern.lit == target:
        return {}
    elif isinstance(pattern, PairList) and isinstance(target, list) and len(target) > 0:
        # Cons-style destructuring: head and tail matched separately.
        return merge_maps(casematch(target[0], pattern.head),
                          casematch(target[1:], pattern.tail))
    elif isinstance(pattern, EmptyList) and target == []:
        return {}
    elif isinstance(pattern, As):
        # Match the inner pattern AND bind the whole target to As.bind.
        return merge_maps(casematch(target, pattern.pattern), {pattern.bind: target})
    elif isinstance(pattern, type) and isinstance(target, pattern):
        # A class used as a pattern is a plain type test.
        return {}
    elif isinstance(target, Matchable):
        return casematch(target.decompose(), pattern)
    elif isinstance(pattern, PureMatchable):
        return casematch(target, pattern.decompose())
    elif isinstance(pattern, tuple) and isinstance(target, tuple) and \
            len(pattern) == len(target):
        # Tuples match element-wise; all binding maps must agree.
        return reduce(merge_maps, (casematch(t, p) for t, p in zip(target, pattern)), {})
    elif pattern == target:
        return {}
    return False
def matchable(*data):
    """Decorator factory turning a function into a multi-case matcher.

    The decorated function becomes the action for the given pattern (with
    optional trailing Or/Guard modifiers); more cases are registered via
    the returned wrapper's .case(...) decorator. Calling the wrapper
    matches its argument tuple against the registered cases in order.
    """
    def parse_patterns_and_guards(data):
        # Split leading pattern elements from trailing Or/Guard modifiers;
        # a pattern element after a modifier is malformed.
        pattern = ()
        rest = ()
        top = True
        for elt in data:
            if isinstance(elt, Guard) or isinstance(elt, Or):
                top = False
                rest += (elt,)
            elif top:
                pattern += (elt,)
            else:
                raise PatternException('Malformed pattern')
        return pattern, rest

    def owrap(fun):
        def wrap(*args):
            # Dispatch over the accumulated cases at call time.
            return match(args, *wrap.cases)
        pattern, rest = parse_patterns_and_guards(data)
        wrap.cases = [((pattern,) + rest + (fun,))]

        def case(*data):
            # Register an additional (pattern, modifiers..., action) case.
            pattern, rest = parse_patterns_and_guards(data)

            def cwrap(cfun):
                wrap.cases.append((pattern,) + rest + (cfun,))
                return wrap
            return cwrap
        wrap.case = case
        return wrap
    return owrap
class Match(object):
    """Mutable collection of match cases, callable on a target.

    Bug fix: the constructor's default was a shared mutable list
    (``inits=[]``); it now uses None as the no-cases sentinel, which is
    backward compatible for all callers.
    """

    def __init__(self, inits=None):
        """Start with an empty case list, seeded from *inits* if given."""
        self.cases = []
        for pattern in (inits if inits is not None else []):
            self.add(*pattern)

    def add(self, *case):
        """Append one (pattern, *modifiers, action) case."""
        self.cases.append(case)

    def __call__(self, target):
        """Match *target* against the stored cases via match()."""
        return match(target, *self.cases)
| {
"repo_name": "mvitousek/pypat",
"path": "pypat.py",
"copies": "1",
"size": "5003",
"license": "mit",
"hash": 771442406940316500,
"line_mean": 32.1324503311,
"line_max": 103,
"alpha_frac": 0.5712572457,
"autogenerated": false,
"ratio": 3.996006389776358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5067263635476358,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import base64
from six import BytesIO
import matplotlib
class EvaluatorHTMLSerializer:
    """
    Wraps ClassifierEvaluator so attributes and methods return an HTML
    serializable version of them
    """
    def __init__(self, evaluator):
        self.evaluator = evaluator

    def __getattr__(self, key):
        # Delegate every lookup to the wrapped evaluator; callables are
        # wrapped so their results get converted to HTML when invoked.
        attr = getattr(self.evaluator, key)
        if callable(attr):
            return HTMLSerializableCallable(attr)
        else:
            return attr
class HTMLSerializableCallable:
    """Wraps a method so that the results is serialized after it is run"""
    def __init__(self, attr):
        self.attr = attr

    def __call__(self, *args, **kwargs):
        # Run the wrapped callable, then convert its result to an HTML
        # fragment: matplotlib objects become inline <img> tags, anything
        # with a to_html method serializes itself.
        result = self.attr(*args, **kwargs)
        if isinstance(result, matplotlib.axes.Axes):
            return figure2html(result.get_figure())
        if isinstance(result, matplotlib.figure.Figure):
            return figure2html(result)
        if hasattr(result, 'to_html'):
            return result.to_html()
        raise TypeError('Unsupported type {}'.format(type(result)))
def figure2html(fig):
    """Render a matplotlib figure as an inline base64 <img> HTML tag."""
    encoded = figure2base64(fig)
    return base64_2_html(encoded)
def base64_2_html(img):
    """Wrap a base64-encoded PNG (str or bytes) in an HTML <img> tag.

    BUGFIX: replaced a bare ``except:`` (which silently caught any error,
    not just the py3 bytes-concatenation TypeError) with an explicit
    bytes check.
    """
    # base64 encoders return bytes on Python 3; decode before formatting.
    if isinstance(img, bytes):
        img = img.decode("utf-8")
    return '<img src="data:image/png;base64,' + img + '"></img>'
def figure2base64(fig):
    """Encode a matplotlib figure as base64 PNG bytes.

    BUGFIX: replaced a bare ``except:`` with ``except AttributeError`` —
    ``base64.encodebytes`` is missing only on Python 2, and the bare
    clause was masking real encoding errors.
    """
    buffer = BytesIO()
    fig.savefig(buffer, format='png')
    try:
        encoded = base64.encodebytes(buffer.getvalue())  # py3
    except AttributeError:
        encoded = base64.encodestring(buffer.getvalue())  # py2
    return encoded
def prettify_list(l):
    """Number each element (1-based) and join the rows with <br> tags."""
    numbered = []
    for position, element in enumerate(l, start=1):
        numbered.append(str(position) + '. ' + str(element))
    return reduce(lambda left, right: left + '<br>' + right, numbered)
def prettify_dict(d):
    """Render a mapping as numbered 'key: value' rows (string keys)."""
    rows = []
    for key in d.keys():
        rows.append(key + ': ' + str(d[key]))
    return prettify_list(rows)
| {
"repo_name": "edublancas/sklearn-model-evaluation",
"path": "src/sklearn_evaluation/report/serialize.py",
"copies": "2",
"size": "1882",
"license": "mit",
"hash": -3969593066717116000,
"line_mean": 24.7808219178,
"line_max": 71,
"alpha_frac": 0.6073326249,
"autogenerated": false,
"ratio": 3.571157495256167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0031267156764284466,
"num_lines": 73
} |
from functools import reduce
import collections
class Solution:
    def longestWord(self, words: 'List[str]') -> 'str':
        """Return the longest word in *words* that can be built one
        character at a time by other words in *words*; ties are broken
        lexicographically (LeetCode 720).

        Builds a nested-defaultdict trie, then DFS-walks it, descending
        only through nodes that terminate a word so that every prefix of
        the answer is itself present in *words*.
        """
        Trie = lambda: collections.defaultdict(Trie)
        trie = Trie()
        END = True  # sentinel key marking a complete word; value is its index
        for i, word in enumerate(words):
            # reduce walks/creates one trie level per character, then the
            # final node is tagged with the word's index.
            reduce(dict.__getitem__, word, trie)[END] = i
        stack = list(trie.values())
        result = ''
        while stack:
            node = stack.pop()
            if END in node:
                word = words[node[END]]
                if len(word) > len(result) or (len(word) == len(result) and word < result):
                    result = word
                # Explore children only of complete words, so every prefix
                # of any candidate answer is itself a dictionary word.
                stack.extend(node[letter] for letter in node if letter != END)
        return result
class TrieNode:
    """One trie level for Solution2."""
    def __init__(self):
        # -1 means "no word terminates at this node"; otherwise the value
        # is the word's index in the input list.
        self.index = -1
        # Child nodes auto-create on first access, keyed by character.
        self.next = collections.defaultdict(TrieNode)
class Solution2:
    """Same algorithm as Solution, built on explicit TrieNode objects."""
    def longestWord(self, words: 'List[str]') -> 'str':
        """Return the longest word buildable one character at a time from
        other words in *words*; ties broken lexicographically.

        BUGFIX: the annotation is quoted — ``List`` is not imported in
        this module, so the bare annotation raised NameError when the
        method was defined.
        """
        root = TrieNode()
        for i, word in enumerate(words):
            node = root
            for c in word:
                node = node.next[c]
            node.index = i
        stack = list(root.next.values())
        result = ''
        while stack:
            node = stack.pop()
            if node.index != -1:
                word = words[node.index]
                if len(word) > len(result) or (len(word) == len(result) and word < result):
                    result = word
                # Descend only through complete words so every prefix of
                # the answer is itself in the dictionary.
                stack.extend(node.next.values())
        return result
| {
"repo_name": "jiadaizhao/LeetCode",
"path": "0701-0800/0720-Longest Word in Dictionary/0720-Longest Word in Dictionary.py",
"copies": "1",
"size": "1468",
"license": "mit",
"hash": -2497369014260709400,
"line_mean": 30.2340425532,
"line_max": 89,
"alpha_frac": 0.4993188011,
"autogenerated": false,
"ratio": 4.1120448179271705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008786124770137805,
"num_lines": 47
} |
from functools import reduce
import gc
import io
import locale # system locale module, not tornado.locale
import logging
import operator
import textwrap
import sys
import unittest
import warnings
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
from tornado.netutil import Resolver
from tornado.options import define, add_parse_callback, options
# Dotted module paths loaded by all(); the ".doctests" entries pull in the
# doctests embedded in the named tornado modules.
TEST_MODULES = [
    "tornado.httputil.doctests",
    "tornado.iostream.doctests",
    "tornado.util.doctests",
    "tornado.test.asyncio_test",
    "tornado.test.auth_test",
    "tornado.test.autoreload_test",
    "tornado.test.concurrent_test",
    "tornado.test.curl_httpclient_test",
    "tornado.test.escape_test",
    "tornado.test.gen_test",
    "tornado.test.http1connection_test",
    "tornado.test.httpclient_test",
    "tornado.test.httpserver_test",
    "tornado.test.httputil_test",
    "tornado.test.import_test",
    "tornado.test.ioloop_test",
    "tornado.test.iostream_test",
    "tornado.test.locale_test",
    "tornado.test.locks_test",
    "tornado.test.netutil_test",
    "tornado.test.log_test",
    "tornado.test.options_test",
    "tornado.test.process_test",
    "tornado.test.queues_test",
    "tornado.test.routing_test",
    "tornado.test.simple_httpclient_test",
    "tornado.test.tcpclient_test",
    "tornado.test.tcpserver_test",
    "tornado.test.template_test",
    "tornado.test.testing_test",
    "tornado.test.twisted_test",
    "tornado.test.util_test",
    "tornado.test.web_test",
    "tornado.test.websocket_test",
    "tornado.test.wsgi_test",
]
def all():
    """Load every module in TEST_MODULES into a single unittest TestSuite.

    NOTE: intentionally shadows the builtin ``all()``; the name is the
    entry point referenced by the test runner, so it cannot be renamed.
    """
    return unittest.defaultTestLoader.loadTestsFromNames(TEST_MODULES)
def test_runner_factory(stderr):
class TornadoTextTestRunner(unittest.TextTestRunner):
def __init__(self, *args, **kwargs):
kwargs["stream"] = stderr
super().__init__(*args, **kwargs)
def run(self, test):
result = super().run(test)
if result.skipped:
skip_reasons = set(reason for (test, reason) in result.skipped)
self.stream.write( # type: ignore
textwrap.fill(
"Some tests were skipped because: %s"
% ", ".join(sorted(skip_reasons))
)
)
self.stream.write("\n") # type: ignore
return result
return TornadoTextTestRunner
class LogCounter(logging.Filter):
    """Counts the number of WARNING or higher log records."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.info_count = 0
        self.warning_count = 0
        self.error_count = 0

    def filter(self, record):
        # Bucket the record by severity; ERROR and above count as errors.
        level = record.levelno
        if level >= logging.ERROR:
            self.error_count += 1
        elif level >= logging.WARNING:
            self.warning_count += 1
        elif level >= logging.INFO:
            self.info_count += 1
        # Never suppress the record — this filter only counts.
        return True
class CountingStderr(io.IOBase):
    """Proxy stream that forwards to the real stderr while tallying how
    much data passes through it."""

    def __init__(self, real):
        self.real = real
        self.byte_count = 0

    def write(self, data):
        # Count the volume written so the harness can detect stray output.
        self.byte_count += len(data)
        return self.real.write(data)

    def flush(self):
        return self.real.flush()
def main():
    """Configure warning filters, logging, and command-line options, then
    run the full tornado test suite, failing the run on any stray output.
    """
    # Be strict about most warnings (This is set in our test running
    # scripts to catch import-time warnings, but set it again here to
    # be sure). This also turns on warnings that are ignored by
    # default, including DeprecationWarnings and python 3.2's
    # ResourceWarnings.
    warnings.filterwarnings("error")
    # setuptools sometimes gives ImportWarnings about things that are on
    # sys.path even if they're not being used.
    warnings.filterwarnings("ignore", category=ImportWarning)
    # Tornado generally shouldn't use anything deprecated, but some of
    # our dependencies do (last match wins).
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("error", category=DeprecationWarning, module=r"tornado\..*")
    warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
    warnings.filterwarnings(
        "error", category=PendingDeprecationWarning, module=r"tornado\..*"
    )
    # The unittest module is aggressive about deprecating redundant methods,
    # leaving some without non-deprecated spellings that work on both
    # 2.7 and 3.2
    warnings.filterwarnings(
        "ignore", category=DeprecationWarning, message="Please use assert.* instead"
    )
    warnings.filterwarnings(
        "ignore",
        category=PendingDeprecationWarning,
        message="Please use assert.* instead",
    )
    # Twisted 15.0.0 triggers some warnings on py3 with -bb.
    warnings.filterwarnings("ignore", category=BytesWarning, module=r"twisted\..*")
    if (3,) < sys.version_info < (3, 6):
        # Prior to 3.6, async ResourceWarnings were rather noisy
        # and even
        # `python3.4 -W error -c 'import asyncio; asyncio.get_event_loop()'`
        # would generate a warning.
        warnings.filterwarnings(
            "ignore", category=ResourceWarning, module=r"asyncio\..*"
        )
    # This deprecation warning is introduced in Python 3.8 and is
    # triggered by pycurl. Unforunately, because it is raised in the C
    # layer it can't be filtered by module and we must match the
    # message text instead (Tornado's C module uses PY_SSIZE_T_CLEAN
    # so it's not at risk of running into this issue).
    warnings.filterwarnings(
        "ignore",
        category=DeprecationWarning,
        message="PY_SSIZE_T_CLEAN will be required",
    )
    logging.getLogger("tornado.access").setLevel(logging.CRITICAL)
    # Command-line options that swap in alternate implementations (e.g.
    # the curl HTTP client) via tornado's configurable-class mechanism.
    define(
        "httpclient",
        type=str,
        default=None,
        callback=lambda s: AsyncHTTPClient.configure(
            s, defaults=dict(allow_ipv6=False)
        ),
    )
    define("httpserver", type=str, default=None, callback=HTTPServer.configure)
    define("resolver", type=str, default=None, callback=Resolver.configure)
    define(
        "debug_gc",
        type=str,
        multiple=True,
        help="A comma-separated list of gc module debug constants, "
        "e.g. DEBUG_STATS or DEBUG_COLLECTABLE,DEBUG_OBJECTS",
        callback=lambda values: gc.set_debug(
            reduce(operator.or_, (getattr(gc, v) for v in values))
        ),
    )
    define(
        "fail-if-logs",
        default=True,
        help="If true, fail the tests if any log output is produced (unless captured by ExpectLog)",
    )
    def set_locale(x):
        locale.setlocale(locale.LC_ALL, x)
    define("locale", type=str, default=None, callback=set_locale)
    # Count records at INFO and above that reach the root handler so the
    # run can be failed if anything leaked past ExpectLog.
    log_counter = LogCounter()
    add_parse_callback(lambda: logging.getLogger().handlers[0].addFilter(log_counter))
    # Certain errors (especially "unclosed resource" errors raised in
    # destructors) go directly to stderr instead of logging. Count
    # anything written by anything but the test runner as an error.
    orig_stderr = sys.stderr
    counting_stderr = CountingStderr(orig_stderr)
    sys.stderr = counting_stderr # type: ignore
    import tornado.testing
    kwargs = {}
    # HACK: unittest.main will make its own changes to the warning
    # configuration, which may conflict with the settings above
    # or command-line flags like -bb. Passing warnings=False
    # suppresses this behavior, although this looks like an implementation
    # detail. http://bugs.python.org/issue15626
    kwargs["warnings"] = False
    kwargs["testRunner"] = test_runner_factory(orig_stderr)
    try:
        tornado.testing.main(**kwargs)
    finally:
        # The tests should run clean; consider it a failure if they
        # logged anything at info level or above.
        if (
            log_counter.info_count > 0
            or log_counter.warning_count > 0
            or log_counter.error_count > 0
            or counting_stderr.byte_count > 0
        ):
            logging.error(
                "logged %d infos, %d warnings, %d errors, and %d bytes to stderr",
                log_counter.info_count,
                log_counter.warning_count,
                log_counter.error_count,
                counting_stderr.byte_count,
            )
            if options.fail_if_logs:
                sys.exit(1)
# Entry point when run directly (e.g. `python -m tornado.test.runtests`).
if __name__ == "__main__":
    main()
| {
"repo_name": "dongpinglai/my_tornado",
"path": "tornado/test/runtests.py",
"copies": "5",
"size": "8348",
"license": "apache-2.0",
"hash": -8835842092684174000,
"line_mean": 33.6390041494,
"line_max": 100,
"alpha_frac": 0.6436272161,
"autogenerated": false,
"ratio": 4.060311284046692,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002836052223524982,
"num_lines": 241
} |
from functools import reduce
import glob
import os
import numpy as np
from bird import utils
from bird import signal_processing as sp
from bird import preprocessing as pp
# def gaussian_probability_density(x, mu, sigma):
# p = 1/(2*np.pi*(sigma**2))**(1/2) * np.exp(-((x-mu)**2)/(2*sigma**2))
# return p
def intersection(l1, l2):
    """Elements common to both iterables, as a list (order unspecified)."""
    common = set(l1) & set(l2)
    return list(common)


def intersection_all(sets):
    """Fold intersection() over every collection in *sets* (non-empty)."""
    result = sets[0]
    for current in sets:
        result = intersection(result, current)
    return result
def signal_energy(wave, samplerate):
    """Total energy of *wave*: the sum over every bin of its amplitude
    spectrogram (computed by bird.signal_processing)."""
    spectrogram = sp.wave_to_amplitude_spectrogram(wave, samplerate)
    return np.sum(spectrogram)
def signal_structure(wave, samplerate):
    """Amount of 'structure' in *wave*: count of set pixels after median
    clipping the normalized amplitude spectrogram."""
    spectrogram = sp.wave_to_amplitude_spectrogram(wave, samplerate)
    norm_spectrogram = pp.normalize(spectrogram)
    # Factor 3 is passed to bird.preprocessing.median_clipping; presumably
    # a tuned threshold multiplier -- confirm against pp.median_clipping.
    binary_image = pp.median_clipping(norm_spectrogram, 3)
    return np.sum(binary_image)
def compute_class_energy(class_path):
    """Sum signal_energy over every .wav file directly inside *class_path*."""
    wav_files = glob.glob(os.path.join(class_path, "*.wav"))
    total = 0
    for samplerate, wave in map(utils.read_wave_file, wav_files):
        total += signal_energy(wave, samplerate)
    return total
def compute_class_structure(class_path):
    """Sum signal_structure over every .wav file directly inside *class_path*."""
    wav_files = glob.glob(os.path.join(class_path, "*.wav"))
    total = 0
    for samplerate, wave in map(utils.read_wave_file, wav_files):
        total += signal_structure(wave, samplerate)
    return total
| {
"repo_name": "johnmartinsson/bird-species-classification",
"path": "bird/analysis.py",
"copies": "1",
"size": "1353",
"license": "mit",
"hash": -4627692277314064000,
"line_mean": 32,
"line_max": 78,
"alpha_frac": 0.6977087953,
"autogenerated": false,
"ratio": 3.206161137440758,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4403869932740758,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import io
import os
import tempfile
import binascii
import numpy as np
import networkx as nx
from . import db
from .svgoverlay import get_overlay, _make_layer, _find_layer, parser
from lxml import etree
from .dataset import Vertex
from .polyutils import Surface, boundary_edges
from .utils import add_roi
from . import quickflat
class ROIpack(object):
    """Named ROI (region of interest) vertex masks for a subject, loaded
    from an .npz archive or .svg overlay, exportable back to either format.
    """
    def __init__(self, subject, roifile):
        self.subject = subject
        self.roifile = roifile
        self.rois = {}  # name -> Vertex mask
        self.load_roifile()

    def load_roifile(self):
        """Load ROI definitions from self.roifile into self.rois.

        Supports .npz archives (one mask array per key) and .svg overlays;
        .hf5 is not implemented yet.
        """
        # Check if file exists
        if not os.path.exists(self.roifile):
            print("ROI file %s doesn't exist.." % self.roifile)
            return
        # Create basic Vertex to avoid expensive initialization..
        empty = Vertex(None, self.subject)
        # Load ROIs from file
        if self.roifile.endswith("npz"):
            roidata = np.load(self.roifile)
            for roi in roidata.keys():
                self.rois[roi] = empty.copy(roidata[roi])
            roidata.close()
        elif self.roifile.endswith("svg"):
            # BUGFIX: was `surfs.getSurf(...)`, but `surfs` is not defined
            # anywhere in this module; use db.get_surf exactly as to_svg does.
            pts, polys = db.get_surf(self.subject, "flat", merge=True, nudge=True)
            npts = len(pts)
            svgroipack = get_overlay(self.subject, self.roifile, pts, polys)
            for name in svgroipack.names:
                # Build a binary mask over all vertices for this named ROI.
                roimask = np.zeros((npts,))
                roimask[svgroipack.get_roi(name)] = 1
                self.rois[name] = empty.copy(roimask)
        elif self.roifile.endswith("hf5"):
            raise NotImplementedError
        else:
            raise ValueError("Don't understand ROI filetype: %s" % self.roifile)

    def to_npz(self, filename):
        """Saves npz file containing ROI masks (one array per ROI name)."""
        roidata = dict([(name, vd.data) for name, vd in self.rois.items()])
        np.savez(filename, **roidata)

    def to_svg(self, open_inkscape=False, filename=None):
        """Generate SVG file from vertex ROI masks.

        Traces the boundary of every ROI on the flattened surface and
        writes one path per ROI into the 'rois'/'shapes' layer of the
        overlay SVG at *filename* (a temp file when not given).
        NOTE(review): `open_inkscape` is currently unused.
        """
        # Generate temp filename if not provided
        if filename is None:
            filename = tempfile.mktemp(suffix=".svg", prefix=self.subject+"-rois-")
        mpts, mpolys = db.get_surf(self.subject, "flat", merge=True, nudge=True)
        # Scale flatmap coordinates into a 1024-high SVG viewport, flipping
        # y because SVG's y axis points down.
        svgmpts = mpts[:,:2].copy()
        svgmpts -= svgmpts.min(0)
        svgmpts *= 1024 / svgmpts.max(0)[1]
        svgmpts[:,1] = 1024 - svgmpts[:,1]
        npts = len(mpts)
        svgroipack = get_overlay(self.subject, filename, mpts, mpolys)
        # Add default layers
        # Add curvature
        from matplotlib import cm
        #curv = Vertex(np.hstack(get_curvature(self.subject)), self.subject)
        #curv = db.get_surfinfo(self.subject, 'curvature')
        #fp = io.BytesIO()
        #curvim = quickflat.make_png(fp, curv, height=1024, with_rois=False, with_labels=False,
        #with_colorbar=False, cmap=cm.gray,recache=True)
        #fp.seek(0)
        #svgroipack.add_roi("curvature", binascii.b2a_base64(fp.read()), add_path=False)
        # Add thickness
        # Add ROI boundaries
        svg = etree.parse(svgroipack.svgfile, parser=parser)
        # Find boundary vertices for each ROI
        lsurf, rsurf = [Surface(*pp) for pp in db.get_surf(self.subject, "fiducial")]
        flsurf, frsurf = [Surface(*pp) for pp in db.get_surf(self.subject, "flat")]
        # Vertices that actually appear in the flattened meshes (the flat
        # surface drops some vertices, e.g. at cuts).
        valids = [set(np.unique(flsurf.polys)), set(np.unique(frsurf.polys))]
        # Construct polygon adjacency graph for each surface
        polygraphs = [poly_graph(lsurf), poly_graph(rsurf)]
        for roi in self.rois.keys():
            print("Adding %s.." % roi)
            masks = self.rois[roi].left, self.rois[roi].right
            mmpts = svgmpts[:len(masks[0])], svgmpts[len(masks[0]):]
            roilayer = _make_layer(_find_layer(_find_layer(svg, "rois"),"shapes"), roi)
            for valid, pgraph, surf, mask, mmp in zip(valids, polygraphs,
                                                      [lsurf, rsurf], masks, mmpts):
                if mask.sum() == 0:
                    continue
                # Find bounds
                inbound, exbound = get_boundary(surf, np.nonzero(mask)[0])
                # Select polygons that straddle the ROI boundary: built only
                # from boundary vertices, but neither fully inside nor
                # fully outside.
                allbpolys = np.unique(surf.connected[inbound+exbound].indices)
                selbpolys = surf.polys[allbpolys]
                inpolys = np.in1d(selbpolys, inbound).reshape(selbpolys.shape)
                expolys = np.in1d(selbpolys, exbound).reshape(selbpolys.shape)
                badpolys = np.logical_or(inpolys.all(1), expolys.all(1))
                boundpolys = np.logical_and(np.logical_or(inpolys, expolys).all(1), ~badpolys)
                # Walk around boundary
                boundpolyinds = set(allbpolys[np.nonzero(boundpolys)[0]])
                bgraph = nx.Graph()
                pos = dict()
                for pa in boundpolyinds:
                    for pb in set(pgraph[pa]) & boundpolyinds:
                        edge = pgraph[pa][pb]["verts"]
                        validverts = list(valid & edge)
                        if len(validverts) > 0:
                            # Anchor the SVG point at the mean position of
                            # the edge's flat-mesh vertices.
                            pos[edge] = mmp[validverts].mean(0)
                            bgraph.add_edge(*edge)
                cc = nx.cycles.cycle_basis(bgraph)
                if len(cc) == 0:
                    continue
                if len(cc) > 1:
                    # Need to deal with this later: map/reduce calls not python3 compatible
                    edges = reduce(set.symmetric_difference,
                                   [set(map(lambda l:tuple(sorted(l)), zip(c, c[1:]+[c[0]]))) for c in cc])
                    eg = nx.from_edgelist(edges)
                    cycles = nx.cycles.cycle_basis(eg)
                    #longest = np.argmax(map(len, cycles))
                    longest = np.argmax([len(x) for x in cycles]) # python3 compatible
                    path_order = cycles[longest]
                else:
                    path_order = cc[0]
                path_points = [tuple(pos[frozenset(p)]) for p in zip(path_order[:-1],
                                                                     path_order[1:])]
                # Store poly
                path = "M %f %f L" % tuple(np.nan_to_num(path_points[0]))
                path += ", ".join(["%f %f"%tuple(np.nan_to_num(p)) for p in path_points[1:]])
                path += "Z "
                # Insert into SVG
                svgpath = etree.SubElement(roilayer, "path")
                svgpath.attrib["style"] = "fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opactiy:1"
                svgpath.attrib["d"] = path
                #svgpath.attrib["sodipodi:nodetypes"] = "c" * len(pts)
        with open(svgroipack.svgfile, "wb") as xml:
            xml.write(etree.tostring(svg, pretty_print=True))
def poly_graph(surf):
    """NetworkX undirected graph representing polygons of a Surface.

    Nodes are polygon indices; polygons that share a mesh edge are linked,
    with the shared vertex pair (a frozenset) stored under 'verts'.

    NOTE(review): assumes every edge is shared by exactly two polygons (a
    closed surface); a boundary edge (one polygon) would raise IndexError
    on polys[1] — confirm callers only pass closed (fiducial) surfaces.
    Cleanup: removed the local `import networkx as nx` that shadowed the
    module-level import, and the dead commented-out sparse-matrix code.
    """
    from collections import defaultdict

    # Collect, for each undirected vertex-pair edge, the polygons using it.
    edges = defaultdict(list)
    for ii, (a, b, c) in enumerate(surf.polys):
        edges[frozenset([a, b])].append(ii)
        edges[frozenset([a, c])].append(ii)
        edges[frozenset([b, c])].append(ii)

    polygraph = nx.Graph()
    polygraph.add_edges_from(
        (polys[0], polys[1], dict(verts=edge)) for edge, polys in edges.items()
    )
    return polygraph
def get_boundary(surf, vertices, remove_danglers=False):
    """Return (internal_boundary, external_boundary) vertex lists for
    `vertices` on surface `surf`.

    The external boundary is the set of vertices adjacent to `vertices`
    but not in it; the internal boundary is the subset of `vertices`
    adjacent to the external boundary.

    If `remove_danglers` is True vertices in the internal boundary with
    only one neighbor in the internal boundary will be moved to the external
    boundary.
    """
    if not len(vertices):
        return [], []
    import networkx as nx  # NOTE: shadows the module-level networkx import
    # Use networkx to get external boundary
    external_boundary = set(nx.node_boundary(surf.graph, vertices))
    # Find adjacent vertices to get inner boundary
    internal_boundary = set.union(*[set(surf.graph[v].keys())
                                    for v in external_boundary]).intersection(set(vertices))
    if remove_danglers:
        # Iteratively demote "danglers" until none remain.
        # NOTE(review): `.degree().items()` matches the networkx 1.x API;
        # nx 2.x returns a DegreeView (iterate it directly) — confirm the
        # pinned networkx version before upgrading.
        ingraph = surf.graph.subgraph(internal_boundary)
        danglers = [n for n,d in ingraph.degree().items() if d==1]
        while danglers:
            internal_boundary -= set(danglers)
            external_boundary |= set(danglers)
            ingraph = surf.graph.subgraph(internal_boundary)
            danglers = [n for n,d in ingraph.degree().items() if d<2]
    return list(internal_boundary), list(external_boundary)
| {
"repo_name": "gallantlab/pycortex",
"path": "cortex/rois.py",
"copies": "1",
"size": "8871",
"license": "bsd-2-clause",
"hash": -3164041819651560000,
"line_mean": 39.8801843318,
"line_max": 144,
"alpha_frac": 0.5583361515,
"autogenerated": false,
"ratio": 3.727310924369748,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4785647075869748,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import itertools
import math
import random
from mongoframes.factory.makers import Maker
from mongoframes import ASC
# Public maker classes exported via `from ...selections import *`.
__all__ = [
    'Cycle',
    'OneOf',
    'RandomReference',
    'SomeOf'
]
class Cycle(Maker):
    """
    Pick the next item from a list of makers and/or values cycling through the
    list and repeating when we reach the end.
    """

    def __init__(self, items):
        super().__init__()
        self._items = items       # makers/values to cycle through
        self._item_index = 0      # position of the next item to return

    def reset(self):
        """Rewind the cycle back to the first item"""
        self._item_index = 0

    def _assemble(self):
        # Take the current item, then advance the cursor, wrapping back to
        # the start of the list after the last item.
        index = self._item_index
        selected = self._items[index]
        self._item_index = (index + 1) % len(self._items)

        # Return the index and its assembled value (None for plain values).
        if isinstance(selected, Maker):
            return [index, selected._assemble()]
        return [index, None]

    def _finish(self, value):
        index, assembled = value
        selected = self._items[index]
        if not isinstance(selected, Maker):
            return selected
        with selected.target(self.document):
            return selected._finish(assembled)
class OneOf(Maker):
    """
    Pick one item from a list of makers and/or values.
    """

    def __init__(self, items, weights=None):
        super().__init__()
        self._items = items        # candidates to choose from
        self._weights = weights    # optional per-item selection weights

    def _assemble(self):
        # Choose an index: weighted when weights were supplied, otherwise
        # uniformly at random.
        if self._weights:
            index = self.weighted(self._weights)
        else:
            index = random.randint(0, len(self._items) - 1)

        # Return the index and its assembled value (None for plain values).
        chosen = self._items[index]
        if isinstance(chosen, Maker):
            return [index, chosen._assemble()]
        return [index, None]

    def _finish(self, value):
        index, assembled = value
        chosen = self._items[index]
        if not isinstance(chosen, Maker):
            return chosen
        with chosen.target(self.document):
            return chosen._finish(assembled)

    @staticmethod
    def weighted(weights):
        """
        Return a random integer 0 <= N <= len(weights) - 1, where the weights
        determine the probability of each possible integer.

        Based on this StackOverflow post by Ned Batchelder:
        http://stackoverflow.com/questions/3679694/a-weighted-version-of-random-choice
        """
        weights = [float(w) for w in weights]

        # Pick a point on the cumulative weight line, then find which
        # weight's interval contains it.
        choice = random.uniform(0, sum(weights))
        cumulative = 0
        for index, weight in enumerate(weights):
            cumulative += weight
            if cumulative >= choice:
                return index
class RandomReference(Maker):
    """
    Pick a reference document at random from a collection (determined by the
    given frame_cls) optionally applying a constraint.
    """

    def __init__(self, frame_cls, constraint=None):
        super().__init__()
        self._frame_cls = frame_cls           # frame used to query references
        self._constraint = constraint or {}   # filter applied when selecting

    def _assemble(self):
        # Defer the database lookup to _finish; here we only fix a random
        # fraction in [0, 1) used later to pick a document by offset.
        return random.random()

    def _finish(self, value):
        # Translate the random fraction into a document offset.
        total = self._frame_cls.count(self._constraint)
        offset = math.floor(total * value)

        selected = self._frame_cls.one(
            self._constraint,
            limit=1,
            skip=offset,
            sort=[('_id', ASC)],
            projection={'_id': True}
        )

        # Return the reference id, or None when nothing matched.
        return selected._id if selected else None
class SomeOf(Maker):
    """
    Pick one or more items from a list of makers and/or values.
    """
    def __init__(
        self,
        items,
        sample_size,
        weights=None,
        with_replacement=False
    ):
        super().__init__()
        # The list of makers/values to select from
        self._items = items
        # The number of items to pick
        self._sample_size = sample_size
        # The weighting to apply when selecting a maker/value
        self._weights = weights
        # A flag indicating if the same item can be selected from the list more
        # than once.
        self._with_replacement = with_replacement
    def _assemble(self):
        # Select some items
        sample_size = int(self._sample_size)
        sample_indexes = []
        if self._weights:
            sample_indexes = self.weighted(
                self._weights,
                sample_size,
                with_replacement=self._with_replacement
            )
        else:
            # Unweighted: sample indexes uniformly, with or without
            # replacement.
            sample_range = range(0, len(self._items))
            if self._with_replacement:
                sample_indexes = [random.choice(sample_range) \
                    for s in range(0, sample_size)]
            else:
                sample_indexes = random.sample(sample_range, sample_size)
        # Return the sampled indexes and their assembled values
        values = []
        for sample_index in sample_indexes:
            item = self._items[sample_index]
            if isinstance(item, Maker):
                values.append([sample_index, item._assemble()])
            else:
                values.append([sample_index, None])
        return values
    def _finish(self, value):
        # Resolve each sampled index back to its item, finishing makers in
        # the context of the target document.
        values = []
        for sample in value:
            item = self._items[sample[0]]
            if isinstance(item, Maker):
                with item.target(self.document):
                    values.append(item._finish(sample[1]))
            else:
                values.append(item)
        return values
    @staticmethod
    def p(i, sample_size, weights):
        """
        Given a weighted set and sample size return the probabilty that the
        weight `i` will be present in the sample.
        Created to test the output of the `SomeOf` maker class. The math was
        provided by Andy Blackshaw - thank you dad :)
        """
        # Determine the initial pick values
        weight_i = weights[i]
        weights_sum = sum(weights)
        # Build a list of weights that don't contain the weight `i`. This list will
        # be used to build the possible picks before weight `i`.
        other_weights = list(weights)
        del other_weights[i]
        # Calculate the probability
        probability_of_i = 0
        for picks in range(0, sample_size):
            # Build the list of possible permutations for this pick in the sample
            permutations = list(itertools.permutations(other_weights, picks))
            # Calculate the probability for this permutation
            permutation_probabilities = []
            for permutation in permutations:
                # Calculate the probability for each pick in the permutation
                pick_probabilities = []
                pick_weight_sum = weights_sum
                for pick in permutation:
                    pick_probabilities.append(pick / pick_weight_sum)
                    # Each time we pick we update the sum of the weight the next
                    # pick is from.
                    pick_weight_sum -= pick
                # Add the probability of picking i as the last pick
                pick_probabilities += [weight_i / pick_weight_sum]
                # Multiply all the probabilities for the permutation together
                permutation_probability = reduce(
                    lambda x, y: x * y, pick_probabilities
                )
                permutation_probabilities.append(permutation_probability)
            # Add together all the probabilities for all permutations together
            probability_of_i += sum(permutation_probabilities)
        return probability_of_i
    @staticmethod
    def weighted(weights, sample_size, with_replacement=False):
        """
        Return a set of random integers 0 <= N <= len(weights) - 1, where the
        weights determine the probability of each possible integer in the set.
        """
        assert sample_size <= len(weights), "The sample size must be smaller \
than or equal to the number of weights it's taken from."
        # Convert weights to floats
        weights = [float(w) for w in weights]
        weight_indexes = list(range(0, len(weights)))
        samples = []
        while len(samples) < sample_size:
            # Pick a weight
            sample = OneOf.weighted(weights)
            # Add the chosen weight to our samples
            samples.append(weight_indexes[sample])
            if not with_replacement:
                # Remove the weight from the list of weights we can select from
                del weights[sample]
                del weight_indexes[sample]
        return samples
| {
"repo_name": "GetmeUK/MongoFrames",
"path": "mongoframes/factory/makers/selections.py",
"copies": "1",
"size": "9581",
"license": "mit",
"hash": -1074303563776917100,
"line_mean": 29.6102236422,
"line_max": 86,
"alpha_frac": 0.5734265734,
"autogenerated": false,
"ratio": 4.597408829174664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00346213102591515,
"num_lines": 313
} |
from functools import reduce
import itertools
import numbers
import operator
from . import util
def pylast_tracks_to_play_links(lastfm_tracks, network=None):
    """Yield play links for *lastfm_tracks*, skipping tracks with none.

    Queries the network in groups of 20 tracks (presumably an API batch
    limit -- TODO confirm against pylast) and flattens the results.
    Falls back to the first track's own network when none is given.
    """
    if network is None:
        network = lastfm_tracks[0].network
    return (l for l in itertools.chain(
        *(network.get_track_play_links(track_group)
          for track_group in util.segment(lastfm_tracks, 20))
    ) if l is not None)
class TrackWrapper(object):
    """Pairs a pylast top-item's track with its weight, with lazily cached
    tag lookups (each pylast call below hits the last.fm API once)."""
    def __init__(self, top_item):
        self.track = top_item.item
        self.weight = top_item.weight
    @util.cached_property
    def track_tags(self):
        # Top tags for the track itself.
        return self.track.get_top_tags()
    @util.cached_property
    def artist_tags(self):
        # Top tags for the track's artist.
        return self.track.artist.get_top_tags()
    @util.cached_property
    def tag_to_count(self):
        # Tag name -> raw weight for the track's own tags.
        return {
            top_item.item.name: top_item.weight
            for top_item in self.track_tags
        }
    @util.cached_property
    def artist_tag_to_count(self):
        # Tag name -> weight (as float) for the artist's tags.
        # NOTE(review): calls get_top_tags() directly instead of reusing
        # self.artist_tags, bypassing that cache -- confirm if intentional.
        return {
            top_item.item.name: float(top_item.weight)
            for top_item in self.track.artist.get_top_tags()
        }
class TrackListBuilder(object):
    """Builds filtered track lists by scoring wrapped tracks."""

    def __init__(self, scorer=None):
        # BUGFIX: the default was `scorer=NullScorer`, but NullScorer is
        # defined later in this module, so the old default raised NameError
        # at import time. Resolve it lazily instead (None keeps the same
        # effective default for callers).
        self._scorer = NullScorer if scorer is None else scorer

    def build_track_list(self, tracks, number=None, threshold=1, stop_at_number=False):
        """Return up to *number* track objects whose score is >= *threshold*.

        When *stop_at_number* is set, scoring stops as soon as *number*
        tracks qualify, saving scorer work.
        """
        wrapped_tracks = map(TrackWrapper, tracks)
        scored_tracks = []
        for wrapped_track in wrapped_tracks:
            score = self._scorer.score(wrapped_track)
            if score >= threshold:
                scored_tracks.append((score, wrapped_track.track))
                if stop_at_number and len(scored_tracks) >= number:
                    break
        # BUGFIX: the old comment said "sort needs to be descending" but the
        # sort was ascending (keeping the LOWEST scores) and compared track
        # objects on score ties. Sort on the score alone, highest first.
        scored_tracks.sort(key=operator.itemgetter(0), reverse=True)
        return map(operator.itemgetter(1), scored_tracks[:number])
class NullScorer(object):
    """Default scorer: accepts every track with a constant score of 1."""
    @classmethod
    def score(cls, wrapped_track):
        # Idiom fix: the classmethod's first parameter was named `self`.
        return 1
class Scorer(object):
    """Base scorer: weights the raw _score provided by a subclass."""

    @staticmethod
    def less_than_weighter(threshold):
        """Weighter mapping scores >= *threshold* to 0, all others to 1."""
        def weight(score):
            return 0 if score >= threshold else 1
        return weight

    @staticmethod
    def scale_function(scale):
        """Weighter that multiplies a score by a constant factor."""
        def weight(value):
            return value * scale
        return weight

    def __init__(self, weighter=1, **kwargs):
        # A numeric weighter is shorthand for scaling by that number.
        if isinstance(weighter, numbers.Number):
            self.weighter = self.scale_function(weighter)
        else:
            self.weighter = weighter

    def score(self, wrapped_track):
        """Apply the weighting function to the subclass's raw _score."""
        return self.weighter(self._score(wrapped_track))
class CombinerScorer(Scorer):
    """Base for scorers that merge the scores of several child scorers."""
    def __init__(self, *scorers, **kwargs):
        # kwargs (e.g. weighter) are forwarded to the Scorer base.
        super(CombinerScorer, self).__init__(**kwargs)
        self._scorers = scorers
class ProductScorer(CombinerScorer):
    """Combines child scores by multiplying them together."""
    def _score(self, wrapped_track):
        child_scores = (child.score(wrapped_track) for child in self._scorers)
        return reduce(operator.mul, child_scores)
class SumScorer(CombinerScorer):
    """Combines child scores by adding them together."""
    def _score(self, wrapped_track):
        total = 0
        for child in self._scorers:
            total += child.score(wrapped_track)
        return total
class MaxScorer(CombinerScorer):
    """Takes the highest of the child scorers' scores."""
    def _score(self, wrapped_track):
        child_scores = [child.score(wrapped_track) for child in self._scorers]
        return max(child_scores)
class TagScorer(Scorer):
    """Base for scorers that look a tag up in a track's tag list.

    ``tag_matcher`` may be a predicate over pylast top-tag items, or a
    plain tag name matched exactly against ``item.name``.
    ``tag_attribute`` names the TrackWrapper property holding the tags
    ('artist_tags' or 'track_tags').
    """
    def __init__(self, tag_matcher, tag_attribute='artist_tags', **kwargs):
        super(TagScorer, self).__init__(**kwargs)
        # Per-artist memo of computed scores, used by subclasses.
        self.artist_to_score_cache = {}
        self._tag_matcher = tag_matcher if callable(tag_matcher) else lambda x: x.item.name == tag_matcher
        self._tag_attribute = tag_attribute
class TagWeightScorer(TagScorer):
    """Scores a track by the matched tag's weight, scaled from 0-100 to 0-1.

    Returns -1 when the tag is absent; scores are memoised per artist.
    """
    def _score(self, wrapped_track):
        artist = wrapped_track.track.artist
        if artist in self.artist_to_score_cache:
            return self.artist_to_score_cache[artist]
        tags = getattr(wrapped_track, self._tag_attribute)
        matching_tag = None
        for tag in tags:
            if self._tag_matcher(tag):
                matching_tag = tag
                break
        # Robustness fix: read the weight from matching_tag rather than
        # relying on the leaked loop variable `tag` after the loop.
        score = -1 if matching_tag is None else float(matching_tag.weight) / 100
        self.artist_to_score_cache[artist] = score
        return score
class TagRankScorer(TagScorer):
    """Scores a track by how early the matched tag appears in its tag list."""

    @staticmethod
    def binary_rank_to_score(rank, maximum_rank):
        """1 when the tag ranks inside the top *maximum_rank*, else 0."""
        return rank < maximum_rank

    @staticmethod
    def default_rank_to_score(rank, maximum_rank):
        """Linear falloff from 1.0 (rank 0) to 0.0 (rank == maximum_rank)."""
        return float(maximum_rank - rank) / maximum_rank

    def __init__(self, tag_matcher, maximum_rank=5, rank_to_score=None, **kwargs):
        super(TagRankScorer, self).__init__(tag_matcher, **kwargs)
        if rank_to_score is None:
            rank_to_score = self.default_rank_to_score
        self._rank_to_score = rank_to_score
        self._maximum_rank = maximum_rank

    def _score(self, wrapped_track):
        tags = getattr(wrapped_track, self._tag_attribute)
        # BUGFIX: the original read the leaked loop variable after the
        # loop, so an empty tag list raised NameError and a missing tag was
        # scored as if it matched at the last rank. A missing tag is now
        # scored at rank == maximum_rank (0 under the default falloff).
        rank = self._maximum_rank
        for position, tag in enumerate(tags):
            if self._tag_matcher(tag):
                rank = position
                break
        return self._rank_to_score(rank, self._maximum_rank)
| {
"repo_name": "IvanMalison/mopidy-lastfm",
"path": "mopidy_lastfm/score.py",
"copies": "1",
"size": "4906",
"license": "apache-2.0",
"hash": -292148224437172860,
"line_mean": 28.5542168675,
"line_max": 106,
"alpha_frac": 0.6324908276,
"autogenerated": false,
"ratio": 3.7336377473363775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9863067813711979,
"avg_score": 0.0006121522448798258,
"num_lines": 166
} |
from functools import reduce
import itertools
def identity(x):
    """Return *x* unchanged."""
    return x
def thread_first(val, *forms):
    """ Thread value through a sequence of functions/forms

    >>> def double(x): return 2*x
    >>> def inc(x):    return x + 1
    >>> thread_first(1, inc, double)
    4

    If the function expects more than one input you can specify those inputs
    in a tuple.  The value is used as the first input.

    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_first(1, (add, 4), (pow, 2))  # pow(add(4, 1), 2)
    25

    So in general
        thread_first(x, f, (g, y, z))
    expands to
        g(f(x), y, z)

    See Also:
        thread_last
    """
    def apply_form(acc, form):
        # A bare callable is applied directly; a tuple supplies extra
        # arguments after the threaded value.
        if callable(form):
            return form(acc)
        if isinstance(form, tuple):
            fn = form[0]
            return fn(acc, *form[1:])
    return reduce(apply_form, forms, val)
def thread_last(val, *forms):
    """ Thread value through a sequence of functions/forms

    >>> def double(x): return 2*x
    >>> def inc(x):    return x + 1
    >>> thread_last(1, inc, double)
    4

    If the function expects more than one input you can specify those inputs
    in a tuple.  The value is used as the last input.

    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_last(1, (add, 4), (pow, 2))  # pow(2, add(1, 4))
    32

    So in general
        thread_last(x, f, (g, y, z))
    expands to
        g(y, z, f(x))

    >>> def even(x): return x % 2 == 0
    >>> list(thread_last([1, 2, 3], (map, inc), (filter, even)))
    [2, 4]

    See Also:
        thread_first
    """
    def apply_form(acc, form):
        # A bare callable is applied directly; a tuple supplies leading
        # arguments, with the threaded value appended last.
        if callable(form):
            return form(acc)
        if isinstance(form, tuple):
            fn = form[0]
            return fn(*(form[1:] + (acc,)))
    return reduce(apply_form, forms, val)
def hashable(x):
    """Return True if *x* can be hashed, False otherwise."""
    try:
        hash(x)
        return True
    except TypeError:  # mutable containers and other unhashables land here
        return False
def memoize(f, cache=None):
    """ Cache a function's result for speedy future evaluation

    Considerations:
        Trades memory for speed
        Only use on pure functions

    >>> def add(x, y): return x + y
    >>> add = memoize(add)

    Or use as a decorator

    >>> @memoize
    ... def add(x, y):
    ...     return x + y
    """
    # Bug fix: compare to None with `is`, not `==` (PEP 8; `==` can be
    # hijacked by custom __eq__).
    if cache is None:
        cache = {}

    def memof(*args):
        # Probe the cache directly; a TypeError means the arguments are
        # unhashable, in which case we skip caching and just call f.
        try:
            in_cache = args in cache
        except TypeError:
            return f(*args)
        if in_cache:
            return cache[args]
        result = f(*args)
        cache[args] = result
        return result

    # Preserve the wrapped function's identity for introspection.
    memof.__name__ = f.__name__
    memof.__doc__ = f.__doc__
    return memof
class curry(object):
    """ Curry a callable function

    Enables partial application of arguments through calling a function with an
    incomplete set of arguments.

    >>> def mul(x, y):
    ...     return x * y
    >>> mul = curry(mul)

    >>> double = mul(2)
    >>> double(10)
    20

    Also supports keyword arguments

    >>> @curry                  # Can use curry as a decorator
    ... def f(x, y, a=10):
    ...     return a * (x + y)

    >>> add = f(a=1)
    >>> add(2, 3)
    5
    """
    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __call__(self, *args, **_kwargs):
        # Merge stored and newly supplied arguments, newest keywords winning.
        combined_args = self.args + args
        combined_kwargs = dict(self.kwargs)
        combined_kwargs.update(_kwargs)
        try:
            return self.func(*combined_args, **combined_kwargs)
        except TypeError:
            # Not enough arguments yet: return a further-curried callable.
            return curry(self.func, *combined_args, **combined_kwargs)
def remove(predicate, coll):
    """ Return those items of collection for which predicate(item) is true.

    >>> from functoolz import remove
    >>> def even(x):
    ...     return x % 2 == 0
    >>> list(remove(even, [1, 2, 3, 4]))
    [1, 3]
    """
    # Lazily keep only the items the predicate rejects.
    return (item for item in coll if not predicate(item))
def iterate(f, x):
    """ Repeatedly apply a function f onto an original input

    Yields x, then f(x), then f(f(x)), then f(f(f(x))), etc..

    >>> def inc(x): return x + 1
    >>> it = iterate(inc, 0)
    >>> next(it)
    0
    >>> next(it)
    1
    >>> next(it)
    2
    """
    current = x
    while True:
        yield current
        current = f(current)
def accumulate(f, seq):
    """ Repeatedly apply binary function f to a sequence, accumulating results

    >>> from operator import add, mul
    >>> list(accumulate(add, [1, 2, 3, 4, 5]))
    [1, 3, 6, 10, 15]
    >>> list(accumulate(mul, [1, 2, 3, 4, 5]))
    [1, 2, 6, 24, 120]

    Accumulate is similar to ``reduce`` and is good for making functions like
    cumulative sum

    >>> from functools import partial
    >>> sum    = partial(reduce, add)
    >>> cumsum = partial(accumulate, add)

    See Also:
        itertools.accumulate : In standard itertools for Python 3.2+
    """
    # Bug fix: consume a single iterator throughout. The previous code did
    # next(iter(seq)) and then islice(seq, 1, None); when seq was itself an
    # iterator/generator, the islice re-consumed it and silently dropped
    # the second element.
    it = iter(seq)
    result = next(it)
    yield result
    for elem in it:
        result = f(result, elem)
        yield result
| {
"repo_name": "mrocklin/functoolz",
"path": "functoolz/core.py",
"copies": "1",
"size": "5113",
"license": "bsd-3-clause",
"hash": -5979853251364691000,
"line_mean": 23.0046948357,
"line_max": 79,
"alpha_frac": 0.5345198514,
"autogenerated": false,
"ratio": 3.52864044168392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.456316029308392,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import itertools
def identity(x):
    """The identity function: gives back its argument untouched."""
    return x
def thread_first(val, *forms):
    """ Thread value through a sequence of functions/forms

    >>> def double(x): return 2*x
    >>> def inc(x):    return x + 1
    >>> thread_first(1, inc, double)
    4

    If the function expects more than one input you can specify those inputs
    in a tuple.  The value is used as the first input.

    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_first(1, (add, 4), (pow, 2))  # pow(add(4, 1), 2)
    25

    So in general
        thread_first(x, f, (g, y, z))
    expands to
        g(f(x), y, z)

    See Also:
        thread_last
    """
    result = val
    for form in forms:
        if callable(form):
            result = form(result)
        elif isinstance(form, tuple):
            # Tuple form: form[0] is the function, the rest are trailing args.
            result = form[0](result, *form[1:])
        else:
            result = None  # mirrors the original implicit fall-through
    return result
def thread_last(val, *forms):
    """ Thread value through a sequence of functions/forms

    >>> def double(x): return 2*x
    >>> def inc(x):    return x + 1
    >>> thread_last(1, inc, double)
    4

    If the function expects more than one input you can specify those inputs
    in a tuple.  The value is used as the last input.

    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_last(1, (add, 4), (pow, 2))  # pow(2, add(1, 4))
    32

    So in general
        thread_last(x, f, (g, y, z))
    expands to
        g(y, z, f(x))

    >>> def even(x): return x % 2 == 0
    >>> list(thread_last([1, 2, 3], (map, inc), (filter, even)))
    [2, 4]

    See Also:
        thread_first
    """
    result = val
    for form in forms:
        if callable(form):
            result = form(result)
        elif isinstance(form, tuple):
            # Tuple form: form[0] is the function, threaded value goes last.
            result = form[0](*(form[1:] + (result,)))
        else:
            result = None  # mirrors the original implicit fall-through
    return result
def hashable(x):
    """Return True when *x* is hashable, False otherwise."""
    try:
        hash(x)
    except TypeError:
        return False
    else:
        return True
def memoize(f, cache=None):
    """ Cache a function's result for speedy future evaluation

    Considerations:
        Trades memory for speed.
        Only use on pure functions.

    >>> def add(x, y): return x + y
    >>> add = memoize(add)

    Or use as a decorator

    >>> @memoize
    ... def add(x, y):
    ...     return x + y
    """
    # Bug fix: None comparison must use `is`, not `==`.
    if cache is None:
        cache = {}

    def memof(*args):
        # EAFP: attempt the lookup; TypeError means the arguments are
        # unhashable, so caching is impossible and we call through directly.
        try:
            return cache[args]
        except TypeError:
            return f(*args)
        except KeyError:
            result = f(*args)
            cache[args] = result
            return result

    # Keep the wrapped function's metadata for introspection.
    memof.__name__ = f.__name__
    memof.__doc__ = f.__doc__
    return memof
class curry(object):
    """ Curry a callable function

    Enables partial application of arguments through calling a function with an
    incomplete set of arguments.

    >>> def mul(x, y):
    ...     return x * y
    >>> mul = curry(mul)

    >>> double = mul(2)
    >>> double(10)
    20

    Also supports keyword arguments

    >>> @curry                  # Can use curry as a decorator
    ... def f(x, y, a=10):
    ...     return a * (x + y)

    >>> add = f(a=1)
    >>> add(2, 3)
    5

    See Also:
        toolz.curried - namespace of curried functions
                        http://toolz.readthedocs.org/en/latest/curry.html
    """
    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.__doc__ = func.__doc__
        # Python 2 functions expose func_name; ignore its absence elsewhere.
        try:
            self.func_name = func.func_name
        except AttributeError:
            pass

    def __str__(self):
        return str(self.func)

    def __repr__(self):
        return repr(self.func)

    def __call__(self, *args, **_kwargs):
        full_args = self.args + args
        full_kwargs = dict(self.kwargs, **_kwargs)
        try:
            return self.func(*full_args, **full_kwargs)
        except TypeError:
            # Still under-applied: hand back a further-curried callable.
            return curry(self.func, *full_args, **full_kwargs)
def compose(*funcs):
    """ Compose functions to operate in series.

    Returns a function that applies other functions in sequence.
    Functions are applied from right to left so that
    ``compose(f, g, h)(x, y)`` is the same as ``f(g(h(x, y)))``.

    If no arguments are provided, the identity function (f(x) = x) is returned.

    >>> inc = lambda i: i + 1
    >>> compose(str, inc)(3)
    '4'
    """
    if not funcs:
        return identity
    if len(funcs) == 1:
        return funcs[0]

    *outer, innermost = funcs

    def composed(*args, **kwargs):
        # The rightmost function receives the original arguments; every
        # remaining function wraps the previous result, right to left.
        result = innermost(*args, **kwargs)
        for f in reversed(outer):
            result = f(result)
        return result
    return composed
| {
"repo_name": "JNRowe/toolz",
"path": "toolz/functoolz/core.py",
"copies": "2",
"size": "4877",
"license": "bsd-3-clause",
"hash": 1474672204108983000,
"line_mean": 22.9068627451,
"line_max": 79,
"alpha_frac": 0.5273733853,
"autogenerated": false,
"ratio": 3.6422703510082153,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5169643736308215,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import json
from typing import Dict, List, NamedTuple, Optional
from django.contrib.auth import get_user_model
from django.utils import timezone
from problem.models import ProblemInstance, ProblemAuthLog
# Resolve the project's active user model once at import time.
User = get_user_model()
def calculate_problem_score(problem_instance, effective_solve_count, is_first_solve):
    """Compute the score a solver earns for *problem_instance*.

    Base points, plus an even integer share of the distributed points when
    the problem has been solved at least once, plus the breakthrough bonus
    for the first solver.
    """
    total = problem_instance.points
    if effective_solve_count:
        total += problem_instance.distributed_points // effective_solve_count
    if is_first_solve:
        total += problem_instance.breakthrough_points
    return total
class ProblemState(NamedTuple):
    """Accumulated solve statistics for one ProblemInstance."""
    solve_count: int  # number of correct authentications seen so far
    first_solve: Optional[User]  # first user to solve, or None if unsolved
class UserState(NamedTuple):
    """Accumulated per-user progress across a problem list."""
    solved_problems: List[ProblemInstance]  # instances this user has solved
    last_auth: Optional[timezone.datetime]  # time of latest correct auth, or None
class ReplayState(NamedTuple):
    """Snapshot of an AuthReplay's processed state up to a pivot time."""
    datetime: Optional[timezone.datetime]  # pivot time covered; None before first prepare()
    user_states: Dict[User, UserState]  # per-user progress at the pivot
    problem_states: Dict[ProblemInstance, ProblemState]  # per-problem stats at the pivot
class AuthReplay:
    """Replays ProblemAuthLog records for a problem list to build score
    chart data and a top-10 ranking.

    Older logs (before now - crunch_timedelta) are folded into a cached
    ReplayState by prepare(); get_statistic_data() then replays only the
    recent logs on top of that snapshot.
    """

    def __init__(self, problem_list, crunch_timedelta):
        # crunch_timedelta: how far back from "now" the cached snapshot ends;
        # logs newer than that are replayed on every get_statistic_data() call.
        self.problem_list = problem_list
        self.problem_instances = problem_list.probleminstance_set
        self.state = ReplayState(
            datetime=None,
            user_states={},
            problem_states={}
        )
        self.crunch_timedelta = crunch_timedelta

    def process_preparation(self, logs, datetime):
        """Fold `logs` (all dated at or before `datetime`) into the cached
        ReplayState and advance its pivot to `datetime`."""
        problem_states = self.state.problem_states
        user_states = self.state.user_states
        solved_log_queries = []
        # Collect correct auth log query (solved_log_queries) and update problem state (last line) per problem instance
        for problem_instance in self.problem_instances.all():
            correct_auth_key = problem_instance.problem.auth_key
            solve_logs = logs.filter(problem_instance=problem_instance, auth_key=correct_auth_key)
            solved_log_queries.append(solve_logs)
            first_solve_log = solve_logs.first() if solve_logs.exists() else None
            solve_count = solve_logs.count()
            first_solve_user = first_solve_log.user if first_solve_log is not None else None
            previous_state = problem_states.get(problem_instance, ProblemState(0, None))
            # Keep the earliest recorded first solver across batches.
            new_state = \
                ProblemState(
                    solve_count=previous_state.solve_count + solve_count,
                    first_solve=first_solve_user if previous_state.first_solve is None else previous_state.first_solve
                )
            problem_states[problem_instance] = new_state
        # Union all per-problem querysets into one chronological log stream.
        solve_logs = \
            reduce(lambda x, y: x | y, solved_log_queries, ProblemAuthLog.objects.none()) \
            .order_by('datetime')
        user_pks_with_logs = solve_logs.values_list('user', flat=True)
        users_with_logs = User.objects.filter(pk__in=user_pks_with_logs)
        # Update user state per user who has a correct auth logs for problem list
        for user in users_with_logs:
            previous_state = user_states.get(user, UserState([], None))
            user_solve_logs = solve_logs.filter(user=user)
            solved_problem_pks = user_solve_logs.values_list('problem_instance', flat=True)
            solved_problems = ProblemInstance.objects.filter(pk__in=solved_problem_pks)
            last_auth = user_solve_logs.last().datetime
            user_states[user] = UserState(
                solved_problems=previous_state.solved_problems + list(solved_problems),
                last_auth=last_auth
            )
        self.state = ReplayState(
            datetime=datetime,
            user_states=user_states,
            problem_states=problem_states
        )

    def prepare(self):
        """Advance the cached snapshot to now - crunch_timedelta, consuming
        only logs newer than the previous pivot."""
        datetime_pivot = timezone.now() - self.crunch_timedelta
        logs = ProblemAuthLog.objects \
            .filter(problem_instance__in=self.problem_instances.all(), datetime__lte=datetime_pivot) \
            .order_by('datetime')
        if self.state.datetime is not None:
            logs = logs.filter(datetime__gt=self.state.datetime)
        self.process_preparation(logs, datetime_pivot)

    def update_points_function(self, points_functions, problem_instance, state_diffs):
        """(Re)build the scoring closure for `problem_instance`, combining
        the cached state with the in-replay `state_diffs` overlay."""
        problem_state = self.state.problem_states[problem_instance]
        problem_state_diff = state_diffs.get(problem_instance, None)
        solve_count = problem_state.solve_count
        first_solver = problem_state.first_solve
        if problem_state_diff is not None:
            solve_count += problem_state_diff.solve_count
            if first_solver is None:
                first_solver = problem_state_diff.first_solve
        if solve_count == 0:
            return
        # The closure captures the solve_count / first_solver computed above.
        points_functions[problem_instance] = \
            lambda user: calculate_problem_score(problem_instance, solve_count, user == first_solver)

    def calc_user_points(self, user, problem_instance, points_functions, state_diffs):
        """Points `user` currently earns for `problem_instance` (0 if the
        user has not solved it in either the snapshot or the overlay)."""
        user_state = self.state.user_states.get(user, UserState([], None))
        user_state_diff = state_diffs.get(user, UserState([], None))
        all_solved_problems = user_state.solved_problems + user_state_diff.solved_problems
        if problem_instance not in all_solved_problems:
            return 0
        return points_functions[problem_instance](user)

    def get_statistic_data(self):
        """Replay logs newer than the cached pivot and return a 2-tuple:
        (iterable of (username, json_chart_points) for the top-10 users,
        top-10 ranking as (username, points, last_auth) tuples).
        Requires prepare() to have been called at least once."""
        if self.state.datetime is None:
            return [], []
        problem_state_diffs = {}
        user_state_diffs = {}
        points_functions = {}
        user_points = {}
        for problem_instance in self.state.problem_states:
            self.update_points_function(points_functions, problem_instance, problem_state_diffs)
        for user, state in self.state.user_states.items():
            # pylint: disable=cell-var-from-loop
            user_points[user] = \
                sum(map(
                    lambda x: self.calc_user_points(user, x, points_functions, user_state_diffs),
                    state.solved_problems))
        datetime_pivot = self.state.datetime
        logs = ProblemAuthLog.objects \
            .filter(
                problem_instance__in=self.problem_instances.all(),
                datetime__gt=datetime_pivot)
        solved_log_queries = []
        for problem_instance in self.problem_instances.all():
            correct_auth_key = problem_instance.problem.auth_key
            solve_logs = logs.filter(problem_instance=problem_instance, auth_key=correct_auth_key)
            solved_log_queries.append(solve_logs)
        logs_to_replay = \
            reduce(lambda x, y: x | y, solved_log_queries, ProblemAuthLog.objects.none()) \
            .order_by('datetime')

        def append_chart(timestamp):
            # Record every user's current score at `timestamp`.
            for chart_user, points in user_points.items():
                entry = chart_data.get(chart_user.username, [])
                entry.append({'x': timestamp.isoformat(), 'y': points})
                chart_data[chart_user.username] = entry

        chart_data = {}
        append_chart(datetime_pivot)
        for log in logs_to_replay:
            # Scores for this problem change for everyone when a new solve
            # arrives (the distributed share shrinks): subtract the old
            # contribution, apply the solve, then add the new contribution.
            for user in user_points:
                user_points[user] -= self.calc_user_points(
                    user, log.problem_instance, points_functions, user_state_diffs)
            prev_problem_state = problem_state_diffs.get(log.problem_instance, ProblemState(0, None))
            problem_state_diffs[log.problem_instance] = \
                ProblemState(
                    solve_count=prev_problem_state.solve_count + 1,
                    first_solve=prev_problem_state.first_solve
                    if prev_problem_state.first_solve is not None else
                    log.user
                )
            self.update_points_function(points_functions, log.problem_instance, problem_state_diffs)
            for user in user_points:
                user_points[user] += self.calc_user_points(
                    user, log.problem_instance, points_functions, user_state_diffs)
            prev_user_state = user_state_diffs.get(log.user, UserState([], None))
            user_state_diffs[log.user] = \
                UserState(
                    solved_problems=prev_user_state.solved_problems + [log.problem_instance],
                    last_auth=log.datetime
                )
            prev_point = user_points.get(log.user, 0)
            user_points[log.user] = prev_point + self.calc_user_points(
                log.user, log.problem_instance, points_functions, user_state_diffs)
            append_chart(log.datetime)
        append_chart(timezone.now())

        def get_user_last_auth(rank_user):
            # Prefer the replay overlay's timestamp over the cached one.
            user_state_diff = user_state_diffs.get(rank_user, None)
            return self.state.user_states[rank_user].last_auth \
                if user_state_diff is None else \
                user_state_diff.last_auth

        # Rank by points descending, ties broken by earliest last auth.
        rank_raw = list(map(lambda x: (x[0].username, x[1], get_user_last_auth(x[0])), user_points.items()))
        top10_rank = sorted(rank_raw, key=lambda x: (-x[1], x[2]))[:10]
        top10_users = list(map(lambda x: x[0], top10_rank))
        top10_chart_data = \
            map(lambda x: (x[0], json.dumps(x[1])),
                filter(lambda x: x[0] in top10_users, chart_data.items()))
        return top10_chart_data, top10_rank
| {
"repo_name": "PLUS-POSTECH/study.plus.or.kr",
"path": "src/problem/helpers/score.py",
"copies": "1",
"size": "9240",
"license": "apache-2.0",
"hash": -5413071642158421000,
"line_mean": 39.704845815,
"line_max": 119,
"alpha_frac": 0.6180735931,
"autogenerated": false,
"ratio": 3.938618925831202,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013731803641006323,
"num_lines": 227
} |
from functools import reduce
import json
import os
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch, QueryStringMatch
# Alert configuration (root user list, search window) lives next to this module.
_CONFIG_FILE = os.path.join(
    os.path.dirname(__file__),
    'aws_privilege_share.json')
# Aggregation key: the user a policy is being attached to.
_AGGREGATE_KEY = 'details.requestparameters.username'
# Dotted path to the IAM user that issued the API call.
_IAM_USER_KEY = 'details.useridentity.sessioncontext.sessionissuer.username'
# Event-name field and the specific CloudTrail action this alert watches.
_AWS_EVENT_KEY = 'details.eventname'
_ATTACH_POLICY_ACTION = 'AttachUserPolicy'
class AlertAWSPrivilegeShare(AlertTask):
    '''An alert that fires when any of a configured list of AWS IAM users
    perform the AttachUserPolicy action on another user.  Such activity may
    indicate that root privileges are being shared.
    '''

    def main(self):
        with open(_CONFIG_FILE) as cfg_file:
            self.config = json.load(cfg_file)

        # One clause per configured root user, OR-ed together.
        root_user_clauses = [
            '{0}: {1}'.format(_IAM_USER_KEY, user)
            for user in self.config['rootUsers']
        ]
        query = SearchQuery(**self.config['searchWindow'])
        query.add_must([
            QueryStringMatch(' OR '.join(root_user_clauses)),
            TermMatch(_AWS_EVENT_KEY, _ATTACH_POLICY_ACTION)
        ])

        self.filtersManual(query)
        self.searchEventsAggregated(_AGGREGATE_KEY, samplesLimit=10)
        self.walkAggregations(threshold=1)

    def onAggregation(self, aggreg):
        # Walk the dotted key path into the first event to find the IAM user
        # that attached a new policy to another user.
        issuing_user = aggreg['events'][0]
        for key_part in _IAM_USER_KEY.split('.'):
            issuing_user = issuing_user and issuing_user.get(key_part)

        summary = '{0} granted permissions to {1} in AWS'.format(
            issuing_user,
            aggreg['value'])
        return self.createAlertDict(
            summary,
            'privileges',
            ['aws', 'privileges'],
            aggreg['events'],
            'NOTICE')
| {
"repo_name": "jeffbryner/MozDef",
"path": "alerts/aws_privilege_share.py",
"copies": "1",
"size": "1986",
"license": "mpl-2.0",
"hash": -1515901782248190700,
"line_mean": 30.5238095238,
"line_max": 78,
"alpha_frac": 0.639979859,
"autogenerated": false,
"ratio": 3.833976833976834,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4973956692976834,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import logging
from sys import stdout
from django.db.models import Model
from django.db.models.query import QuerySet
from .utils import (
get_rendition_key_set,
get_url_from_image_key,
validate_versatileimagefield_sizekey_list
)
# Module-level logger; used to report failed thumbnail generation.
logger = logging.getLogger(__name__)
def cli_progress_bar(start, end, bar_length=50):
    """
    Prints out a Yum-style progress bar (via sys.stdout.write).

    `start`: The 'current' value of the progress bar.
    `end`: The '100%' value of the progress bar.
    `bar_length`: The size of the overall progress bar.

    Example output with start=20, end=100, bar_length=50:

    [###########----------------------------------------] 20/100 (100%)

    Intended to be used in a loop. Example:

    end = 100
    for i in range(end):
        cli_progress_bar(i, end)

    Based on an implementation found here:
        http://stackoverflow.com/a/13685020/1149774
    """
    fraction = float(start) / end
    filled = int(round(fraction * bar_length))
    # '#' for completed portion, '-' padding for the remainder.
    bar = '#' * filled + '-' * (bar_length - filled)
    stdout.write(
        "\r[{0}] {1}/{2} ({3}%)".format(
            bar,
            start,
            end,
            int(round(fraction * 100))
        )
    )
    stdout.flush()
class VersatileImageFieldWarmer(object):
    """
    A class for creating sets of images from a VersatileImageField
    """

    def __init__(self, instance_or_queryset,
                 rendition_key_set, image_attr, verbose=False):
        """
        Arguments:
        `instance_or_queryset`: A django model instance or QuerySet
        `rendition_key_set`: Either a string that corresponds to a key on
                             settings.VERSATILEIMAGEFIELD_RENDITION_KEY_SETS
                             or an iterable
                             of 2-tuples, both strings:
                             [0]: The 'name' of the image size.
                             [1]: A VersatileImageField 'size_key'.
                             Example: [
                                 ('large', 'url'),
                                 ('medium', 'crop__400x400'),
                                 ('small', 'thumbnail__100x100')
                             ]
        `image_attr`: A dot-notated path to a VersatileImageField on
                      `instance_or_queryset`
        `verbose`: bool signifying whether a progress bar should be printed
                   to sys.stdout
        """
        if isinstance(instance_or_queryset, Model):
            # Normalize a single instance to a one-element queryset so
            # warm() can treat both inputs uniformly.
            queryset = instance_or_queryset.__class__._default_manager.filter(
                pk=instance_or_queryset.pk
            )
        elif isinstance(instance_or_queryset, QuerySet):
            queryset = instance_or_queryset
        else:
            raise ValueError(
                "Only django model instances or QuerySets can be processed by "
                "{}".format(self.__class__.__name__)
            )
        self.queryset = queryset
        if isinstance(rendition_key_set, str):
            # A string names a key in VERSATILEIMAGEFIELD_RENDITION_KEY_SETS.
            rendition_key_set = get_rendition_key_set(rendition_key_set)
        # Keep only the size keys; the human-readable names are irrelevant here.
        self.size_key_list = [
            size_key
            for key, size_key in validate_versatileimagefield_sizekey_list(
                rendition_key_set
            )
        ]
        self.image_attr = image_attr
        self.verbose = verbose

    @staticmethod
    def _prewarm_versatileimagefield(size_key, versatileimagefieldfile):
        """
        Returns a 2-tuple:
        0: bool signifying whether the image was successfully pre-warmed
        1: The url of the successfully created image OR the path on storage of
           the image that was not able to be successfully created.

        Arguments:
        `size_key`: A VersatileImageField size key. Examples:
            * 'crop__800x450'
            * 'thumbnail__800x800'
        `versatileimagefieldfile`: A VersatileImageFieldFile instance
        """
        # Force on-demand rendition creation for this field instance.
        versatileimagefieldfile.create_on_demand = True
        try:
            url = get_url_from_image_key(versatileimagefieldfile, size_key)
        except Exception:  # pragma: no cover
            success = False
            url_or_filepath = versatileimagefieldfile.name
            logger.exception('Thumbnail generation failed',
                             extra={'path': url_or_filepath})
        else:
            success = True
            url_or_filepath = url
        return (success, url_or_filepath)

    def warm(self):
        """
        Returns a 2-tuple:
        [0]: Number of images successfully pre-warmed
        [1]: A list of paths on the storage class associated with the
             VersatileImageField field being processed by `self` of
             files that could not be successfully seeded.
        """
        num_images_pre_warmed = 0
        failed_to_create_image_path_list = []
        total = self.queryset.count() * len(self.size_key_list)
        for a, instance in enumerate(self.queryset, start=1):
            for b, size_key in enumerate(self.size_key_list, start=1):
                # reduce(getattr, ...) resolves the dot-notated image_attr
                # path (e.g. 'profile.avatar') on the instance.
                success, url_or_filepath = self._prewarm_versatileimagefield(
                    size_key,
                    reduce(getattr, self.image_attr.split("."), instance)
                )
                if success is True:
                    num_images_pre_warmed += 1
                    if self.verbose:
                        cli_progress_bar(num_images_pre_warmed, total)
                else:  # pragma: no cover
                    failed_to_create_image_path_list.append(url_or_filepath)

                # a * b == total only on the final iteration; finish the
                # progress bar line with a newline.
                if a * b == total and self.verbose:
                    stdout.write('\n')
        if self.verbose:
            stdout.flush()
        return (num_images_pre_warmed, failed_to_create_image_path_list)
| {
"repo_name": "respondcreate/django-versatileimagefield",
"path": "versatileimagefield/image_warmer.py",
"copies": "1",
"size": "5664",
"license": "mit",
"hash": 1137002021412042200,
"line_mean": 35.7792207792,
"line_max": 79,
"alpha_frac": 0.5667372881,
"autogenerated": false,
"ratio": 4.1677704194260485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5234507707526048,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import logging
import os
from random import sample
from pickle import load
from copy import deepcopy
import numpy as np
import scipy.sparse as sp
import pandas as pd
import six
from composes.composition.lexical_function import LexicalFunction
from discoutils.tokens import DocumentFeature
from discoutils.io_utils import write_vectors_to_disk, write_vectors_to_hdf
from discoutils.thesaurus_loader import Thesaurus, Vectors
from eval.scripts.compress_labelled_data import get_all_document_features
def check_vectors(unigram_source):
    """Validate that *unigram_source* looks like a unigram Vectors store and
    return it unchanged; raise ValueError otherwise."""
    if not unigram_source:
        raise ValueError('Composers need a unigram vector source')
    if not hasattr(unigram_source, 'get_vector'):
        raise ValueError('Creating a composer requires a Vectors '
                         'data structure that holds unigram vectors')
    return unigram_source
class ComposerMixin(object):
    """Mixin providing bulk composition over a composer's unigram source.
    Relies on the host class implementing __contains__ and get_vector."""
    def compose_all(self, phrases):
        """
        Composes all `phrases` and returns all unigrams and `phrases` as a matrix. Does NOT store the composed vectors.
        Unigram vectors must be brought in by extending classes.
        :param phrases: iterable of `str` or `DocumentFeature`
        :return: a tuple of :
            1) `csr_matrix` containing all vectors, unigram and composed
            2) the columns (features) of the unigram space that was used for composition
            3) a row index- dict {Feature: Row}. Maps from a feature to the row in 1) where the vector for that
            feature is. Note: This is the opposite of what IO functions in discoutils expect
        """
        composable_phrases = [foo for foo in phrases if foo in self]
        logging.info('Composing... %s able to compose %d/%d phrases using %d unigrams',
                     self.name, len(composable_phrases), len(phrases), len(self.unigram_source.name2row))
        if not composable_phrases:
            raise ValueError('%s cannot compose any of the provided phrases' % self.name)
        new_matrix = sp.vstack(self.get_vector(foo) for foo in composable_phrases)
        old_len = len(self.unigram_source.name2row)
        all_rows = deepcopy(self.unigram_source.name2row)  # can't mutate the unigram datastructure
        for i, phrase in enumerate(composable_phrases):
            key = phrase if isinstance(phrase, str) else str(phrase)
            # the phrase shouldn't already be in the unigram source
            assert key not in all_rows
            # composed rows are appended after the existing unigram rows
            all_rows[key] = i + old_len
        all_vectors = sp.vstack([self.unigram_source.matrix, new_matrix], format='csr')
        assert all_vectors.shape == (len(all_rows), len(self.unigram_source.columns)), 'Shape mismatch'
        return all_vectors, self.unigram_source.columns, all_rows
class AdditiveComposer(Vectors, ComposerMixin):
    """Composes phrase vectors by element-wise addition of unigram vectors."""
    name = 'Add'
    # composers in general work with n-grams (for simplicity n<4)
    entry_types = {'2-GRAM', '3-GRAM', 'AN', 'NN', 'VO', 'SVO'}

    def __init__(self, unigram_source):
        self.unigram_source = check_vectors(unigram_source)
        # binary ufunc folded pairwise over the phrase's token vectors
        self.function = np.add

    def get_vector(self, feature):
        """
        Fold self.function over the dense vectors of the feature's tokens.
        :type feature: DocumentFeature
        :rtype: scipy.sparse.csr_matrix
        """
        if isinstance(feature, six.string_types):
            feature = DocumentFeature.from_string(feature)
        return sp.csr_matrix(reduce(self.function,
                                    [self.unigram_source.get_vector(str(t)).A for t in feature[:]]))

    def contains_impl(self, feature):
        """
        Contains all sequences of words where we have a distrib vector for each unigram
        they contain. Rejects unigrams.
        """
        # if isinstance(feature, six.string_types):
        # feature = DocumentFeature.from_string(feature)
        feat_str = str(feature) if isinstance(feature, DocumentFeature) else feature
        feat_df = feature if isinstance(feature, DocumentFeature) else DocumentFeature.from_string(feature)
        if feat_df.type not in self.entry_types:
            # no point in trying
            return False
        return all(f in self.unigram_source for f in feat_str.split(DocumentFeature.ngram_separator))

    def __contains__(self, feature):
        return self.contains_impl(feature)

    def __str__(self):
        return '[%s with %d unigram entries]' % (self.__class__.__name__, len(self.unigram_source))

    def __len__(self):
        # this will also get called when __nonzero__ is called
        return len(self.unigram_source)
class AverageComposer(AdditiveComposer):
    """Composes by summing unigram vectors and halving the result.

    NOTE(review): get_vector always divides by 2, yet entry_types admits
    3-token phrases (3-GRAM, SVO) — confirm whether this should divide by
    the token count instead.
    """
    name = 'Avg'
    entry_types = {'2-GRAM', '3-GRAM', 'AN', 'NN', 'VO', 'SVO'}

    def __init__(self, unigram_source):
        self.unigram_source = check_vectors(unigram_source)
        self.function = np.add

    def get_vector(self, feature):
        v = super().get_vector(feature)  # Add
        return v / 2
class MultiplicativeComposer(AdditiveComposer):
    """Composes phrase vectors by element-wise multiplication."""
    name = 'Mult'

    def __init__(self, unigram_source):
        self.unigram_source = check_vectors(unigram_source)
        self.function = np.multiply
class MinComposer(MultiplicativeComposer):
    """Composes by folding element-wise minimum over the token vectors."""
    name = 'Min'

    def __init__(self, unigram_source):
        self.unigram_source = check_vectors(unigram_source)
        # np.minimum is already a binary ufunc; no lambda wrapper needed.
        self.function = np.minimum
class MaxComposer(MinComposer):
    """Composes by folding element-wise maximum over the token vectors."""
    name = 'Max'

    def __init__(self, unigram_source):
        self.unigram_source = check_vectors(unigram_source)
        # np.maximum is already a binary ufunc; no lambda wrapper needed.
        self.function = np.maximum
class LeftmostWordComposer(AdditiveComposer):
    """Represents a phrase by the vector of its first (leftmost) word."""
    name = 'Left'
    entry_types = {'2-GRAM', '3-GRAM', 'AN', 'NN', 'VO', 'SVO'}

    def __init__(self, unigram_source):
        self.unigram_source = check_vectors(unigram_source)
        # index of the token whose vector stands in for the whole phrase
        self.hardcoded_index = 0

    def get_vector(self, feature):
        if isinstance(feature, six.string_types):
            feature = DocumentFeature.from_string(feature)
        return self.unigram_source.get_vector(str(feature[self.hardcoded_index]))

    def contains_impl(self, feature):
        if isinstance(feature, six.string_types):
            feature = DocumentFeature.from_string(feature)
        if feature.type not in self.entry_types:
            # no point in composing single-word document features
            return False
        return str(feature[self.hardcoded_index]) in self.unigram_source
class RightmostWordComposer(LeftmostWordComposer):
    """Represents a phrase by the vector of its last (rightmost) word."""
    name = 'Right'

    def __init__(self, unigram_source):
        self.unigram_source = check_vectors(unigram_source)
        self.hardcoded_index = -1
class VerbComposer(LeftmostWordComposer):
    """
    Represents verb phrases by the vector of their head
    """
    name = 'Verb'
    entry_types = {'SVO'}

    def __init__(self, unigram_source):
        self.unigram_source = check_vectors(unigram_source)
        # index 1 selects the middle token of an SVO triple (the verb)
        self.hardcoded_index = 1
class BaroniComposer(Vectors, ComposerMixin):
    """Composer backed by a pretrained (pickled) Baroni-style model: each
    modifier has a learnt matrix that is applied to the head's vector."""
    entry_types = {'AN', 'NN'}
    name = 'Baroni'

    def __init__(self, unigram_source, pretrained_model_file):
        self.unigram_source = check_vectors(unigram_source)
        if not pretrained_model_file:
            logging.error('Expected filename, got %s', pretrained_model_file)
            raise ValueError('Model file required to perform composition.')
        with open(pretrained_model_file, 'rb') as infile:
            self._composer = load(infile)

        # verify the composer's internal structure matches the unigram source
        self.available_modifiers = set(self._composer.function_space.id2row)
        core_space = self.unigram_source.to_dissect_core_space()
        assert list(unigram_source.columns) == (self._composer.composed_id2column)
        self.dissect_core_space = core_space

        # check composed space's columns matches core space's (=unigram source)'s columns
        assert core_space.id2column == self._composer.composed_id2column

    def __contains__(self, feature):
        """
        Accept all adjective-noun or noun-noun phrases where we have a corpus-observed vector for the head and
        a learnt matrix (through PLSR) for the modifier
        """
        # todo expand unit tests now that we have a real composer
        if feature.type not in self.entry_types:
            # ignore non-AN features
            return False

        modifier, head = feature.tokens
        assert ('J', 'N') == (modifier.pos, head.pos) or ('N', 'N') == (modifier.pos, head.pos)

        # if DocumentFeature('1-GRAM', (noun,)) not in self.unigram_source:
        if DocumentFeature.from_string(str(head)) not in self.unigram_source:
            # ignore ANs containing unknown nouns
            return False

        # ignore ANs containing unknown adjectives
        return str(modifier) in self.available_modifiers

    def __str__(self):
        return '[BaroniComposer with %d modifiers and %d heads]' % \
               (len(self.available_modifiers), len(self.unigram_source))

    def __repr__(self):
        return str(self)

    def __len__(self):
        # this will also get called when __nonzero__ is called
        return len(self.available_modifiers)

    def get_vector(self, feature):
        # todo test properly
        """
        :param feature: DocumentFeature to compose, assumed to be an adjective/noun and a noun, with PoS tags
        :return:
        :rtype: 1xN scipy sparse matrix of type numpy.float64 with M stored elements in Compressed Sparse Row format,
         where N is the dimensionality of the vectors in the unigram source
        """
        modifier = str(feature.tokens[0])
        head = str(feature.tokens[1])
        phrase = '{}_{}'.format(modifier, head)
        x = self._composer.compose([(modifier, head, phrase)],
                                   self.dissect_core_space).cooccurrence_matrix.mat
        return x
class GuevaraComposer(BaroniComposer):
    """Composer backed by a pre-trained Guevara dissect model for
    adjective-noun and noun-noun phrases."""
    entry_types = {'AN', 'NN'}
    name = 'Guevara'

    def __init__(self, unigram_source, pretrained_model_file, *args):
        """
        :param unigram_source: Vectors object holding unigram vectors
        :param pretrained_model_file: path to a pickled trained dissect composer
        :raises ValueError: if no model file is provided
        """
        self.unigram_source = check_vectors(unigram_source)
        if not pretrained_model_file:
            logging.error('Expected filename, got %s', pretrained_model_file)
            raise ValueError('Model file required to perform composition.')
        with open(pretrained_model_file, 'rb') as infile:
            self._composer = load(infile)

        assert list(unigram_source.columns) == list(self._composer.composed_id2column)
        self.dissect_core_space = self.unigram_source.to_dissect_core_space()

        # check composed space's columns matches core space's (=unigram source)'s columns
        assert self.dissect_core_space.id2column == self._composer.composed_id2column

    def __str__(self):
        # bug fix: the inherited message referenced self.available_modifiers,
        # which this __init__ (unlike BaroniComposer.__init__) never sets, so
        # str() raised AttributeError. Any unigram may act as a modifier here
        # (see __contains__), so report the unigram count instead.
        return '[GuevaraComposer with %d unigram entries]' % len(self.unigram_source)

    def __len__(self):
        # bug fix: the inherited __len__ also relied on the never-set
        # available_modifiers attribute; report the unigram count instead
        return len(self.unigram_source)

    def __contains__(self, feature):
        # both head and modifier need to have unigram vectors.
        # I don't see why the modifier needs a vector, given that we're using
        # its matrix representation instead, but that is what dissect does
        if isinstance(feature, six.string_types):
            feature = DocumentFeature.from_string(feature)
        if feature.type not in self.entry_types:
            # no point in trying
            return False
        return all(str(f) in self.unigram_source for f in feature[:])
class GrefenstetteMultistepComposer(BaroniComposer):
    """Two-step composer for subject-verb-object triples: a trained verb
    model first combines verb+object into VO functions, which are then
    applied to the subject noun."""
    entry_types = {'SVO'}
    name = 'Multistep'

    def __init__(self, unigram_source, v_model):
        """
        :param unigram_source: Vectors object with noun vectors
        :param v_model: path to a pickled dissect verb model
        """
        self.unigram_source = check_vectors(unigram_source)
        self.n_space = self.unigram_source.to_dissect_core_space()
        with open(v_model, 'rb') as infile:
            self.v_model = load(infile)
        # with open(vo_model, 'rb') as infile:
        # self.vo_model = load(infile)
        self.verbs = self.v_model.function_space.id2row
        # bug fix: the original passed self.verbs without a %s placeholder,
        # which makes the logging module emit a formatting error instead of
        # the intended message
        logging.info('Multistep composer has these verbs: %s', self.verbs)

    def __str__(self):
        # bug fix: the original was missing the return statement, so str()
        # on an instance raised TypeError (__str__ returned None)
        return 'Multistep composer with %d verbs and %d nouns' % (len(self.verbs),
                                                                  len(self.unigram_source))

    def __contains__(self, feature):
        if isinstance(feature, six.string_types):
            feature = DocumentFeature.from_string(feature)
        # this is a SVO, we have a verb tensor and vectors for both arguments
        return feature.type in self.entry_types and \
               feature[1] in self.verbs and \
               feature[0] in self.unigram_source and \
               feature[2] in self.unigram_source
        # alternative- try to compose. if ValueError, we can't

    def get_vector(self, df):
        """Compose an SVO DocumentFeature into a single vector.

        Step 1: apply the verb model to (verb, object) to get VO functions.
        Step 2: apply the VO function to the subject noun.
        """
        # 3.1 use the V model to create new VO combinations
        data = (str(df[1]), str(df[2]), str(df[1:]))
        # ("take/V", "place/N", "take/V_place/N")
        vo_composed_space = self.v_model.compose([data], self.n_space)
        # todo how do we get VO vectors? these are (100x100)+100 dimensional (intercept).
        # todo do we allow document features of different dimensionality
        # 3.2 the new VO combinations will be used as functions: load them
        # into a new composition model
        expanded_vo_model = LexicalFunction(function_space=vo_composed_space,
                                            intercept=self.v_model._has_intercept)
        # 3.3 use the new VO combinations by composing them with subject nouns
        # in order to obtain new SVO sentences
        data = (str(df[1:]), str(df[0]), str(df))
        svo_composed_space = expanded_vo_model.compose([data], self.n_space)
        # the composed vectors are back in noun space (e.g. 100-dimensional)
        return svo_composed_space.cooccurrence_matrix.mat
class CopyObject(Vectors, ComposerMixin):
    """Categorical 'copy object' composer: an SVO vector is the elementwise
    product of the subject vector with (verb matrix . object vector)."""
    name = 'CopyObj'
    entry_types = {'SVO'}

    def __init__(self, verbs_file, unigram_source):
        """
        :param verbs_file: HDF store containing one matrix per verb
        :param unigram_source: Vectors object with noun vectors
        :raises ValueError: if the store contains no verb matrices
        """
        self.verb_tensors = {}
        with pd.get_store(verbs_file) as store:
            for key in store.keys():
                # HDF keys look like '/take'; strip the slash and add a PoS tag
                self.verb_tensors[key[1:] + '/V'] = store[key].values
        logging.info('Found %d verb tensors in %s', len(self.verb_tensors), verbs_file)
        if not self.verb_tensors:
            raise ValueError('Cant build a categorical model without verb matrices')
        self.unigram_source = unigram_source

    def __contains__(self, feature):
        if isinstance(feature, six.string_types):
            feature = DocumentFeature.from_string(feature)
        # composable iff it is an SVO whose verb has a matrix and whose
        # subject and object both have unigram vectors
        return (feature.type in self.entry_types
                and str(feature[1]) in self.verb_tensors
                and str(feature[0]) in self.unigram_source
                and str(feature[2]) in self.unigram_source)

    def get_vector(self, phrase_df):
        """Compose an SVO phrase; returns a row csr_matrix so the return
        type matches the other composers."""
        subject, verb, obj = (str(token) for token in phrase_df.tokens)
        subject_vec = self.unigram_source.get_vector(subject).A.T  # column vector, e.g. 100x1
        verb_matrix = self.verb_tensors[verb]                      # square matrix, e.g. 100x100
        object_vec = self.unigram_source.get_vector(obj).A.T       # column vector, e.g. 100x1
        composed = subject_vec * np.dot(verb_matrix, object_vec)   # elementwise product
        return sp.csr_matrix(composed.T)

    def __str__(self):
        return '%s composer with %d verbs and %d unigrams' % (self.name,
                                                              len(self.verb_tensors),
                                                              len(self.unigram_source))
class FrobeniusAdd(CopyObject):
    """Frobenius-algebra composer: combines the copy-object and copy-subject
    interpretations of an SVO with the class-level `function` (addition)."""
    name = 'FAdd'
    entry_types = {'SVO'}
    function = np.add

    def get_vector(self, phrase_df):
        subject, verb, obj = (str(token) for token in phrase_df.tokens)
        subject_vec = self.unigram_source.get_vector(subject).A.T  # e.g. 100x1
        verb_matrix = self.verb_tensors[verb]                      # e.g. 100x100
        object_vec = self.unigram_source.get_vector(obj).A.T       # e.g. 100x1
        # copy-object term: subject (.) (M . object); copy-subject term uses M^T
        first_term = subject_vec * np.dot(verb_matrix, object_vec)
        second_term = object_vec * np.dot(verb_matrix.T, subject_vec)
        return sp.csr_matrix(self.function(first_term, second_term).T)
class FrobeniusMult(FrobeniusAdd):
    """Same as FrobeniusAdd, but combines the two interpretations by
    elementwise multiplication instead of addition."""
    name = 'FMult'
    entry_types = {'SVO'}
    function = np.multiply
class DummyThesaurus(Thesaurus):
    """
    A thesaurus-like object which returns "b/N" as the only neighbour of every possible entry
    """
    name = 'Constant'

    def __init__(self):
        pass

    def get_nearest_neighbours(self, feature):
        # the query is ignored; the neighbour is always the same
        return [('b/N', 1.0)]

    def get_vector(self):
        pass

    def to_shelf(self, *args, **kwargs):
        pass

    def __len__(self):
        # pretend to be very large so size-based filters never exclude us
        return 9999999

    def __contains__(self, feature):
        return True
class RandomThesaurus(DummyThesaurus):
    """
    A thesaurus-like object which returns `k` random neighbours for every possible
    entry. Neighbours are chosen from the vocabulary that is passed in (any
    collection of features; a dict's keys are used if a dict is given).
    """
    name = 'Random'

    def __init__(self, vocab=None, k=1):
        """
        :param vocab: collection of candidate neighbours
        :param k: how many neighbours to return per query
        """
        self.vocab = vocab
        self.k = k

    def get_nearest_neighbours(self, item):
        """Return `k` (neighbour, 1.0) pairs chosen uniformly at random."""
        if not self.vocab:
            raise ValueError('You need to provide a set of values to choose from first.')
        # bug fix: random.sample requires a sequence, but the documented input
        # is a dict; materialise the vocabulary first so dicts/sets work too
        return [(str(foo), 1.) for foo in sample(list(self.vocab), self.k)]
def default_row_filter(feat_str: str, feat_df: DocumentFeature):
    """Keep only single-word (1-GRAM) features whose PoS is noun, adjective or verb."""
    has_content_pos = feat_df.tokens[0].pos in {'N', 'J', 'V'}
    return has_content_pos and feat_df.type == '1-GRAM'
def default_row_filter_nopos(feat_str: str, feat_df: DocumentFeature):
    """Keep only unigrams without PoS tags: no underscore (n-gram separator)
    and no slash (PoS separator) in the raw feature string."""
    return '_' not in feat_str and '/' not in feat_str
# todo this doesnt work when the data isn't PoS tagged
def compose_and_write_vectors(unigram_vectors, short_vector_dataset_name, composer_classes, remove_pos= False,
                              pretrained_Baroni_composer_file=None, pretrained_Guevara_composer_file=None,
                              pretrained_Gref_composer_file=None, categorical_vector_matrix_file=None,
                              output_dir='.', gzipped=True, dense_hd5=False,
                              row_filter=default_row_filter):
    """
    Extracts all composable features from a labelled classification corpus and dumps a composed vector for each of them
    to disk. The output file will also contain all unigram vectors that were passed in, and only unigrams!
    :param unigram_vectors: a file in Byblo events format that contain vectors for all unigrams OR
    a Vectors object. This will be used in the composition process.
    :type unigram_vectors: str or Vectors
    :param short_vector_dataset_name: short name used when building the output file name
    :param composer_classes: what composers to use
    :type composer_classes: list
    :param remove_pos: whether to strip PoS tags from the features to be composed
    :param pretrained_Baroni_composer_file: path to pre-trained Baroni AN/NN composer file
    :param pretrained_Guevara_composer_file: path to pre-trained Guevara composer file
    :param pretrained_Gref_composer_file: path to pre-trained Grefenstette verb model
    :param categorical_vector_matrix_file: HDF file with verb matrices for the categorical composers
    :param output_dir: directory the composed vector files are written to
    :param gzipped: gzip the sparse text output (ignored when dense_hd5 is set)
    :param dense_hd5: write dense HDF5 output instead of sparse text
    :param row_filter: predicate selecting which rows to load from a vectors file
    """
    phrases_to_compose = get_all_document_features(remove_pos=remove_pos)
    # if this isn't a Vectors object assume it's the name of a file containing vectors and load them
    if not isinstance(unigram_vectors, Vectors):
        # ensure there's only unigrams in the set of unigram vectors
        # composers do not need any ngram vectors contain in this file, they may well be
        # observed ones
        unigram_vectors = Vectors.from_tsv(unigram_vectors,
                                           row_filter=row_filter)
    logging.info('Starting composition with %d unigram vectors', len(unigram_vectors))
    # doing this loop in parallel isn't worth it as pickling or shelving `vectors` is so slow
    # it negates any gains from using multiple cores
    for composer_class in composer_classes:
        # each composer type needs different auxiliary files; fail fast if one is missing
        if composer_class == BaroniComposer:
            assert pretrained_Baroni_composer_file is not None
            composer = BaroniComposer(unigram_vectors, pretrained_Baroni_composer_file)
        elif composer_class == GuevaraComposer:
            assert pretrained_Guevara_composer_file is not None
            composer = GuevaraComposer(unigram_vectors, pretrained_Guevara_composer_file)
        elif composer_class == GrefenstetteMultistepComposer:
            assert pretrained_Gref_composer_file is not None
            composer = GrefenstetteMultistepComposer(unigram_vectors, pretrained_Gref_composer_file)
        elif composer_class in [CopyObject, FrobeniusAdd, FrobeniusMult]:
            composer = composer_class(categorical_vector_matrix_file, unigram_vectors)
        else:
            composer = composer_class(unigram_vectors)
        try:
            # compose_all returns all unigrams and composed phrases
            mat, cols, rows = composer.compose_all(phrases_to_compose)
            events_path = os.path.join(output_dir,
                                       'composed_%s_%s.events.filtered.strings' % (short_vector_dataset_name,
                                                                                   composer.name))
            if dense_hd5:
                write_vectors_to_hdf(mat, rows, cols, events_path)
            else:
                rows2idx = {i: DocumentFeature.from_string(x) for (x, i) in rows.items()}
                write_vectors_to_disk(mat.tocoo(), rows2idx, cols, events_path,
                                      entry_filter=lambda x: x.type in {'AN', 'NN', 'VO', 'SVO', '1-GRAM'},
                                      gzipped=gzipped)
        except ValueError as e:
            # a composer may fail on the given phrases; log loudly and move on
            logging.error('RED ALERT, RED ALERT')
            logging.error(e)
            continue
| {
"repo_name": "mbatchkarov/vector_builder",
"path": "builder/composers/vectorstore.py",
"copies": "1",
"size": "21805",
"license": "bsd-3-clause",
"hash": -743606114635063800,
"line_mean": 40.4543726236,
"line_max": 119,
"alpha_frac": 0.6345333639,
"autogenerated": false,
"ratio": 3.703294836956522,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9830263195114397,
"avg_score": 0.0015130011484249274,
"num_lines": 526
} |
from functools import reduce
import logging
import platform
import re
import unittest
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__file__)
from common import get_check
from monasca_agent.collector.checks.system.unix import Cpu
from monasca_agent.collector.checks.system.unix import Disk
from monasca_agent.collector.checks.system.unix import IO
from monasca_agent.collector.checks.system.unix import Memory
class TestSystem(unittest.TestCase):
    """Unit tests for the unix system checks (CPU, disk, IO, memory, network)."""

    def testCPU(self):
        global logger
        cpu = Cpu(logger)
        res = cpu.check()
        # Make sure we sum up to 100% (or 99% in the case of macs)
        assert abs(reduce(lambda a, b: a + b, res.values(), 0) - 100) <= 5, res

    # `df -i` output on OS X Lion (inode stats, 512-byte blocks)
    lion_df_i = """Filesystem 512-blocks Used Available Capacity iused ifree %iused Mounted onto
/dev/disk1 487932936 220080040 267340896 46% 27574003 33417612 45% /
devfs 374 374 0 100% 648 0 100% /dev
map -hosts 0 0 0 100% 0 0 100% /net
map auto_home 0 0 0 100% 0 0 100% /home
localhost:/KJDS7Bgpbp1QglL9lBwOe6 487932936 487932936 0 100% 0 0 100% /Volumes/MobileBackups
/dev/disk2s1 62309376 5013120 57296256 9% 0 0 100% /Volumes/NO name"""

    # `df -k` output on OS X Lion (1K blocks)
    lion_df_k = """Filesystem 1024-blocks Used Available Capacity Mounted onto
/dev/disk1 243966468 110040020 133670448 46% /
devfs 187 187 0 100% /dev
map -hosts 0 0 0 100% /net
map auto_home 0 0 0 100% /home
localhost:/KJDS7Bgpbp1QglL9lBwOe6 243966468 243966468 0 100% /Volumes/MobileBackups
/dev/disk2s1 31154688 2506560 28648128 9% /Volumes/NO NAME"""

    # `df -k` output on Linux; note the wrapped line for the long NFS device name
    linux_df_k = """Filesystem 1K-blocks Used Available Use% Mounted on
/dev/sda1 8256952 5600592 2236932 72% /
none 3802316 124 3802192 1% /dev
none 3943856 0 3943856 0% /dev/shm
none 3943856 148 3943708 1% /var/run
none 3943856 0 3943856 0% /var/lock
none 3943856 0 3943856 0% /lib/init/rw
/dev/sdb 433455904 305360 411132240 1% /mnt
/dev/sdf 52403200 40909112 11494088 79% /data
nfs:/abc/def/ghi/jkl/mno/pqr
 52403200 40909112 11494088 79% /data2
/dev/sdg 52403200 40909112 11494088 79% /data3
tmpfs 14039440 256 14039184 1% /run
/dev/xvdf1 209612800 144149992 65462808 69% /var/lib/postgresql/9.1/main
/dev/xvdf2 209612800 2294024 207318776 2% /var/lib/postgresql/9.1/main/pg_xlog
/dev/xvdf3 2086912 1764240 322672 85% /var/lib/postgresql/9.1/user_influence_history
/dev/xvdf4 41922560 12262780 29659780 30% /var/lib/postgresql/9.1/entity_love
/dev/xvdf5 10475520 3943856 6531664 38% /var/lib/postgresql/9.1/user_profile
/dev/xvdf6 10475520 5903964 4571556 57% /var/lib/postgresql/9.1/entity_love_history
/dev/xvdf7 8378368 33288 8345080 1% /var/lib/postgresql/9.1/_user_profile_queue
/dev/xvdf8 41922560 6784964 35137596 17% /var/lib/postgresql/9.1/entity_entity
/dev/xvdf9 2086912 33480 2053432 2% /var/lib/postgresql/9.1/event_framework_event_handler_queue
/dev/xvdi1 2086912 33488 2053424 2% /var/lib/postgresql/9.1/user_communication_queue
/dev/xvdi2 52403200 9960744 42442456 20% /var/lib/postgresql/9.1/affiliate_click_tracking
/dev/xvdi3 31441920 9841092 21600828 32% /var/lib/postgresql/9.1/index01
/dev/xvdi4 31441920 10719884 20722036 35% /var/lib/postgresql/9.1/index02
/dev/xvdi5 31441920 9096476 22345444 29% /var/lib/postgresql/9.1/index03
/dev/xvdi6 31441920 6473916 24968004 21% /var/lib/postgresql/9.1/index04
/dev/xvdi7 31441920 3519356 27922564 12% /var/lib/postgresql/9.1/index05
"""

    # `df -i` output on Linux (inode stats)
    linux_df_i = """Filesystem Inodes IUsed IFree IUse% Mounted on
/dev/sda1 524288 171642 352646 33% /
none 950579 2019 948560 1% /dev
none 985964 1 985963 1% /dev/shm
none 985964 66 985898 1% /var/run
none 985964 3 985961 1% /var/lock
none 985964 1 985963 1% /lib/init/rw
/dev/sdb 27525120 147 27524973 1% /mnt
/dev/sdf 46474080 478386 45995694 2% /data
"""

    def testDfParser(self):
        global logger
        disk = Disk(logger)
        res = disk.parse_df_output(TestSystem.lion_df_k, 'darwin')
        assert res[0][:4] == ["/dev/disk1", 243966468, 110040020, 133670448], res[0]
        assert res[3][:4] == ["/dev/disk2s1", 31154688, 2506560, 28648128], res[3]
        res = disk.parse_df_output(TestSystem.lion_df_i, 'darwin', inodes=True)
        assert res[0][:4] == ["/dev/disk1", 60991615, 27574003, 33417612], res[0]
        # Test parsing linux output.
        res = disk.parse_df_output(TestSystem.linux_df_k, 'linux2')
        assert len(res) == 22
        assert res[0][:4] == ["/dev/sda1", 8256952, 5600592, 2236932], res[0]
        assert res[2][:4] == ["/dev/sdf", 52403200, 40909112, 11494088], res[2]
        assert res[3][:4] == ["nfs:/abc/def/ghi/jkl/mno/pqr", 52403200, 40909112, 11494088], res[3]
        assert res[4][:4] == ["/dev/sdg", 52403200, 40909112, 11494088], res[4]
        # Test parsing linux output but filter some of the nodes.
        blacklist_re = re.compile('/dev/xvdi.*')
        res = disk.parse_df_output(TestSystem.linux_df_k, 'linux2', blacklist_re=blacklist_re)
        assert res[0][:4] == ["/dev/sda1", 8256952, 5600592, 2236932], res[0]
        assert len(res) == 15, len(res)
        res = disk.parse_df_output(TestSystem.linux_df_i, 'linux2', inodes=True)
        assert res[0][:4] == ["/dev/sda1", 524288, 171642, 352646], res[0]
        assert res[1][:4] == ["/dev/sdb", 27525120, 147, 27524973], res[1]
        assert res[2][:4] == ["/dev/sdf", 46474080, 478386, 45995694], res[2]
        res = disk.parse_df_output(TestSystem.linux_df_k, 'linux2', use_mount=True)
        assert res[0][:4] == ["/", 8256952, 5600592, 2236932], res[0]
        assert res[2][:4] == ["/data", 52403200, 40909112, 11494088], res[2]
        assert res[3][:4] == ["/data2", 52403200, 40909112, 11494088], res[3]
        assert res[4][:4] == ["/data3", 52403200, 40909112, 11494088], res[4]
        assert res[-1][:4] == ["/var/lib/postgresql/9.1/index05",
                               31441920, 3519356, 27922564], res[-1]

    def test_collecting_disk_metrics(self):
        """Testing disk stats gathering"""
        if platform.system() == 'Linux':
            disk = Disk(logger, {})
            res = disk.check()
            # Assert we have disk & inode stats
            assert len(res) == 2
            # bug fix: dict views are not subscriptable in Python 3;
            # materialise the keys before indexing
            keys = list(res)
            assert keys[0]
            assert keys[1]

    def testMemory(self):
        global logger
        res = Memory(logger).check()
        if platform.system() == 'Linux':
            for k in (
                    "swapTotal", "swapFree", "swapPctFree", "swapUsed", "physTotal", "physFree",
                    "physUsed", "physBuffers", "physCached", "physUsable", "physPctUsable",
                    "physShared"):
                assert k in res, res
            assert res["swapTotal"] == res["swapFree"] + res["swapUsed"]
            assert res["physTotal"] == res["physFree"] + res["physUsed"]
        elif platform.system() == 'Darwin':
            for k in ("swapFree", "swapUsed", "physFree", "physUsed"):
                assert k in res, res

    def testDiskLatency(self):
        # example output from `iostat -d 1 2 -x -k` on
        # debian testing x86_64, from Debian package
        # sysstat@10.0.4-1
        debian_iostat_output = """Linux 3.2.0-2-amd64 (fireflyvm) 05/29/2012 _x86_64_ (2 CPU)
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 0.44 2.58 5.79 2.84 105.53 639.03 172.57 0.17 19.38 1.82 55.26 0.66 0.57
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
"""
        global logger
        checker = IO(logger)
        results = checker._parse_linux2(debian_iostat_output)
        self.assertTrue('sda' in results)
        for key in (
                'io_read_req_sec', 'io_write_req_sec', 'io_read_kbytes_sec', 'io_write_kbytes_sec'):
            self.assertTrue(key in results['sda'], 'key %r not in results["sda"]' % key)
            self.assertEqual(results['sda'][key], '0.00')

        # example output from `iostat -d 1 d -x -k` on
        # centos 5.8 x86_64, from RPM package
        # sysstat@7.0.2; it differs from the above by
        # not having split-out r_await and w_await fields
        centos_iostat_output = """Linux 2.6.18-308.el5 (localhost.localdomain) 05/29/2012
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await svctm %util
sda 9.44 7.56 16.76 4.40 322.05 47.75 34.96 0.01 0.59 0.35 0.74
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
"""
        checker = IO(logger)
        results = checker._parse_linux2(centos_iostat_output)
        self.assertTrue('sda' in results)
        for key in (
                'io_read_req_sec', 'io_write_req_sec', 'io_read_kbytes_sec', 'io_write_kbytes_sec'):
            self.assertTrue(key in results['sda'], 'key %r not in results["sda"]' % key)
            self.assertEqual(results['sda'][key], '0.00')

        # iostat -o -d -c 2 -w 1
        # OS X 10.8.3 (internal SSD + USB flash attached)
        darwin_iostat_output = """ disk0 disk1
 KB/t tps MB/s KB/t tps MB/s
 21.11 23 0.47 20.01 0 0.00
 6.67 3 0.02 0.00 0 0.00
"""
        checker = IO(logger)
        results = checker._parse_darwin(darwin_iostat_output)
        self.assertTrue("disk0" in results.keys())
        self.assertTrue("disk1" in results.keys())
        self.assertEqual(
            results["disk0"],
            {'system.io.bytes_per_s': float(0.02 * 10 ** 6), }
        )
        self.assertEqual(
            results["disk1"],
            {'system.io.bytes_per_s': float(0), }
        )

    def testNetwork(self):
        # NOTE(review): YAML indentation reconstructed — the original dump was
        # whitespace-mangled; instances must nest under the `instances:` key
        config = """
init_config:

instances:
    - collect_connection_state: true
      excluded_interfaces:
        - lo
        - lo0
"""
        check, instances = get_check('network', config)
        check.check(instances[0])
        check.get_metrics()
        metric_names = [m[0] for m in check.aggregator.metrics]
        assert 'net_bytes_in' in metric_names
        assert 'net_bytes_out' in metric_names
if __name__ == "__main__":
    # allow running this test module directly
    unittest.main()
| {
"repo_name": "sapcc/monasca-agent",
"path": "tests_to_fix/test_system.py",
"copies": "1",
"size": "11608",
"license": "bsd-3-clause",
"hash": -5444452183412632000,
"line_mean": 48.3957446809,
"line_max": 129,
"alpha_frac": 0.5537560303,
"autogenerated": false,
"ratio": 2.9424588086185044,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8982164848636092,
"avg_score": 0.0028099980564825376,
"num_lines": 235
} |
from functools import reduce
import logging
from django.conf import settings as django_settings
from rest_framework import exceptions
from waldur_core.core.permissions import SAFE_METHODS, IsAdminOrReadOnly
from waldur_core.structure import models
logger = logging.getLogger(__name__)
# TODO: this is a temporary permission filter.
class IsAdminOrOwner(IsAdminOrReadOnly):
    """
    Staff users may do anything; everyone else gets read-only access,
    except that the account's owner may modify their own record
    (never the list view, never DELETE).
    """

    def has_permission(self, request, view):
        actor = request.user
        if actor.is_staff or request.method in SAFE_METHODS:
            return True
        if view.suffix == 'List' or request.method == 'DELETE':
            return False
        # During schema generation there is no object lookup kwarg
        if 'uuid' not in view.kwargs:
            return False
        return actor == view.get_object()
def is_staff(request, view, obj=None):
    """Permission check: deny unless the requesting user is staff."""
    if not request.user.is_staff:
        raise exceptions.PermissionDenied()
def is_owner(request, view, obj=None):
    """Deny unless the user has owner-level access to obj's customer.

    Without an object there is nothing to check, so access is allowed.
    """
    if not obj:
        return
    if not _has_owner_access(request.user, _get_customer(obj)):
        raise exceptions.PermissionDenied()
def is_manager(request, view, obj=None):
    """Deny unless the user has manager-level access to obj's project.

    Without an object there is nothing to check, so access is allowed.
    """
    if not obj:
        return
    if not _has_manager_access(request.user, _get_project(obj)):
        raise exceptions.PermissionDenied()
def is_administrator(request, view, obj=None):
    """Deny unless the user has administrator access to obj's project.

    Without an object there is nothing to check, so access is allowed.
    """
    if not obj:
        return
    if not _has_admin_access(request.user, _get_project(obj)):
        raise exceptions.PermissionDenied()
def _has_owner_access(user, customer):
    """True if `user` is staff or holds the OWNER role on `customer`."""
    return user.is_staff or customer.has_user(user, models.CustomerRole.OWNER)
def _has_manager_access(user, project):
    """True if `user` owns the project's customer or holds the MANAGER role on `project`."""
    return _has_owner_access(user, project.customer) or project.has_user(user, models.ProjectRole.MANAGER)
def _has_admin_access(user, project):
    """True if `user` has manager access or holds the ADMINISTRATOR role on `project`."""
    return _has_manager_access(user, project) or project.has_user(user, models.ProjectRole.ADMINISTRATOR)
def _get_parent_by_permission_path(obj, permission_path):
path = getattr(obj.Permissions, permission_path, None)
if path is None:
return
if path == 'self':
return obj
return reduce(getattr, path.split('__'), obj)
def _get_project(obj):
    """Resolve the project `obj` belongs to, per obj.Permissions.project_path."""
    return _get_parent_by_permission_path(obj, 'project_path')
def _get_customer(obj):
    """Resolve the customer `obj` belongs to, per obj.Permissions.customer_path."""
    return _get_parent_by_permission_path(obj, 'customer_path')
def check_access_to_services_management(request, view, obj=None):
    # when the ONLY_STAFF_MANAGES_SERVICES setting is enabled, non-staff
    # users may not manage services at all
    if django_settings.WALDUR_CORE['ONLY_STAFF_MANAGES_SERVICES'] and not request.user.is_staff:
        raise exceptions.PermissionDenied()
| {
"repo_name": "opennode/nodeconductor",
"path": "waldur_core/structure/permissions.py",
"copies": "1",
"size": "2681",
"license": "mit",
"hash": 104132609789461010,
"line_mean": 27.8279569892,
"line_max": 106,
"alpha_frac": 0.6926519955,
"autogenerated": false,
"ratio": 3.7132963988919667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49059483943919663,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import math
from collections import Counter
def build_structure(f):
    """Read a name-parts file into four lists: male and female first names,
    then the two last-name fragments.

    Sections in the file are separated by lines containing only '---'.

    :param f: path to the names file
    :return: dict with keys 'male', 'female', 'lastname_1', 'lastname_2'
    """
    result = {
        'male': [],
        'female': [],
        'lastname_1': [],
        'lastname_2': [],
    }
    remaining = list(result)  # section names, in insertion order
    current = remaining.pop(0)
    # bug fix: the original used open(f).readlines() and never closed the
    # file handle; use a context manager instead
    with open(f) as handle:
        for raw_line in handle:
            entry = raw_line.strip()
            if entry == '---':
                # advance to the next section
                current = remaining.pop(0)
                continue
            result[current].append(entry)
    return result
def starwarsify(struct, fname, lname, sex):
    """Derive a deterministic Star Wars-style name from a real name.

    :param struct: name-parts dict as returned by build_structure
    :param fname: first name
    :param lname: last name
    :param sex: 'M' or anything else (treated as female)
    :return: '<first> <lastpart1><lastpart2>'
    """
    pool = struct['male' if sex == 'M' else 'female']
    first = pool[sum(map(ord, fname)) % len(pool)]
    # split the last name; the front half picks the first surname fragment
    split_at = math.ceil(len(lname) / 2.0)
    front, back = lname[:split_at], lname[split_at:]
    part_one = struct['lastname_1'][sum(ord(c.lower()) - 96 for c in front) % len(struct['lastname_1'])]
    # the back half (weighted by name lengths) picks the second fragment
    weight = len(fname) if sex == 'M' else len(fname) + len(lname)
    factor = reduce(lambda acc, value: acc * value, [ord(c) for c in back]) * weight
    digits = int(''.join(sorted(str(factor), reverse=True)))
    part_two = struct['lastname_2'][digits % len(struct['lastname_2'])]
    return '{} {}{}'.format(first, part_one, part_two)
def test_starwarsify():
    # regression check against a known input from the real names file
    struct = build_structure('input/names.txt')
    assert 'Poe Lightverse' == starwarsify(struct, 'Jan', 'Johannsen', 'M')
if __name__ == '__main__':
    # generate a Star Wars name for every employee and print the ten most
    # common results
    struct = build_structure('input/names.txt')
    names = []
    for line in open('input/employees.csv'):
        fname, lname, sex = line.strip().split(',')
        names.append(starwarsify(struct, fname, lname, sex))
    print(Counter(names).most_common(10))
| {
"repo_name": "matslindh/codingchallenges",
"path": "knowit2019/18.py",
"copies": "1",
"size": "1487",
"license": "mit",
"hash": -5055254277627946000,
"line_mean": 25.0877192982,
"line_max": 119,
"alpha_frac": 0.5615332885,
"autogenerated": false,
"ratio": 3.130526315789474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9185998998228868,
"avg_score": 0.001212121212121212,
"num_lines": 57
} |
from functools import reduce
import mmap
from multiprocessing.managers import DictProxy, SyncManager, Server
import os
import random
import struct
import sys
try:
    from posixshmem import _PosixSharedMemory, Error, ExistentialError, O_CREX
except ImportError as ie:
    if os.name != "nt":
        # On Windows, posixshmem is not required to be available.
        raise ie
    else:
        # provide inert stand-ins so the POSIX names still exist on Windows
        _PosixSharedMemory = object
        class ExistentialError(BaseException): pass
        class Error(BaseException): pass
        O_CREX = -1
class WindowsNamedSharedMemory:
    """Named shared-memory block backed by a Windows tagged mmap."""

    def __init__(self, name, flags=None, mode=None, size=None, read_only=False):
        # generate a process-unique default name when none is supplied
        if name is None:
            name = f'wnsm_{os.getpid()}_{random.randrange(100000)}'
        # tagname makes this a named, cross-process mapping (Windows-only
        # mmap feature); flags/mode/read_only are accepted for interface
        # parity with PosixSharedMemory but unused here
        self._mmap = mmap.mmap(-1, size, tagname=name)
        self.buf = memoryview(self._mmap)
        self.name = name
        self.size = size

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name!r}, size={self.size})'

    def close(self):
        # release the memoryview before closing the underlying mapping
        self.buf.release()
        self._mmap.close()

    def unlink(self):
        """Windows ensures that destruction of the last reference to this
        named shared memory block will result in the release of this memory."""
        pass
class PosixSharedMemory(_PosixSharedMemory):
    """POSIX shared-memory block exposed as a mmap-backed memoryview."""

    def __init__(self, name, flags=None, mode=None, size=None, read_only=False):
        if name and (flags is None):
            # attach to an existing segment by name
            _PosixSharedMemory.__init__(self, name)
        else:
            # create a new segment, generating a unique name if needed;
            # O_CREX makes creation fail if the name already exists
            if name is None:
                name = f'psm_{os.getpid()}_{random.randrange(100000)}'
            _PosixSharedMemory.__init__(self, name, flags=O_CREX, size=size)
        # self.fd / self.size are provided by the C-extension base class
        self._mmap = mmap.mmap(self.fd, self.size)
        self.buf = memoryview(self._mmap)

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name!r}, size={self.size})'

    def close(self):
        # release the view, unmap, then close the shm file descriptor
        self.buf.release()
        self._mmap.close()
        self.close_fd()
class SharedMemory:
    """Factory dispatching to the platform-appropriate shared-memory class."""

    def __new__(cls, *args, **kwargs):
        # note: returns an instance of the chosen implementation class,
        # not of SharedMemory itself
        impl = WindowsNamedSharedMemory if os.name == 'nt' else PosixSharedMemory
        return impl(*args, **kwargs)
def alt_shareable_wrap(existing_type_or_obj, additional_excluded_methods=[]):
    """Create a ShareableWrappedObject subclass whose dunder methods delegate
    to the wrapped object, so the wrapper mimics the wrapped type.

    Given a type, returns the generated wrapper class; given an instance,
    returns that instance wrapped.
    NOTE(review): additional_excluded_methods is a mutable default argument;
    it is only read here, but callers should not rely on mutating it.
    """
    if isinstance(existing_type_or_obj, type):
        existing_type = existing_type_or_obj
        existing_obj = None
    else:
        existing_type = type(existing_type_or_obj)
        existing_obj = existing_type_or_obj
    # dunders that must NOT be forwarded (identity, construction, pickling,
    # attribute-access machinery and type introspection)
    excluded_methods = {
        "__new__", "__class__", "__copy__", "__deepcopy__", "__getattribute__",
        "__hash__", "__init__", "__init_subclass__", "__reduce__",
        "__reduce_ex__", "__getattr__", "__setattr__", "__getstate__",
        "__setstate__", "__sizeof__", "__subclasshook__", "__subclasscheck__",
        "__instancecheck__", "__abstractmethods__", "__base__", "__bases__",
        "__basicsize__", "__dict__", "__dictoffset__", "__flags__",
        "__itemsize__", "__mro__", "__name__", "__qualname__",
        "__text_signature__", "__weakrefoffset__", "__repr__", "__str__",
        "__dir__",
    }
    excluded_methods.update(additional_excluded_methods)
    # every remaining attribute becomes a method forwarding the call to the
    # wrapped object; the _attr default binds the name at definition time
    kept_dunders = {
        attr: (
            lambda self, *args, _attr=attr, **kwargs:
            getattr(existing_type, _attr)(self._wrapped_obj, *args, **kwargs)
        )
        for attr in dir(existing_type) if attr not in excluded_methods
    }
    # class-creation kwargs become class attributes via
    # ShareableWrappedObject.__init_subclass__
    class CustomShareableWrap(ShareableWrappedObject, **kept_dunders):
        pass
    CustomShareableWrap.__name__ = f"alt_shareable_wrap({existing_type.__name__})"
    if existing_obj is None:
        return CustomShareableWrap
    else:
        return CustomShareableWrap(existing_type_or_obj)
class ShareableWrappedObject:
    """Wrap a buffer-protocol object (e.g. a numpy array), copying its
    contents into a shared-memory block and re-exposing it as a view over
    that block."""

    def __init__(self, existing_obj=None, shmem_name=None, **kwargs):
        if existing_obj is not None:
            # TODO: replace use of reduce below with next 2 lines once available
            #agg = existing_obj.itemsize
            #size = [ agg := i * agg for i in existing_obj.shape ][-1]
            # total byte size = itemsize * product of the shape dimensions
            size = reduce(
                lambda x, y: x * y,
                existing_obj.shape,
                existing_obj.itemsize
            )
        else:
            # attaching to an existing block by name
            assert shmem_name is not None
            size = 1
        self._shm = SharedMemory(shmem_name, size=size)
        # NOTE(review): when existing_obj is None this passes None into
        # _build_kwargs, which reads .shape directly — verify the attach path
        existing_kwargs = self._build_kwargs(existing_obj)
        kwargs.update(existing_kwargs)
        obj_type = type(existing_obj) if "cls" not in kwargs else kwargs["cls"]
        # rebuild the array-like object as a view over the shared buffer
        self._wrapped_obj = obj_type(buffer=self._shm.buf, **kwargs)
        if existing_obj is not None:
            # copy the original object's bytes into the shared block
            mveo = memoryview(existing_obj)
            self._shm.buf[:mveo.nbytes] = mveo.tobytes()

    @staticmethod
    def _build_kwargs(existing_obj):
        # constructor kwargs needed to recreate the object's memory layout
        # (shape/strides, plus dtype when the object exposes one)
        kwargs = {
            "shape": existing_obj.shape,
            "strides": existing_obj.strides,
        }
        try:
            kwargs["dtype"] = existing_obj.dtype
        except AttributeError:
            pass
        return kwargs

    def __init_subclass__(cls, **kwargs):
        # class-creation kwargs are installed as class attributes; this is
        # how alt_shareable_wrap injects delegating dunder methods
        for attr, value in kwargs.items():
            try:
                setattr(cls, attr, value)
            except Exception as e:
                raise AttributeError(f"{attr!r} could not be set as attribute")

    def __getattr__(self, attr):
        # everything not found on the wrapper is delegated to the wrapped object
        return getattr(self._wrapped_obj, attr)

    def __repr__(self):
        formatted_pairs = ("%s=%r" % kv for kv in self.__getstate__().items())
        return f"{self.__class__.__name__}({', '.join(formatted_pairs)})"

    def __getstate__(self):
        # pickle support: record enough info to re-attach to the same shared
        # memory block from another process
        kwargs = self._build_kwargs(self._wrapped_obj)
        kwargs["shmem_name"] = self._shm.name
        kwargs["cls"] = type(self._wrapped_obj)
        return kwargs

    def __setstate__(self, state):
        self.__init__(**state)
#class ShareableWrappedObject(_ShareableWrappedObject):
#
# def __new__(cls, existing_obj=None, shmem_name=None, **kwargs):
# wrapped_type = existing_obj.__class__ #type(existing_obj)
# dunders = (attr for attr in dir(wrapped_type) if attr.startswith("__"))
# for attr in dunders:
# #setattr(cls, attr, getattr(wrapped_type, attr))
# cls[attr] = wrapped_type[attr]
# return type.__new__(cls.__name__, cls.__bases__, cls.__dict__)
encoding = "utf8"
class ShareableList:
"""Pattern for a mutable list-like object shareable via a shared
memory block. It differs from the built-in list type in that these
lists can not change their overall length (i.e. no append, insert,
etc.)
Because values are packed into a memoryview as bytes, the struct
packing format for any storable value must require no more than 8
characters to describe its format."""
# TODO: Adjust for discovered word size of machine.
types_mapping = {
int: "q",
float: "d",
bool: "xxxxxxx?",
str: "%ds",
bytes: "%ds",
None.__class__: "xxxxxx?x",
}
alignment = 8
back_transform_codes = {
0: lambda value: value, # int, float, bool
1: lambda value: value.rstrip(b'\x00').decode(encoding), # str
2: lambda value: value.rstrip(b'\x00'), # bytes
3: lambda _value: None, # None
}
@staticmethod
def _extract_recreation_code(value):
"""Used in concert with back_transform_codes to convert values
into the appropriate Python objects when retrieving them from
the list as well as when storing them."""
if not isinstance(value, (str, bytes, None.__class__)):
return 0
elif isinstance(value, str):
return 1
elif isinstance(value, bytes):
return 2
else:
return 3 # NoneType
def __init__(self, iterable=None, name=None):
if iterable is not None:
_formats = [
self.types_mapping[type(item)] if not isinstance(item, (str, bytes))
else self.types_mapping[type(item)] % (
self.alignment * (len(item) // self.alignment + 1),
)
for item in iterable
]
self._list_len = len(_formats)
assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len
self._allocated_bytes = tuple(
self.alignment if fmt[-1] != "s" else int(fmt[:-1])
for fmt in _formats
)
_back_transform_codes = [
self._extract_recreation_code(item) for item in iterable
]
requested_size = struct.calcsize(
"q" + self._format_size_metainfo + "".join(_formats)
)
else:
requested_size = 1 # Some platforms require > 0.
self.shm = SharedMemory(name, size=requested_size)
if iterable is not None:
_enc = encoding
struct.pack_into(
"q" + self._format_size_metainfo,
self.shm.buf,
0,
self._list_len,
*(self._allocated_bytes)
)
struct.pack_into(
"".join(_formats),
self.shm.buf,
self._offset_data_start,
*(v.encode(_enc) if isinstance(v, str) else v for v in iterable)
)
struct.pack_into(
self._format_packing_metainfo,
self.shm.buf,
self._offset_packing_formats,
*(v.encode(_enc) for v in _formats)
)
struct.pack_into(
self._format_back_transform_codes,
self.shm.buf,
self._offset_back_transform_codes,
*(_back_transform_codes)
)
else:
self._list_len = len(self) # Obtains size from offset 0 in buffer.
self._allocated_bytes = struct.unpack_from(
self._format_size_metainfo,
self.shm.buf,
1 * 8
)
    def _get_packing_format(self, position):
        "Gets the packing format for a single value stored in the list."
        # Support negative (from-the-end) indexing like a regular list.
        position = position if position >= 0 else position + self._list_len
        if (position >= self._list_len) or (self._list_len < 0):
            raise IndexError("Requested position out of range.")
        # Each format occupies a fixed 8-byte slot in the metadata region;
        # strip the NUL padding before decoding.
        v = struct.unpack_from(
            "8s",
            self.shm.buf,
            self._offset_packing_formats + position * 8
        )[0]
        fmt = v.rstrip(b'\x00')
        fmt_as_str = fmt.decode(encoding)
        return fmt_as_str
    def _get_back_transform(self, position):
        "Gets the back transformation function for a single value."
        # Support negative (from-the-end) indexing like a regular list.
        position = position if position >= 0 else position + self._list_len
        if (position >= self._list_len) or (self._list_len < 0):
            raise IndexError("Requested position out of range.")
        # One signed byte per item holds the code produced by
        # _extract_recreation_code at store time.
        transform_code = struct.unpack_from(
            "b",
            self.shm.buf,
            self._offset_back_transform_codes + position
        )[0]
        transform_function = self.back_transform_codes[transform_code]
        return transform_function
    def _set_packing_format_and_transform(self, position, fmt_as_str, value):
        """Sets the packing format and back transformation code for a
        single value in the list at the specified position."""
        # Support negative (from-the-end) indexing like a regular list.
        position = position if position >= 0 else position + self._list_len
        if (position >= self._list_len) or (self._list_len < 0):
            raise IndexError("Requested position out of range.")
        # Write the new format into the item's fixed 8-byte slot; "8s"
        # NUL-pads shorter strings automatically.
        struct.pack_into(
            "8s",
            self.shm.buf,
            self._offset_packing_formats + position * 8,
            fmt_as_str.encode(encoding)
        )
        # Record how the value must be reconstructed when read back.
        transform_code = self._extract_recreation_code(value)
        struct.pack_into(
            "b",
            self.shm.buf,
            self._offset_back_transform_codes + position,
            transform_code
        )
    def __getitem__(self, position):
        """Return the item at `position`, rebuilding str/bytes/None from
        their raw stored forms."""
        try:
            # Items are packed back-to-back; the offset of an item is the
            # sum of the allocations of everything before it.
            offset = self._offset_data_start + sum(self._allocated_bytes[:position])
            (v,) = struct.unpack_from(
                self._get_packing_format(position),
                self.shm.buf,
                offset
            )
        except IndexError:
            raise IndexError("index out of range")
        # Undo storage-time conversion (e.g. strip NUL padding and decode
        # for str values).
        back_transform = self._get_back_transform(position)
        v = back_transform(v)
        return v
    def __setitem__(self, position, value):
        """Replace the item at `position` in place.

        str/bytes replacements must fit within the bytes allocated when the
        list was created; the storage cannot grow afterwards.
        """
        try:
            offset = self._offset_data_start + sum(self._allocated_bytes[:position])
            current_format = self._get_packing_format(position)
        except IndexError:
            raise IndexError("assignment index out of range")
        if not isinstance(value, (str, bytes)):
            # Fixed-size scalar types reuse their canonical format.
            new_format = self.types_mapping[type(value)]
        else:
            # NOTE(review): this length check and message apply to bytes
            # values as well as str -- confirm the message is intentional.
            if len(value) > self._allocated_bytes[position]:
                raise ValueError("exceeds available storage for existing str")
            if current_format[-1] == "s":
                new_format = current_format
            else:
                # Slot previously held a scalar; switch it to a string
                # format spanning the item's full allocation.
                new_format = self.types_mapping[str] % (
                    self._allocated_bytes[position],
                )
        self._set_packing_format_and_transform(
            position,
            new_format,
            value
        )
        value = value.encode(encoding) if isinstance(value, str) else value
        struct.pack_into(new_format, self.shm.buf, offset, value)
def __len__(self):
return struct.unpack_from("q", self.shm.buf, 0)[0]
@property
def format(self):
"The struct packing format used by all currently stored values."
return "".join(self._get_packing_format(i) for i in range(self._list_len))
@property
def _format_size_metainfo(self):
"The struct packing format used for metainfo on storage sizes."
return f"{self._list_len}q"
@property
def _format_packing_metainfo(self):
"The struct packing format used for the values' packing formats."
return "8s" * self._list_len
@property
def _format_back_transform_codes(self):
"The struct packing format used for the values' back transforms."
return "b" * self._list_len
@property
def _offset_data_start(self):
return (self._list_len + 1) * 8 # 8 bytes per "q"
@property
def _offset_packing_formats(self):
return self._offset_data_start + sum(self._allocated_bytes)
@property
def _offset_back_transform_codes(self):
return self._offset_packing_formats + self._list_len * 8
    @classmethod
    def copy(cls, self):
        """L.copy() -> ShareableList -- a shallow copy of L.

        Note: allocates a brand-new shared memory segment and copies every
        value into it.
        """
        return cls(self)
def count(self, value):
"L.count(value) -> integer -- return number of occurrences of value."
return sum(value == entry for entry in self)
def index(self, value):
"""L.index(value) -> integer -- return first index of value.
Raises ValueError if the value is not present."""
for position, entry in enumerate(self):
if value == entry:
return position
else:
raise ValueError(f"{value!r} not in this container")
class SharedMemoryTracker:
    "Manages one or more shared memory segments."

    def __init__(self, name, segment_names=None):
        # Bug fix: a mutable default ([]) is created once and shared by
        # every instance constructed without an explicit list, leaking
        # segment names across unrelated trackers.
        self.shared_memory_context_name = name
        self.segment_names = [] if segment_names is None else segment_names

    def register_segment(self, segment):
        """Track a live segment by name so unlink() can destroy it later."""
        print(f"DBG Registering segment {segment.name!r} in pid {os.getpid()}")
        self.segment_names.append(segment.name)

    def destroy_segment(self, segment_name):
        """Stop tracking a segment and close + unlink its shared memory."""
        print(f"DBG Destroying segment {segment_name!r} in pid {os.getpid()}")
        self.segment_names.remove(segment_name)
        segment = SharedMemory(segment_name, size=1)
        segment.close()
        segment.unlink()

    def unlink(self):
        """Destroy every tracked segment."""
        # Iterate over a copy: destroy_segment mutates self.segment_names.
        for segment_name in self.segment_names[:]:
            self.destroy_segment(segment_name)

    def __del__(self):
        print(f"DBG somebody called {self.__class__.__name__}.__del__: {os.getpid()}")
        self.unlink()

    def __getstate__(self):
        # Picklable state: (context name, tracked segment names).
        return (self.shared_memory_context_name, self.segment_names)

    def __setstate__(self, state):
        self.__init__(*state)
class AugmentedServer(Server):
    """A manager Server that owns a SharedMemoryTracker so shared memory
    segments created on behalf of clients can be destroyed at shutdown."""

    def __init__(self, *args, **kwargs):
        Server.__init__(self, *args, **kwargs)
        # One tracker per server process, named after address and pid.
        self.shared_memory_context = \
            SharedMemoryTracker(f"shmm_{self.address}_{os.getpid()}")
        print(f"DBG AugmentedServer started by pid {os.getpid()}")

    def create(self, c, typeid, *args, **kwargs):
        # Unless set up as a shared proxy, don't make shared_memory_context
        # a standard part of kwargs. This makes things easier for supplying
        # simple functions.
        # (registry[typeid][-1] is presumably the proxy class; only proxies
        # marked with _shared_memory_proxy opt in to receiving the tracker
        # -- confirm against the managers module.)
        if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
            kwargs['shared_memory_context'] = self.shared_memory_context
        return Server.create(self, c, typeid, *args, **kwargs)

    def shutdown(self, c):
        # Destroy every tracked segment before the normal server shutdown.
        self.shared_memory_context.unlink()
        return Server.shutdown(self, c)
class SharedMemoryManager(SyncManager):
    """Like SyncManager but uses AugmentedServer instead of Server.

    TODO: relocate/merge into managers submodule."""

    _Server = AugmentedServer

    def __init__(self, *args, **kwargs):
        # TODO: Remove after debugging satisfied
        SyncManager.__init__(self, *args, **kwargs)
        print(f"{self.__class__.__name__} created by pid {os.getpid()}")

    def __del__(self):
        # TODO: Remove after debugging satisfied
        print(f"{self.__class__.__name__} told to die by pid {os.getpid()}")
        pass

    def get_server(self):
        'Better than monkeypatching for now; merge into Server ultimately'
        # Mirrors BaseManager.get_server's state checks, but returns the
        # shared-memory-aware AugmentedServer instead of plain Server.
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return AugmentedServer(self._registry, self._address,
                               self._authkey, self._serializer)
| {
"repo_name": "applio/proto_shmem",
"path": "shared_memory.py",
"copies": "1",
"size": "18492",
"license": "mit",
"hash": -1180574135648953000,
"line_mean": 33.6941838649,
"line_max": 86,
"alpha_frac": 0.5693272767,
"autogenerated": false,
"ratio": 4.0588235294117645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005798253176294495,
"num_lines": 533
} |
from functools import reduce
import nltk.cluster
import numpy
distances = []
def get_clusters_k_means(vectors, k, distance_func, num_of_repeats):
    """Cluster feature vectors with NLTK's k-means.

    :param vectors: iterable of feature vectors (sequences of numbers)
    :param k: number of clusters (means)
    :param distance_func: callable(a, b) -> float used as the metric
    :param num_of_repeats: number of random restarts NLTK performs
    :return: list assigning a cluster id to each input vector
    """
    feature_vectors = [numpy.array(f) for f in vectors]
    clusterer = nltk.cluster.KMeansClusterer(
        num_means=k,
        # Pass the metric directly; the previous lambda wrapper added a
        # needless layer of indirection with identical call semantics.
        distance=distance_func,
        repeats=num_of_repeats,
        avoid_empty_clusters=True)
    return clusterer.cluster(vectors=feature_vectors, assign_clusters=True)
def get_cluster_error(vectors, clusters, num_of_clusters, distance_func):
    """
    :param vectors: the feature vectors
    :param clusters: the cluster assigned to each feature vector
    :param num_of_clusters: the number of clusters contained in the vector_pair
    :param distance_func: the distance function used during clustering
    :return: a floating point number representing the average error in the cluster configuration. The error for each
    cluster is the maximum difference between any two elements
    """
    # Lazily initialise the module-level pairwise-distance cache; -1 marks
    # a pair whose distance has not been computed yet.
    if len(distances) == 0:
        for i in range(len(vectors)):
            distances.append([])
            for j in range(len(vectors)):
                distances[i].append(-1)
    max_distances = []
    for i in range(num_of_clusters):
        max_distances.append(0)
    # For every intra-cluster pair, compute (or reuse) the distance and
    # track each cluster's diameter (its maximum member-to-member distance).
    for i in range(len(vectors)-1):
        for j in range(i+1, len(vectors)):
            if clusters[i] == clusters[j]:
                distance = distances[i][j] if distances[i][j] != -1 else distance_func(vectors[i], vectors[j])
                # Cache symmetrically for reuse on later calls.
                distances[i][j] = distance
                distances[j][i] = distance
                if distance > max_distances[clusters[i]]:
                    max_distances[clusters[i]] = distance
    # Average diameter across all clusters.
    return reduce(lambda x, y: x + y, max_distances) / num_of_clusters
| {
"repo_name": "miromir2217/style_breach",
"path": "style_breach/cluster/clustering.py",
"copies": "1",
"size": "1786",
"license": "apache-2.0",
"hash": 1508933437238472400,
"line_mean": 37,
"line_max": 116,
"alpha_frac": 0.640537514,
"autogenerated": false,
"ratio": 3.9513274336283186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5091864947628318,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import numpy as np
from menpo.transform import Homogeneous, Translation, Scale, NonUniformScale
def dims_3to2():
    r"""Build the projection that discards the third axis of a 3D shape.

    Returns
    ------
    :map`Homogeneous`
        :map`Homogeneous` that strips off the 3D axis of a 3D shape,
        keeping only the first two axes.
    """
    projection = np.array([[1, 0, 0, 0],
                           [0, 1, 0, 0],
                           [0, 0, 0, 1]])
    return Homogeneous(projection)
def dims_2to3(x=0):
    r"""Build the embedding that appends a third axis to a 2D shape.

    Parameters
    ----------
    x : `float`, optional
        The value assigned to every point on the new third dimension

    Returns
    ------
    :map`Homogeneous`
        :map`Homogeneous` that adds on a 3rd axis to a 2D shape.
    """
    embedding = np.array([[1, 0, 0],
                          [0, 1, 0],
                          [0, 0, x],
                          [0, 0, 1]])
    return Homogeneous(embedding)
def model_to_clip_transform(points, xy_scale=0.9, z_scale=0.3):
    r"""
    Produces an Affine Transform which centres and scales 3D points to fit
    into the OpenGL clipping space ([-1, 1], [-1, 1], [1, 1-]). This can be
    used to construct an appropriate projection matrix for use in an
    orthographic Rasterizer. Note that the z-axis is flipped as is default in
    OpenGL - as a result this transform converts the right handed coordinate
    input into a left hand one.

    Parameters
    ----------
    points: :map:`PointCloud`
        The points that should be adjusted.
    xy_scale: `float` 0-1, optional
        Relaxation of the x/y boundary so points are not mapped right onto
        the clip-space edge; 1 maps the extremities exactly onto [-1, 1],
        0.5 maps them into [-0.5, 0.5]. Default: 0.9.
    z_scale: float 0-1, optional
        Compression factor for the z-range; 1 maps it exactly to [1, -1],
        0.1 compresses it into [0.1, -0.1]. Default: 0.3.

    Returns
    -------
    :map:`Affine`
        The affine transform that creates this mapping
    """
    # Move the centre of the bounding box onto the origin.
    recentre = Translation(points.centre_of_bounds()).pseudoinverse()
    # Normalise so the cloud exactly spans [-1, 1] on every axis.
    unit_scale = Scale(points.range() / 2.0).pseudoinverse()
    # Apply the requested relaxation and flip z: OpenGL depth-buffers with
    # bigger-is-further semantics, so inverting z converts the right-handed
    # input into left-handed clip space.
    relax = NonUniformScale([xy_scale, xy_scale, -z_scale])
    chain = [recentre, unit_scale, relax]
    return reduce(lambda a, b: a.compose_before(b), chain)
def clip_to_image_transform(width, height):
    r"""
    Affine transform that converts 3D clip space coordinates into 2D image
    space coordinates. Note that the z axis of the clip space coordinates is
    ignored.

    Parameters
    ----------
    width: int
        The width of the image
    height: int
        The height of the image

    Returns
    -------
    HomogeneousTransform
        A homogeneous transform that moves clip space coordinates into image
        space.
    """
    # 1. Remove the z axis from the clip space
    rem_z = dims_3to2()
    # 2. invert the y direction (up becomes down)
    invert_y = Scale([1, -1])
    # 3. [-1, 1] [-1, 1] -> [0, 2] [0, 2]
    t = Translation([1, 1])
    # 4. [0, 2] [0, 2] -> [0, 1] [0, 1]
    unit_scale = Scale(0.5, n_dims=2)
    # 5. [0, 1] [0, 1] -> [0, w - 1] [0, h - 1]
    im_scale = Scale([width - 1, height - 1])
    # 6. [0, w] [0, h] -> [0, h] [0, w]
    # Bug fix: np.float (an alias for the builtin float) was deprecated in
    # NumPy 1.20 and removed in 1.24; use the builtin directly.
    xy_yx = Homogeneous(np.array([[0, 1, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]], dtype=float))
    # reduce the full transform chain to a single affine matrix
    transforms = [rem_z, invert_y, t, unit_scale, im_scale, xy_yx]
    return reduce(lambda a, b: a.compose_before(b), transforms)
| {
"repo_name": "grigorisg9gr/menpo3d",
"path": "menpo3d/rasterize/transform.py",
"copies": "3",
"size": "4300",
"license": "bsd-3-clause",
"hash": -6152028573756367000,
"line_mean": 32.59375,
"line_max": 79,
"alpha_frac": 0.5818604651,
"autogenerated": false,
"ratio": 3.6502546689303905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 128
} |
from functools import reduce
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from skimage.draw import polygon
# Matplotlib mouse-button event codes.
# NOTE(review): neither constant is referenced in this module's visible
# code -- presumably leftovers; confirm before removing.
LEFT_CLICK = 1
RIGHT_CLICK = 3
# Borrowed from skimage.future
def _mask_from_vertices(vertices, shape, label):
    """Rasterize a polygon given by (x, y) vertices into an integer mask
    of `shape`, filling interior pixels with `label`."""
    mask = np.zeros(shape, dtype=int)
    rows = [y for _, y in vertices]
    cols = [x for x, _ in vertices]
    rr, cc = polygon(rows, cols, shape)
    mask[rr, cc] = label
    return mask
def _draw_polygon(ax, vertices, alpha=0.4):
    """Draw a closed, semi-transparent polygon patch on `ax` and return
    the added collection so the caller can later remove it (undo)."""
    # Renamed the local from `polygon` to `patch`: the old name shadowed
    # skimage.draw.polygon imported at module level.
    patch = Polygon(vertices, closed=True)
    collection = PatchCollection([patch], match_original=True, alpha=alpha)
    polygon_object = ax.add_collection(collection)
    plt.draw()
    return polygon_object
def manual_segment(image, alpha=0.4, return_all=False):
    """Return a label image based on freeform selections made with the mouse.

    Parameters
    ----------
    image : (M, N[, 3]) array
        Grayscale or RGB image.
    alpha : float, optional
        Transparency value for polygons drawn over the image.
    return_all : bool, optional
        If True, an array containing each separate polygon drawn is returned.
        (The polygons may overlap.) If False (default), latter polygons
        "overwrite" earlier ones where they overlap.

    Returns
    -------
    labels : array of int, shape ([Q, ]M, N)
        The segmented regions. If `return_all` is True, the leading dimension
        of the array corresponds to the number of regions that the user drew.

    Notes
    -----
    Press and hold the left mouse button to draw around each object.

    Examples
    --------
    >>> from skimage import data, io
    >>> camera = data.camera()
    >>> mask = manual_segment(camera)
    >>> io.imshow(mask)
    >>> io.show()
    """
    list_of_vertex_lists = []
    polygons_drawn = []
    if image.ndim not in (2, 3):
        raise ValueError('Only 2D grayscale or RGB images are supported.')
    fig, ax = plt.subplots()
    fig.subplots_adjust(bottom=0.2)
    ax.imshow(image, cmap="gray")
    ax.set_axis_off()

    def _undo(*args, **kwargs):
        # Drop the most recent selection, if any.
        if list_of_vertex_lists:
            list_of_vertex_lists.pop()
            # Remove last polygon from list of polygons...
            last_poly = polygons_drawn.pop()
            # ... then from the plot
            last_poly.remove()
            fig.canvas.draw_idle()

    undo_pos = fig.add_axes([0.85, 0.05, 0.075, 0.075])
    undo_button = matplotlib.widgets.Button(undo_pos, u'\u27F2')
    undo_button.on_clicked(_undo)

    def _on_lasso_selection(vertices):
        # Ignore degenerate selections with fewer than 3 vertices.
        if len(vertices) < 3:
            return
        list_of_vertex_lists.append(vertices)
        polygon_object = _draw_polygon(ax, vertices, alpha=alpha)
        polygons_drawn.append(polygon_object)
        plt.draw()

    lasso = matplotlib.widgets.LassoSelector(ax, _on_lasso_selection)
    # Block until the user closes the window; selections accumulate above.
    plt.show(block=True)
    # Labels start at 1 so 0 means "background".
    labels = (_mask_from_vertices(vertices, image.shape[:2], i)
              for i, vertices in enumerate(list_of_vertex_lists, start=1))
    if return_all:
        return np.stack(labels)
    else:
        # np.maximum keeps the higher (i.e. later) label where polygons
        # overlap, so later selections win.
        return reduce(np.maximum, labels, np.broadcast_to(0, image.shape[:2]))
| {
"repo_name": "pskeshu/doodle",
"path": "doodle/manual_segment.py",
"copies": "1",
"size": "3240",
"license": "mit",
"hash": 7061240301488484000,
"line_mean": 29,
"line_max": 78,
"alpha_frac": 0.6361111111,
"autogenerated": false,
"ratio": 3.737024221453287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4873135332553287,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import numpy as np
import tensorflow as tf
# TODO: check the methods of _TensorLike
class TensorTrainBase(object):
  """An abstract class that represents a collection of Tensor Train cores.
  """

  def __init__(self, tt_cores):
    """Creates a `TensorTrainBase`."""
    pass

  def get_raw_shape(self):
    """Get tuple of `TensorShapes` representing the shapes of the underlying TT-tensor.

    Tuple contains one `TensorShape` for TT-tensor and 2 `TensorShapes` for
    TT-matrix

    Returns:
      A tuple of `TensorShape` objects.
    """
    return self._raw_shape

  def get_shape(self):
    """Get the `TensorShape` representing the shape of the dense tensor.

    Returns:
      A `TensorShape` object.
    """
    raw_shape = self.get_raw_shape()
    if self.is_tt_matrix():
      # Use python prod instead of np.prod to avoid overflows.
      prod_f = lambda arr: reduce(lambda x, y: x*y, arr)
      # TODO: as list is not available if shape is partly known.
      m = prod_f(raw_shape[0].as_list())
      n = prod_f(raw_shape[1].as_list())
      return tf.TensorShape((m, n))
    else:
      return raw_shape[0]

  @property
  def tt_cores(self):
    """A tuple of TT-cores."""
    return self._tt_cores

  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    # TODO: where is this created?
    return self.tt_cores[0].dtype

  @property
  def name(self):
    """The name of the TensorTrain.

    Returns:
      String, the scope in which the TT-cores are defined.
    """
    core_name = self.tt_cores[0].name
    idx = core_name.rfind('/')
    return core_name[:idx]

  @property
  def graph(self):
    """The `Graph` that contains the tt_cores tensors."""
    # TODO: check in init that the other cores are from the same graph.
    return self.tt_cores[0].graph

  def __str__(self):
    """A string describing the TensorTrain object, its TT-rank and shape."""
    # Bug fix: the previous code did `return NotImplementedError`, which
    # made str() fail with a confusing "__str__ returned non-string"
    # TypeError. Subclasses must override; until then, raise explicitly.
    raise NotImplementedError

  def ndims(self):
    """Get the number of dimensions of the underlying TT-tensor.

    Returns:
      A number.
    """
    return len(self.tt_cores)

  def get_tt_ranks(self):
    """Get the TT-ranks in an array of size `num_dims`+1.

    The first and the last TT-rank are guarantied to be 1.

    Returns:
      TensorShape of size `num_dims`+1.
    """
    return self._tt_ranks

  def is_tt_matrix(self):
    """Returns True if the TensorTrain object represents a TT-matrix."""
    return len(self.get_raw_shape()) == 2

  def is_variable(self):
    """True if the TensorTrain object is a variable (e.g. is trainable)."""
    return isinstance(self.tt_cores[0], tf.Variable)

  @property
  def op(self):
    """The `Operation` that evaluates all the cores."""
    return tf.group(*[c.op for c in self.tt_cores])

  def eval(self, feed_dict=None, session=None):
    """Evaluates this sparse tensor in a `Session`.

    Calling this method will execute all preceding operations that
    produce the inputs needed for the operation that produces this
    tensor.

    *N.B.* Before invoking `SparseTensor.eval()`, its graph must have been
    launched in a session, and either a default session must be
    available, or `session` must be specified explicitly.

    Args:
      feed_dict: A dictionary that maps `Tensor` objects to feed values.
        See [`Session.run()`](../../api_docs/python/client.md#Session.run) for a
        description of the valid feed values.
      session: (Optional.) The `Session` to be used to evaluate this sparse
        tensor. If none, the default session will be used.
    """
    # TODO: implement feed_dict
    if session is None:
      session = tf.get_default_session()
    session.run(self.tt_cores)

  def __add__(self, other):
    """Returns a TensorTrain corresponding to element-wise sum tt_a + tt_b.

    Supports broadcasting (e.g. you can add TensorTrainBatch and TensorTrain).
    Just calls t3f.add, see its documentation for details.
    """
    # TODO: ugly.
    # We can't import ops in the beginning since it creates cyclic dependencies.
    from t3f import ops
    return ops.add(self, other)

  def __sub__(self, other):
    """Returns a TensorTrain corresponding to element-wise difference tt_a - tt_b.

    Supports broadcasting (e.g. you can subtract TensorTrainBatch and
    TensorTrain).
    Just calls t3f.add(self, (-1) * other), see its documentation for details.
    """
    # TODO: ugly.
    # We can't import ops in the beginning since it creates cyclic dependencies.
    from t3f import ops
    return ops.add(self, ops.multiply(other, -1.))

  def __neg__(self):
    """Returns a TensorTrain corresponding to element-wise negative -tt_a.

    Just calls t3f.multiply(self, -1.), see its documentation for details.
    """
    # TODO: ugly.
    # We can't import ops in the beginning since it creates cyclic dependencies.
    from t3f import ops
    return ops.multiply(self, -1.)

  def __mul__(self, other):
    """Returns a TensorTrain corresponding to element-wise product tt_a * tt_b.

    Supports broadcasting (e.g. you can multiply TensorTrainBatch and
    TensorTrain).
    Just calls t3f.multiply, see its documentation for details.
    """
    # TODO: ugly.
    # We can't import ops in the beginning since it creates cyclic dependencies.
    from t3f import ops
    return ops.multiply(self, other)

  # To support 'TT * 4' as well as '4 * TT'.
  __rmul__ = __mul__
| {
"repo_name": "Bihaqo/t3f",
"path": "t3f/tensor_train_base.py",
"copies": "1",
"size": "5565",
"license": "mit",
"hash": -892522016806643000,
"line_mean": 30.0893854749,
"line_max": 87,
"alpha_frac": 0.6603773585,
"autogenerated": false,
"ratio": 3.7986348122866893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4959012170786689,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import numpy as np
import time
from scarab import EncryptedArray, EncryptedBit, \
PrivateKey, PublicKey, generate_pair
# Small binary combiners intended for use with functools.reduce.
# NOTE(review): _ADD and _MUL are not referenced in this module's visible
# code -- confirm before removing.
_ADD = lambda a, b: a + b
_MUL = lambda a, b: a * b
_AND = lambda a, b: a & b
_XOR = lambda a, b: a ^ b

# Homomorphic key pair; populated at runtime by generate_pair() in __main__.
pk, sk = None, None
def binary(num, size=32):
    """Binary representation of a non-negative integer as an array of 0/1.

    >>> binary(10, 8)
    array([0, 0, 0, 0, 1, 0, 1, 0])

    :param num: non-negative integer to convert
    :param size: output length in bits (pads with leading zeros); must be
        at least num.bit_length()
    :return: numpy int array of length `size` with the binary digits of num
    """
    # Bug fix: np.int (an alias for the builtin int) was deprecated in
    # NumPy 1.20 and removed in 1.24; use the builtin directly.
    ret = np.zeros(size, dtype=int)
    n = np.array([int(x) for x in bin(num)[2:]])
    ret[ret.size - n.size:] = n
    return ret
def gamma(cq, ci, co):
    """
    Calculates the value of the gamma function, as described in PDF (paragraph 3.1.2)

    For plaintext bits, a ^ b ^ 1 is 1 exactly when a == b, so AND-ing the
    per-bit terms yields (an encryption of) 1 iff the query equals the index.

    :param cq: cipher query
    :param ci: cipher index
    :param co: cipher one
    :return: the value of the gamma function
    """
    # Inline the AND instead of going through the module-level _AND lambda
    # alias; a generator avoids materializing the intermediate term list.
    return reduce(lambda x, y: x & y, (a ^ b ^ co for a, b in zip(cq, ci)))
def R(gammas, column, public_key):
    """
    Calculates the value of R() function, as described in PDF (paragraph 3.1.3)

    XORs together the gammas selected by the 1-bits of `column`, seeded
    with an encryption of zero (so an empty selection yields encrypted 0).

    :param gammas: gammas
    :param column: column
    :param public_key: public key
    :return: the value of the R function
    """
    # Inline the XOR instead of going through the module-level _XOR lambda
    # alias; name the selection for readability.
    selected = gammas[np.where(column == 1)]
    return reduce(lambda x, y: x ^ y, selected, public_key.encrypt(0))
# #######
# Demo database: RECORD_COUNT rows of RECORD_SIZE bits; row x contains
# min(x, RECORD_SIZE) ones followed by zero padding.
RECORD_SIZE = 3
RECORD_COUNT = 5
database = np.array([[1] * min(RECORD_SIZE, x) + [0] * max(0, RECORD_SIZE - x) for x in range(RECORD_COUNT)])
########
def server_generate_response(cipher_query, pk):
    """Evaluate the PIR query homomorphically over the demo database.

    Builds one gamma (encrypted equality indicator) per database row, then
    XOR-selects each column through R(), yielding the encrypted bits of the
    requested record without learning which record was asked for.
    """
    cipher_indices = [pk.encrypt(binary(row)) for row in range(RECORD_COUNT)]
    cipher_one = pk.encrypt(1)
    gammas = np.array([gamma(cipher_query, ci, cipher_one)
                       for ci in cipher_indices])
    return np.array([R(gammas, database[:, col], pk)
                     for col in range(RECORD_SIZE)])
def serialize_and_deserialize(cipher_query, public_key):
    # Round-trip the query and public key through their string forms, run
    # the server-side evaluation on the deserialized copies, then
    # round-trip the encrypted response bits as well. Presumably this
    # simulates shipping the data over a network -- TODO confirm.
    serialized_query = str(cipher_query)
    serialized_public_key = str(public_key)
    deserialized_public_key = PublicKey(serialized_public_key)
    deserialized_query = EncryptedArray(len(cipher_query), deserialized_public_key, serialized_query)
    enc_data = server_generate_response(deserialized_query, deserialized_public_key)
    s_bits = [str(bit) for bit in enc_data]
    deserialized_enc_data = [EncryptedBit(public_key, bit) for bit in s_bits]
    return deserialized_enc_data
def client_perform_query(i):
    """Privately fetch record `i` from the demo database and return its
    decrypted bits. Relies on the module-level key pair (pk, sk)."""
    response = serialize_and_deserialize(pk.encrypt(binary(i)), pk)
    result = [sk.decrypt(r) for r in response]
    return result
########
########
########
if __name__ == '__main__':
    # Demo: generate a key pair and privately fetch record 1, timing both.
    print('Database size:', RECORD_COUNT, 'x', RECORD_SIZE)
    # Bug fix: time.clock() was removed in Python 3.8; perf_counter() is
    # the documented replacement for measuring elapsed intervals.
    a = time.perf_counter()
    pk, sk = generate_pair()
    b = time.perf_counter()
    print('keys were generated in', (b - a), 'seconds')
    a = time.perf_counter()
    row = client_perform_query(1)
    b = time.perf_counter()
    print('response generated in', (b - a), 'seconds')
    print(row)
| {
"repo_name": "blindstore/blindstore-old-scarab",
"path": "pir.py",
"copies": "1",
"size": "3021",
"license": "mit",
"hash": 6609702121587240000,
"line_mean": 28.3300970874,
"line_max": 109,
"alpha_frac": 0.6338960609,
"autogenerated": false,
"ratio": 3.2310160427807486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43649121036807487,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import numpy as np
from tools.utils import flatten
import acpc_python_client as acpc
def get_utility(hole_cards, board_cards, players_folded, pot_commitment):
    """Compute per-player chip utilities at showdown.

    Every player loses their pot commitment; the whole pot is then split
    evenly between the winning (non-folded) hands.
    """
    board = flatten(board_cards)
    complete_hands = []
    for i, folded in enumerate(players_folded):
        complete_hands.append(None if folded else flatten(hole_cards[i], board))
    winners = get_winners(complete_hands)
    share = np.sum(pot_commitment) / len(winners)
    utilities = np.array(pot_commitment) * -1
    for winner in winners:
        utilities[winner] += share
    return utilities
def get_winners(hands):
    """Evaluate hands of players and determine winners.

    !!! This function is currently only capable of evaluating hands that contain up to 5 cards. !!!

    Args:
        hands (list(list(int))): List which contains player's hands. Each player's hand is a list of integers
            that represent player's cards. Board cards must be included in each player's hand.
            Folded players are represented by a falsy entry (e.g. None) and
            receive the lowest possible score.

    Returns:
        list(int): Indexes of winners. The pot should be split evenly between all winners.
    """
    scores = [(i, _score(hand) if hand else ((0,), (0,)))
              for i, hand in enumerate(hands)]
    sorted_scores = sorted(scores, key=lambda x: x[1])
    winning_score = sorted_scores[-1][1]
    winner_count = 1
    # Walk down from the second-best entry, counting ties with the winner.
    # Bug fix: the previous loop indexed sorted_scores[-i], which for 3+
    # players re-inspected the winning entry itself and over-counted the
    # winners (a unique best hand was reported as a two-way split).
    for i in range(len(hands) - 2, -1, -1):
        if sorted_scores[i][1] == winning_score:
            winner_count += 1
        else:
            break
    return [score[0] for score in sorted_scores[len(hands) - winner_count:]]
def _parse_hand(hand):
    """Convert raw card ints into a list of (rank, suit) tuples.

    Bug fix: previously returned a lazy `map`, which is a one-shot
    iterator in Python 3; _score_hand_combination iterates the parsed hand
    several times (nested reduce plus a suit set), so the map was drained
    after the first pass and scoring was silently corrupted. Returning a
    list is backward compatible for all iteration-based callers.
    """
    return [(acpc.game_utils.card_rank(card), acpc.game_utils.card_suit(card))
            for card in hand]
def _score(hand):
    """Score a hand of raw card ints (board cards included).

    Raises AttributeError for hands of more than 5 cards, which this
    evaluator does not support yet.
    """
    if len(hand) <= 5:
        return _score_hand_combination(_parse_hand(hand))
    else:
        # TODO create multiple 5 card combinations from longer hand to allow Texas Hold'em hand evaluation
        raise AttributeError(
            'Only games with up to 5 cards in hand are supported')
def _score_hand_combination(hand):
    """Score a parsed hand of (rank, suit) tuples.

    Returns a (score, ranks) pair ordered so plain tuple comparison ranks
    hands correctly: `score` encodes the hand category via the descending
    multiset of rank counts (with special values for straights/flushes),
    and `ranks` breaks ties.
    """
    # Materialize first: the rank-count reduce below re-iterates `hand`
    # inside the comprehension, which would silently drain a one-shot
    # iterator (such as a map object) after its first pass.
    hand = list(hand)
    rank_counts = {r: reduce(lambda count, card: count + (card[0] == r), hand, 0)
                   for r, _ in hand}.items()
    score, ranks = zip(*sorted((cnt, rank) for rank, cnt in rank_counts)[::-1])
    if len(score) == 5:
        if ranks[0:2] == (12, 3):  # adjust if 5 high straight (the wheel)
            ranks = (3, 2, 1, 0, -1)
        straight = ranks[0] - ranks[4] == 4
        flush = len({suit for _, suit in hand}) == 1
        # Promote the category: no-pair/straight/flush/straight-flush.
        score = ([(1,), (3, 1, 1, 1)], [(3, 1, 1, 2), (5,)])[flush][straight]
    return score, ranks
| {
"repo_name": "JakubPetriska/poker-cfr",
"path": "tools/hand_evaluation.py",
"copies": "1",
"size": "2731",
"license": "mit",
"hash": 5624747605925258000,
"line_mean": 36.4109589041,
"line_max": 115,
"alpha_frac": 0.6195532772,
"autogenerated": false,
"ratio": 3.396766169154229,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4516319446354229,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import numpy as np
# Qubit representation on a Ket or Bra state. Allows to create only
# basis qubits |0> or |1>. By performing operations defined operations
# it's possible to get other qubits or qubit registers.
class Qubit:
    """A qubit (or tensor-product register) in the computational basis.

    Only the basis states |0> and |1> can be constructed directly; other
    states and multi-qubit registers are obtained through the arithmetic
    operations defined below.
    """

    KET = True   # column-vector (|n>) orientation
    BRA = False  # row-vector (<n|) orientation

    def __init__(self, n, state=KET):
        """Create the basis qubit |n> (state=KET) or <n| (state=BRA)."""
        if state != Qubit.KET and state != Qubit.BRA:
            raise ValueError("State must be either KET or BRA")
        self.vector = np.matrix([[1], [0]] if n == 0 else [[0], [1]])
        self.state = state

    def __new(self, vector, state=KET):
        # Internal helper: wrap an arbitrary vector in a Qubit instance.
        q = Qubit(1, state)
        q.vector = vector
        return q

    def conjugate(self):
        """Return the conjugate transpose (ket <-> bra)."""
        return self.__new(np.transpose(np.conjugate(self.vector)),
                          not self.state)

    def tensor(self, other):
        """Kronecker (tensor) product with another qubit or register."""
        return self.__new(np.kron(self.vector, other.vector), self.state)

    def apply_gate(self, gate):
        """Apply a gate (any object exposing a .matrix) to a ket."""
        if self.state != Qubit.KET:
            raise ValueError("State must be a Ket")
        return self.__new(gate.matrix * self.vector)

    @staticmethod
    def to_register(qubits):
        """Tensor a list of qubits together into a single register.

        Bug fix: declared @staticmethod (the original definition lacked
        it). The intended call form Qubit.to_register([...]) is unchanged,
        and calls through an instance no longer mis-bind the instance as
        the `qubits` argument.
        """
        return reduce(lambda acc, q: acc.tensor(q), qubits)

    def inner(self, other):
        """Inner product <self|other> of two kets."""
        # NOTE(review): the `and` lets a single bra operand through
        # unchecked; the guard probably intends `or` -- confirm before
        # tightening (kept as-is to preserve behavior).
        if self.state != Qubit.KET and other.state != Qubit.KET:
            raise ValueError("Both qubits must be kets")
        return (self.conjugate().vector * other.vector)[0, 0]

    def outer(self, other):
        """Outer product |self><other| of two kets."""
        # NOTE(review): same `and`-vs-`or` question as in inner().
        if self.state != Qubit.KET and other.state != Qubit.KET:
            raise ValueError("Both qubits must be kets")
        return self.vector * other.conjugate().vector

    def __add__(self, other):
        """Element-wise sum of two qubits in the same state."""
        return self.__operation(np.add, other)

    def __sub__(self, other):
        """Element-wise difference of two qubits in the same state."""
        return self.__operation(np.subtract, other)

    def __neg__(self):
        """Negation of a qubit."""
        return self.__new(-self.vector, self.state)

    def __mul__(self, other):
        """Scalar multiplication (int/float) or tensor product (Qubit)."""
        if isinstance(other, Qubit):
            if self.state != Qubit.KET or other.state != Qubit.KET:
                raise ValueError("Both qubits have to be kets")
            return self.tensor(other)
        elif isinstance(other, int) or isinstance(other, float):
            return self.__new(other * self.vector, state=Qubit.KET)
        else:
            raise ValueError("* Qubit undefined for " + str(type(other)))

    def __rmul__(self, other):
        return self.__mul__(other)

    def __operation(self, operation, other):
        # Apply a numpy binary operation to two qubits in the same state.
        if self.state != other.state:
            raise ValueError("Both qubits must be on the same state")
        return self.__new(operation(self.vector, other.vector), self.state)

    def __repr__(self):
        # Kets are transposed for display so they print on a single row.
        v = self.vector if self.state == Qubit.BRA else np.transpose(self.vector)
        return repr(v)
# Convenience computational-basis kets |0> and |1>.
q0 = Qubit(0)
q1 = Qubit(1)
| {
"repo_name": "miguelfrde/pyquantum",
"path": "pyquantum/qubit.py",
"copies": "1",
"size": "3624",
"license": "mit",
"hash": -201153547473917200,
"line_mean": 35.9795918367,
"line_max": 81,
"alpha_frac": 0.6261037528,
"autogenerated": false,
"ratio": 3.7093142272262027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48354179800262026,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import numpy
from pyscf import gto, scf, mcscf
'''
Compare two CASSCF active space.
It's important to compare multi-reference calculations based on the comparable
reference states. Here we compute the SVD eig and the determinant value of
the CAS space overlap to measure how close two CASSCF results are. If two
CASSCF are close, the SVD eig should be close 1
'''
# First reference: CASSCF(7e... 7 orbitals, 4 electrons) for O2 in cc-pVTZ.
mol1 = gto.M(atom='O 0 0 0; O 0 0 1', basis='ccpvtz', spin=2, symmetry=1)
mf = scf.RHF(mol1)
mf.kernel()
mc = mcscf.CASSCF(mf, 7, 4)
mc.kernel()
# Split the converged MO coefficients into core and active (CAS) blocks.
mo1core = mc.mo_coeff[:,:mc.ncore]
mo1cas = mc.mo_coeff[:,mc.ncore:mc.ncore+mc.ncas]
# Second calculation: smaller cc-pVDZ basis, active electrons given as
# an (alpha, beta) tuple this time.
mol2 = gto.M(atom='O 0 0 0; O 0 0 1', basis='ccpvdz', spin=2, symmetry=1)
mf = scf.RHF(mol2)
mf.kernel()
mc = mcscf.CASSCF(mf, 7, (2,2))
mc.kernel()
mo2core = mc.mo_coeff[:,:mc.ncore]
mo2cas = mc.mo_coeff[:,mc.ncore:mc.ncore+mc.ncas]
# AO overlap matrix between the two molecules (two different basis sets).
s = gto.intor_cross('cint1e_ovlp_sph', mol1, mol2)
# Project into the MO bases: S_core = C1core^T . S . C2core, same for CAS.
score = reduce(numpy.dot, (mo1core.T, s, mo2core))
scas = reduce(numpy.dot, (mo1cas.T, s, mo2cas))
numpy.set_printoptions(4)
# SVD eigenvalues (and |det|) close to 1 indicate the two spaces coincide.
print('<core1|core2> SVD eig = %s' % numpy.linalg.svd(score)[1])
print('det(<core1|core2>) = %s' % numpy.linalg.det(score))
print('<CAS1|CAS2> SVD eig = %s' % numpy.linalg.svd(scas)[1])
print('det(<CAS1|CAS2>) = %s' % numpy.linalg.det(scas))
| {
"repo_name": "sunqm/pyscf",
"path": "examples/mcscf/42-compare_cas_space.py",
"copies": "1",
"size": "1288",
"license": "apache-2.0",
"hash": -1202593119262572000,
"line_mean": 33.8108108108,
"line_max": 78,
"alpha_frac": 0.6886645963,
"autogenerated": false,
"ratio": 2.430188679245283,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3618853275545283,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import operator as op
import numpy as np
def interp1(x, xp, fp):
    """
    Piecewise-linear interpolation of one or more sampled functions.

    :param x: x-coordinates at which to evaluate the interpolant (a sequence)
    :param xp: x-coordinates of the data points
    :param fp: y-coordinates of the data points; several functions may be
        interpolated at once, as long as they lie along the last axis of
        the array.
    :return: the interpolated values, shaped like ``fp`` with its last axis
        replaced by ``len(x)``
    """
    # np.interp only accepts 1-D sequences, so flatten multi-dimensional input
    if isinstance(x, np.ndarray) and x.ndim > 1:
        x = x.reshape(-1)
    if isinstance(xp, np.ndarray) and xp.ndim > 1:
        xp = xp.reshape(-1)
    result = np.zeros((*fp.shape[:-1], len(x)))
    # interpolate each leading-axis "row" of fp independently
    for row_index in np.ndindex(fp.shape[:-1]):
        result[row_index] = np.interp(x, xp, fp[row_index])
    return result
def is_between(x, a, b, inclusive=(True, True)):
    """Return a boolean mask of the elements of ``x`` lying between ``a``
    and ``b``; each end of ``inclusive`` selects <= versus < for that bound."""
    lower = np.less_equal(a, x) if inclusive[0] else np.less(a, x)
    upper = np.less_equal(x, b) if inclusive[1] else np.less(x, b)
    return np.logical_and(lower, upper)
def product(sequence):
    """
    Computes the product of all elements in the sequence.

    An empty sequence yields 1, matching the multiplicative identity.

    :param sequence: an iterable containing things that can be multiplied
    :return: the product
    """
    result = 1
    for item in sequence:
        result = result * item
    return result
def to_matrix(array: np.ndarray):
    """Reshape ``array`` so that it is 2-dimensional.

    1-d input becomes a single row; 2-d input keeps its shape; higher-rank
    input collapses all leading axes into the row dimension.
    """
    if array.ndim == 1:
        return array.reshape(1, array.size)
    if array.ndim == 2:
        return array.reshape(array.shape)
    # collapse every leading axis into the row count (inlined product)
    rows = 1
    for extent in array.shape[:-1]:
        rows *= extent
    return array.reshape(rows, -1)
| {
"repo_name": "hoogamaphone/chromathicity",
"path": "chromathicity/math.py",
"copies": "1",
"size": "1762",
"license": "bsd-3-clause",
"hash": 2118405404469277400,
"line_mean": 29.9122807018,
"line_max": 78,
"alpha_frac": 0.6424517594,
"autogenerated": false,
"ratio": 3.5885947046843176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47310464640843175,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import operator
from django.contrib.auth.models import Group, Permission
from django.db.models import Q
from django.test import TestCase, override_settings
from django.urls import reverse
from django.utils.crypto import get_random_string
from accounts.models import User
from zentral.contrib.inventory.models import Taxonomy
from zentral.contrib.jamf.models import JamfInstance, TagConfig
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class JamfSetupViewsTestCase(TestCase):
    """View tests for the jamf instance and tag config setup pages.

    Each view is exercised three ways: anonymous (redirect to login),
    authenticated without permission (403) and authenticated with the
    required permission (expected template/content).
    """

    @classmethod
    def setUpTestData(cls):
        # user
        # NOTE: get_random_string() without an explicit length is deprecated
        # since Django 3.1 and removed in Django 4.0 — always pass the length.
        cls.user = User.objects.create_user("godzilla", "godzilla@zentral.io", get_random_string(12))
        cls.group = Group.objects.create(name=get_random_string(12))
        cls.user.groups.set([cls.group])

    # utility methods

    def _login_redirect(self, url):
        """Assert that an anonymous GET on url redirects to the login page."""
        response = self.client.get(url)
        self.assertRedirects(response, "{u}?next={n}".format(u=reverse("login"), n=url))

    def _login(self, *permissions):
        """Log the test user in with exactly the given 'app_label.codename' permissions."""
        if permissions:
            permission_filter = reduce(operator.or_, (
                Q(content_type__app_label=app_label, codename=codename)
                for app_label, codename in (
                    permission.split(".")
                    for permission in permissions
                )
            ))
            self.group.permissions.set(list(Permission.objects.filter(permission_filter)))
        else:
            self.group.permissions.clear()
        self.client.force_login(self.user)

    def _force_jamf_instance(self):
        """Create and return a jamf instance fixture."""
        return JamfInstance.objects.create(
            host="{}.example.com".format(get_random_string(12)),
            port=443,
            path="/JSSResource",
            user=get_random_string(12),
            password=get_random_string(12),
        )

    def _force_tag_config(self):
        """Create and return a tag config fixture attached to a fresh jamf instance."""
        jamf_instance = self._force_jamf_instance()
        t, _ = Taxonomy.objects.get_or_create(name=get_random_string(34))
        return TagConfig.objects.create(instance=jamf_instance,
                                        source="GROUP",
                                        taxonomy=t,
                                        regex=r"^YOLOFOMO: (.*)$",
                                        replacement=r"\1")

    # jamf instances

    def test_jamf_instances_redirect(self):
        self._login_redirect(reverse("jamf:jamf_instances"))

    def test_jamf_instances_permission_denied(self):
        self._login()
        response = self.client.get(reverse("jamf:jamf_instances"))
        self.assertEqual(response.status_code, 403)

    def test_jamf_instances_view(self):
        self._login("jamf.view_jamfinstance")
        response = self.client.get(reverse("jamf:jamf_instances"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "jamf/jamfinstance_list.html")
        self.assertContains(response, "0 jamf instances")

    # create jamf instance

    def test_create_jamf_instance_redirect(self):
        self._login_redirect(reverse("jamf:create_jamf_instance"))

    def test_create_jamf_instance_permission_denied(self):
        self._login()
        response = self.client.get(reverse("jamf:create_jamf_instance"))
        self.assertEqual(response.status_code, 403)

    def test_create_jamf_instance_get(self):
        self._login("jamf.add_jamfinstance")
        response = self.client.get(reverse("jamf:create_jamf_instance"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "jamf/jamfinstance_form.html")
        self.assertContains(response, "Create jamf instance")

    def test_create_jamf_instance_post(self):
        self._login("jamf.add_jamfinstance", "jamf.view_jamfinstance", "jamf.view_tagconfig")
        response = self.client.post(reverse("jamf:create_jamf_instance"),
                                    {"host": "yo.example.com",
                                     "port": 8443,
                                     "path": "/JSSResource",
                                     "user": "godzilla",
                                     "password": "pwd",
                                     "inventory_apps_shard": 86},
                                    follow=True)
        self.assertEqual(response.template_name, ["jamf/jamfinstance_detail.html"])
        self.assertContains(response, "0 Tag configs")
        jamf_instance = response.context["object"]
        self.assertEqual(jamf_instance.version, 0)
        self.assertContains(response, "https://yo.example.com:8443/JSSResource")
        self.assertContains(response, "godzilla")
        # the password must never be echoed back in the page
        self.assertNotContains(response, "pwd")

    # delete jamf instance

    def test_delete_jamf_instance_redirect(self):
        jamf_instance = self._force_jamf_instance()
        self._login_redirect(reverse("jamf:delete_jamf_instance", args=(jamf_instance.pk,)))

    def test_delete_jamf_instance_permission_denied(self):
        jamf_instance = self._force_jamf_instance()
        self._login()
        response = self.client.get(reverse("jamf:delete_jamf_instance", args=(jamf_instance.pk,)))
        self.assertEqual(response.status_code, 403)

    def test_delete_jamf_instance_get(self):
        jamf_instance = self._force_jamf_instance()
        self._login("jamf.delete_jamfinstance")
        response = self.client.get(reverse("jamf:delete_jamf_instance", args=(jamf_instance.pk,)))
        self.assertContains(response, "Delete jamf instance")

    # TODO: def test_delete_jamf_instance_post(self):
    # PB: API calls!

    # setup jamf instance

    def test_setup_jamf_instance_redirect(self):
        jamf_instance = self._force_jamf_instance()
        self._login_redirect(reverse("jamf:setup_jamf_instance", args=(jamf_instance.pk,)))

    def test_setup_jamf_instance_permission_denied(self):
        jamf_instance = self._force_jamf_instance()
        self._login()
        response = self.client.get(reverse("jamf:setup_jamf_instance", args=(jamf_instance.pk,)))
        self.assertEqual(response.status_code, 403)

    # update jamf instance

    def test_update_jamf_instance_redirect(self):
        jamf_instance = self._force_jamf_instance()
        self._login_redirect(reverse("jamf:update_jamf_instance", args=(jamf_instance.pk,)))

    def test_update_jamf_instance_permission_denied(self):
        jamf_instance = self._force_jamf_instance()
        self._login()
        response = self.client.get(reverse("jamf:update_jamf_instance", args=(jamf_instance.pk,)))
        self.assertEqual(response.status_code, 403)

    def test_update_jamf_instance_get(self):
        jamf_instance = self._force_jamf_instance()
        self._login("jamf.change_jamfinstance")
        response = self.client.get(reverse("jamf:update_jamf_instance", args=(jamf_instance.pk,)))
        self.assertContains(response, "Update jamf instance")

    def test_update_jamf_instance_post(self):
        jamf_instance = self._force_jamf_instance()
        self._login("jamf.change_jamfinstance", "jamf.view_jamfinstance", "jamf.view_tagconfig")
        response = self.client.post(reverse("jamf:update_jamf_instance", args=(jamf_instance.pk,)),
                                    {"host": "yo.example2.com",
                                     "port": 8443,
                                     "path": "/JSSResource",
                                     "user": "godzilla",
                                     "password": "pwd",
                                     "inventory_apps_shard": 12},
                                    follow=True)
        self.assertTemplateUsed(response, "jamf/jamfinstance_detail.html")
        self.assertContains(response, "0 Tag configs")
        self.assertContains(response, "https://yo.example2.com:8443/JSSResource")
        jamf_instance2 = response.context["object"]
        self.assertEqual(jamf_instance, jamf_instance2)
        # a successful update bumps the instance version
        self.assertEqual(jamf_instance2.version, 1)
        self.assertEqual(jamf_instance2.inventory_apps_shard, 12)

    # create tag config

    def test_create_tag_config_redirect(self):
        jamf_instance = self._force_jamf_instance()
        self._login_redirect(reverse("jamf:create_tag_config", args=(jamf_instance.pk,)))

    def test_create_tag_config_permission_denied(self):
        jamf_instance = self._force_jamf_instance()
        self._login()
        response = self.client.get(reverse("jamf:create_tag_config", args=(jamf_instance.pk,)))
        self.assertEqual(response.status_code, 403)

    def test_create_tag_config_permission_get(self):
        jamf_instance = self._force_jamf_instance()
        self._login("jamf.add_tagconfig")
        response = self.client.get(reverse("jamf:create_tag_config", args=(jamf_instance.pk,)))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "jamf/tagconfig_form.html")

    def test_create_tag_config(self):
        jamf_instance = self._force_jamf_instance()
        t, _ = Taxonomy.objects.get_or_create(name=get_random_string(34))
        regex = r"^YOLOFOMO: (.*)$"
        self._login("jamf.add_tagconfig", "jamf.view_jamfinstance", "jamf.view_tagconfig")
        response = self.client.post(reverse("jamf:create_tag_config", args=(jamf_instance.pk,)),
                                    {"source": "GROUP",
                                     "taxonomy": t.pk,
                                     "regex": regex,
                                     "replacement": r"\1"},
                                    follow=True)
        self.assertTemplateUsed(response, "jamf/jamfinstance_detail.html")
        self.assertContains(response, "1 Tag config")
        self.assertContains(response, t.name)

    def test_create_tag_config_error(self):
        jamf_instance = self._force_jamf_instance()
        t, _ = Taxonomy.objects.get_or_create(name=get_random_string(34))
        # unbalanced parenthesis: must be rejected by form validation
        regex = r"^YOLOFOMO: ("
        self._login("jamf.add_tagconfig")
        response = self.client.post(reverse("jamf:create_tag_config", args=(jamf_instance.pk,)),
                                    {"source": "GROUP",
                                     "taxonomy": t.pk,
                                     "regex": regex,
                                     "replacement": r"\1"},
                                    follow=True)
        self.assertTemplateUsed(response, "jamf/tagconfig_form.html")
        self.assertContains(response, "Not a valid regex")

    # update tag config

    def test_update_tag_config_redirect(self):
        tag_config = self._force_tag_config()
        self._login_redirect(reverse("jamf:create_tag_config", args=(tag_config.instance.pk,)))

    def test_update_tag_config_permission_denied(self):
        tag_config = self._force_tag_config()
        self._login()
        response = self.client.get(reverse("jamf:create_tag_config", args=(tag_config.instance.pk,)))
        self.assertEqual(response.status_code, 403)

    def test_update_tag_config(self):
        tag_config = self._force_tag_config()
        jamf_instance = tag_config.instance
        self._login("jamf.change_tagconfig", "jamf.view_jamfinstance", "jamf.view_tagconfig")
        response = self.client.post(reverse("jamf:update_tag_config", args=(jamf_instance.pk, tag_config.pk)),
                                    {"source": "GROUP",
                                     "taxonomy": tag_config.taxonomy.pk,
                                     "regex": tag_config.regex,
                                     "replacement": r"haha: \1"},
                                    follow=True)
        self.assertTemplateUsed(response, "jamf/jamfinstance_detail.html")
        self.assertContains(response, "1 Tag config")
        self.assertContains(response, "haha")

    # delete tag config

    def test_delete_tag_config_redirect(self):
        tag_config = self._force_tag_config()
        jamf_instance = tag_config.instance
        self._login_redirect(reverse("jamf:delete_tag_config", args=(jamf_instance.pk, tag_config.pk)))

    def test_delete_tag_config_permission_denied(self):
        tag_config = self._force_tag_config()
        jamf_instance = tag_config.instance
        self._login()
        response = self.client.get(reverse("jamf:delete_tag_config", args=(jamf_instance.pk, tag_config.pk)))
        self.assertEqual(response.status_code, 403)

    def test_delete_tag_config(self):
        tag_config = self._force_tag_config()
        jamf_instance = tag_config.instance
        self._login("jamf.delete_tagconfig", "jamf.view_jamfinstance", "jamf.view_tagconfig")
        response = self.client.post(reverse("jamf:delete_tag_config", args=(jamf_instance.pk, tag_config.pk)),
                                    follow=True)
        self.assertTemplateUsed(response, "jamf/jamfinstance_detail.html")
        self.assertContains(response, "0 Tag configs")
| {
"repo_name": "zentralopensource/zentral",
"path": "tests/jamf/test_jamf_setup_views.py",
"copies": "1",
"size": "12926",
"license": "apache-2.0",
"hash": 6094893842579865000,
"line_mean": 45.6642599278,
"line_max": 110,
"alpha_frac": 0.6045180257,
"autogenerated": false,
"ratio": 3.8619659396474453,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4966483965347445,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import operator
from django.contrib.auth.models import Group, Permission
from django.db.models import Q
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.test import TestCase
from accounts.models import User
from zentral.contrib.inventory.models import MetaBusinessUnit
from zentral.contrib.monolith.models import Manifest
class MonolithSetupViewsTestCase(TestCase):
    """View tests for the monolith setup pages (pkg infos, PPDs, catalogs,
    conditions, sub manifests and manifests).

    Each view is exercised anonymous (redirect to login), without
    permission (403) and with the required permission.
    """

    @classmethod
    def setUpTestData(cls):
        # user
        # NOTE: get_random_string() without an explicit length is deprecated
        # since Django 3.1 and removed in Django 4.0 — always pass the length.
        cls.user = User.objects.create_user("godzilla", "godzilla@zentral.io", get_random_string(12))
        cls.group = Group.objects.create(name=get_random_string(12))
        cls.user.groups.set([cls.group])
        # mbu
        cls.mbu = MetaBusinessUnit.objects.create(name=get_random_string(64))
        cls.mbu.create_enrollment_business_unit()
        # manifest
        cls.manifest = Manifest.objects.create(meta_business_unit=cls.mbu, name=get_random_string(12))

    # utility methods

    def _login_redirect(self, url):
        """Assert that an anonymous GET on url redirects to the login page."""
        response = self.client.get(url)
        self.assertRedirects(response, "{u}?next={n}".format(u=reverse("login"), n=url))

    def _login(self, *permissions):
        """Log the test user in with exactly the given 'app_label.codename' permissions."""
        if permissions:
            permission_filter = reduce(operator.or_, (
                Q(content_type__app_label=app_label, codename=codename)
                for app_label, codename in (
                    permission.split(".")
                    for permission in permissions
                )
            ))
            self.group.permissions.set(list(Permission.objects.filter(permission_filter)))
        else:
            self.group.permissions.clear()
        self.client.force_login(self.user)

    # pkg infos

    def test_pkg_infos_login_redirect(self):
        self._login_redirect(reverse("monolith:pkg_infos"))

    def test_pkg_infos_permission_denied(self):
        self._login()
        response = self.client.get(reverse("monolith:pkg_infos"))
        self.assertEqual(response.status_code, 403)

    def test_pkg_infos(self):
        self._login("monolith.view_pkginfo")
        response = self.client.get(reverse("monolith:pkg_infos"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "monolith/pkg_info_list.html")

    # PPDs

    def test_ppds_login_redirect(self):
        self._login_redirect(reverse("monolith:ppds"))

    def test_ppds_permission_denied(self):
        self._login()
        response = self.client.get(reverse("monolith:ppds"))
        self.assertEqual(response.status_code, 403)

    def test_ppds(self):
        self._login("monolith.view_printerppd")
        response = self.client.get(reverse("monolith:ppds"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "monolith/printerppd_list.html")

    # catalogs

    def test_catalogs_login_redirect(self):
        self._login_redirect(reverse("monolith:catalogs"))

    def test_catalogs_permission_denied(self):
        self._login()
        response = self.client.get(reverse("monolith:catalogs"))
        self.assertEqual(response.status_code, 403)

    def test_catalogs(self):
        self._login("monolith.view_catalog")
        response = self.client.get(reverse("monolith:catalogs"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "monolith/catalog_list.html")

    # conditions

    def test_conditions_login_redirect(self):
        self._login_redirect(reverse("monolith:conditions"))

    def test_conditions_permission_denied(self):
        self._login()
        response = self.client.get(reverse("monolith:conditions"))
        self.assertEqual(response.status_code, 403)

    def test_conditions(self):
        self._login("monolith.view_condition")
        response = self.client.get(reverse("monolith:conditions"))
        self.assertEqual(response.status_code, 200)

    # sub manifests

    def test_sub_manifests_login_redirect(self):
        self._login_redirect(reverse("monolith:sub_manifests"))

    def test_sub_manifests_permission_denied(self):
        self._login()
        response = self.client.get(reverse("monolith:sub_manifests"))
        self.assertEqual(response.status_code, 403)

    def test_sub_manifests(self):
        self._login("monolith.view_submanifest")
        response = self.client.get(reverse("monolith:sub_manifests"))
        self.assertEqual(response.status_code, 200)

    # create submanifest

    def test_create_submanifest_redirect(self):
        self._login_redirect(reverse("monolith:create_sub_manifest"))

    def test_create_submanifest_permission_denied(self):
        self._login()
        response = self.client.get(reverse("monolith:create_sub_manifest"))
        self.assertEqual(response.status_code, 403)

    def test_create_submanifest_get(self):
        self._login("monolith.add_submanifest")
        response = self.client.get(reverse("monolith:create_sub_manifest"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "monolith/edit_sub_manifest.html")

    def test_create_submanifest_post(self):
        self._login("monolith.add_submanifest", "monolith.view_submanifest")
        name = get_random_string(12)
        response = self.client.post(reverse("monolith:create_sub_manifest"),
                                    {"name": name},
                                    follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "monolith/sub_manifest.html")
        self.assertEqual(response.context["object"].name, name)

    # manifests

    def test_manifests_login_redirect(self):
        self._login_redirect(reverse("monolith:manifests"))

    def test_manifests_permission_denied(self):
        self._login()
        response = self.client.get(reverse("monolith:manifests"))
        self.assertEqual(response.status_code, 403)

    def test_manifests(self):
        self._login("monolith.view_manifest")
        response = self.client.get(reverse("monolith:manifests"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "monolith/manifest_list.html")
        self.assertEqual(response.context["object_list"][0], self.manifest)

    # manifest

    def test_manifest_login_redirect(self):
        self._login_redirect(reverse("monolith:manifest", args=(self.manifest.pk,)))

    def test_manifest_permission_denied(self):
        self._login()
        response = self.client.get(reverse("monolith:manifest", args=(self.manifest.pk,)))
        self.assertEqual(response.status_code, 403)

    def test_manifest(self):
        self._login("monolith.view_manifest")
        response = self.client.get(reverse("monolith:manifest", args=(self.manifest.pk,)))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "monolith/manifest.html")
        self.assertEqual(response.context["object"], self.manifest)
| {
"repo_name": "zentralopensource/zentral",
"path": "tests/monolith/test_setup_views.py",
"copies": "1",
"size": "7005",
"license": "apache-2.0",
"hash": 2761206418430039000,
"line_mean": 37.489010989,
"line_max": 100,
"alpha_frac": 0.6663811563,
"autogenerated": false,
"ratio": 3.825778263244129,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4992159419544129,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import operator
from urllib.parse import urljoin
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.cache import cache
from django.db import models
from django.db.models.aggregates import Sum
from django.db.models.functions import Coalesce
from django.utils import timezone
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
try:
    from django_elasticsearch_dsl.registries import registry as elasticsearch_registry
except ImportError:
    # Elasticsearch support is optional: fall back to a stub registry whose
    # get_documents() always returns an empty list.
    elasticsearch_registry = type('DocumentRegistry', (), {'get_documents': lambda *args: []})()
from polymorphic.managers import PolymorphicManager
from polymorphic.models import PolymorphicModel
from shop import deferred
from shop.conf import app_settings
from shop.exceptions import ProductNotAvailable
class Availability:
    """
    Contains the currently available quantity for a given product and period.
    """
    def __init__(self, **kwargs):
        """
        :param earliest:
            Point in time from when on this product will be available.
        :param latest:
            Point in time until this product will be available.
        :param quantity:
            Number of available items. The type of this value is the same as the type of
            ``quantity`` in :class:`shop.models.cart.CartItemModel`.
        :param sell_short:
            If ``True``, sell the product even though it's not in stock. It then will be
            shipped at the point in time specified by ``earliest``.
        :param limited_offer:
            If ``True``, sell the product until the point in time specified by ``latest``.
            After that period, the product will not be available anymore.
        """
        tzinfo = timezone.get_current_timezone()
        # default to the widest possible availability window
        self.earliest = kwargs.get('earliest', timezone.datetime.min.replace(tzinfo=tzinfo))
        self.latest = kwargs.get('latest', timezone.datetime.max.replace(tzinfo=tzinfo))
        # never report more than the configured maximum purchase quantity
        self.quantity = min(kwargs.get('quantity', app_settings.MAX_PURCHASE_QUANTITY),
                            app_settings.MAX_PURCHASE_QUANTITY)
        self.sell_short = bool(kwargs.get('sell_short', False))
        self.limited_offer = bool(kwargs.get('limited_offer', False))
        self.inventory = bool(kwargs.get('inventory', None))
class AvailableProductMixin:
    """
    Add this mixin class to the product models declaration, wanting to keep track on the
    current amount of products in stock. In comparison to
    :class:`shop.models.product.ReserveProductMixin`, this mixin does not reserve items in pending
    carts, with the risk for overselling. It thus is suited for products kept in the cart
    for a long period.

    The product class must implement a field named ``quantity`` accepting numerical values.
    """
    def get_availability(self, request, **kwargs):
        """
        Returns the current available quantity for this product.

        If other customers have pending carts containing this same product, the quantity
        is not adjusted. This may result in a situation, where someone adds a product
        to the cart, but then is unable to purchase, because someone else bought it in the
        meantime.
        """
        return Availability(quantity=self.quantity)

    def deduct_from_stock(self, quantity, **kwargs):
        # refuse to sell more items than are currently in stock
        if quantity > self.quantity:
            raise ProductNotAvailable(self)
        self.quantity -= quantity
        self.save(update_fields=['quantity'])

    def managed_availability(self):
        # quantity is tracked by this mixin, so availability is "managed"
        return True

    @classmethod
    def check(cls, **kwargs):
        from shop.models.cart import CartItemModel
        errors = super().check(**kwargs)
        # find the `quantity` field on the cart item model; note the for/else:
        # if no break happened, `cart_field` keeps the last inspected field and
        # is still used below for the type comparison
        for cart_field in CartItemModel._meta.fields:
            if cart_field.attname == 'quantity':
                break
        else:
            msg = "Class `{}` must implement a field named `quantity`."
            errors.append(checks.Error(msg.format(CartItemModel.__name__)))
        # the product's own `quantity` field must use the same internal type,
        # so cart quantities and stock quantities are directly comparable
        for field in cls._meta.fields:
            if field.attname == 'quantity':
                if field.get_internal_type() != cart_field.get_internal_type():
                    msg = "Field `{}.quantity` must be of same type as `{}.quantity`."
                    errors.append(checks.Error(msg.format(cls.__name__, CartItemModel.__name__)))
                break
        else:
            msg = "Class `{}` must implement a field named `quantity`."
            errors.append(checks.Error(msg.format(cls.__name__)))
        return errors
class BaseReserveProductMixin:
    def get_availability(self, request, **kwargs):
        """
        Returns the current available quantity for this product.

        If other customers have pending carts containing this same product, the quantity
        is adjusted accordingly. Therefore make sure to invalidate carts, which were not
        converted into an order after a determined period of time. Otherwise the quantity
        returned by this function might be considerably lower, than what it could be.
        """
        from shop.models.cart import CartItemModel

        availability = super().get_availability(request, **kwargs)
        # subtract everything already sitting in (pending) carts
        reserved = CartItemModel.objects.filter(product=self).values('quantity') \
            .aggregate(sum=Coalesce(Sum('quantity'), 0))['sum']
        availability.quantity -= reserved
        return availability
# NOTE: the MRO matters here — BaseReserveProductMixin.get_availability must
# run before AvailableProductMixin.get_availability (via super()).
class ReserveProductMixin(BaseReserveProductMixin, AvailableProductMixin):
    """
    Add this mixin class to the product models declaration, wanting to keep track on the
    current amount of products in stock. In comparison to
    :class:`shop.models.product.AvailableProductMixin`, this mixin reserves items in pending
    carts, without the risk for overselling. On the other hand, the shop may run out of sellable
    items, if customers keep products in the cart for a long period, without proceeding to checkout.

    Use this mixin for products kept for a short period until checking out the cart, for
    instance for ticket sales. Ensure that pending carts are flushed regularly.

    The product class must implement a field named ``quantity`` accepting numerical values.
    """
class BaseProductManager(PolymorphicManager):
    """
    A base ModelManager for all non-object manipulation needs, mostly statistics and querying.
    """
    def select_lookup(self, search_term):
        """
        Returning a queryset containing the products matching the declared lookup fields
        together with the given search term. Each product can define its own lookup fields
        using the member list or tuple `lookup_fields`.
        """
        term_filters = [models.Q((field, search_term)) for field in self.model.lookup_fields]
        combined_filter = reduce(operator.or_, term_filters)
        return self.get_queryset().filter(combined_filter)

    def indexable(self):
        """
        Return a queryset of indexable Products.
        """
        return self.get_queryset().filter(active=True)
class PolymorphicProductMetaclass(deferred.PolymorphicForeignKeyBuilder):
    @classmethod
    def perform_meta_model_check(cls, Model):
        """
        Perform some safety checks on the ProductModel being created.

        :param Model: the materialized product model class to validate
        :raises NotImplementedError: if the model misses a required attribute
        """
        if not isinstance(Model.objects, BaseProductManager):
            msg = "Class `{}.objects` must provide ModelManager inheriting from BaseProductManager"
            raise NotImplementedError(msg.format(Model.__name__))
        if not isinstance(getattr(Model, 'lookup_fields', None), (list, tuple)):
            msg = "Class `{}` must provide a tuple of `lookup_fields` so that we can easily lookup for Products"
            raise NotImplementedError(msg.format(Model.__name__))
        if not callable(getattr(Model, 'get_price', None)):
            msg = "Class `{}` must provide a method implementing `get_price(request)`"
            # bug fix: this message previously formatted `cls.__name__`, which
            # named the metaclass instead of the offending model class
            raise NotImplementedError(msg.format(Model.__name__))
class BaseProduct(PolymorphicModel, metaclass=PolymorphicProductMetaclass):
"""
An abstract basic product model for the shop. It is intended to be overridden by one or
more polymorphic models, adding all the fields and relations, required to describe this
type of product.
Some attributes for this class are mandatory. They shall be implemented as property method.
The following fields MUST be implemented by the inheriting class:
``product_name``: Return the pronounced name for this product in its localized language.
Additionally the inheriting class MUST implement the following methods ``get_absolute_url()``
and ``get_price()``. See below for details.
Unless each product variant offers its own product code, it is strongly recommended to add
a field ``product_code = models.CharField(_("Product code"), max_length=255, unique=True)``
to the class implementing the product.
"""
created_at = models.DateTimeField(
_("Created at"),
auto_now_add=True,
)
updated_at = models.DateTimeField(
_("Updated at"),
auto_now=True,
)
active = models.BooleanField(
_("Active"),
default=True,
help_text=_("Is this product publicly visible."),
)
class Meta:
abstract = True
verbose_name = _("Product")
verbose_name_plural = _("Products")
def product_type(self):
"""
Returns the polymorphic type of the product.
"""
return force_str(self.polymorphic_ctype)
product_type.short_description = _("Product type")
@property
def product_model(self):
"""
Returns the polymorphic model name of the product's class.
"""
return self.polymorphic_ctype.model
def get_absolute_url(self):
"""
Hook for returning the canonical Django URL of this product.
"""
msg = "Method get_absolute_url() must be implemented by subclass: `{}`"
raise NotImplementedError(msg.format(self.__class__.__name__))
def get_price(self, request):
"""
Hook for returning the current price of this product.
The price shall be of type Money. Read the appropriate section on how to create a Money
type for the chosen currency.
Use the `request` object to vary the price according to the logged in user,
its country code or the language.
"""
msg = "Method get_price() must be implemented by subclass: `{}`"
raise NotImplementedError(msg.format(self.__class__.__name__))
def get_product_variant(self, **kwargs):
"""
Hook for returning the variant of a product using parameters passed in by **kwargs.
If the product has no variants, then return the product itself.
:param **kwargs: A dictionary describing the product's variations.
"""
return self
def get_product_variants(self):
"""
Hook for returning a queryset of variants for the given product.
If the product has no variants, then the queryset contains just itself.
"""
return self._meta.model.objects.filter(pk=self.pk)
def get_availability(self, request, **kwargs):
"""
Hook for checking the availability of a product.
:param request:
Optionally used to vary the availability according to the logged in user,
its country code or language.
:param **kwargs:
Extra arguments passed to the underlying method. Useful for products with
variations.
:return: An object of type :class:`shop.models.product.Availability`.
"""
return Availability()
def managed_availability(self):
"""
:return True: If this product has its quantity managed by some inventory functionality.
"""
return False
def is_in_cart(self, cart, watched=False, **kwargs):
"""
Checks if the current product is already in the given cart, and if so, returns the
corresponding cart_item.
:param watched (bool): This is used to determine if this check shall only be performed
for the watch-list.
:param **kwargs: Optionally one may pass arbitrary information about the product being looked
up. This can be used to determine if a product with variations shall be considered
equal to the same cart item, resulting in an increase of it's quantity, or if it
shall be considered as a separate cart item, resulting in the creation of a new item.
:returns: The cart item (of type CartItem) containing the product considered as equal to the
current one, or ``None`` if no product matches in the cart.
"""
from shop.models.cart import CartItemModel
cart_item_qs = CartItemModel.objects.filter(cart=cart, product=self)
return cart_item_qs.first()
def deduct_from_stock(self, quantity, **kwargs):
"""
Hook to deduct a number of items of the current product from the stock's inventory.
:param quantity: Number of items to deduct.
:param **kwargs:
Extra arguments passed to the underlying method. Useful for products with
variations.
"""
def get_weight(self):
"""
Optional hook to return the product's gross weight in kg. This information is required to
estimate the shipping costs. The merchants product model shall override this method.
"""
return 0
@classmethod
def check(cls, **kwargs):
"""
Internal method to check consistency of Product model declaration on bootstrapping
application.
"""
errors = super().check(**kwargs)
try:
cls.product_name
except AttributeError:
msg = "Class `{}` must provide a model field implementing `product_name`"
errors.append(checks.Error(msg.format(cls.__name__)))
return errors
    def update_search_index(self):
        """
        Update the Document inside the Elasticsearch index after changing relevant parts
        of the product.
        """
        # NOTE(review): assumes get_documents() yields a re-iterable collection
        # (not a one-shot generator), since it is scanned once per language —
        # confirm against the registry implementation.
        documents = elasticsearch_registry.get_documents([ProductModel])
        if settings.USE_I18N:
            for language, _ in settings.LANGUAGES:
                try:
                    # Prefer the index document declared for this language …
                    document = next(doc for doc in documents if doc._language == language)
                except StopIteration:
                    # … falling back to the language-neutral document.
                    document = next(doc for doc in documents if doc._language is None)
                document().update(self)
        else:
            # Without I18N there is a single document class.
            document = next(doc for doc in documents)
            document().update(self)
def invalidate_cache(self):
"""
Method ``ProductCommonSerializer.render_html()`` caches the rendered HTML snippets.
Invalidate this HTML snippet after changing relevant parts of the product.
"""
shop_app = apps.get_app_config('shop')
if shop_app.cache_supporting_wildcard:
cache.delete_pattern('product:{}|*'.format(self.id))
# Concrete product model, materialized from the abstract ``BaseProduct`` at
# bootstrap time (see ``shop.deferred``); other modules import this symbol.
ProductModel = deferred.MaterializedModel(BaseProduct)
class CMSPageReferenceMixin:
    """
    Mixin for product models which refer to CMS pages in order to emulate
    categories. It contributes a ``get_absolute_url()`` method so that such
    products can be reached through a canonical URL.
    """
    category_fields = ['cms_pages']  # used by ProductIndex to fill the categories

    def get_absolute_url(self):
        """
        Return the absolute URL of a product
        """
        # Sort by highest level, so that the canonical URL associates with the
        # most generic category; ``last()`` picks the deepest path entry.
        page = self.cms_pages.order_by('node__path').last()
        base = '/category-not-assigned/' if page is None else page.get_absolute_url()
        return urljoin(base, self.slug)
| {
"repo_name": "awesto/django-shop",
"path": "shop/models/product.py",
"copies": "1",
"size": "16044",
"license": "bsd-3-clause",
"hash": -2669159924297466000,
"line_mean": 39.5151515152,
"line_max": 112,
"alpha_frac": 0.6588755921,
"autogenerated": false,
"ratio": 4.638334778837814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019957725416819097,
"num_lines": 396
} |
from functools import reduce
import operator
import datetime
import time
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.models import Q
from django.utils.timezone import make_aware, get_current_timezone
from obcy.models import Joke
# Maps a joke's ``site`` identifier to the home URL of its source site.
SITE_URL = {
    'wykop': 'http://wykop.pl',
    'codzienny': 'https://facebook.com/sucharcodzienny',
    'zacny': 'https://www.facebook.com/1zacnysucharmilordzie1',
    'sucharnia': 'https://www.facebook.com/groups/495903230481274'
}

# Maps a ``site`` identifier to a file extension — presumably of the site's
# image asset used in templates; confirm against the template code.
SITE_IMAGE_EXTENSION = {
    'wykop': 'png',
    'codzienny': 'jpg',
    'zacny': 'jpg',
    'sucharnia': 'png'
}
def __sort_recalculate(sort, joke):
    """
    Return the value of ``joke``'s attribute named ``sort``, normalized so
    that jokes from different sites are comparable.

    Vote counts from 'wykop' and 'sucharnia' are scaled by 4.5 to compensate
    for the different voting behaviour on those sites.
    """
    if sort == 'votes':
        votes = joke.votes
        if joke.site in ('wykop', 'sucharnia'):
            votes *= 4.5
        return votes
    # Use the idiomatic getattr() instead of calling __getattribute__ directly.
    return getattr(joke, sort)
def __add_pages(request, jokes):
    """Paginate ``jokes`` into pages of 15 and return the requested page."""
    paginator = Paginator(jokes, 15)
    requested = request.GET.get('page')
    try:
        page = paginator.page(requested)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        page = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        page = paginator.page(paginator.num_pages)
    return page
def __add_user(request, context):
    # Enrich ``context`` with the requesting user's display name, username and
    # moderator flag.
    page = request.GET.get('page', 1)
    user = request.user
    # NOTE(review): ``page`` is a string whenever the parameter is present in
    # the query string (even '?page=1'), so this comparison with the int 1
    # only skips the block when no ``page`` parameter was sent at all —
    # confirm this is intended.
    if page != 1:
        if user.is_authenticated():
            if user.first_name:
                name = user.first_name
                if user.last_name:
                    name += ' ' + user.last_name
            else:
                name = user.username
            username = user.username
        else:
            # Anonymous visitor: expose an empty identity to the template.
            name = None
            username = None
        context.update({'user_fullname': name, 'username': username})
    # Flag whether the user belongs to the 'Moderator' group.
    moderator = True if user.groups.filter(name='Moderator') else False
    context.update({'moderator': moderator})
def __last_seen(request):
    """Return the session's previous 'last_seen' timestamp and refresh it.

    Returns ``False`` when the session has no such entry yet.
    """
    previous = request.session.get('last_seen', False)
    request.session['last_seen'] = time.time()
    return previous
def __order_by(request):
    """Build the ordering field name from the request's query parameters."""
    sort = request.GET.get('sort', 'added')
    sort = 'added' if sort == 'date' else sort
    # Unless ``reversed=true`` is passed explicitly, sort descending: the
    # default value True compares unequal to the string 'true'.
    if request.GET.get('reversed', True) != 'true':
        sort = '-' + sort
    return sort
def all_sites(request, pages=True, show_hidden=False):
    """
    Build the template context listing jokes from all sites, optionally
    filtered by the ``q`` search parameter.

    :param pages: paginate the result when ``True``.
    :param show_hidden: include jokes marked as hidden.
    :return: a context dict with 'jokes', 'site' and related entries.
    """
    context = {}
    jokes = Joke.objects.filter(duplicate=None)
    if not show_hidden:
        jokes = jokes.filter(hidden=None)
    search = request.GET.get('q', '')
    if search.strip() != '':
        items = search.split()
        # AND together one icontains-lookup per search word.
        # (Renamed from ``filter`` to avoid shadowing the builtin.)
        query = reduce(operator.and_, (Q(body__icontains=x) for x in items))
        jokes = jokes.filter(query)
        context.update({'search': search})
    jokes = jokes.order_by(__order_by(request))
    if pages:
        jokes = __add_pages(request, jokes)
    context.update({'jokes': jokes, 'site': 'all'})
    context.update({'site_image_extension': SITE_IMAGE_EXTENSION})
    __add_user(request, context)
    last_seen = __last_seen(request)
    # Only expose 'last_seen' for returning visitors (> 1 s since last hit).
    if last_seen and time.time() - last_seen > 1:
        context.update(
            {'last_seen': make_aware(datetime.datetime.fromtimestamp(last_seen + 1), get_current_timezone())})
    return context
def one_joke(request, jokeslug):
    """Build the context for displaying a single joke identified by its slug."""
    joke = Joke.objects.get(slug=jokeslug)
    context = {
        'joke': joke,
        'site_url': SITE_URL[joke.site],
        'site_image_extension': SITE_IMAGE_EXTENSION,
    }
    __add_user(request, context)
    return context
def random(request, pages=True):
    """Build the context with visible, non-duplicate jokes in random order."""
    jokes = Joke.objects.filter(duplicate=None).filter(hidden=None).order_by('?')
    if pages:
        jokes = __add_pages(request, jokes)
    context = {
        'jokes': jokes,
        'site': 'all',
        'site_image_extension': SITE_IMAGE_EXTENSION,
        'random': True,
    }
    __add_user(request, context)
    return context
def unverified(request):
    # Context with jokes that are neither duplicates nor hidden and still
    # await verification.
    jokes = Joke.objects.filter(duplicate=None).filter(hidden=None).filter(verified=None).order_by(__order_by(request))
    jokes = __add_pages(request, jokes)
    context = {'jokes': jokes, 'site': 'all', 'site_image_extension': SITE_IMAGE_EXTENSION}
    __add_user(request, context)
    # NOTE(review): the trailing "| {" below appears to be extraction residue
    # fused with dataset metadata; the intended statement is presumably
    # ``return context`` — confirm against the original repository.
    return context | {
"repo_name": "jchmura/suchary-django",
"path": "obcy/extras/prepare_view.py",
"copies": "1",
"size": "4222",
"license": "mit",
"hash": 6270075118385788000,
"line_mean": 28.7394366197,
"line_max": 119,
"alpha_frac": 0.6229275225,
"autogenerated": false,
"ratio": 3.3938906752411575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9512750200463824,
"avg_score": 0.0008135994554667848,
"num_lines": 142
} |
from functools import reduce
import operator
import math
from llvmlite import ir
import llvmlite.binding as ll
from numba.core.imputils import Registry
from numba.core.typing.npydecl import parse_dtype, signature
from numba.core import types, cgutils
from .cudadrv import nvvm
from numba import cuda
from numba.cuda import nvvmutils, stubs
from numba.cuda.types import dim3, grid_group
# Registry collecting the CUDA lowering implementations defined below;
# ``lower``/``lower_attr`` are the decorators used throughout this module.
registry = Registry()
lower = registry.lower
lower_attr = registry.lower_getattr
def initialize_dim3(builder, prefix):
    """Read the x/y/z special registers for ``prefix`` and pack them into a
    three-element struct (the lowered representation of a ``dim3``)."""
    components = tuple(
        nvvmutils.call_sreg(builder, "%s.%s" % (prefix, axis))
        for axis in "xyz"
    )
    return cgutils.pack_struct(builder, components)
# Attribute lowerings reading the PTX special registers: tid/ntid give the
# thread index and block shape, ctaid/nctaid the block index and grid shape.
@lower_attr(types.Module(cuda), 'threadIdx')
def cuda_threadIdx(context, builder, sig, args):
    return initialize_dim3(builder, 'tid')


@lower_attr(types.Module(cuda), 'blockDim')
def cuda_blockDim(context, builder, sig, args):
    return initialize_dim3(builder, 'ntid')


@lower_attr(types.Module(cuda), 'blockIdx')
def cuda_blockIdx(context, builder, sig, args):
    return initialize_dim3(builder, 'ctaid')


@lower_attr(types.Module(cuda), 'gridDim')
def cuda_gridDim(context, builder, sig, args):
    return initialize_dim3(builder, 'nctaid')


@lower_attr(types.Module(cuda), 'laneid')
def cuda_laneid(context, builder, sig, args):
    return nvvmutils.call_sreg(builder, 'laneid')


@lower_attr(types.Module(cuda), 'warpsize')
def cuda_warpsize(context, builder, sig, args):
    return nvvmutils.call_sreg(builder, 'warpsize')


# A dim3 value is lowered as an (x, y, z) struct; these return its components.
@lower_attr(dim3, 'x')
def dim3_x(context, builder, sig, args):
    return builder.extract_value(args, 0)


@lower_attr(dim3, 'y')
def dim3_y(context, builder, sig, args):
    return builder.extract_value(args, 1)


@lower_attr(dim3, 'z')
def dim3_z(context, builder, sig, args):
    return builder.extract_value(args, 2)


@lower(cuda.cg.this_grid)
def cg_this_grid(context, builder, sig, args):
    # cudaCGGetIntrinsicHandle(1): obtain the handle of the current grid group.
    one = context.get_constant(types.int32, 1)
    lmod = builder.module
    return builder.call(
        nvvmutils.declare_cudaCGGetIntrinsicHandle(lmod),
        (one,))


@lower('GridGroup.sync', grid_group)
def ptx_sync_group(context, builder, sig, args):
    # cudaCGSynchronize(handle, flags=0): synchronize the whole grid group.
    flags = context.get_constant(types.int32, 0)
    lmod = builder.module
    return builder.call(
        nvvmutils.declare_cudaCGSynchronize(lmod),
        (*args, flags))
# -----------------------------------------------------------------------------
@lower(cuda.grid, types.int32)
def cuda_grid(context, builder, sig, args):
    """Lower ``cuda.grid(ndim)`` to the global thread-index computation."""
    restype = sig.return_type
    if restype == types.int32:
        # 1-D request: a single scalar index.
        return nvvmutils.get_global_id(builder, dim=1)
    if isinstance(restype, types.UniTuple):
        # Multi-dimensional request: pack one index per dimension.
        ids = nvvmutils.get_global_id(builder, dim=restype.count)
        return cgutils.pack_array(builder, ids)
    raise ValueError('Unexpected return type %s from cuda.grid' % restype)
def _nthreads_for_dim(builder, dim):
    """Total thread count along ``dim``: block size (ntid) times grid size
    (nctaid)."""
    block_size = nvvmutils.call_sreg(builder, f"ntid.{dim}")
    grid_size = nvvmutils.call_sreg(builder, f"nctaid.{dim}")
    return builder.mul(block_size, grid_size)
@lower(cuda.gridsize, types.int32)
def cuda_gridsize(context, builder, sig, args):
    # Lower ``cuda.gridsize(ndim)``: total number of threads per dimension.
    restype = sig.return_type
    nx = _nthreads_for_dim(builder, 'x')
    if restype == types.int32:
        return nx
    elif isinstance(restype, types.UniTuple):
        ny = _nthreads_for_dim(builder, 'y')

        if restype.count == 2:
            return cgutils.pack_array(builder, (nx, ny))
        elif restype.count == 3:
            nz = _nthreads_for_dim(builder, 'z')
            return cgutils.pack_array(builder, (nx, ny, nz))

    # Fallthrough to here indicates unexpected return type or tuple length
    raise ValueError('Unexpected return type %s of cuda.gridsize' % restype)


# -----------------------------------------------------------------------------


@lower(cuda.const.array_like, types.Array)
def cuda_const_array_like(context, builder, sig, args):
    # This is a no-op because CUDATargetContext.make_constant_array already
    # created the constant array.
    return args[0]
# Monotonically increasing counter backing _get_unique_smem_id().
_unique_smem_id = 0


def _get_unique_smem_id(name):
    """Due to bug with NVVM invalid internalizing of shared memory in the
    PTX output. We can't mark shared memory to be internal. We have to
    ensure unique name is generated for shared memory symbol.
    """
    global _unique_smem_id
    _unique_smem_id += 1
    return f"{name}_{_unique_smem_id}"
# Lowerings for cuda.shared.array / cuda.local.array. Shared allocations get a
# unique symbol name (see _get_unique_smem_id) and may be dynamically sized;
# local allocations use a fixed symbol and must have a static size.
@lower(cuda.shared.array, types.IntegerLiteral, types.Any)
def cuda_shared_array_integer(context, builder, sig, args):
    # cuda.shared.array(length, dtype): 1-D shared-memory allocation.
    length = sig.args[0].literal_value
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=(length,), dtype=dtype,
                          symbol_name=_get_unique_smem_id('_cudapy_smem'),
                          addrspace=nvvm.ADDRSPACE_SHARED,
                          can_dynsized=True)


@lower(cuda.shared.array, types.Tuple, types.Any)
@lower(cuda.shared.array, types.UniTuple, types.Any)
def cuda_shared_array_tuple(context, builder, sig, args):
    # cuda.shared.array(shape_tuple, dtype): N-D shared-memory allocation.
    shape = [ s.literal_value for s in sig.args[0] ]
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=shape, dtype=dtype,
                          symbol_name=_get_unique_smem_id('_cudapy_smem'),
                          addrspace=nvvm.ADDRSPACE_SHARED,
                          can_dynsized=True)


@lower(cuda.local.array, types.IntegerLiteral, types.Any)
def cuda_local_array_integer(context, builder, sig, args):
    # cuda.local.array(length, dtype): 1-D local-memory allocation.
    length = sig.args[0].literal_value
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=(length,), dtype=dtype,
                          symbol_name='_cudapy_lmem',
                          addrspace=nvvm.ADDRSPACE_LOCAL,
                          can_dynsized=False)


@lower(cuda.local.array, types.Tuple, types.Any)
@lower(cuda.local.array, types.UniTuple, types.Any)
def ptx_lmem_alloc_array(context, builder, sig, args):
    # cuda.local.array(shape_tuple, dtype): N-D local-memory allocation.
    shape = [ s.literal_value for s in sig.args[0] ]
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=shape, dtype=dtype,
                          symbol_name='_cudapy_lmem',
                          addrspace=nvvm.ADDRSPACE_LOCAL,
                          can_dynsized=False)
# Barrier, fence and warp-synchronization intrinsics. Each lowering declares
# and calls the matching NVVM intrinsic; lowerings of functions returning
# ``None`` hand back the context's dummy value.
@lower(stubs.syncthreads)
def ptx_syncthreads(context, builder, sig, args):
    # Block-wide barrier.
    assert not args
    fname = 'llvm.nvvm.barrier0'
    lmod = builder.module
    fnty = ir.FunctionType(ir.VoidType(), ())
    sync = cgutils.get_or_insert_function(lmod, fnty, fname)
    builder.call(sync, ())
    return context.get_dummy_value()


@lower(stubs.syncthreads_count, types.i4)
def ptx_syncthreads_count(context, builder, sig, args):
    # Barrier returning the count of threads with a non-zero predicate.
    fname = 'llvm.nvvm.barrier0.popc'
    lmod = builder.module
    fnty = ir.FunctionType(ir.IntType(32), (ir.IntType(32),))
    sync = cgutils.get_or_insert_function(lmod, fnty, fname)
    return builder.call(sync, args)


@lower(stubs.syncthreads_and, types.i4)
def ptx_syncthreads_and(context, builder, sig, args):
    # Barrier with an AND-reduction over the threads' predicates.
    fname = 'llvm.nvvm.barrier0.and'
    lmod = builder.module
    fnty = ir.FunctionType(ir.IntType(32), (ir.IntType(32),))
    sync = cgutils.get_or_insert_function(lmod, fnty, fname)
    return builder.call(sync, args)


@lower(stubs.syncthreads_or, types.i4)
def ptx_syncthreads_or(context, builder, sig, args):
    # Barrier with an OR-reduction over the threads' predicates.
    fname = 'llvm.nvvm.barrier0.or'
    lmod = builder.module
    fnty = ir.FunctionType(ir.IntType(32), (ir.IntType(32),))
    sync = cgutils.get_or_insert_function(lmod, fnty, fname)
    return builder.call(sync, args)


@lower(stubs.threadfence_block)
def ptx_threadfence_block(context, builder, sig, args):
    # Memory fence with CTA (block) scope.
    assert not args
    fname = 'llvm.nvvm.membar.cta'
    lmod = builder.module
    fnty = ir.FunctionType(ir.VoidType(), ())
    sync = cgutils.get_or_insert_function(lmod, fnty, fname)
    builder.call(sync, ())
    return context.get_dummy_value()


@lower(stubs.threadfence_system)
def ptx_threadfence_system(context, builder, sig, args):
    # Memory fence with system scope.
    assert not args
    fname = 'llvm.nvvm.membar.sys'
    lmod = builder.module
    fnty = ir.FunctionType(ir.VoidType(), ())
    sync = cgutils.get_or_insert_function(lmod, fnty, fname)
    builder.call(sync, ())
    return context.get_dummy_value()


@lower(stubs.threadfence)
def ptx_threadfence_device(context, builder, sig, args):
    # Memory fence with device (gl) scope.
    assert not args
    fname = 'llvm.nvvm.membar.gl'
    lmod = builder.module
    fnty = ir.FunctionType(ir.VoidType(), ())
    sync = cgutils.get_or_insert_function(lmod, fnty, fname)
    builder.call(sync, ())
    return context.get_dummy_value()


@lower(stubs.syncwarp)
def ptx_syncwarp(context, builder, sig, args):
    # No-mask variant: synchronize the full warp (mask 0xFFFFFFFF).
    mask = context.get_constant(types.int32, 0xFFFFFFFF)
    mask_sig = signature(types.none, types.int32)
    return ptx_syncwarp_mask(context, builder, mask_sig, [mask])


@lower(stubs.syncwarp, types.i4)
def ptx_syncwarp_mask(context, builder, sig, args):
    # Synchronize the lanes selected by the given mask.
    fname = 'llvm.nvvm.bar.warp.sync'
    lmod = builder.module
    fnty = ir.FunctionType(ir.VoidType(), (ir.IntType(32),))
    sync = cgutils.get_or_insert_function(lmod, fnty, fname)
    builder.call(sync, args)
    return context.get_dummy_value()
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.i4, types.i4,
       types.i4)
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.i8, types.i4,
       types.i4)
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.f4, types.i4,
       types.i4)
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.f8, types.i4,
       types.i4)
def ptx_shfl_sync_i32(context, builder, sig, args):
    """
    The NVVM intrinsic for shfl only supports i32, but the cuda intrinsic
    function supports both 32- and 64-bit ints and floats, so for feature
    parity, i64, f32, and f64 are also implemented. Floats are handled by
    bitcasting the float to an int, shuffling, then bitcasting back; 64-bit
    values by splitting them into two 32-bit halves, shuffling those, and
    packing them back together.
    """
    mask, mode, value, index, clamp = args
    value_type = sig.args[2]
    if value_type in types.real_domain:
        # Reinterpret the float's bits as an integer of the same width.
        value = builder.bitcast(value, ir.IntType(value_type.bitwidth))
    fname = 'llvm.nvvm.shfl.sync.i32'
    lmod = builder.module
    fnty = ir.FunctionType(
        ir.LiteralStructType((ir.IntType(32), ir.IntType(1))),
        (ir.IntType(32), ir.IntType(32), ir.IntType(32),
         ir.IntType(32), ir.IntType(32))
    )
    func = cgutils.get_or_insert_function(lmod, fnty, fname)
    if value_type.bitwidth == 32:
        ret = builder.call(func, (mask, mode, value, index, clamp))
        if value_type == types.float32:
            # Reassemble (float, pred) from the (i32, pred) result.
            rv = builder.extract_value(ret, 0)
            pred = builder.extract_value(ret, 1)
            fv = builder.bitcast(rv, ir.FloatType())
            ret = cgutils.make_anonymous_struct(builder, (fv, pred))
    else:
        # 64-bit path: shuffle the low and high 32-bit halves separately.
        value1 = builder.trunc(value, ir.IntType(32))
        value_lshr = builder.lshr(value, context.get_constant(types.i8, 32))
        value2 = builder.trunc(value_lshr, ir.IntType(32))
        ret1 = builder.call(func, (mask, mode, value1, index, clamp))
        ret2 = builder.call(func, (mask, mode, value2, index, clamp))
        rv1 = builder.extract_value(ret1, 0)
        rv2 = builder.extract_value(ret2, 0)
        # Only the first call's predicate is kept for the combined result.
        pred = builder.extract_value(ret1, 1)
        rv1_64 = builder.zext(rv1, ir.IntType(64))
        rv2_64 = builder.zext(rv2, ir.IntType(64))
        rv_shl = builder.shl(rv2_64, context.get_constant(types.i8, 32))
        rv = builder.or_(rv_shl, rv1_64)
        if value_type == types.float64:
            rv = builder.bitcast(rv, ir.DoubleType())
        ret = cgutils.make_anonymous_struct(builder, (rv, pred))
    return ret
@lower(stubs.vote_sync_intrinsic, types.i4, types.i4, types.boolean)
def ptx_vote_sync(context, builder, sig, args):
    # Warp vote: the intrinsic returns a (ballot, predicate-result) pair.
    fname = 'llvm.nvvm.vote.sync'
    lmod = builder.module
    fnty = ir.FunctionType(ir.LiteralStructType((ir.IntType(32),
                                                 ir.IntType(1))),
                           (ir.IntType(32), ir.IntType(32), ir.IntType(1)))
    func = cgutils.get_or_insert_function(lmod, fnty, fname)
    return builder.call(func, args)


@lower(stubs.match_any_sync, types.i4, types.i4)
@lower(stubs.match_any_sync, types.i4, types.i8)
@lower(stubs.match_any_sync, types.i4, types.f4)
@lower(stubs.match_any_sync, types.i4, types.f8)
def ptx_match_any_sync(context, builder, sig, args):
    # Warp match-any over 32/64-bit ints and floats; floats are compared by
    # bit pattern (bitcast to an integer of the same width).
    mask, value = args
    width = sig.args[1].bitwidth
    if sig.args[1] in types.real_domain:
        value = builder.bitcast(value, ir.IntType(width))
    fname = 'llvm.nvvm.match.any.sync.i{}'.format(width)
    lmod = builder.module
    fnty = ir.FunctionType(ir.IntType(32), (ir.IntType(32), ir.IntType(width)))
    func = cgutils.get_or_insert_function(lmod, fnty, fname)
    return builder.call(func, (mask, value))


@lower(stubs.match_all_sync, types.i4, types.i4)
@lower(stubs.match_all_sync, types.i4, types.i8)
@lower(stubs.match_all_sync, types.i4, types.f4)
@lower(stubs.match_all_sync, types.i4, types.f8)
def ptx_match_all_sync(context, builder, sig, args):
    # Warp match-all: returns a (mask, all-equal predicate) pair.
    mask, value = args
    width = sig.args[1].bitwidth
    if sig.args[1] in types.real_domain:
        value = builder.bitcast(value, ir.IntType(width))
    fname = 'llvm.nvvm.match.all.sync.i{}'.format(width)
    lmod = builder.module
    fnty = ir.FunctionType(ir.LiteralStructType((ir.IntType(32),
                                                 ir.IntType(1))),
                           (ir.IntType(32), ir.IntType(width)))
    func = cgutils.get_or_insert_function(lmod, fnty, fname)
    return builder.call(func, (mask, value))


@lower(stubs.popc, types.Any)
def ptx_popc(context, builder, sig, args):
    # Population count via the ctpop intrinsic.
    return builder.ctpop(args[0])


@lower(stubs.fma, types.Any, types.Any, types.Any)
def ptx_fma(context, builder, sig, args):
    # Fused multiply-add via the fma intrinsic.
    return builder.fma(*args)
# See:
# https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cbrt.html#__nv_cbrt
# https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cbrtf.html#__nv_cbrtf

# Maps the float type to the matching libdevice cube-root function.
cbrt_funcs = {
    types.float32: '__nv_cbrtf',
    types.float64: '__nv_cbrt',
}


@lower(stubs.cbrt, types.float32)
@lower(stubs.cbrt, types.float64)
def ptx_cbrt(context, builder, sig, args):
    # Cube root through the width-matched libdevice function.
    ty = sig.return_type
    fname = cbrt_funcs[ty]
    fty = context.get_value_type(ty)
    lmod = builder.module
    fnty = ir.FunctionType(fty, [fty])
    fn = cgutils.get_or_insert_function(lmod, fnty, fname)
    return builder.call(fn, args)


@lower(stubs.brev, types.u4)
def ptx_brev_u4(context, builder, sig, args):
    # FIXME the llvm.bitreverse.i32 intrinsic isn't supported by nvcc
    # return builder.bitreverse(args[0])
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(ir.IntType(32), (ir.IntType(32),)),
        '__nv_brev')
    return builder.call(fn, args)


@lower(stubs.brev, types.u8)
def ptx_brev_u8(context, builder, sig, args):
    # FIXME the llvm.bitreverse.i64 intrinsic isn't supported by nvcc
    # return builder.bitreverse(args[0])
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(ir.IntType(64), (ir.IntType(64),)),
        '__nv_brevll')
    return builder.call(fn, args)


@lower(stubs.clz, types.Any)
def ptx_clz(context, builder, sig, args):
    # Count leading zeros; the flag 0 means the result is defined for input 0.
    return builder.ctlz(
        args[0],
        context.get_constant(types.boolean, 0))


@lower(stubs.ffs, types.Any)
def ptx_ffs(context, builder, sig, args):
    # NOTE(review): cttz yields the 0-based index of the lowest set bit,
    # whereas CUDA's __ffs is 1-based (0 when no bit is set) — confirm the
    # intended semantics of stubs.ffs.
    return builder.cttz(
        args[0],
        context.get_constant(types.boolean, 0))
@lower(stubs.selp, types.Any, types.Any, types.Any)
def ptx_selp(context, builder, sig, args):
    """Lower ``selp(test, a, b)`` to an LLVM select instruction."""
    predicate, if_true, if_false = args
    return builder.select(predicate, if_true, if_false)
# Lowerings for the builtin max/min/round on floats. Pure-f4 overloads call
# the single-precision libdevice functions; any overload involving f8
# promotes both operands to double first.
@lower(max, types.f4, types.f4)
def ptx_max_f4(context, builder, sig, args):
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(
            ir.FloatType(),
            (ir.FloatType(), ir.FloatType())),
        '__nv_fmaxf')
    return builder.call(fn, args)


@lower(max, types.f8, types.f4)
@lower(max, types.f4, types.f8)
@lower(max, types.f8, types.f8)
def ptx_max_f8(context, builder, sig, args):
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(
            ir.DoubleType(),
            (ir.DoubleType(), ir.DoubleType())),
        '__nv_fmax')

    return builder.call(fn, [
        context.cast(builder, args[0], sig.args[0], types.double),
        context.cast(builder, args[1], sig.args[1], types.double),
    ])


@lower(min, types.f4, types.f4)
def ptx_min_f4(context, builder, sig, args):
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(
            ir.FloatType(),
            (ir.FloatType(), ir.FloatType())),
        '__nv_fminf')
    return builder.call(fn, args)


@lower(min, types.f8, types.f4)
@lower(min, types.f4, types.f8)
@lower(min, types.f8, types.f8)
def ptx_min_f8(context, builder, sig, args):
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(
            ir.DoubleType(),
            (ir.DoubleType(), ir.DoubleType())),
        '__nv_fmin')

    return builder.call(fn, [
        context.cast(builder, args[0], sig.args[0], types.double),
        context.cast(builder, args[1], sig.args[1], types.double),
    ])


@lower(round, types.f4)
@lower(round, types.f8)
def ptx_round(context, builder, sig, args):
    # round(x) with no ndigits: round-to-nearest-integer via __nv_llrint,
    # which returns an i64.
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(
            ir.IntType(64),
            (ir.DoubleType(),)),
        '__nv_llrint')
    return builder.call(fn, [
        context.cast(builder, args[0], sig.args[0], types.double),
    ])


# This rounding implementation follows the algorithm used in the "fallback
# version" of double_round in CPython.
# https://github.com/python/cpython/blob/a755410e054e1e2390de5830befc08fe80706c66/Objects/floatobject.c#L964-L1007

@lower(round, types.f4, types.Integer)
@lower(round, types.f8, types.Integer)
def round_to_impl(context, builder, sig, args):
    # round(x, ndigits): the pure-Python helper below is compiled for the
    # device by compile_internal.
    def round_ndigits(x, ndigits):
        if math.isinf(x) or math.isnan(x):
            return x

        if ndigits >= 0:
            if ndigits > 22:
                # pow1 and pow2 are each safe from overflow, but
                # pow1*pow2 ~= pow(10.0, ndigits) might overflow.
                pow1 = 10.0 ** (ndigits - 22)
                pow2 = 1e22
            else:
                pow1 = 10.0 ** ndigits
                pow2 = 1.0
            y = (x * pow1) * pow2
            if math.isinf(y):
                return x

        else:
            pow1 = 10.0 ** (-ndigits)
            y = x / pow1

        z = round(y)
        if (math.fabs(y - z) == 0.5):
            # halfway between two integers; use round-half-even
            z = 2.0 * round(y / 2.0)

        if ndigits >= 0:
            z = (z / pow2) / pow1
        else:
            z *= pow1

        return z

    return context.compile_internal(builder, round_ndigits, sig, args, )
def gen_deg_rad(const):
    # Factory returning a lowering that multiplies its float argument by
    # ``const`` — used for the degree/radian conversions below.
    def impl(context, builder, sig, args):
        argty, = sig.args
        factor = context.get_constant(argty, const)
        return builder.fmul(factor, args[0])
    return impl


_deg2rad = math.pi / 180.
_rad2deg = 180. / math.pi

# Register math.radians / math.degrees for f4 and f8.
lower(math.radians, types.f4)(gen_deg_rad(_deg2rad))
lower(math.radians, types.f8)(gen_deg_rad(_deg2rad))

lower(math.degrees, types.f4)(gen_deg_rad(_rad2deg))
lower(math.degrees, types.f8)(gen_deg_rad(_rad2deg))
def _normalize_indices(context, builder, indty, inds):
    """
    Convert integer indices into tuple of intp
    """
    if indty in types.integer_domain:
        # A single scalar index becomes a 1-tuple.
        indty = types.UniTuple(dtype=indty, count=1)
        raw_indices = [inds]
    else:
        raw_indices = cgutils.unpack_tuple(builder, inds, count=len(indty))
    normalized = [context.cast(builder, idx, ty, types.intp)
                  for ty, idx in zip(indty, raw_indices)]
    return indty, normalized
def _atomic_dispatcher(dispatch_fn):
    # Decorator factoring out the shared (array, index, value) handling of all
    # atomic lowerings; the wrapped function receives the element pointer and
    # the value and only has to emit the dtype-specific operation.
    def imp(context, builder, sig, args):
        # The common argument handling code
        aryty, indty, valty = sig.args
        ary, inds, val = args
        dtype = aryty.dtype

        indty, indices = _normalize_indices(context, builder, indty, inds)

        if dtype != valty:
            raise TypeError("expect %s but got %s" % (dtype, valty))

        if aryty.ndim != len(indty):
            raise TypeError("indexing %d-D array with %d-D index" %
                            (aryty.ndim, len(indty)))

        lary = context.make_array(aryty)(context, builder, ary)
        ptr = cgutils.get_item_pointer(context, builder, aryty, lary, indices,
                                       wraparound=True)
        # dispatcher to implementation base on dtype
        return dispatch_fn(context, builder, dtype, ptr, val)
    return imp
# Atomic operation lowerings. Floats go through the helpers declared in
# nvvmutils; integers use LLVM atomicrmw with monotonic ordering.
@lower(stubs.atomic.add, types.Array, types.intp, types.Any)
@lower(stubs.atomic.add, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.add, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_add_tuple(context, builder, dtype, ptr, val):
    if dtype == types.float32:
        lmod = builder.module
        return builder.call(nvvmutils.declare_atomic_add_float32(lmod),
                            (ptr, val))
    elif dtype == types.float64:
        lmod = builder.module
        return builder.call(nvvmutils.declare_atomic_add_float64(lmod),
                            (ptr, val))
    else:
        return builder.atomic_rmw('add', ptr, val, 'monotonic')


@lower(stubs.atomic.sub, types.Array, types.intp, types.Any)
@lower(stubs.atomic.sub, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.sub, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_sub(context, builder, dtype, ptr, val):
    if dtype == types.float32:
        lmod = builder.module
        return builder.call(nvvmutils.declare_atomic_sub_float32(lmod),
                            (ptr, val))
    elif dtype == types.float64:
        lmod = builder.module
        return builder.call(nvvmutils.declare_atomic_sub_float64(lmod),
                            (ptr, val))
    else:
        return builder.atomic_rmw('sub', ptr, val, 'monotonic')


@lower(stubs.atomic.inc, types.Array, types.intp, types.Any)
@lower(stubs.atomic.inc, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.inc, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_inc(context, builder, dtype, ptr, val):
    # Unsigned integers only; dispatches on bit width.
    if dtype in cuda.cudadecl.unsigned_int_numba_types:
        bw = dtype.bitwidth
        lmod = builder.module
        fn = getattr(nvvmutils, f'declare_atomic_inc_int{bw}')
        return builder.call(fn(lmod), (ptr, val))
    else:
        raise TypeError(f'Unimplemented atomic inc with {dtype} array')


@lower(stubs.atomic.dec, types.Array, types.intp, types.Any)
@lower(stubs.atomic.dec, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.dec, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_dec(context, builder, dtype, ptr, val):
    # Unsigned integers only; dispatches on bit width.
    if dtype in cuda.cudadecl.unsigned_int_numba_types:
        bw = dtype.bitwidth
        lmod = builder.module
        fn = getattr(nvvmutils, f'declare_atomic_dec_int{bw}')
        return builder.call(fn(lmod), (ptr, val))
    else:
        raise TypeError(f'Unimplemented atomic dec with {dtype} array')


def ptx_atomic_bitwise(stub, op):
    # Registers an integer-only atomicrmw lowering (and/or/xor) for ``stub``.
    @_atomic_dispatcher
    def impl_ptx_atomic(context, builder, dtype, ptr, val):
        if dtype in (cuda.cudadecl.integer_numba_types):
            return builder.atomic_rmw(op, ptr, val, 'monotonic')
        else:
            raise TypeError(f'Unimplemented atomic {op} with {dtype} array')

    for ty in (types.intp, types.UniTuple, types.Tuple):
        lower(stub, types.Array, ty, types.Any)(impl_ptx_atomic)


ptx_atomic_bitwise(stubs.atomic.and_, 'and')
ptx_atomic_bitwise(stubs.atomic.or_, 'or')
ptx_atomic_bitwise(stubs.atomic.xor, 'xor')


@lower(stubs.atomic.exch, types.Array, types.intp, types.Any)
@lower(stubs.atomic.exch, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.exch, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_exch(context, builder, dtype, ptr, val):
    # Integer-only atomic exchange.
    if dtype in (cuda.cudadecl.integer_numba_types):
        return builder.atomic_rmw('xchg', ptr, val, 'monotonic')
    else:
        raise TypeError(f'Unimplemented atomic exch with {dtype} array')


@lower(stubs.atomic.max, types.Array, types.intp, types.Any)
@lower(stubs.atomic.max, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.max, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_max(context, builder, dtype, ptr, val):
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_max_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_max_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('max', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        # Unsigned comparison needs the 'umax' flavour.
        return builder.atomic_rmw('umax', ptr, val, ordering='monotonic')
    else:
        raise TypeError('Unimplemented atomic max with %s array' % dtype)


@lower(stubs.atomic.min, types.Array, types.intp, types.Any)
@lower(stubs.atomic.min, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.min, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_min(context, builder, dtype, ptr, val):
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_min_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_min_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('min', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        # Unsigned comparison needs the 'umin' flavour.
        return builder.atomic_rmw('umin', ptr, val, ordering='monotonic')
    else:
        raise TypeError('Unimplemented atomic min with %s array' % dtype)
@lower(stubs.atomic.nanmax, types.Array, types.intp, types.Any)
@lower(stubs.atomic.nanmax, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.nanmax, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_nanmax(context, builder, dtype, ptr, val):
    """
    Lower ``cuda.atomic.nanmax``. Floats use the ``nanmax`` helpers declared
    in nvvmutils; for integers NaN cannot occur, so a plain atomicrmw max is
    emitted.
    """
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_nanmax_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_nanmax_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('max', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        return builder.atomic_rmw('umax', ptr, val, ordering='monotonic')
    else:
        # Fixed: the message previously said "atomic max", misidentifying
        # the operation being lowered.
        raise TypeError('Unimplemented atomic nanmax with %s array' % dtype)
@lower(stubs.atomic.nanmin, types.Array, types.intp, types.Any)
@lower(stubs.atomic.nanmin, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.nanmin, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_nanmin(context, builder, dtype, ptr, val):
    """
    Lower ``cuda.atomic.nanmin``. Floats use the ``nanmin`` helpers declared
    in nvvmutils; for integers NaN cannot occur, so a plain atomicrmw min is
    emitted.
    """
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_nanmin_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_nanmin_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('min', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        return builder.atomic_rmw('umin', ptr, val, ordering='monotonic')
    else:
        # Fixed: the message previously said "atomic min", misidentifying
        # the operation being lowered.
        raise TypeError('Unimplemented atomic nanmin with %s array' % dtype)
@lower(stubs.atomic.compare_and_swap, types.Array, types.Any, types.Any)
def ptx_atomic_cas_tuple(context, builder, sig, args):
aryty, oldty, valty = sig.args
ary, old, val = args
dtype = aryty.dtype
lary = context.make_array(aryty)(context, builder, ary)
zero = context.get_constant(types.intp, 0)
ptr = cgutils.get_item_pointer(context, builder, aryty, lary, (zero,))
if aryty.dtype in (cuda.cudadecl.integer_numba_types):
lmod = builder.module
bitwidth = aryty.dtype.bitwidth
return nvvmutils.atomic_cmpxchg(builder, lmod, bitwidth, ptr, old, val)
else:
raise TypeError('Unimplemented atomic compare_and_swap '
'with %s array' % dtype)
# -----------------------------------------------------------------------------
def _get_target_data(context):
    """Return an llvmlite target-data object for the NVVM data layout
    matching the context's address size; used for ABI size queries."""
    return ll.create_target_data(nvvm.data_layout[context.address_size])
def _generic_array(context, builder, shape, dtype, symbol_name, addrspace,
                   can_dynsized=False):
    """Allocate a statically- or dynamically-sized array in the given
    address space and wrap it in a Numba C-contiguous array structure.

    ``shape`` with a non-positive element count selects dynamic shared
    memory when ``can_dynsized`` is true (1-D only); otherwise it is an
    error.  Local address space uses ``alloca``; every other address
    space gets a module-level global that is cast to the generic address
    space before use.  Returns the LLVM value of the populated array
    struct.
    """
    elemcount = reduce(operator.mul, shape, 1)
    # Check for valid shape for this type of allocation.
    # Only 1d arrays can be dynamic.
    dynamic_smem = elemcount <= 0 and can_dynsized and len(shape) == 1
    if elemcount <= 0 and not dynamic_smem:
        raise ValueError("array length <= 0")
    # Check that we support the requested dtype
    other_supported_type = isinstance(dtype, (types.Record, types.Boolean))
    if dtype not in types.number_domain and not other_supported_type:
        raise TypeError("unsupported type: %s" % dtype)
    lldtype = context.get_data_type(dtype)
    laryty = ir.ArrayType(lldtype, elemcount)
    if addrspace == nvvm.ADDRSPACE_LOCAL:
        # Special case local address space allocation to use alloca
        # NVVM is smart enough to only use local memory if no register is
        # available
        dataptr = cgutils.alloca_once(builder, laryty, name=symbol_name)
    else:
        lmod = builder.module
        # Create global variable in the requested address space
        gvmem = cgutils.add_global_variable(lmod, laryty, symbol_name,
                                            addrspace)
        # Specify alignment to avoid misalignment bug
        align = context.get_abi_sizeof(lldtype)
        # Alignment is required to be a power of 2 for shared memory. If it is
        # not a power of 2 (e.g. for a Record array) then round up accordingly.
        gvmem.align = 1 << (align - 1 ).bit_length()
        if dynamic_smem:
            # Dynamic shared memory is declared extern; the size comes from
            # the kernel launch configuration at runtime.
            gvmem.linkage = 'external'
        else:
            ## Comment out the following line to workaround a NVVM bug
            ## which generates a invalid symbol name when the linkage
            ## is internal and in some situation.
            ## See _get_unique_smem_id()
            # gvmem.linkage = lc.LINKAGE_INTERNAL
            gvmem.initializer = ir.Constant(laryty, ir.Undefined)
        # Convert to generic address-space
        conv = nvvmutils.insert_addrspace_conv(lmod, ir.IntType(8), addrspace)
        addrspaceptr = gvmem.bitcast(ir.PointerType(ir.IntType(8), addrspace))
        dataptr = builder.call(conv, [addrspaceptr])
    targetdata = _get_target_data(context)
    lldtype = context.get_data_type(dtype)
    itemsize = lldtype.get_abi_size(targetdata)
    # Compute strides (C-contiguous: innermost dimension is contiguous).
    laststride = itemsize
    rstrides = []
    for i, lastsize in enumerate(reversed(shape)):
        rstrides.append(laststride)
        laststride *= lastsize
    strides = [s for s in reversed(rstrides)]
    kstrides = [context.get_constant(types.intp, s) for s in strides]
    # Compute shape
    if dynamic_smem:
        # Compute the shape based on the dynamic shared memory configuration.
        # Unfortunately NVVM does not provide an intrinsic for the
        # %dynamic_smem_size register, so we must read it using inline
        # assembly.
        get_dynshared_size = ir.InlineAsm(ir.FunctionType(ir.IntType(32), []),
                                          "mov.u32 $0, %dynamic_smem_size;",
                                          '=r', side_effect=True)
        dynsmem_size = builder.zext(builder.call(get_dynshared_size, []),
                                    ir.IntType(64))
        # Only 1-D dynamic shared memory is supported so the following is a
        # sufficient construction of the shape
        kitemsize = context.get_constant(types.intp, itemsize)
        kshape = [builder.udiv(dynsmem_size, kitemsize)]
    else:
        kshape = [context.get_constant(types.intp, s) for s in shape]
    # Create array object
    ndim = len(shape)
    aryty = types.Array(dtype=dtype, ndim=ndim, layout='C')
    ary = context.make_array(aryty)(context, builder)
    context.populate_array(ary,
                           data=builder.bitcast(dataptr, ary.data.type),
                           shape=kshape,
                           strides=kstrides,
                           itemsize=context.get_constant(types.intp, itemsize),
                           meminfo=None)
    return ary._getvalue()
| {
"repo_name": "gmarkall/numba",
"path": "numba/cuda/cudaimpl.py",
"copies": "1",
"size": "33252",
"license": "bsd-2-clause",
"hash": -7004433995104263000,
"line_mean": 35.3409836066,
"line_max": 114,
"alpha_frac": 0.6378864429,
"autogenerated": false,
"ratio": 3.2587220697765584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43966085126765586,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import operator
import math
from llvmlite.llvmpy.core import Type, InlineAsm
import llvmlite.llvmpy.core as lc
import llvmlite.binding as ll
from numba.core.imputils import Registry
from numba.core.typing.npydecl import parse_dtype
from numba.core import types, cgutils
from .cudadrv import nvvm
from numba import cuda
from numba.cuda import nvvmutils, stubs
from numba.cuda.types import dim3
registry = Registry()
lower = registry.lower
lower_attr = registry.lower_getattr
def initialize_dim3(builder, prefix):
    """Build a dim3 struct (x, y, z) from the PTX special registers named
    ``<prefix>.x/y/z`` (e.g. ``tid`` for threadIdx, ``ntid`` for blockDim)."""
    x = nvvmutils.call_sreg(builder, "%s.x" % prefix)
    y = nvvmutils.call_sreg(builder, "%s.y" % prefix)
    z = nvvmutils.call_sreg(builder, "%s.z" % prefix)
    return cgutils.pack_struct(builder, (x, y, z))
# Attribute lowerings for the ``cuda`` module: each one reads the
# corresponding PTX special register(s).
@lower_attr(types.Module(cuda), 'threadIdx')
def cuda_threadIdx(context, builder, sig, args):
    return initialize_dim3(builder, 'tid')
@lower_attr(types.Module(cuda), 'blockDim')
def cuda_blockDim(context, builder, sig, args):
    return initialize_dim3(builder, 'ntid')
@lower_attr(types.Module(cuda), 'blockIdx')
def cuda_blockIdx(context, builder, sig, args):
    return initialize_dim3(builder, 'ctaid')
@lower_attr(types.Module(cuda), 'gridDim')
def cuda_gridDim(context, builder, sig, args):
    return initialize_dim3(builder, 'nctaid')
@lower_attr(types.Module(cuda), 'laneid')
def cuda_laneid(context, builder, sig, args):
    return nvvmutils.call_sreg(builder, 'laneid')
@lower_attr(types.Module(cuda), 'warpsize')
def cuda_warpsize(context, builder, sig, args):
    return nvvmutils.call_sreg(builder, 'warpsize')
# Component accessors on a dim3 value: extract the struct field by index.
@lower_attr(dim3, 'x')
def dim3_x(context, builder, sig, args):
    return builder.extract_value(args, 0)
@lower_attr(dim3, 'y')
def dim3_y(context, builder, sig, args):
    return builder.extract_value(args, 1)
@lower_attr(dim3, 'z')
def dim3_z(context, builder, sig, args):
    return builder.extract_value(args, 2)
@lower(cuda.grid, types.int32)
def cuda_grid(context, builder, sig, args):
    """Lower ``cuda.grid(ndim)`` to the global thread index.

    A 1-D request returns a scalar id; a multi-dimensional request
    returns one id per dimension packed into a uniform tuple.
    """
    ret_ty = sig.return_type
    if ret_ty == types.int32:
        # Scalar case: cuda.grid(1).
        return nvvmutils.get_global_id(builder, dim=1)
    if isinstance(ret_ty, types.UniTuple):
        # Tuple case: cuda.grid(2) / cuda.grid(3).
        global_ids = nvvmutils.get_global_id(builder, dim=ret_ty.count)
        return cgutils.pack_array(builder, global_ids)
    raise ValueError('Unexpected return type %s from cuda.grid' % ret_ty)
def _nthreads_for_dim(builder, dim):
    """Total thread count along one grid dimension:
    ``ntid.<dim> * nctaid.<dim>`` (threads/block times blocks/grid)."""
    ntid = nvvmutils.call_sreg(builder, f"ntid.{dim}")
    nctaid = nvvmutils.call_sreg(builder, f"nctaid.{dim}")
    return builder.mul(ntid, nctaid)
@lower(cuda.gridsize, types.int32)
def cuda_gridsize(context, builder, sig, args):
    """Lower ``cuda.gridsize(ndim)`` to the total number of threads,
    as a scalar (1-D) or a uniform tuple of per-dimension counts."""
    restype = sig.return_type
    nx = _nthreads_for_dim(builder, 'x')
    if restype == types.int32:
        return nx
    elif isinstance(restype, types.UniTuple):
        ny = _nthreads_for_dim(builder, 'y')
        if restype.count == 2:
            return cgutils.pack_array(builder, (nx, ny))
        elif restype.count == 3:
            nz = _nthreads_for_dim(builder, 'z')
            return cgutils.pack_array(builder, (nx, ny, nz))
    # Fallthrough to here indicates unexpected return type or tuple length
    raise ValueError('Unexpected return type %s of cuda.gridsize' % restype)
# -----------------------------------------------------------------------------
@lower(cuda.const.array_like, types.Array)
def cuda_const_array_like(context, builder, sig, args):
    # This is a no-op because CUDATargetContext.make_constant_array already
    # created the constant array.
    return args[0]
# Monotonically increasing counter used to give every shared-memory symbol
# a distinct name (see _get_unique_smem_id).
_unique_smem_id = 0
def _get_unique_smem_id(name):
    """Due to bug with NVVM invalid internalizing of shared memory in the
    PTX output. We can't mark shared memory to be internal. We have to
    ensure unique name is generated for shared memory symbol.
    """
    global _unique_smem_id
    _unique_smem_id += 1
    return "{0}_{1}".format(name, _unique_smem_id)
# Lowerings for cuda.shared.array / cuda.local.array.  All four delegate to
# _generic_array; shared allocations get a unique symbol name and may be
# dynamically sized, local allocations are fixed-size alloca-backed.
@lower(cuda.shared.array, types.IntegerLiteral, types.Any)
def cuda_shared_array_integer(context, builder, sig, args):
    """Shared-memory array with a scalar (literal int) shape."""
    length = sig.args[0].literal_value
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=(length,), dtype=dtype,
                          symbol_name=_get_unique_smem_id('_cudapy_smem'),
                          addrspace=nvvm.ADDRSPACE_SHARED,
                          can_dynsized=True)
@lower(cuda.shared.array, types.Tuple, types.Any)
@lower(cuda.shared.array, types.UniTuple, types.Any)
def cuda_shared_array_tuple(context, builder, sig, args):
    """Shared-memory array with a tuple-of-literals shape."""
    shape = [ s.literal_value for s in sig.args[0] ]
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=shape, dtype=dtype,
                          symbol_name=_get_unique_smem_id('_cudapy_smem'),
                          addrspace=nvvm.ADDRSPACE_SHARED,
                          can_dynsized=True)
@lower(cuda.local.array, types.IntegerLiteral, types.Any)
def cuda_local_array_integer(context, builder, sig, args):
    """Local-memory array with a scalar (literal int) shape."""
    length = sig.args[0].literal_value
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=(length,), dtype=dtype,
                          symbol_name='_cudapy_lmem',
                          addrspace=nvvm.ADDRSPACE_LOCAL,
                          can_dynsized=False)
@lower(cuda.local.array, types.Tuple, types.Any)
@lower(cuda.local.array, types.UniTuple, types.Any)
def ptx_lmem_alloc_array(context, builder, sig, args):
    """Local-memory array with a tuple-of-literals shape."""
    shape = [ s.literal_value for s in sig.args[0] ]
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=shape, dtype=dtype,
                          symbol_name='_cudapy_lmem',
                          addrspace=nvvm.ADDRSPACE_LOCAL,
                          can_dynsized=False)
# Barrier and memory-fence lowerings: each emits a call to the matching
# NVVM intrinsic.  The void-returning ones return a dummy value because
# the lowering protocol requires some value.
@lower(stubs.syncthreads)
def ptx_syncthreads(context, builder, sig, args):
    """Block-wide barrier (``__syncthreads``)."""
    assert not args
    fname = 'llvm.nvvm.barrier0'
    lmod = builder.module
    fnty = Type.function(Type.void(), ())
    sync = lmod.get_or_insert_function(fnty, name=fname)
    builder.call(sync, ())
    return context.get_dummy_value()
@lower(stubs.syncthreads_count, types.i4)
def ptx_syncthreads_count(context, builder, sig, args):
    """Barrier returning the number of threads with a non-zero predicate."""
    fname = 'llvm.nvvm.barrier0.popc'
    lmod = builder.module
    fnty = Type.function(Type.int(32), (Type.int(32),))
    sync = lmod.get_or_insert_function(fnty, name=fname)
    return builder.call(sync, args)
@lower(stubs.syncthreads_and, types.i4)
def ptx_syncthreads_and(context, builder, sig, args):
    """Barrier returning non-zero iff all threads passed a non-zero predicate."""
    fname = 'llvm.nvvm.barrier0.and'
    lmod = builder.module
    fnty = Type.function(Type.int(32), (Type.int(32),))
    sync = lmod.get_or_insert_function(fnty, name=fname)
    return builder.call(sync, args)
@lower(stubs.syncthreads_or, types.i4)
def ptx_syncthreads_or(context, builder, sig, args):
    """Barrier returning non-zero iff any thread passed a non-zero predicate."""
    fname = 'llvm.nvvm.barrier0.or'
    lmod = builder.module
    fnty = Type.function(Type.int(32), (Type.int(32),))
    sync = lmod.get_or_insert_function(fnty, name=fname)
    return builder.call(sync, args)
@lower(stubs.threadfence_block)
def ptx_threadfence_block(context, builder, sig, args):
    """Memory fence at thread-block (CTA) scope."""
    assert not args
    fname = 'llvm.nvvm.membar.cta'
    lmod = builder.module
    fnty = Type.function(Type.void(), ())
    sync = lmod.get_or_insert_function(fnty, name=fname)
    builder.call(sync, ())
    return context.get_dummy_value()
@lower(stubs.threadfence_system)
def ptx_threadfence_system(context, builder, sig, args):
    """Memory fence at system scope."""
    assert not args
    fname = 'llvm.nvvm.membar.sys'
    lmod = builder.module
    fnty = Type.function(Type.void(), ())
    sync = lmod.get_or_insert_function(fnty, name=fname)
    builder.call(sync, ())
    return context.get_dummy_value()
@lower(stubs.threadfence)
def ptx_threadfence_device(context, builder, sig, args):
    """Memory fence at device (GPU-wide) scope."""
    assert not args
    fname = 'llvm.nvvm.membar.gl'
    lmod = builder.module
    fnty = Type.function(Type.void(), ())
    sync = lmod.get_or_insert_function(fnty, name=fname)
    builder.call(sync, ())
    return context.get_dummy_value()
@lower(stubs.syncwarp, types.i4)
def ptx_warp_sync(context, builder, sig, args):
    """Warp-level barrier over the threads named by the mask argument."""
    fname = 'llvm.nvvm.bar.warp.sync'
    lmod = builder.module
    fnty = Type.function(Type.void(), (Type.int(32),))
    sync = lmod.get_or_insert_function(fnty, name=fname)
    builder.call(sync, args)
    return context.get_dummy_value()
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.i4, types.i4,
       types.i4)
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.i8, types.i4,
       types.i4)
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.f4, types.i4,
       types.i4)
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.f8, types.i4,
       types.i4)
def ptx_shfl_sync_i32(context, builder, sig, args):
    """
    The NVVM intrinsic for shfl only supports i32, but the cuda intrinsic
    function supports both 32 and 64 bit ints and floats, so for feature parity,
    i64, f32, and f64 are implemented. Floats by way of bitcasting the float to
    an int, then shuffling, then bitcasting back. And 64-bit values by packing
    them into two 32-bit values, shuffling those, and then packing back
    together.
    """
    mask, mode, value, index, clamp = args
    value_type = sig.args[2]
    if value_type in types.real_domain:
        # Reinterpret the float's bits as an integer of the same width.
        value = builder.bitcast(value, Type.int(value_type.bitwidth))
    fname = 'llvm.nvvm.shfl.sync.i32'
    lmod = builder.module
    fnty = Type.function(
        Type.struct((Type.int(32), Type.int(1))),
        (Type.int(32), Type.int(32), Type.int(32), Type.int(32), Type.int(32))
    )
    func = lmod.get_or_insert_function(fnty, name=fname)
    if value_type.bitwidth == 32:
        # Single shuffle; rebuild the (value, pred) struct with a float
        # value when the original type was f32.
        ret = builder.call(func, (mask, mode, value, index, clamp))
        if value_type == types.float32:
            rv = builder.extract_value(ret, 0)
            pred = builder.extract_value(ret, 1)
            fv = builder.bitcast(rv, Type.float())
            ret = cgutils.make_anonymous_struct(builder, (fv, pred))
    else:
        # 64-bit path: split into low/high 32-bit halves, shuffle each,
        # then reassemble.  The predicate from the first shuffle is used.
        value1 = builder.trunc(value, Type.int(32))
        value_lshr = builder.lshr(value, context.get_constant(types.i8, 32))
        value2 = builder.trunc(value_lshr, Type.int(32))
        ret1 = builder.call(func, (mask, mode, value1, index, clamp))
        ret2 = builder.call(func, (mask, mode, value2, index, clamp))
        rv1 = builder.extract_value(ret1, 0)
        rv2 = builder.extract_value(ret2, 0)
        pred = builder.extract_value(ret1, 1)
        rv1_64 = builder.zext(rv1, Type.int(64))
        rv2_64 = builder.zext(rv2, Type.int(64))
        rv_shl = builder.shl(rv2_64, context.get_constant(types.i8, 32))
        rv = builder.or_(rv_shl, rv1_64)
        if value_type == types.float64:
            rv = builder.bitcast(rv, Type.double())
        ret = cgutils.make_anonymous_struct(builder, (rv, pred))
    return ret
@lower(stubs.vote_sync_intrinsic, types.i4, types.i4, types.boolean)
def ptx_vote_sync(context, builder, sig, args):
    """Lower the warp vote intrinsic to ``llvm.nvvm.vote.sync``."""
    fname = 'llvm.nvvm.vote.sync'
    lmod = builder.module
    fnty = Type.function(Type.struct((Type.int(32), Type.int(1))),
                         (Type.int(32), Type.int(32), Type.int(1)))
    func = lmod.get_or_insert_function(fnty, name=fname)
    return builder.call(func, args)
@lower(stubs.match_any_sync, types.i4, types.i4)
@lower(stubs.match_any_sync, types.i4, types.i8)
@lower(stubs.match_any_sync, types.i4, types.f4)
@lower(stubs.match_any_sync, types.i4, types.f8)
def ptx_match_any_sync(context, builder, sig, args):
    """Lower ``match_any_sync``; floats are bitcast to same-width ints
    because the NVVM intrinsic is integer-only."""
    mask, value = args
    width = sig.args[1].bitwidth
    if sig.args[1] in types.real_domain:
        value = builder.bitcast(value, Type.int(width))
    fname = 'llvm.nvvm.match.any.sync.i{}'.format(width)
    lmod = builder.module
    fnty = Type.function(Type.int(32), (Type.int(32), Type.int(width)))
    func = lmod.get_or_insert_function(fnty, name=fname)
    return builder.call(func, (mask, value))
@lower(stubs.match_all_sync, types.i4, types.i4)
@lower(stubs.match_all_sync, types.i4, types.i8)
@lower(stubs.match_all_sync, types.i4, types.f4)
@lower(stubs.match_all_sync, types.i4, types.f8)
def ptx_match_all_sync(context, builder, sig, args):
    """Lower ``match_all_sync``; returns a (mask, pred) struct."""
    mask, value = args
    width = sig.args[1].bitwidth
    if sig.args[1] in types.real_domain:
        value = builder.bitcast(value, Type.int(width))
    fname = 'llvm.nvvm.match.all.sync.i{}'.format(width)
    lmod = builder.module
    fnty = Type.function(Type.struct((Type.int(32), Type.int(1))),
                         (Type.int(32), Type.int(width)))
    func = lmod.get_or_insert_function(fnty, name=fname)
    return builder.call(func, (mask, value))
@lower(stubs.popc, types.Any)
def ptx_popc(context, builder, sig, args):
    """Population count via the LLVM ctpop intrinsic."""
    return builder.ctpop(args[0])
@lower(stubs.fma, types.Any, types.Any, types.Any)
def ptx_fma(context, builder, sig, args):
    """Fused multiply-add via the LLVM fma intrinsic."""
    return builder.fma(*args)
@lower(stubs.brev, types.u4)
def ptx_brev_u4(context, builder, sig, args):
    # FIXME the llvm.bitreverse.i32 intrinsic isn't supported by nvcc
    # return builder.bitreverse(args[0])
    fn = builder.module.get_or_insert_function(
        lc.Type.function(lc.Type.int(32), (lc.Type.int(32),)),
        '__nv_brev')
    return builder.call(fn, args)
@lower(stubs.brev, types.u8)
def ptx_brev_u8(context, builder, sig, args):
    # FIXME the llvm.bitreverse.i64 intrinsic isn't supported by nvcc
    # return builder.bitreverse(args[0])
    fn = builder.module.get_or_insert_function(
        lc.Type.function(lc.Type.int(64), (lc.Type.int(64),)),
        '__nv_brevll')
    return builder.call(fn, args)
@lower(stubs.clz, types.Any)
def ptx_clz(context, builder, sig, args):
    """Count leading zeros; the 0 flag means a zero input is defined."""
    return builder.ctlz(
        args[0],
        context.get_constant(types.boolean, 0))
@lower(stubs.ffs, types.Any)
def ptx_ffs(context, builder, sig, args):
    # NOTE(review): this emits a raw trailing-zero count, while CUDA's
    # __ffs is 1-based (and 0 for input 0) -- confirm callers expect the
    # cttz convention here.
    return builder.cttz(
        args[0],
        context.get_constant(types.boolean, 0))
@lower(stubs.selp, types.Any, types.Any, types.Any)
def ptx_selp(context, builder, sig, args):
    """Predicated select: a if test else b."""
    test, a, b = args
    return builder.select(test, a, b)
# min/max/round on floats are lowered to libdevice calls (__nv_*).  Mixed
# f4/f8 argument combinations are cast up to double first.
@lower(max, types.f4, types.f4)
def ptx_max_f4(context, builder, sig, args):
    """max(float32, float32) -> __nv_fmaxf."""
    fn = builder.module.get_or_insert_function(
        lc.Type.function(
            lc.Type.float(),
            (lc.Type.float(), lc.Type.float())),
        '__nv_fmaxf')
    return builder.call(fn, args)
@lower(max, types.f8, types.f4)
@lower(max, types.f4, types.f8)
@lower(max, types.f8, types.f8)
def ptx_max_f8(context, builder, sig, args):
    """max with at least one float64 operand -> __nv_fmax on doubles."""
    fn = builder.module.get_or_insert_function(
        lc.Type.function(
            lc.Type.double(),
            (lc.Type.double(), lc.Type.double())),
        '__nv_fmax')
    return builder.call(fn, [
        context.cast(builder, args[0], sig.args[0], types.double),
        context.cast(builder, args[1], sig.args[1], types.double),
    ])
@lower(min, types.f4, types.f4)
def ptx_min_f4(context, builder, sig, args):
    """min(float32, float32) -> __nv_fminf."""
    fn = builder.module.get_or_insert_function(
        lc.Type.function(
            lc.Type.float(),
            (lc.Type.float(), lc.Type.float())),
        '__nv_fminf')
    return builder.call(fn, args)
@lower(min, types.f8, types.f4)
@lower(min, types.f4, types.f8)
@lower(min, types.f8, types.f8)
def ptx_min_f8(context, builder, sig, args):
    """min with at least one float64 operand -> __nv_fmin on doubles."""
    fn = builder.module.get_or_insert_function(
        lc.Type.function(
            lc.Type.double(),
            (lc.Type.double(), lc.Type.double())),
        '__nv_fmin')
    return builder.call(fn, [
        context.cast(builder, args[0], sig.args[0], types.double),
        context.cast(builder, args[1], sig.args[1], types.double),
    ])
@lower(round, types.f4)
@lower(round, types.f8)
def ptx_round(context, builder, sig, args):
    """round(float) -> __nv_llrint on a double (result is an i64).
    Presumably rounds half-to-even like CPython's round -- llrint uses the
    prevailing rounding mode; confirm if exact tie behavior matters."""
    fn = builder.module.get_or_insert_function(
        lc.Type.function(
            lc.Type.int(64),
            (lc.Type.double(),)),
        '__nv_llrint')
    return builder.call(fn, [
        context.cast(builder, args[0], sig.args[0], types.double),
    ])
# Integers are never inf/nan, so these lowerings fold to constants.
@lower(math.isinf, types.Integer)
@lower(math.isnan, types.Integer)
def math_isinf_isnan_int(context, builder, sig, args):
    """isinf/isnan on an integer is always False (constant i1 0)."""
    return lc.Constant.int(lc.Type.int(1), 0)
@lower(math.isfinite, types.Integer)
def math_isfinite_int(context, builder, sig, args):
    """isfinite on an integer is always True (constant i1 1)."""
    return lc.Constant.int(lc.Type.int(1), 1)
def gen_deg_rad(const):
    """Return a lowering that multiplies its float argument by *const*
    (used for math.radians / math.degrees)."""
    def impl(context, builder, sig, args):
        argty, = sig.args
        factor = context.get_constant(argty, const)
        return builder.fmul(factor, args[0])
    return impl
_deg2rad = math.pi / 180.
_rad2deg = 180. / math.pi
# Register radians/degrees for both float32 and float64.
lower(math.radians, types.f4)(gen_deg_rad(_deg2rad))
lower(math.radians, types.f8)(gen_deg_rad(_deg2rad))
lower(math.degrees, types.f4)(gen_deg_rad(_rad2deg))
lower(math.degrees, types.f8)(gen_deg_rad(_rad2deg))
def _normalize_indices(context, builder, indty, inds):
    """
    Convert integer indices into tuple of intp
    """
    if indty in types.integer_domain:
        # A single scalar index is wrapped as a 1-tuple.
        indty = types.UniTuple(dtype=indty, count=1)
        indices = [inds]
    else:
        indices = cgutils.unpack_tuple(builder, inds, count=len(indty))
    # Cast every component to intp so downstream pointer math is uniform.
    indices = [context.cast(builder, i, t, types.intp)
               for t, i in zip(indty, indices)]
    return indty, indices
def _atomic_dispatcher(dispatch_fn):
    """Decorator factory for atomic-op lowerings.

    Wraps *dispatch_fn* with the argument handling shared by all atomic
    operations: index normalization, dtype/ndim validation, and element
    pointer computation.  The wrapped function receives
    ``(context, builder, dtype, ptr, val)``.
    """
    def imp(context, builder, sig, args):
        # The common argument handling code
        aryty, indty, valty = sig.args
        ary, inds, val = args
        dtype = aryty.dtype
        indty, indices = _normalize_indices(context, builder, indty, inds)
        if dtype != valty:
            raise TypeError("expect %s but got %s" % (dtype, valty))
        if aryty.ndim != len(indty):
            raise TypeError("indexing %d-D array with %d-D index" %
                            (aryty.ndim, len(indty)))
        lary = context.make_array(aryty)(context, builder, ary)
        ptr = cgutils.get_item_pointer(context, builder, aryty, lary, indices)
        # dispatcher to implementation base on dtype
        return dispatch_fn(context, builder, dtype, ptr, val)
    return imp
@lower(stubs.atomic.add, types.Array, types.intp, types.Any)
@lower(stubs.atomic.add, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.add, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_add_tuple(context, builder, dtype, ptr, val):
    """Lower ``cuda.atomic.add`` for a single array element.

    float32/float64 go through the NVVM atomic-add declarations; every
    other dtype maps onto an LLVM ``atomicrmw add`` with monotonic
    ordering.
    """
    # Hoisted out of the branches: both float paths need the module.
    lmod = builder.module
    if dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_add_float32(lmod),
                            (ptr, val))
    elif dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_add_float64(lmod),
                            (ptr, val))
    else:
        # Keyword form for consistency with the other atomic lowerings
        # in this module (atomic.max/min/nanmax/nanmin).
        return builder.atomic_rmw('add', ptr, val, ordering='monotonic')
@lower(stubs.atomic.max, types.Array, types.intp, types.Any)
@lower(stubs.atomic.max, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.max, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_max(context, builder, dtype, ptr, val):
    """Lower ``cuda.atomic.max``: NVVM helpers for floats,
    atomicrmw max/umax for signed/unsigned integers."""
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_max_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_max_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('max', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        return builder.atomic_rmw('umax', ptr, val, ordering='monotonic')
    else:
        raise TypeError('Unimplemented atomic max with %s array' % dtype)
@lower(stubs.atomic.min, types.Array, types.intp, types.Any)
@lower(stubs.atomic.min, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.min, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_min(context, builder, dtype, ptr, val):
    """Lower ``cuda.atomic.min``: NVVM helpers for floats,
    atomicrmw min/umin for signed/unsigned integers."""
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_min_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_min_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('min', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        return builder.atomic_rmw('umin', ptr, val, ordering='monotonic')
    else:
        raise TypeError('Unimplemented atomic min with %s array' % dtype)
@lower(stubs.atomic.nanmax, types.Array, types.intp, types.Any)
@lower(stubs.atomic.nanmax, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.nanmax, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_nanmax(context, builder, dtype, ptr, val):
    """Lower ``cuda.atomic.nanmax``; NaN handling only matters for the
    float paths, integers share the plain max/umax atomics."""
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_nanmax_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_nanmax_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('max', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        return builder.atomic_rmw('umax', ptr, val, ordering='monotonic')
    else:
        raise TypeError('Unimplemented atomic max with %s array' % dtype)
@lower(stubs.atomic.nanmin, types.Array, types.intp, types.Any)
@lower(stubs.atomic.nanmin, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.nanmin, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_nanmin(context, builder, dtype, ptr, val):
    """Lower ``cuda.atomic.nanmin``; NaN handling only matters for the
    float paths, integers share the plain min/umin atomics."""
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_nanmin_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_nanmin_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('min', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        return builder.atomic_rmw('umin', ptr, val, ordering='monotonic')
    else:
        raise TypeError('Unimplemented atomic min with %s array' % dtype)
@lower(stubs.atomic.compare_and_swap, types.Array, types.Any, types.Any)
def ptx_atomic_cas_tuple(context, builder, sig, args):
    """Lower ``cuda.atomic.compare_and_swap(ary, old, val)`` on element 0.
    Only int32 arrays are supported in this version."""
    aryty, oldty, valty = sig.args
    ary, old, val = args
    dtype = aryty.dtype
    lary = context.make_array(aryty)(context, builder, ary)
    zero = context.get_constant(types.intp, 0)
    # CAS always targets the first element of the array.
    ptr = cgutils.get_item_pointer(context, builder, aryty, lary, (zero,))
    if aryty.dtype == types.int32:
        lmod = builder.module
        return builder.call(nvvmutils.declare_atomic_cas_int32(lmod),
                            (ptr, old, val))
    else:
        raise TypeError('Unimplemented atomic compare_and_swap '
                        'with %s array' % dtype)
# -----------------------------------------------------------------------------
def _get_target_data(context):
    """Return an llvmlite target-data object for the NVVM data layout
    matching the context's address size; used for ABI size queries."""
    return ll.create_target_data(nvvm.data_layout[context.address_size])
def _generic_array(context, builder, shape, dtype, symbol_name, addrspace,
                   can_dynsized=False):
    """Allocate a statically- or dynamically-sized array in the given
    address space and wrap it in a Numba C-contiguous array structure.

    ``shape`` with a non-positive element count selects dynamic shared
    memory when ``can_dynsized`` is true (1-D only); otherwise it is an
    error.  Local address space uses ``alloca``; every other address
    space gets a module-level global that is cast to the generic address
    space before use.  Returns the LLVM value of the populated array
    struct.
    """
    elemcount = reduce(operator.mul, shape, 1)
    # Check for valid shape for this type of allocation.
    # Only 1d arrays can be dynamic.
    dynamic_smem = elemcount <= 0 and can_dynsized and len(shape) == 1
    if elemcount <= 0 and not dynamic_smem:
        raise ValueError("array length <= 0")
    # Check that we support the requested dtype
    other_supported_type = isinstance(dtype, (types.Record, types.Boolean))
    if dtype not in types.number_domain and not other_supported_type:
        raise TypeError("unsupported type: %s" % dtype)
    lldtype = context.get_data_type(dtype)
    laryty = Type.array(lldtype, elemcount)
    if addrspace == nvvm.ADDRSPACE_LOCAL:
        # Special case local address space allocation to use alloca
        # NVVM is smart enough to only use local memory if no register is
        # available
        dataptr = cgutils.alloca_once(builder, laryty, name=symbol_name)
    else:
        lmod = builder.module
        # Create global variable in the requested address space
        gvmem = lmod.add_global_variable(laryty, symbol_name, addrspace)
        # Specify alignment to avoid misalignment bug
        align = context.get_abi_sizeof(lldtype)
        # Alignment is required to be a power of 2 for shared memory. If it is
        # not a power of 2 (e.g. for a Record array) then round up accordingly.
        gvmem.align = 1 << (align - 1 ).bit_length()
        if dynamic_smem:
            # Dynamic shared memory is declared extern; its size comes from
            # the kernel launch configuration at runtime.
            gvmem.linkage = lc.LINKAGE_EXTERNAL
        else:
            ## Comment out the following line to workaround a NVVM bug
            ## which generates a invalid symbol name when the linkage
            ## is internal and in some situation.
            ## See _get_unique_smem_id()
            # gvmem.linkage = lc.LINKAGE_INTERNAL
            gvmem.initializer = lc.Constant.undef(laryty)
        # Convert to generic address-space
        conv = nvvmutils.insert_addrspace_conv(lmod, Type.int(8), addrspace)
        addrspaceptr = gvmem.bitcast(Type.pointer(Type.int(8), addrspace))
        dataptr = builder.call(conv, [addrspaceptr])
    targetdata = _get_target_data(context)
    lldtype = context.get_data_type(dtype)
    itemsize = lldtype.get_abi_size(targetdata)
    # Compute strides (C-contiguous: innermost dimension is contiguous).
    laststride = itemsize
    rstrides = []
    for i, lastsize in enumerate(reversed(shape)):
        rstrides.append(laststride)
        laststride *= lastsize
    strides = [s for s in reversed(rstrides)]
    kstrides = [context.get_constant(types.intp, s) for s in strides]
    # Compute shape
    if dynamic_smem:
        # Compute the shape based on the dynamic shared memory configuration.
        # Unfortunately NVVM does not provide an intrinsic for the
        # %dynamic_smem_size register, so we must read it using inline
        # assembly.
        get_dynshared_size = InlineAsm.get(Type.function(Type.int(), []),
                                           "mov.u32 $0, %dynamic_smem_size;",
                                           '=r', side_effect=True)
        dynsmem_size = builder.zext(builder.call(get_dynshared_size, []),
                                    Type.int(width=64))
        # Only 1-D dynamic shared memory is supported so the following is a
        # sufficient construction of the shape
        kitemsize = context.get_constant(types.intp, itemsize)
        kshape = [builder.udiv(dynsmem_size, kitemsize)]
    else:
        kshape = [context.get_constant(types.intp, s) for s in shape]
    # Create array object
    ndim = len(shape)
    aryty = types.Array(dtype=dtype, ndim=ndim, layout='C')
    ary = context.make_array(aryty)(context, builder)
    context.populate_array(ary,
                           data=builder.bitcast(dataptr, ary.data.type),
                           shape=kshape,
                           strides=kstrides,
                           itemsize=context.get_constant(types.intp, itemsize),
                           meminfo=None)
    return ary._getvalue()
| {
"repo_name": "sklam/numba",
"path": "numba/cuda/cudaimpl.py",
"copies": "1",
"size": "27366",
"license": "bsd-2-clause",
"hash": 7769188903916740000,
"line_mean": 35.5367156208,
"line_max": 80,
"alpha_frac": 0.6429145655,
"autogenerated": false,
"ratio": 3.219529411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4362443977264706,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import operator
import pprint
import uuid
from celery import states
from django.db import models
from django.contrib import admin
from django.utils.html import format_html
from project import celery_app
from share.admin.util import FuzzyPaginator
class TaskNameFilter(admin.SimpleListFilter):
    """Admin sidebar filter listing every registered celery task name."""

    title = 'Task'
    parameter_name = 'task_name'

    def lookups(self, request, model_admin):
        # Force task discovery so the choice list is complete even if no
        # task has run in this process yet.
        packages = ['share', 'share.janitor', 'bots.elasticsearch']
        celery_app.autodiscover_tasks(packages, force=True)
        return sorted((name, name) for name in celery_app.tasks.keys())

    def queryset(self, request, queryset):
        selected = self.value()
        return queryset.filter(task_name=selected) if selected else queryset
class StatusFilter(admin.SimpleListFilter):
    """Admin sidebar filter on the celery task state (SUCCESS, FAILURE, ...)."""

    title = 'Status'
    parameter_name = 'status'

    def lookups(self, request, model_admin):
        return sorted((state, state.title()) for state in states.ALL_STATES)

    def queryset(self, request, queryset):
        selected = self.value()
        if not selected:
            return queryset
        return queryset.filter(status=selected.upper())
class CeleryTaskResultAdmin(admin.ModelAdmin):
    """Read-only admin for celery task results with a retry action.

    Search matches an exact task UUID first, then falls back to a
    case-sensitive prefix search on the task name.
    """
    list_display = ('task_id', 'task_name', 'status_', 'source_', 'date_modified', 'date_created', 'share_version')
    exclude = ('correlation_id', )
    actions = ('retry', )
    ordering = ('-date_modified', )
    list_filter = (TaskNameFilter, StatusFilter, )
    readonly_fields = (
        'task_id',
        'task_name',
        'task_args', 'task_kwargs',
        'result', 'traceback',
        'meta_',
        'date_created', 'date_modified',
        'share_version'
    )
    # Skip the expensive full-table COUNT in the changelist header.
    show_full_result_count = False
    paginator = FuzzyPaginator
    search_fields = ('task_name', )
    # Colors used by status_() for the list display; unknown states render
    # in black.
    STATUS_COLORS = {
        states.SUCCESS: 'green',
        states.FAILURE: 'red',
        states.STARTED: 'cyan',
        states.RETRY: 'orange',
    }
    def get_search_results(self, request, queryset, search_term):
        """If the term parses as a UUID, match the task id exactly;
        otherwise do a case-sensitive startswith search per whitespace-
        separated word of the term."""
        try:
            return queryset.filter(task_id=uuid.UUID(search_term)), False
        except ValueError:
            pass
        # Overridden because there is no way to opt out of a case-insensitive
        # search in the stock ModelAdmin implementation.
        search_fields = self.get_search_fields(request)
        use_distinct = bool(search_term)
        if search_fields and search_term:
            orm_lookups = ['{}__startswith'.format(search_field) for search_field in search_fields]
            for bit in search_term.split():
                or_queries = [models.Q(**{orm_lookup: bit}) for orm_lookup in orm_lookups]
                queryset = queryset.filter(reduce(operator.or_, or_queries))
        return queryset, use_distinct
    def task_args(self, obj):
        """Positional args recorded in the task meta."""
        return obj.meta['args']
    def task_kwargs(self, obj):
        """Pretty-printed keyword args recorded in the task meta."""
        return pprint.pformat(obj.meta['kwargs'])
    def status_(self, obj):
        """Render the task state as colored bold HTML."""
        return format_html(
            '<span style="font-weight: bold; color: {}">{}</span>',
            self.STATUS_COLORS.get(obj.status, 'black'),
            obj.status.title()
        )
    status_.short_description = 'Status'
    def meta_(self, obj):
        """Pretty-printed raw task meta dict."""
        return pprint.pformat(obj.meta)
    meta_.short_description = 'Meta'
    def source_(self, obj):
        """Source config name from the meta (falls back to legacy 'source')."""
        return obj.meta.get('source_config') or obj.meta.get('source')
    source_.short_description = 'Source'
    def retry(self, request, queryset):
        """Re-enqueue each selected task with its recorded args/kwargs,
        reusing the original task id."""
        for task in queryset:
            celery_app.tasks[task.task_name].apply_async(
                task.meta.get('args', ()),
                task.meta.get('kwargs', {}),
                task_id=str(task.task_id)
            )
    retry.short_description = 'Retry Tasks'
| {
"repo_name": "aaxelb/SHARE",
"path": "share/admin/celery.py",
"copies": "1",
"size": "3689",
"license": "apache-2.0",
"hash": 4370947550267204000,
"line_mean": 29.7416666667,
"line_max": 115,
"alpha_frac": 0.6072106262,
"autogenerated": false,
"ratio": 3.949678800856531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5056889427056531,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import operator
import pytz
from django.contrib import messages
from django.utils import timezone
def get_report_date(request):
    """Return the report date from ``request.GET['report_date']``
    (``MM/DD/YYYY``) localized to the user's timezone.

    Falls back to "now" in the user's timezone when the parameter is
    missing; an unparseable value flashes an error message and also
    falls back to "now".
    """
    tz = pytz.timezone(request.user.profile.timezone)
    raw = request.GET.get('report_date')
    if not raw:
        return tz.normalize(timezone.now())
    try:
        return tz.localize(timezone.datetime.strptime(raw, '%m/%d/%Y'))
    except ValueError:
        messages.error(request, 'Invalid date.')
        return tz.normalize(timezone.now())
def get_normalized_date(date_string, user):
    """Interpret date_string (%m/%d/%Y) in the user's timezone; when empty,
    return the current time converted to the user's timezone instead."""
    user_tz = pytz.timezone(user.profile.timezone)
    if not date_string:
        return user_tz.normalize(timezone.now())
    parsed = timezone.datetime.strptime(date_string, '%m/%d/%Y')
    return user_tz.localize(parsed)
def date_is_today(date):
    """True when the aware datetime `date` falls on today's date, evaluated
    in `date`'s own timezone."""
    now_local = timezone.now().astimezone(tz=date.tzinfo)
    return (date.year, date.month, date.day) == \
        (now_local.year, now_local.month, now_local.day)
def get_dates_for_week_of(date):
    """
    Get a list of seven datetime objects representing the full week (Monday to
    Sunday) of the given date.
    """
    # isocalendar()'s day-of-week runs 1 (Monday) .. 7 (Sunday).
    _, _, dow = date.isocalendar()
    # Offsets from `date` back to Monday (1 - dow) through forward to Sunday.
    # Uses datetime.timedelta directly rather than `timezone.timedelta`, which
    # was only an incidental (non-public) re-export from django.utils.timezone.
    return [date + timedelta(days=d + 1 - dow) for d in range(7)]
def get_flat_list_of_categories_and_timers(base_cat):
    """Depth-first flatten of a category tree: each category is followed by
    its timers, then (recursively) by each of its sub-categories."""
    def _flatten(category):
        # One-line purpose: collect this category, its timers, and its subtree.
        items = [category]
        items.extend(category.timer_set.all())
        for subcategory in category.category_set.all():
            items.extend(_flatten(subcategory))
        return items
    return _flatten(base_cat)
def get_totals_for_dates(base_cat, dates, full=False):
    """
    Get a flat list of 3-tuples for every reportable category and timer of the
    given base category, summarizing the time logged on the given dates. If
    full is True, do this for EVERY category and timer.
    Return format:
    [
        (<category|timer>, total, [time on dates[0], time2 on dates[1], ...]),
        (<category|timer>, total, [time on dates[0], time2 on dates[1], ...]),
    ]
    """
    all_totals = []
    for item in get_flat_list_of_categories_and_timers(base_cat):
        if not full and not item.show_in_selective_reports:
            continue
        totals = [item.get_total_time_on_date(date) for date in dates]
        if not totals:
            # reduce() with no initializer raises TypeError on an empty
            # sequence; with no dates there is nothing to report anyway.
            continue
        total = reduce(operator.add, totals)
        # Only include items with any time actually logged.
        if total.total_seconds() > 0:
            all_totals.append((item, total, totals))
    return all_totals
| {
"repo_name": "peap/djarzeit",
"path": "reports/utils.py",
"copies": "1",
"size": "2683",
"license": "mit",
"hash": 7110657426809058000,
"line_mean": 31.7195121951,
"line_max": 78,
"alpha_frac": 0.6276556094,
"autogenerated": false,
"ratio": 3.5630810092961487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46907366186961486,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import operator
import re
from dal import autocomplete
from django.contrib.auth.models import Group
from django.conf.urls import url
from django.db.models import Q, Count
from workshops import models
from workshops.util import OnlyForAdminsNoRedirectMixin
class EventLookupView(OnlyForAdminsNoRedirectMixin,
                      autocomplete.Select2QuerySetView):
    """Autocomplete lookup over all events, matched by slug substring."""
    def get_queryset(self):
        queryset = models.Event.objects.all()
        if not self.q:
            return queryset
        return queryset.filter(slug__icontains=self.q)
class TTTEventLookupView(OnlyForAdminsNoRedirectMixin,
                         autocomplete.Select2QuerySetView):
    """Autocomplete lookup restricted to events tagged 'TTT', by slug substring."""
    def get_queryset(self):
        queryset = models.Event.objects.filter(tags__name='TTT')
        if not self.q:
            return queryset
        return queryset.filter(slug__icontains=self.q)
class OrganizationLookupView(OnlyForAdminsNoRedirectMixin,
                             autocomplete.Select2QuerySetView):
    """Autocomplete lookup over organizations by domain or full name."""
    def get_queryset(self):
        queryset = models.Organization.objects.all()
        if not self.q:
            return queryset
        match = Q(domain__icontains=self.q) | Q(fullname__icontains=self.q)
        return queryset.filter(match)
class PersonLookupView(OnlyForAdminsNoRedirectMixin,
                       autocomplete.Select2QuerySetView):
    """Autocomplete lookup over people.

    Matches personal name, family name, email and username; a two-token
    query is additionally tried as a "first last" / "last first" pair.
    """
    def get_queryset(self):
        results = models.Person.objects.all()
        if self.q:
            filters = [
                Q(personal__icontains=self.q),
                Q(family__icontains=self.q),
                Q(email__icontains=self.q),
                Q(username__icontains=self.q)
            ]
            # split query into first and last names
            # (raw string: '\s' in a plain literal is an invalid escape
            # sequence and raises a warning on modern Pythons)
            tokens = re.split(r'\s+', self.q)
            if len(tokens) == 2:
                name1, name2 = tokens
                complex_q = (
                    Q(personal__icontains=name1) & Q(family__icontains=name2)
                ) | (
                    Q(personal__icontains=name2) & Q(family__icontains=name1)
                )
                filters.append(complex_q)
            # this is brilliant: it applies OR to all search filters
            results = results.filter(reduce(operator.or_, filters))
        return results
class AdminLookupView(OnlyForAdminsNoRedirectMixin,
                      autocomplete.Select2QuerySetView):
    """The same as PersonLookup, but allows only to select administrators.
    Administrator is anyone with superuser power or in "administrators" group.
    """
    def get_queryset(self):
        admin_group = Group.objects.get(name='administrators')
        queryset = models.Person.objects.filter(
            Q(is_superuser=True) | Q(groups__in=[admin_group])
        )
        if not self.q:
            return queryset
        name_or_login = (
            Q(personal__icontains=self.q)
            | Q(family__icontains=self.q)
            | Q(email__icontains=self.q)
            | Q(username__icontains=self.q)
        )
        return queryset.filter(name_or_login)
class AirportLookupView(OnlyForAdminsNoRedirectMixin,
                        autocomplete.Select2QuerySetView):
    """Autocomplete lookup over airports by IATA code or full name."""
    def get_queryset(self):
        queryset = models.Airport.objects.all()
        if not self.q:
            return queryset
        return queryset.filter(
            Q(iata__icontains=self.q) | Q(fullname__icontains=self.q)
        )
class LanguageLookupView(OnlyForAdminsNoRedirectMixin,
                         autocomplete.Select2QuerySetView):
    """Autocomplete lookup over languages by name or IANA subtag."""

    def dispatch(self, request, *args, **kwargs):
        # The mere presence of a 'subtag' GET parameter (any value) switches
        # the view into exact-subtag mode; see get_queryset().
        self.subtag = 'subtag' in request.GET.keys()
        return super().dispatch(request, *args, **kwargs)

    def get_queryset(self):
        results = models.Language.objects.all()
        if self.q:
            # Broad match on either the human-readable name or the subtag.
            results = results.filter(
                Q(name__icontains=self.q) | Q(subtag__icontains=self.q)
            )
        if self.subtag:
            # NOTE(review): this branch runs even when self.q is empty, in
            # which case it filters subtag__iexact against the empty query —
            # presumably callers always send a query along with 'subtag';
            # confirm against the widget that uses this endpoint.
            return results.filter(subtag__iexact=self.q)
        # Order the most commonly-spoken languages first.
        results = results.annotate(person_count=Count('person')) \
            .order_by('-person_count')
        return results
# trainees lookup?
# URL routes for the autocomplete lookup views above; each route name is what
# the corresponding autocomplete widget references.
urlpatterns = [
    url(r'^events/$', EventLookupView.as_view(), name='event-lookup'),
    url(r'^ttt_events/$', TTTEventLookupView.as_view(), name='ttt-event-lookup'),
    url(r'^organizations/$', OrganizationLookupView.as_view(), name='organization-lookup'),
    url(r'^persons/$', PersonLookupView.as_view(), name='person-lookup'),
    url(r'^admins/$', AdminLookupView.as_view(), name='admin-lookup'),
    url(r'^airports/$', AirportLookupView.as_view(), name='airport-lookup'),
    url(r'^languages/$', LanguageLookupView.as_view(), name='language-lookup'),
]
| {
"repo_name": "vahtras/amy",
"path": "workshops/lookups.py",
"copies": "1",
"size": "4742",
"license": "mit",
"hash": 3227650137332039700,
"line_mean": 30.6133333333,
"line_max": 91,
"alpha_frac": 0.6012231126,
"autogenerated": false,
"ratio": 4.127067014795474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5228290127395474,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import operator
import textwrap
from plugins import BasePlugin
__author__ = 'peter'
class XORPlugin(BasePlugin):
    """Plugin that combines all hexstring inputs with bitwise XOR, in order."""
    short_description = 'XOR all hexstring input(s) with eachother'
    header = 'XOR of the hex inputs'
    default = False
    description = textwrap.dedent('''\
    This plugin XOR's all hexstrings from the input with eachother, in order.''')
    key = '--xor'

    def sentinel(self):
        """Run only when there are two or more inputs, all valid hex strings."""
        strings = self.args['STRING']
        try:
            for candidate in strings:
                int(candidate, 16)
        except ValueError:
            return False
        # Only parse if there are more than one string
        return len(strings) > 1

    def handle(self):
        """Render each input zero-padded to a common width, then their XOR."""
        values = [int(s, 16) for s in self.args['STRING']]
        # Widest hex representation, without the '0x' prefix.
        width = max(len(hex(v)) for v in values) - 2
        lines = ['0x{:0{ml}x}'.format(v, ml=width) for v in values]
        lines.append('=' * (width + 2) + ' ^')
        lines.append('0x{:0{ml}x}'.format(reduce(operator.xor, values), ml=width))
        return '\n'.join(lines)
| {
"repo_name": "Sakartu/stringinfo",
"path": "plugins/xor_plugin.py",
"copies": "1",
"size": "1107",
"license": "mit",
"hash": 1606363866072719000,
"line_mean": 28.1315789474,
"line_max": 81,
"alpha_frac": 0.5889792231,
"autogenerated": false,
"ratio": 3.6058631921824102,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46948424152824103,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import operator
# Basic Roman numeral symbols keyed by their Arabic value.
arabic_to_roman = {
    1: "I",
    5: "V",
    10: "X",
    50: "L",
    100: "C",
    500: "D",
    1000: "M"}
# Inverse mapping: Roman symbol -> Arabic value.
roman_to_arabic = {roman: arabic for (arabic, roman) in arabic_to_roman.items()}
# Largest value convert_arabic_to_roman will accept.
MAX = 4999
def unfold(f, initial_state):
    """Generate items by repeatedly applying f.

    f maps a state to an (item, next_state) pair; items are yielded until
    next_state is falsy. f is always called at least once.
    """
    state = initial_state
    while True:
        item, state = f(state)
        yield item
        if not state:
            return
def summands_of(x):
    """Yield (digit, power) pairs for each nonzero base-10 digit of x,
    least-significant digit first.

    Bug fix: the original filtered with `if x != 0`, but that `x` was the
    whole (digit, power) tuple — a tuple never compares equal to 0, so zero
    digits were never actually filtered out. Zero digits are now skipped
    (and summands_of(0) yields nothing).
    """
    power = 0
    while True:
        digit = x % 10
        if digit:
            yield digit, power
        x //= 10
        if not x:
            return
        power += 1
def summand_to_roman(digit, power):
    """Convert one decimal digit at the given power of ten into Roman numerals."""
    multiplier = 10 ** power
    lower_roman = arabic_to_roman[multiplier]
    # The "5" and "10" symbols for this power, when the alphabet has them.
    medium_roman = arabic_to_roman.get(multiplier * 5)
    high_roman = arabic_to_roman.get(multiplier * 10)
    if digit <= 3 or not medium_roman:
        # Plain repetition: I, II, III (and e.g. MMMM when no larger symbol exists).
        return lower_roman * digit
    if digit <= 8:
        # Around the 5-symbol: IV, V, VI, VII, VIII.
        return max(5 - digit, 0) * lower_roman + medium_roman + max(digit - 5, 0) * lower_roman
    # digit == 9: subtractive against the next power: IX, XC, CM.
    return lower_roman + high_roman
def convert_arabic_to_roman(number):
    """Convert an Arabic integer (0..MAX) to its Roman numeral string.

    Raises Exception for values above MAX. Returns '' for 0, which has no
    Roman representation.
    """
    if number > MAX:
        # Fixed wording: MAX itself is accepted (guard is `>`), so "up to".
        raise Exception("Only values up to {} are supported".format(MAX))
    summands = sorted(summands_of(number), key=lambda x: x[1], reverse=True)
    summands_in_roman = (summand_to_roman(digit, power) for (digit, power) in summands)
    # The '' initializer keeps reduce() from raising TypeError when there
    # are no summands at all (e.g. number == 0).
    return reduce(operator.concat, summands_in_roman, '')
def convert_roman_to_arabic(number):
    """Convert a Roman numeral string (case-insensitive) to an Arabic integer.

    A symbol counts negatively when a strictly larger symbol follows it
    (subtractive notation, e.g. the I in IV).
    """
    def signed_values(numeral):
        last = len(numeral) - 1
        for index, symbol in enumerate(numeral):
            value = roman_to_arabic[symbol]
            if index < last and roman_to_arabic[numeral[index + 1]] > value:
                yield -value
            else:
                yield value
    return reduce(operator.add, signed_values(number.upper()))
| {
"repo_name": "takemyoxygen/playground",
"path": "py/numerals/numerals.py",
"copies": "1",
"size": "1964",
"license": "mit",
"hash": -8238291038610440000,
"line_mean": 28.3134328358,
"line_max": 97,
"alpha_frac": 0.6206720978,
"autogenerated": false,
"ratio": 3.323181049069374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44438531468693737,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import operator
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Q
from rest_framework.filters import BaseFilterBackend
class CMSPagesFilterBackend(BaseFilterBackend):
    """
    Use this backend to only show products assigned to the current page.
    """
    cms_pages_fields = ['cms_pages']

    def _get_filtered_queryset(self, current_page, queryset, cms_pages_fields):
        # OR together one lookup per configured page-relation field.
        page_filters = (Q((field, current_page)) for field in cms_pages_fields)
        combined = reduce(operator.or_, page_filters)
        return queryset.filter(combined).distinct()

    def filter_queryset(self, request, queryset, view):
        cms_pages_fields = getattr(view, 'cms_pages_fields', self.cms_pages_fields)
        if not isinstance(cms_pages_fields, (list, tuple)):
            msg = "`cms_pages_fields` must be a list or tuple of fields referring to djangoCMS pages."
            raise ImproperlyConfigured(msg)
        current_page = request.current_page
        # When viewing a draft, filter against its published counterpart.
        if current_page.publisher_is_draft:
            current_page = current_page.publisher_public
        return self._get_filtered_queryset(current_page, queryset, cms_pages_fields)
class RecursiveCMSPagesFilterBackend(CMSPagesFilterBackend):
    """
    Use this backend to show products assigned to the current page or any of its descendants.
    """
    def _get_filtered_queryset(self, current_page, queryset, cms_pages_fields):
        pages = current_page.get_descendants(include_self=True)
        # Bug fix: honor the `cms_pages_fields` argument (resolved from the
        # view and validated by the parent's filter_queryset) instead of
        # always reading the class-level self.cms_pages_fields, which
        # silently ignored any per-view override.
        filter_by_cms_page = (Q((field + "__in", pages)) for field in cms_pages_fields)
        return queryset.filter(reduce(operator.or_, filter_by_cms_page)).distinct()
| {
"repo_name": "awesto/django-shop",
"path": "shop/rest/filters.py",
"copies": "1",
"size": "1687",
"license": "bsd-3-clause",
"hash": -3719191531598573000,
"line_mean": 42.2564102564,
"line_max": 102,
"alpha_frac": 0.7065797273,
"autogenerated": false,
"ratio": 3.9881796690307327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5194759396330733,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.