text stringlengths 0 1.05M | meta dict |
|---|---|
from functools import reduce
# Sort a list of integers from highest to lowest.
# The original hand-rolled insertion sort was O(n^2) despite the O(n log n)
# claim; the built-in Timsort actually delivers O(n log n) time, O(n) space.
def sort_high_to_low(integers):
    """Return a new list containing `integers` sorted in descending order.

    Preserves the original implementation's quirk of returning None
    (not an empty list) when given an empty input.
    """
    if not integers:
        return None  # match original behavior: empty input -> None
    return sorted(integers, reverse=True)
# Given a list_of_ints, find the highest_product you can get from three of the integers.
# The input list_of_ints will always have at least three integers.
def get_highest_product(integers):
    """Return the maximum product of any three integers in the list.

    Single O(n) pass tracking the best/worst products of one and two
    elements so that pairs of large negatives are handled correctly.

    Raises IndexError if fewer than three integers are supplied.
    """
    if len(integers) < 3:
        raise IndexError("Input integers needs to be at least three integers")
    first, second, third = integers[0], integers[1], integers[2]
    highest = max(first, second)
    lowest = min(first, second)
    highest_product_of_2 = first * second
    lowest_product_of_2 = first * second
    highest_product_of_3 = first * second * third
    for current in integers[2:]:
        # Order matters: update the 3-product from the old 2-products,
        # then the 2-products from the old extremes, then the extremes.
        highest_product_of_3 = max(highest_product_of_3,
                                   current * highest_product_of_2,
                                   current * lowest_product_of_2)
        highest_product_of_2 = max(highest_product_of_2,
                                   current * highest,
                                   current * lowest)
        lowest_product_of_2 = min(lowest_product_of_2,
                                  current * highest,
                                  current * lowest)
        highest = max(highest, current)
        lowest = min(lowest, current)
    return highest_product_of_3
| {
"repo_name": "JDFagan/InterviewInPython",
"path": "interviewcake/highest_product.py",
"copies": "1",
"size": "1719",
"license": "mit",
"hash": -4054943587126883000,
"line_mean": 34.8125,
"line_max": 89,
"alpha_frac": 0.5445026178,
"autogenerated": false,
"ratio": 3.988399071925754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5032901689725754,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
number = 11
data = [[ 8, 2, 22, 97, 38, 15, 0, 40, 0, 75, 4, 5, 7, 78, 52, 12, 50, 77, 91, 8],
[49, 49, 99, 40, 17, 81, 18, 57, 60, 87, 17, 40, 98, 43, 69, 48, 4, 56, 62, 0],
[81, 49, 31, 73, 55, 79, 14, 29, 93, 71, 40, 67, 53, 88, 30, 3, 49, 13, 36, 65],
[52, 70, 95, 23, 4, 60, 11, 42, 69, 24, 68, 56, 1, 32, 56, 71, 37, 2, 36, 91],
[22, 31, 16, 71, 51, 67, 63, 89, 41, 92, 36, 54, 22, 40, 40, 28, 66, 33, 13, 80],
[24, 47, 32, 60, 99, 3, 45, 2, 44, 75, 33, 53, 78, 36, 84, 20, 35, 17, 12, 50],
[32, 98, 81, 28, 64, 23, 67, 10, 26, 38, 40, 67, 59, 54, 70, 66, 18, 38, 64, 70],
[67, 26, 20, 68, 2, 62, 12, 20, 95, 63, 94, 39, 63, 8, 40, 91, 66, 49, 94, 21],
[24, 55, 58, 5, 66, 73, 99, 26, 97, 17, 78, 78, 96, 83, 14, 88, 34, 89, 63, 72],
[21, 36, 23, 9, 75, 0, 76, 44, 20, 45, 35, 14, 0, 61, 33, 97, 34, 31, 33, 95],
[78, 17, 53, 28, 22, 75, 31, 67, 15, 94, 3, 80, 4, 62, 16, 14, 9, 53, 56, 92],
[16, 39, 5, 42, 96, 35, 31, 47, 55, 58, 88, 24, 0, 17, 54, 24, 36, 29, 85, 57],
[86, 56, 0, 48, 35, 71, 89, 7, 5, 44, 44, 37, 44, 60, 21, 58, 51, 54, 17, 58],
[19, 80, 81, 68, 5, 94, 47, 69, 28, 73, 92, 13, 86, 52, 17, 77, 4, 89, 55, 40],
[ 4, 52, 8, 83, 97, 35, 99, 16, 7, 97, 57, 32, 16, 26, 26, 79, 33, 27, 98, 66],
[88, 36, 68, 87, 57, 62, 20, 72, 3, 46, 33, 67, 46, 55, 12, 32, 63, 93, 53, 69],
[ 4, 42, 16, 73, 38, 25, 39, 11, 24, 94, 72, 18, 8, 46, 29, 32, 40, 62, 76, 36],
[20, 69, 36, 41, 72, 30, 23, 88, 34, 62, 99, 69, 82, 67, 59, 85, 74, 4, 36, 16],
[20, 73, 35, 29, 78, 31, 90, 1, 74, 31, 49, 71, 48, 86, 81, 16, 23, 57, 5, 54],
[ 1, 70, 54, 71, 83, 51, 54, 69, 16, 92, 33, 48, 61, 43, 52, 1, 89, 19, 67, 48]]
def get_next_square():
    """Yield every 4x4 sub-square of the `data` grid, scanning row-major.

    Bug fixed: the original indexed ``data[x][y]`` with ``x`` drawn from the
    column range and ``y`` from the row range, so every yielded square was
    transposed. (The final problem-11 answer happened to be unaffected
    because the consumer checks rows, columns and both diagonals — a
    transpose-invariant set — but each square was not the sub-matrix at
    (row, col).)  The 17 bounds are now derived from the grid size.
    """
    offset = 4
    row_max = len(data) - offset + 1      # 17 for the 20x20 grid
    col_max = len(data[0]) - offset + 1
    for row in range(row_max):
        for col in range(col_max):
            yield [[data[r][c] for c in range(col, col + offset)]
                   for r in range(row, row + offset)]
def find_max_product(matrix):
    """Return (best_product, best_line, matrix) for a 4x4 matrix.

    Candidate lines are every row, every column, the main diagonal and
    the anti-diagonal, examined in that order; only a strictly greater
    product replaces the running best, so ties keep the earlier line.
    """
    # Build the candidates in the same order the checks originally ran.
    lines = list(matrix)                                   # rows
    lines.extend([row[i] for row in matrix] for i in range(4))   # columns
    lines.append([matrix[i][i] for i in range(4)])         # main diagonal
    lines.append([matrix[i][3 - i] for i in range(4)])     # anti-diagonal
    best_val = 0
    best_line = []
    for line in lines:
        product = reduce(lambda x, y: x * y, line)
        if best_val < product:
            best_val = product
            best_line = line
    return (best_val, best_line, matrix)
if __name__ == '__main__':
    # Scan every 4x4 square and keep the best (value, line, square) triple.
    result = (0, [], [])
    for square in get_next_square():
        candidate = find_max_product(square)
        if result[0] < candidate[0]:
            result = candidate
    print("Result: %s" % str(result))
| {
"repo_name": "rhinox/ProjectEuler",
"path": "src/main/python/problem_11.py",
"copies": "1",
"size": "3173",
"license": "mit",
"hash": 4039884350456360400,
"line_mean": 46.3582089552,
"line_max": 97,
"alpha_frac": 0.501103057,
"autogenerated": false,
"ratio": 2.2761836441893832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.32772867011893836,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
try:
from itertools import zip_longest
from unittest.mock import patch as _patch, Mock as _Mock
from unittest.mock import call as _call, ANY as _ANY
except ImportError:
from mock import patch as _patch, Mock as _Mock
from mock import call as _call, ANY as _ANY
from itertools import izip_longest as zip_longest
from hiku.expr.refs import Ref, NamedRef
# Re-export the mock helpers under stable names so importing test modules
# don't have to care whether they came from unittest.mock or the `mock`
# backport selected by the try/except above.
patch = _patch
Mock = _Mock
call = _call
ANY = _ANY
# Sentinel marking "no element here" when zip_longest pads the shorter side
# in result_match; distinct from every real value, including None.
_missing = type('<missing>', (object,), {})
def result_match(result, value, path=None):
    """Recursively compare `result` against the expected `value`.

    Returns a 4-tuple ``(ok, path, result_part, value_part)``: on success
    ``(True, None, None, None)``, otherwise False plus the path to — and
    both sides of — the first mismatching element.
    """
    path = [] if path is None else path
    if result is _missing:
        # zip_longest padding: one side was shorter than the other.
        return False, path, result, value
    if isinstance(value, dict):
        for key, expected in value.items():
            status = result_match(result[key], expected, path + [key])
            if not status[0]:
                return status
    elif isinstance(value, (list, tuple)):
        padded = zip_longest(result, value, fillvalue=_missing)
        for index, (res_item, val_item) in enumerate(padded):
            status = result_match(res_item, val_item, path + [index])
            if not status[0]:
                return status
    elif result != value:
        return False, path, result, value
    return True, None, None, None
def check_result(result, value):
    """Assert that `result` matches `value`.

    Raises AssertionError naming the path to the first differing element.
    """
    ok, path, got, expected = result_match(result, value)
    if ok:
        return
    location = 'result' + ''.join('[{!r}]'.format(step) for step in path)
    raise AssertionError(
        'Result mismatch, first different element '
        'path: {}, value: {!r}, expected: {!r}'
        .format(location, got, expected))
def _ref_reducer(backref, item):
    """Fold one ``(name, to)`` link onto `backref`.

    A None name produces an anonymous Ref; otherwise a NamedRef.
    """
    name, to = item
    if name is None:
        return Ref(backref, to)
    return NamedRef(backref, name, to)
def ref(chain):
    """Build a reference chain, folding from the last link outward."""
    node = None
    for link in reversed(chain):
        node = _ref_reducer(node, link)
    return node
| {
"repo_name": "vmagamedov/hiku",
"path": "tests/base.py",
"copies": "1",
"size": "1846",
"license": "bsd-3-clause",
"hash": 2399954279884178400,
"line_mean": 28.7741935484,
"line_max": 71,
"alpha_frac": 0.6013001083,
"autogenerated": false,
"ratio": 3.563706563706564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4665006672006564,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
# version code d345910f07ae
coursera = 1
# Please fill out this stencil and submit using the provided submission script.
## 1: (Task 1) Movie Review
## Task 1
def movie_review(name):
    """
    Input: the name of a movie
    Output: a string (one of the review options), selected at random using randint

    Fixes: the docstring now precedes the import so Python actually binds it
    to __doc__, and the randint bound is derived from the list length instead
    of being hard-coded to 3.
    """
    from random import randint
    reviews = [ "Outstanding!", "Terribly aweful!", "My hamster could make a better movie than that!", "Eh..." ]
    return reviews[randint(0, len(reviews) - 1)]
## 2: (Task 2) Make Inverse Index
def makeInverseIndex(strlist):
    """
    Input: a list of documents as strings
    Output: a dictionary that maps each word in any document to the set consisting of the
        document ids (ie, the index in the strlist) for all documents containing the word.
        Distinguish between an occurence of a string (e.g. "use") in the document as a word
        (surrounded by spaces), and an occurence of the string as a substring of a word
        (e.g. "because"). Only the former should be represented in the inverse index.

    Example:
    >>> makeInverseIndex(['hello world','hello','hello cat','hellolot of cats']) == {'hello': {0, 1, 2}, 'cat': {2}, 'of': {3}, 'world': {0}, 'cats': {3}, 'hellolot': {3}}
    True
    """
    # enumerate + setdefault replaces the original conditional-expression
    # statement (`d[word].add(idx) if word in d else d.setdefault(...)`),
    # which abused an expression for side effects.
    index = {}
    for doc_id, document in enumerate(strlist):
        for word in document.split():
            index.setdefault(word, set()).add(doc_id)
    return index
## 3: (Task 3) Or Search
def orSearch(inverseIndex, query):
    """
    Input: an inverse index, as created by makeInverseIndex, and a list of words to query
    Output: the set of document ids that contain _any_ of the specified words

    An empty query now returns the empty set (the original `reduce` with no
    initial value raised TypeError on an empty query).

    >>> idx = makeInverseIndex(['Johann Sebastian Bach', 'Johannes Brahms', 'Johann Strauss the Younger', 'Johann Strauss the Elder', ' Johann Christian Bach', 'Carl Philipp Emanuel Bach'])
    >>> orSearch(idx, ['Bach','the'])
    {0, 2, 3, 4, 5}
    >>> orSearch(idx, ['Johann', 'Carl'])
    {0, 2, 3, 4, 5}
    """
    # set().union folds any number of sets, including zero of them.
    return set().union(*(inverseIndex[word] for word in query))
## 4: (Task 4) And Search
def andSearch(inverseIndex, query):
    """
    Input: an inverse index, as created by makeInverseIndex, and a list of words to query
    Output: the set of all document ids that contain _all_ of the specified words

    >>> idx = makeInverseIndex(['Johann Sebastian Bach', 'Johannes Brahms', 'Johann Strauss the Younger', 'Johann Strauss the Elder', ' Johann Christian Bach', 'Carl Philipp Emanuel Bach'])
    >>> andSearch(idx, ['Johann', 'the'])
    {2, 3}
    >>> andSearch(idx, ['Johann', 'Bach'])
    {0, 4}
    """
    # Intersect the document sets for every queried word.
    return reduce(lambda acc, docs: acc & docs,
                  map(inverseIndex.__getitem__, query))
| {
"repo_name": "josiah14/linear-algebra",
"path": "programming-the-matrix/0-week/inverse-index-lab/Python/inverse_index_lab.py",
"copies": "1",
"size": "2945",
"license": "mit",
"hash": -7016810641730002000,
"line_mean": 38.7972972973,
"line_max": 190,
"alpha_frac": 0.6522920204,
"autogenerated": false,
"ratio": 3.3580387685290765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4510330788929076,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
# V is sparse Vector with O(k)=o(n) elements
# Time: O(1)
def getitem(v, d):
    """Return the value of entry d in v; entries absent from v.f are 0.

    Uses dict.get instead of the original membership-test conditional
    (and drops a stray trailing semicolon).
    """
    assert d in v.D
    return v.f.get(d, 0)
# Time: O(1)
def setitem(v, d, val):
    """Set the entry of v labelled d to val; d must belong to the domain."""
    assert d in v.D
    v.f.update({d: val})
# Time: O(k(u) + k(v))
def equal(u, v):
    """True iff u and v agree on every explicitly stored label.

    Labels missing from one side read as 0 via vector indexing, so a
    stored zero equals an absent entry.
    """
    assert u.D == v.D
    return all(u[k] == v[k] for k in set(u.f.keys()) | set(v.f.keys()))
def add(u, v):
    """Return the vector sum of u and v (sparse: zero-zero pairs skipped)."""
    assert u.D == v.D
    summed = {}
    for d in u.D:
        if u[d] or v[d]:
            summed[d] = u[d] + v[d]
    return Vec(v.D, summed)
def dot(u, v):
    """Return the dot product of u and v, skipping terms with a zero factor."""
    assert u.D == v.D
    total = 0
    for d in u.D:
        if u[d] and v[d]:
            total += u[d] * v[d]
    return total
def scalar_mul(v, alpha):
    """Return the scalar-vector product alpha * v (zero entries dropped)."""
    scaled = {label: alpha * value for label, value in v.f.items() if value}
    return Vec(v.D, scaled)
def neg(v):
    """Return the negation of v (zero entries dropped)."""
    return Vec(v.D, {label: -value for label, value in v.f.items() if value})
##### NO NEED TO MODIFY BELOW HERE #####
class Vec:
    """
    A vector has two fields:
    D - the domain (a set)
    f - a dictionary mapping (some) domain elements to field elements
    elements of D not appearing in f are implicitly mapped to zero
    """
    def __init__(self, labels, function):
        # labels: the domain set D; function: sparse {label: value} mapping
        self.D = labels
        self.f = function
    # Wire the module-level sparse-vector procedures in as operators.
    __getitem__ = getitem
    __setitem__ = setitem
    __neg__ = neg
    __rmul__ = scalar_mul  # if left arg of * is primitive, assume it's a scalar
    def __mul__(self, other):
        # If other is a vector, returns the dot product of self and other
        if isinstance(other, Vec):
            return dot(self, other)
        else:
            return NotImplemented  # Will cause other.__rmul__(self) to be invoked
    def __truediv__(self, other):  # Scalar division
        return (1/other)*self
    __add__ = add
    def __radd__(self, other):
        "Hack to allow sum(...) to work with vectors"
        # sum() starts from 0, so 0 + vec routes here and must return the vector.
        # NOTE(review): implicitly returns None for any other left operand.
        if other == 0:
            return self
    def __sub__(a, b):
        "Returns a vector which is the difference of a and b."
        return a+(-b)
    __eq__ = equal
    def __str__(v):
        "pretty-printing"
        try:
            D_list = sorted(v.D)
        except TypeError:
            # Labels are not mutually orderable; fall back to hash order.
            D_list = sorted(v.D, key=hash)
        numdec = 3  # significant digits shown for numeric entries
        # Per-label column width: wide enough for both the label and its value.
        wd = dict([(k,(1+max(len(str(k)), len('{0:.{1}G}'.format(v[k], numdec))))) if isinstance(v[k], int) or isinstance(v[k], float) else (k,(1+max(len(str(k)), len(str(v[k]))))) for k in D_list])
        # w = 1+max([len(str(k)) for k in D_list]+[len('{0:.{1}G}'.format(value,numdec)) for value in v.f.values()])
        s1 = ''.join(['{0:>{1}}'.format(k,wd[k]) for k in D_list])
        s2 = ''.join(['{0:>{1}.{2}G}'.format(v[k],wd[k],numdec) if isinstance(v[k], int) or isinstance(v[k], float) else '{0:>{1}}'.format(v[k], wd[k]) for k in D_list])
        return "\n" + s1 + "\n" + '-'*sum(wd.values()) +"\n" + s2
    def __repr__(self):
        return "Vec(" + str(self.D) + "," + str(self.f) + ")"
    def copy(self):
        "Don't make a new copy of the domain D"
        return Vec(self.D, self.f.copy())
| {
"repo_name": "mgall/coding-the-matrix",
"path": "matrixlib/vec.py",
"copies": "1",
"size": "3228",
"license": "mit",
"hash": 3931624128291188700,
"line_mean": 30.9603960396,
"line_max": 198,
"alpha_frac": 0.5477075589,
"autogenerated": false,
"ratio": 2.977859778597786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4025567337497786,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
voting_data = list(open("voting_record_dump109.txt"))
## Task 1
## Task 1
def create_voting_dict():
    """
    Input: None (reads voting_record_dump109.txt)
    Output: A dictionary that maps the last name of a senator
        to a list of ints representing the senator's voting record
        (1 = yea, -1 = nay, 0 = abstention), preserving file order.

    Each line of the dump is: last_name, party (R/D), home state, then the
    votes. (Assumes whitespace-separated fields — matches the dump format
    documented above.)

    Fixes over the original:
    - ``senator.split[3:]`` was missing its call parentheses and raised
      TypeError when building the dict;
    - votes are materialized as a list (a lazy ``map`` would be exhausted
      after one pass, breaking repeated dot products on the same record);
    - the file handle is closed deterministically via ``with``.
    """
    voting = {}
    with open('voting_record_dump109.txt', 'r') as senators:
        for line in senators:
            fields = line.split()
            voting[fields[0]] = [int(vote) for vote in fields[3:]]
    return voting
## Task 2
def policy_compare(sen_a, sen_b, voting):
    """
    Input: last names of sen_a and sen_b, and a voting dictionary mapping senator
        names to lists representing their voting records.
    Output: the dot-product (as a number) representing the degree of similarity
        between two senators' voting policies

    Example:
    >>> voting_dict = {'Fox-Epstein':[-1,-1,-1,1],'Ravella':[1,1,1,1]}
    >>> policy_compare('Fox-Epstein','Ravella', voting_dict)
    -2
    """
    votes_a = voting[sen_a]
    votes_b = voting[sen_b]
    return sum(a * b for a, b in zip(votes_a, votes_b))
## Task 3
def most_similar(sen, voting):
    """
    Input: the last name of a senator, and a dictionary mapping senator names
        to lists representing their voting records.
    Output: the last name of the senator (excluding the input one) whose
        record has the largest dot product with the input senator's.
        Ties are resolved arbitrarily (by name ordering inside max).

    Example:
    >>> vd = {'Klein': [1,1,1], 'Fox-Epstein': [1,-1,0], 'Ravella': [-1,0,0]}
    >>> most_similar('Klein', vd)
    'Fox-Epstein'
    """
    rivals = (name for name in voting if name != sen)
    best = max((policy_compare(sen, name, voting), name) for name in rivals)
    return best[1]
## Task 4
def least_similar(sen, voting):
    """
    Input: the last name of a senator, and a dictionary mapping senator names
        to lists representing their voting records.
    Output: the last name of the senator whose voting record has the
        smallest dot product with the input senator's.

    Example:
    >>> vd = {'Klein': [1,1,1], 'Fox-Epstein': [1,-1,0], 'Ravella': [-1,0,0]}
    >>> least_similar('Klein', vd)
    'Ravella'
    """
    rivals = (name for name in voting if name != sen)
    worst = min((policy_compare(sen, name, voting), name) for name in rivals)
    return worst[1]
## Task 5
# Build the voting dictionary once instead of re-reading and re-parsing the
# dump file for each question.
_voting_dict = create_voting_dict()
most_like_chafee = most_similar('Chafee', _voting_dict)
least_like_santorum = least_similar('Santorum', _voting_dict)
# Task 6
def find_average_similarity(sen, sen_set, voting):
    """
    Input: the name of a senator, a set of senator names, and a voting dictionary.
    Output: the average dot-product between sen and those in sen_set.

    Example:
    >>> vd = {'Klein': [1,1,1], 'Fox-Epstein': [1,-1,0], 'Ravella': [-1,0,0]}
    >>> find_average_similarity('Klein', {'Fox-Epstein','Ravella'}, vd)
    -0.5
    """
    total = 0
    for other in sen_set:
        total += policy_compare(sen, other, voting)
    return total / len(sen_set)
most_average_Democrat = 'Biden'
# Task 7
def find_average_record(sen_set, voting):
    """
    Input: a set of last names, a voting dictionary
    Output: a vector containing the average components of the voting records
        of the senators in the input set

    Example:
    >>> voting_dict = {'Klein': [-1,0,1], 'Fox-Epstein': [-1,-1,-1], 'Ravella': [0,0,1]}
    >>> find_average_record({'Fox-Epstein','Ravella'}, voting_dict)
    [-0.5, -0.5, 0.0]
    """
    records = (voting[name] for name in sen_set)
    # Component-wise sum of all selected records.
    totals = reduce(lambda acc, rec: [x + y for x, y in zip(acc, rec)], records)
    count = len(sen_set)
    return [total / count for total in totals]
average_Democrat_record = [-0.16279069767441862, -0.23255813953488372, 1.0,
0.8372093023255814, 0.9767441860465116, -0.13953488372093023, -0.9534883720930233,
0.813953488372093, 0.9767441860465116, 0.9767441860465116, 0.9069767441860465,
0.7674418604651163, 0.6744186046511628, 0.9767441860465116, -0.5116279069767442,
0.9302325581395349, 0.9534883720930233, 0.9767441860465116, -0.3953488372093023,
0.9767441860465116, 1.0, 1.0, 1.0, 0.9534883720930233, -0.4883720930232558,
1.0, -0.32558139534883723, -0.06976744186046512, 0.9767441860465116,
0.8604651162790697, 0.9767441860465116, 0.9767441860465116, 1.0, 1.0,
0.9767441860465116,-0.3488372093023256, 0.9767441860465116, -0.4883720930232558,
0.23255813953488372, 0.8837209302325582, 0.4418604651162791, 0.9069767441860465,
-0.9069767441860465, 1.0, 0.9069767441860465, -0.3023255813953488]
# Task 8
def bitter_rivals(voting):
    """
    Input: a dictionary mapping senator names to lists representing
        their voting records
    Output: a tuple containing the two senators who most strongly
        disagree with one another (smallest dot product).

    Example:
    >>> voting_dict = {'Klein': [-1,0,1], 'Fox-Epstein': [-1,-1,-1], 'Ravella': [0,0,1]}
    >>> bitter_rivals(voting_dict)
    ('Fox-Epstein', 'Ravella')
    """
    names = voting.keys()
    worst = min(
        (policy_compare(a, b, voting), a, b)
        for a in names for b in names if a != b
    )
    return (worst[1], worst[2])
| {
"repo_name": "mgall/coding-the-matrix",
"path": "labs/03-lab-politics/politics_lab.py",
"copies": "1",
"size": "6273",
"license": "mit",
"hash": -480726516447352450,
"line_mean": 37.4846625767,
"line_max": 100,
"alpha_frac": 0.6438705564,
"autogenerated": false,
"ratio": 2.8854645814167434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40293351378167436,
"avg_score": null,
"num_lines": null
} |
from functools import reduce, partial
import inspect
import operator
from operator import attrgetter
from textwrap import dedent
from .compatibility import PY3, PY33, PY34, PYPY, import_module
from .utils import no_default
__all__ = ('identity', 'thread_first', 'thread_last', 'memoize', 'compose',
'pipe', 'complement', 'juxt', 'do', 'curry', 'flip', 'excepts')
def identity(x):
    """ Return ``x`` unchanged.

    >>> identity(3)
    3
    """
    return x
def thread_first(val, *forms):
    """ Thread value through a sequence of functions/forms

    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_first(1, inc, double)
    4

    If the function expects more than one input you can specify those inputs
    in a tuple.  The value is used as the first input.

    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_first(1, (add, 4), (pow, 2))  # pow(add(1, 4), 2)
    25

    So in general
        thread_first(x, f, (g, y, z))
    expands to
        g(f(x), y, z)

    See Also:
        thread_last
    """
    def step(acc, form):
        # Bare callable: apply directly.  (func, *extra) tuple: prepend the
        # accumulated value to the extra arguments.
        if callable(form):
            return form(acc)
        if isinstance(form, tuple):
            func, extra = form[0], form[1:]
            return func(*((acc,) + extra))
    return reduce(step, forms, val)
def thread_last(val, *forms):
    """ Thread value through a sequence of functions/forms

    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_last(1, inc, double)
    4

    If the function expects more than one input you can specify those inputs
    in a tuple.  The value is used as the last input.

    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_last(1, (add, 4), (pow, 2))  # pow(2, add(4, 1))
    32

    So in general
        thread_last(x, f, (g, y, z))
    expands to
        g(y, z, f(x))

    >>> def iseven(x):
    ...     return x % 2 == 0
    >>> list(thread_last([1, 2, 3], (map, inc), (filter, iseven)))
    [2, 4]

    See Also:
        thread_first
    """
    def step(acc, form):
        # Bare callable: apply directly.  (func, *extra) tuple: append the
        # accumulated value after the extra arguments.
        if callable(form):
            return form(acc)
        if isinstance(form, tuple):
            func, extra = form[0], form[1:]
            return func(*(extra + (acc,)))
    return reduce(step, forms, val)
def instanceproperty(fget=None, fset=None, fdel=None, doc=None, classval=None):
    """ Like @property, but returns ``classval`` when used as a class attribute

    >>> class MyClass(object):
    ...     '''The class docstring'''
    ...     @instanceproperty(classval=__doc__)
    ...     def __doc__(self):
    ...         return 'An object docstring'
    ...     @instanceproperty
    ...     def val(self):
    ...         return 42
    ...
    >>> MyClass.__doc__
    'The class docstring'
    >>> MyClass.val is None
    True
    >>> obj = MyClass()
    >>> obj.__doc__
    'An object docstring'
    >>> obj.val
    42
    """
    settings = dict(fset=fset, fdel=fdel, doc=doc, classval=classval)
    if fget is None:
        # Used as ``@instanceproperty(...)``: return a decorator that waits
        # for the getter.
        return partial(instanceproperty, **settings)
    return InstanceProperty(fget=fget, **settings)
class InstanceProperty(property):
    """ Like @property, but returns ``classval`` when used as a class attribute

    Should not be used directly.  Use ``instanceproperty`` instead.
    """
    def __init__(self, fget=None, fset=None, fdel=None, doc=None,
                 classval=None):
        # classval is what attribute access on the *class* (not an instance)
        # returns; plain property would return the descriptor itself.
        self.classval = classval
        property.__init__(self, fget=fget, fset=fset, fdel=fdel, doc=doc)
    def __get__(self, obj, type=None):
        if obj is None:
            # Accessed via the class: report classval instead of the property.
            return self.classval
        return property.__get__(self, obj, type)
    def __reduce__(self):
        # property objects are not picklable by default; rebuild from the
        # positional state accepted by __init__.
        state = (self.fget, self.fset, self.fdel, self.__doc__, self.classval)
        return InstanceProperty, state
class curry(object):
    """ Curry a callable function

    Enables partial application of arguments through calling a function with an
    incomplete set of arguments.

    >>> def mul(x, y):
    ...     return x * y
    >>> mul = curry(mul)
    >>> double = mul(2)
    >>> double(10)
    20

    Also supports keyword arguments

    >>> @curry                  # Can use curry as a decorator
    ... def f(x, y, a=10):
    ...     return a * (x + y)
    >>> add = f(a=1)
    >>> add(2, 3)
    5

    See Also:
        toolz.curried - namespace of curried functions
        https://toolz.readthedocs.io/en/latest/curry.html
    """
    def __init__(self, *args, **kwargs):
        # Signature is (func, *args, **kwargs); func is peeled off manually
        # so that *any* keyword (including 'func') can be partially applied.
        if not args:
            raise TypeError('__init__() takes at least 2 arguments (1 given)')
        func, args = args[0], args[1:]
        if not callable(func):
            raise TypeError("Input must be callable")
        # curry- or functools.partial-like object?  Unpack and merge arguments
        if (
            hasattr(func, 'func')
            and hasattr(func, 'args')
            and hasattr(func, 'keywords')
            and isinstance(func.args, tuple)
        ):
            _kwargs = {}
            if func.keywords:
                _kwargs.update(func.keywords)
            _kwargs.update(kwargs)
            kwargs = _kwargs
            args = func.args + args
            func = func.func
        # The stored partial does the actual calling; kwargs omitted when empty.
        if kwargs:
            self._partial = partial(func, *args, **kwargs)
        else:
            self._partial = partial(func, *args)
        self.__doc__ = getattr(func, '__doc__', None)
        self.__name__ = getattr(func, '__name__', '<curry>')
        self.__module__ = getattr(func, '__module__', None)
        self.__qualname__ = getattr(func, '__qualname__', None)
        # Lazily computed in _should_curry; cached across calls.
        self._sigspec = None
        self._has_unknown_args = None
    @instanceproperty
    def func(self):
        return self._partial.func
    if PY3:  # pragma: py2 no cover
        @instanceproperty
        def __signature__(self):
            # Present the *remaining* signature after the bound args/keywords.
            sig = inspect.signature(self.func)
            args = self.args or ()
            keywords = self.keywords or {}
            if is_partial_args(self.func, args, keywords, sig) is False:
                raise TypeError('curry object has incorrect arguments')
            params = list(sig.parameters.values())
            skip = 0
            for param in params[:len(args)]:
                if param.kind == param.VAR_POSITIONAL:
                    break
                skip += 1
            kwonly = False
            newparams = []
            for param in params[skip:]:
                kind = param.kind
                default = param.default
                if kind == param.VAR_KEYWORD:
                    pass
                elif kind == param.VAR_POSITIONAL:
                    if kwonly:
                        continue
                elif param.name in keywords:
                    # Already bound by keyword: expose as keyword-only with
                    # the bound value as its default.
                    default = keywords[param.name]
                    kind = param.KEYWORD_ONLY
                    kwonly = True
                else:
                    if kwonly:
                        kind = param.KEYWORD_ONLY
                    if default is param.empty:
                        default = no_default
                newparams.append(param.replace(default=default, kind=kind))
            return sig.replace(parameters=newparams)
    @instanceproperty
    def args(self):
        return self._partial.args
    @instanceproperty
    def keywords(self):
        return self._partial.keywords
    @instanceproperty
    def func_name(self):
        return self.__name__
    def __str__(self):
        return str(self.func)
    def __repr__(self):
        return repr(self.func)
    def __hash__(self):
        return hash((self.func, self.args,
                     frozenset(self.keywords.items()) if self.keywords
                     else None))
    def __eq__(self, other):
        return (isinstance(other, curry) and self.func == other.func and
                self.args == other.args and self.keywords == other.keywords)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __call__(self, *args, **kwargs):
        # Try a real call first; on TypeError decide whether the failure
        # means "not enough arguments yet" (curry) or a genuine error (raise).
        try:
            return self._partial(*args, **kwargs)
        except TypeError as exc:
            if self._should_curry(args, kwargs, exc):
                return self.bind(*args, **kwargs)
            raise
    def _should_curry(self, args, kwargs, exc=None):
        func = self.func
        args = self.args + args
        if self.keywords:
            kwargs = dict(self.keywords, **kwargs)
        if self._sigspec is None:
            sigspec = self._sigspec = _sigs.signature_or_spec(func)
            self._has_unknown_args = has_varargs(func, sigspec) is not False
        else:
            sigspec = self._sigspec
        if is_partial_args(func, args, kwargs, sigspec) is False:
            # Nothing can make the call valid
            return False
        elif self._has_unknown_args:
            # The call may be valid and raised a TypeError, but we curry
            # anyway because the function may have `*args`.  This is useful
            # for decorators with signature `func(*args, **kwargs)`.
            return True
        elif not is_valid_args(func, args, kwargs, sigspec):
            # Adding more arguments may make the call valid
            return True
        else:
            # There was a genuine TypeError
            return False
    def bind(self, *args, **kwargs):
        # New curry with additional bound arguments (never calls func).
        return type(self)(self, *args, **kwargs)
    def call(self, *args, **kwargs):
        # Force a call even when more currying would normally happen.
        return self._partial(*args, **kwargs)
    def __get__(self, instance, owner):
        # Descriptor protocol: make curried functions usable as methods.
        if instance is None:
            return self
        return curry(self, instance)
    def __reduce__(self):
        # Pickle support: serialize func as "module:qualname" when it can be
        # re-imported, since functools.partial internals aren't picklable.
        func = self.func
        modname = getattr(func, '__module__', None)
        qualname = getattr(func, '__qualname__', None)
        if qualname is None:  # pragma: py3 no cover
            qualname = getattr(func, '__name__', None)
        is_decorated = None
        if modname and qualname:
            attrs = []
            obj = import_module(modname)
            for attr in qualname.split('.'):
                if isinstance(obj, curry):  # pragma: py2 no cover
                    attrs.append('func')
                    obj = obj.func
                obj = getattr(obj, attr, None)
                if obj is None:
                    break
                attrs.append(attr)
            if isinstance(obj, curry) and obj.func is func:
                is_decorated = obj is self
                qualname = '.'.join(attrs)
                func = '%s:%s' % (modname, qualname)
        # functools.partial objects can't be pickled
        userdict = tuple((k, v) for k, v in self.__dict__.items()
                         if k not in ('_partial', '_sigspec'))
        state = (type(self), func, self.args, self.keywords, userdict,
                 is_decorated)
        return (_restore_curry, state)
def _restore_curry(cls, func, args, kwargs, userdict, is_decorated):
if isinstance(func, str):
modname, qualname = func.rsplit(':', 1)
obj = import_module(modname)
for attr in qualname.split('.'):
obj = getattr(obj, attr)
if is_decorated:
return obj
func = obj.func
obj = cls(func, *args, **(kwargs or {}))
obj.__dict__.update(userdict)
return obj
@curry
def memoize(func, cache=None, key=None):
    """ Cache a function's result for speedy future evaluation

    Considerations:
        Trades memory for speed.
        Only use on pure functions.

    >>> def add(x, y):  return x + y
    >>> add = memoize(add)

    Or use as a decorator

    >>> @memoize
    ... def add(x, y):
    ...     return x + y

    Use the ``cache`` keyword to provide a dict-like object as an initial cache

    >>> @memoize(cache={(1, 2): 3})
    ... def add(x, y):
    ...     return x + y

    Note that the above works as a decorator because ``memoize`` is curried.

    It is also possible to provide a ``key(args, kwargs)`` function that
    calculates keys used for the cache, which receives an ``args`` tuple and
    ``kwargs`` dict as input, and must return a hashable value.  However,
    the default key function should be sufficient most of the time.

    >>> # Use key function that ignores extraneous keyword arguments
    >>> @memoize(key=lambda args, kwargs: args)
    ... def add(x, y, verbose=False):
    ...     if verbose:
    ...         print('Calculating %s + %s' % (x, y))
    ...     return x + y
    """
    if cache is None:
        cache = {}
    try:
        may_have_kwargs = has_keywords(func) is not False
        # Is unary function (single arg, no variadic argument or keywords)?
        is_unary = is_arity(1, func)
    except TypeError:  # pragma: no cover
        may_have_kwargs = True
        is_unary = False
    # Pick the cheapest cache-key strategy the signature allows.
    if key is None:
        if is_unary:
            key = lambda args, kwargs: args[0]
        elif may_have_kwargs:
            key = lambda args, kwargs: (
                args or None,
                frozenset(kwargs.items()) if kwargs else None,
            )
        else:
            key = lambda args, kwargs: args
    def memof(*args, **kwargs):
        k = key(args, kwargs)
        try:
            return cache[k]
        except TypeError:
            raise TypeError("Arguments to memoized function must be hashable")
        except KeyError:
            cache[k] = result = func(*args, **kwargs)
            return result
    try:
        memof.__name__ = func.__name__
    except AttributeError:
        pass
    memof.__doc__ = func.__doc__
    memof.__wrapped__ = func
    return memof
class Compose(object):
    """ A composition of functions

    See Also:
        compose
    """
    __slots__ = 'first', 'funcs'
    def __init__(self, funcs):
        # Store in application order: ``first`` runs first, then ``funcs``.
        ordered = tuple(reversed(funcs))
        self.first = ordered[0]
        self.funcs = ordered[1:]
    def __call__(self, *args, **kwargs):
        result = self.first(*args, **kwargs)
        for fn in self.funcs:
            result = fn(result)
        return result
    def __getstate__(self):
        return self.first, self.funcs
    def __setstate__(self, state):
        self.first, self.funcs = state
    @instanceproperty(classval=__doc__)
    def __doc__(self):
        def composed_doc(*fs):
            """Generate a docstring for the composition of fs."""
            if not fs:
                # Argument name for the docstring.
                return '*args, **kwargs'
            return '{f}({g})'.format(f=fs[0].__name__, g=composed_doc(*fs[1:]))
        try:
            return (
                'lambda *args, **kwargs: ' +
                composed_doc(*reversed((self.first,) + self.funcs))
            )
        except AttributeError:
            # One of our callables does not have a `__name__`, whatever.
            return 'A composition of functions'
    @property
    def __name__(self):
        try:
            return '_of_'.join(
                fn.__name__ for fn in reversed((self.first,) + self.funcs)
            )
        except AttributeError:
            return type(self).__name__
def compose(*funcs):
    """ Compose functions to operate in series.

    Returns a function that applies other functions in sequence.

    Functions are applied from right to left so that
    ``compose(f, g, h)(x, y)`` is the same as ``f(g(h(x, y)))``.

    If no arguments are provided, the identity function (f(x) = x) is returned.

    >>> inc = lambda i: i + 1
    >>> compose(str, inc)(3)
    '4'

    See Also:
        pipe
    """
    count = len(funcs)
    if count == 0:
        return identity
    if count == 1:
        return funcs[0]
    return Compose(funcs)
def pipe(data, *funcs):
    """ Pipe a value through a sequence of functions
    ``pipe(data, f, g, h)`` computes ``h(g(f(data)))``, much like a UNIX
    pipeline ``$ cat data | f | g | h`` -- the value flows left to right.
    >>> double = lambda i: 2 * i
    >>> pipe(3, double, str)
    '6'
    See Also:
        compose
        thread_first
        thread_last
    """
    result = data
    for step in funcs:
        result = step(result)
    return result
def complement(func):
    """ Convert a predicate function to its logical complement.
    In other words, return a function that, for inputs that normally
    yield True, yields False, and vice-versa.
    >>> def iseven(n): return n % 2 == 0
    >>> isodd = complement(iseven)
    >>> iseven(2)
    True
    >>> isodd(2)
    False
    """
    # ``operator.not_`` negates the truthiness of ``func``'s result; the
    # returned object is a ``Compose`` instance, not a plain function.
    return compose(operator.not_, func)
class juxt(object):
    """ Creates a function that calls several functions with the same arguments
    Takes several functions and returns a function that applies its arguments
    to each of those functions then returns a tuple of the results.
    Name comes from juxtaposition: the fact of two things being seen or placed
    close together with contrasting effect.
    >>> inc = lambda x: x + 1
    >>> double = lambda x: x * 2
    >>> juxt(inc, double)(10)
    (11, 20)
    >>> juxt([inc, double])(10)
    (11, 20)
    """
    __slots__ = ['funcs']
    def __init__(self, *funcs):
        # A single non-callable argument is taken to be an iterable of funcs.
        if len(funcs) == 1 and not callable(funcs[0]):
            self.funcs = tuple(funcs[0])
        else:
            self.funcs = tuple(funcs)
    def __call__(self, *args, **kwargs):
        results = []
        for fn in self.funcs:
            results.append(fn(*args, **kwargs))
        return tuple(results)
    def __getstate__(self):
        # Pickle support for a __slots__ class.
        return self.funcs
    def __setstate__(self, state):
        self.funcs = state
def do(func, x):
    """ Runs ``func`` on ``x``, returns ``x``
    ``func`` is called purely for its side effects; its return value is
    discarded.  Composing ``do`` with a storage function such as
    ``list.append`` or ``file.write`` yields a simple logging combinator.
    >>> from toolz import compose
    >>> from toolz.curried import do
    >>> log = []
    >>> inc = lambda x: x + 1
    >>> inc = compose(inc, do(log.append))
    >>> inc(1)
    2
    >>> inc(11)
    12
    >>> log
    [1, 11]
    """
    func(x)  # result intentionally ignored; only the side effect matters
    return x
@curry
def flip(func, a, b):
    """ Call the function call with the arguments flipped
    This function is curried.
    >>> def div(a, b):
    ...     return a // b
    ...
    >>> flip(div, 2, 6)
    3
    >>> div_by_two = flip(div, 2)
    >>> div_by_two(4)
    2
    This is particularly useful for built in functions and functions defined
    in C extensions that accept positional only arguments. For example:
    isinstance, issubclass.
    >>> data = [1, 'a', 'b', 2, 1.5, object(), 3]
    >>> only_ints = list(filter(flip(isinstance, int), data))
    >>> only_ints
    [1, 2, 3]
    """
    # Swap the two positional arguments before delegating to ``func``.
    return func(b, a)
def return_none(exc):
    """ Default exception handler for ``excepts``: ignore *exc*, yield None.
    """
    return None
class excepts(object):
    """A wrapper around a function to catch exceptions and
    dispatch to a handler.
    This is like a functional try/except block, in the same way that
    ifexprs are functional if/else blocks.
    Examples
    --------
    >>> excepting = excepts(
    ...     ValueError,
    ...     lambda a: [1, 2].index(a),
    ...     lambda _: -1,
    ... )
    >>> excepting(1)
    0
    >>> excepting(3)
    -1
    Multiple exceptions and default except clause.
    >>> excepting = excepts((IndexError, KeyError), lambda a: a[0])
    >>> excepting([])
    >>> excepting([1])
    1
    >>> excepting({})
    >>> excepting({0: 1})
    1
    """
    def __init__(self, exc, func, handler=return_none):
        # exc: an exception class or tuple of classes to catch
        # func: the wrapped callable
        # handler: called with the caught exception; defaults to return_none
        self.exc = exc
        self.func = func
        self.handler = handler
    def __call__(self, *args, **kwargs):
        try:
            return self.func(*args, **kwargs)
        except self.exc as e:
            return self.handler(e)
    @instanceproperty(classval=__doc__)
    def __doc__(self):
        # Build an instance-specific docstring describing the wrapped
        # function and handler; fall back to the class docstring when a
        # participant lacks ``__name__``/``__doc__``.
        exc = self.exc
        try:
            if isinstance(exc, tuple):
                exc_name = '(%s)' % ', '.join(
                    map(attrgetter('__name__'), exc),
                )
            else:
                exc_name = exc.__name__
            return dedent(
                """\
                A wrapper around {inst.func.__name__!r} that will except:
                {exc}
                and handle any exceptions with {inst.handler.__name__!r}.
                Docs for {inst.func.__name__!r}:
                {inst.func.__doc__}
                Docs for {inst.handler.__name__!r}:
                {inst.handler.__doc__}
                """
            ).format(
                inst=self,
                exc=exc_name,
            )
        except AttributeError:
            return type(self).__doc__
    @property
    def __name__(self):
        # e.g. ``getitem_excepting_IndexError_or_KeyError``
        exc = self.exc
        try:
            if isinstance(exc, tuple):
                exc_name = '_or_'.join(map(attrgetter('__name__'), exc))
            else:
                exc_name = exc.__name__
            return '%s_excepting_%s' % (self.func.__name__, exc_name)
        except AttributeError:
            return 'excepting'
if PY3:  # pragma: py2 no cover
    def _check_sigspec(sigspec, func, builtin_func, *builtin_args):
        # Resolve a signature for ``func`` (computing it when not supplied).
        # Returns ``(sigspec, None)`` on success, or ``(None, fallback)``
        # when ``builtin_func(*builtin_args)`` (the registry-backed answer)
        # must be used instead.
        if sigspec is None:
            try:
                sigspec = inspect.signature(func)
            except (ValueError, TypeError) as e:
                sigspec = e
        if isinstance(sigspec, ValueError):
            return None, builtin_func(*builtin_args)
        elif not isinstance(sigspec, inspect.Signature):
            if (
                func in _sigs.signatures
                and ((
                    hasattr(func, '__signature__')
                    and hasattr(func.__signature__, '__get__')
                ) or (
                    PY33
                    and hasattr(func, '__wrapped__')
                    and hasattr(func.__wrapped__, '__get__')
                    and not callable(func.__wrapped__)
                ))
            ):  # pragma: no cover (not covered in Python 3.4)
                val = builtin_func(*builtin_args)
                return None, val
            return None, False
        return sigspec, None
else:  # pragma: py3 no cover
    def _check_sigspec(sigspec, func, builtin_func, *builtin_args):
        # Python 2 path: use getargspec; a TypeError means "not a Python
        # function", in which case the builtin registry is consulted.
        if sigspec is None:
            try:
                sigspec = inspect.getargspec(func)
            except TypeError as e:
                sigspec = e
        if isinstance(sigspec, TypeError):
            if not callable(func):
                return None, False
            return None, builtin_func(*builtin_args)
        return sigspec, None
if PY34 or PYPY:  # pragma: no cover
    _check_sigspec_orig = _check_sigspec
    def _check_sigspec(sigspec, func, builtin_func, *builtin_args):
        # Python 3.4 and PyPy may lie, so use our registry for builtins instead
        if func in _sigs.signatures:
            val = builtin_func(*builtin_args)
            return None, val
        return _check_sigspec_orig(sigspec, func, builtin_func, *builtin_args)
_check_sigspec.__doc__ = """ \
Private function to aid in introspection compatibly across Python versions.
If a callable doesn't have a signature (Python 3) or an argspec (Python 2),
the signature registry in toolz._signatures is used.
"""
if PY3:  # pragma: py2 no cover
    # Python 3 implementations, built on ``inspect.Signature`` objects.
    def num_required_args(func, sigspec=None):
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._num_required_args,
                                     func)
        if sigspec is None:
            return rv
        # Required == no default AND fillable positionally.
        return sum(1 for p in sigspec.parameters.values()
                   if p.default is p.empty
                   and p.kind in (p.POSITIONAL_OR_KEYWORD, p.POSITIONAL_ONLY))
    def has_varargs(func, sigspec=None):
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._has_varargs, func)
        if sigspec is None:
            return rv
        return any(p.kind == p.VAR_POSITIONAL
                   for p in sigspec.parameters.values())
    def has_keywords(func, sigspec=None):
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._has_keywords, func)
        if sigspec is None:
            return rv
        return any(p.default is not p.empty
                   or p.kind in (p.KEYWORD_ONLY, p.VAR_KEYWORD)
                   for p in sigspec.parameters.values())
    def is_valid_args(func, args, kwargs, sigspec=None):
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_valid_args,
                                     func, args, kwargs)
        if sigspec is None:
            return rv
        try:
            sigspec.bind(*args, **kwargs)
        except TypeError:
            return False
        return True
    def is_partial_args(func, args, kwargs, sigspec=None):
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_partial_args,
                                     func, args, kwargs)
        if sigspec is None:
            return rv
        try:
            sigspec.bind_partial(*args, **kwargs)
        except TypeError:
            return False
        return True
else:  # pragma: py3 no cover
    # Python 2 implementations, built on ``inspect.getargspec`` tuples.
    def num_required_args(func, sigspec=None):
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._num_required_args,
                                     func)
        if sigspec is None:
            return rv
        num_defaults = len(sigspec.defaults) if sigspec.defaults else 0
        return len(sigspec.args) - num_defaults
    def has_varargs(func, sigspec=None):
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._has_varargs, func)
        if sigspec is None:
            return rv
        return sigspec.varargs is not None
    def has_keywords(func, sigspec=None):
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._has_keywords, func)
        if sigspec is None:
            return rv
        return sigspec.defaults is not None or sigspec.keywords is not None
    def is_valid_args(func, args, kwargs, sigspec=None):
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_valid_args,
                                     func, args, kwargs)
        if sigspec is None:
            return rv
        spec = sigspec
        defaults = spec.defaults or ()
        num_pos = len(spec.args) - len(defaults)
        missing_pos = spec.args[len(args):num_pos]
        if any(arg not in kwargs for arg in missing_pos):
            return False
        if spec.varargs is None:
            num_extra_pos = max(0, len(args) - num_pos)
        else:
            num_extra_pos = 0
        kwargs = dict(kwargs)
        # Add missing keyword arguments (unless already included in `args`)
        missing_kwargs = spec.args[num_pos + num_extra_pos:]
        kwargs.update(zip(missing_kwargs, defaults[num_extra_pos:]))
        # Convert call to use positional arguments
        args = args + tuple(kwargs.pop(key) for key in spec.args[len(args):])
        if (
            not spec.keywords and kwargs
            or not spec.varargs and len(args) > len(spec.args)
            or set(spec.args[:len(args)]) & set(kwargs)
        ):
            return False
        else:
            return True
    def is_partial_args(func, args, kwargs, sigspec=None):
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_partial_args,
                                     func, args, kwargs)
        if sigspec is None:
            return rv
        spec = sigspec
        defaults = spec.defaults or ()
        num_pos = len(spec.args) - len(defaults)
        if spec.varargs is None:
            num_extra_pos = max(0, len(args) - num_pos)
        else:
            num_extra_pos = 0
        kwargs = dict(kwargs)
        # Add missing keyword arguments (unless already included in `args`)
        missing_kwargs = spec.args[num_pos + num_extra_pos:]
        kwargs.update(zip(missing_kwargs, defaults[num_extra_pos:]))
        # Add missing position arguments as keywords (may already be in kwargs)
        missing_args = spec.args[len(args):num_pos + num_extra_pos]
        kwargs.update((x, None) for x in missing_args)
        # Convert call to use positional arguments
        args = args + tuple(kwargs.pop(key) for key in spec.args[len(args):])
        if (
            not spec.keywords and kwargs
            or not spec.varargs and len(args) > len(spec.args)
            or set(spec.args[:len(args)]) & set(kwargs)
        ):
            return False
        else:
            return True
def is_arity(n, func, sigspec=None):
    """ Does a function have only n positional arguments?
    This function relies on introspection and does not call the function.
    Returns None if validity can't be determined.
    >>> def f(x):
    ...     return x
    >>> is_arity(1, f)
    True
    >>> def g(x, y=1):
    ...     return x + y
    >>> is_arity(1, g)
    False
    """
    sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_arity, n, func)
    if sigspec is None:
        return rv
    # Each check below is tri-state (True/False/None); a definite mismatch
    # short-circuits to False, and any remaining uncertainty yields None.
    num = num_required_args(func, sigspec)
    if num is not None:
        num = num == n
        if not num:
            return False
    varargs = has_varargs(func, sigspec)
    if varargs:
        return False
    keywords = has_keywords(func, sigspec)
    if keywords:
        return False
    if num is None or varargs is None or keywords is None:  # pragma: no cover
        return None
    return True
num_required_args.__doc__ = """ \
Number of required positional arguments
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(x, y, z=3):
... return x + y + z
>>> num_required_args(f)
2
>>> def g(*args, **kwargs):
... pass
>>> num_required_args(g)
0
"""
has_varargs.__doc__ = """ \
Does a function have variadic positional arguments?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(*args):
... return args
>>> has_varargs(f)
True
>>> def g(**kwargs):
... return kwargs
>>> has_varargs(g)
False
"""
has_keywords.__doc__ = """ \
Does a function have keyword arguments?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(x, y=0):
... return x + y
>>> has_keywords(f)
True
"""
is_valid_args.__doc__ = """ \
Is ``func(*args, **kwargs)`` a valid function call?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def add(x, y):
... return x + y
>>> is_valid_args(add, (1,), {})
False
>>> is_valid_args(add, (1, 2), {})
True
>>> is_valid_args(map, (), {})
False
**Implementation notes**
Python 2 relies on ``inspect.getargspec``, which only works for
user-defined functions. Python 3 uses ``inspect.signature``, which
works for many more types of callables.
Many builtins in the standard library are also supported.
"""
is_partial_args.__doc__ = """ \
Can partial(func, *args, **kwargs)(*args2, **kwargs2) be a valid call?
Returns True *only* if the call is valid or if it is possible for the
call to become valid by adding more positional or keyword arguments.
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def add(x, y):
... return x + y
>>> is_partial_args(add, (1,), {})
True
>>> is_partial_args(add, (1, 2), {})
True
>>> is_partial_args(add, (1, 2, 3), {})
False
>>> is_partial_args(map, (), {})
True
**Implementation notes**
Python 2 relies on ``inspect.getargspec``, which only works for
user-defined functions. Python 3 uses ``inspect.signature``, which
works for many more types of callables.
Many builtins in the standard library are also supported.
"""
from . import _signatures as _sigs
| {
"repo_name": "Microsoft/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/_vendor/toolz/functoolz.py",
"copies": "1",
"size": "32092",
"license": "apache-2.0",
"hash": 2093802457664481800,
"line_mean": 28.8530232558,
"line_max": 79,
"alpha_frac": 0.5461174124,
"autogenerated": false,
"ratio": 3.9536774670444745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9999661989079026,
"avg_score": 0.000026578073089700997,
"num_lines": 1075
} |
from functools import reduce, partial
import inspect
import operator
from operator import attrgetter
from textwrap import dedent
from .compatibility import PY3, PY33, PY34, PYPY
from .utils import no_default
# Public API exposed by ``from toolz.functoolz import *``.
__all__ = ('identity', 'thread_first', 'thread_last', 'memoize', 'compose',
           'pipe', 'complement', 'juxt', 'do', 'curry', 'flip', 'excepts')
def identity(x):
    """ Return the argument unchanged.
    >>> identity(3)
    3
    """
    return x
def thread_first(val, *forms):
    """ Thread value through a sequence of functions/forms
    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_first(1, inc, double)
    4
    A tuple form supplies extra arguments; the threaded value is inserted
    as the *first* argument of the call.
    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_first(1, (add, 4), (pow, 2))  # pow(add(1, 4), 2)
    25
    So in general ``thread_first(x, f, (g, y, z))`` expands to
    ``g(f(x), y, z)``.
    See Also:
        thread_last
    """
    result = val
    for form in forms:
        if callable(form):
            result = form(result)
        elif isinstance(form, tuple):
            result = form[0](result, *form[1:])
        else:
            # Mirror the original reduce-based fall-through behavior.
            result = None
    return result
def thread_last(val, *forms):
    """ Thread value through a sequence of functions/forms
    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_last(1, inc, double)
    4
    A tuple form supplies extra arguments; the threaded value is inserted
    as the *last* argument of the call.
    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_last(1, (add, 4), (pow, 2))  # pow(2, add(4, 1))
    32
    So in general ``thread_last(x, f, (g, y, z))`` expands to
    ``g(y, z, f(x))``.
    >>> def iseven(x):
    ...     return x % 2 == 0
    >>> list(thread_last([1, 2, 3], (map, inc), (filter, iseven)))
    [2, 4]
    See Also:
        thread_first
    """
    result = val
    for form in forms:
        if callable(form):
            result = form(result)
        elif isinstance(form, tuple):
            result = form[0](*(form[1:] + (result,)))
        else:
            # Mirror the original reduce-based fall-through behavior.
            result = None
    return result
def instanceproperty(fget=None, fset=None, fdel=None, doc=None, classval=None):
    """ Like @property, but returns ``classval`` when used as a class attribute
    >>> class MyClass(object):
    ...     '''The class docstring'''
    ...     @instanceproperty(classval=__doc__)
    ...     def __doc__(self):
    ...         return 'An object docstring'
    ...     @instanceproperty
    ...     def val(self):
    ...         return 42
    ...
    >>> MyClass.__doc__
    'The class docstring'
    >>> MyClass.val is None
    True
    >>> obj = MyClass()
    >>> obj.__doc__
    'An object docstring'
    >>> obj.val
    42
    """
    if fget is None:
        # Called with keyword arguments only: behave as a decorator factory
        # that waits for the getter.
        return partial(instanceproperty, fset=fset, fdel=fdel, doc=doc,
                       classval=classval)
    return InstanceProperty(fget=fget, fset=fset, fdel=fdel, doc=doc,
                            classval=classval)
class InstanceProperty(property):
    """ Property variant that yields ``classval`` for class-level access
    Not meant to be used directly; construct instances via the
    ``instanceproperty`` factory.
    """
    def __init__(self, fget=None, fset=None, fdel=None, doc=None,
                 classval=None):
        # Stash the class-level value, then defer to the builtin property.
        self.classval = classval
        super(InstanceProperty, self).__init__(fget=fget, fset=fset,
                                               fdel=fdel, doc=doc)
    def __get__(self, obj, type=None):
        if obj is not None:
            return property.__get__(self, obj, type)
        # Accessed on the class itself: return the stored class value.
        return self.classval
    def __reduce__(self):
        # Pickle support: rebuild from the positional constructor arguments.
        return (
            InstanceProperty,
            (self.fget, self.fset, self.fdel, self.__doc__, self.classval),
        )
class curry(object):
    """ Curry a callable function
    Enables partial application of arguments through calling a function with an
    incomplete set of arguments.
    >>> def mul(x, y):
    ...     return x * y
    >>> mul = curry(mul)
    >>> double = mul(2)
    >>> double(10)
    20
    Also supports keyword arguments
    >>> @curry                  # Can use curry as a decorator
    ... def f(x, y, a=10):
    ...     return a * (x + y)
    >>> add = f(a=1)
    >>> add(2, 3)
    5
    See Also:
        toolz.curried - namespace of curried functions
        http://toolz.readthedocs.org/en/latest/curry.html
    """
    def __init__(self, *args, **kwargs):
        if not args:
            raise TypeError('__init__() takes at least 2 arguments (1 given)')
        func, args = args[0], args[1:]
        if not callable(func):
            raise TypeError("Input must be callable")
        # curry- or functools.partial-like object?  Unpack and merge arguments
        # so nested curries flatten into a single partial application.
        if (
            hasattr(func, 'func')
            and hasattr(func, 'args')
            and hasattr(func, 'keywords')
            and isinstance(func.args, tuple)
        ):
            _kwargs = {}
            if func.keywords:
                _kwargs.update(func.keywords)
            _kwargs.update(kwargs)
            kwargs = _kwargs
            args = func.args + args
            func = func.func
        if kwargs:
            self._partial = partial(func, *args, **kwargs)
        else:
            self._partial = partial(func, *args)
        self.__doc__ = getattr(func, '__doc__', None)
        self.__name__ = getattr(func, '__name__', '<curry>')
        # Lazily-computed introspection caches used by _should_curry.
        self._sigspec = None
        self._has_unknown_args = None
    @instanceproperty
    def func(self):
        return self._partial.func
    if PY3:  # pragma: py2 no cover
        @instanceproperty
        def __signature__(self):
            # Reconstruct the curried signature: drop parameters already
            # bound positionally and turn keyword-bound parameters into
            # keyword-only parameters carrying their bound value as default.
            sig = inspect.signature(self.func)
            args = self.args or ()
            keywords = self.keywords or {}
            if is_partial_args(self.func, args, keywords, sig) is False:
                raise TypeError('curry object has incorrect arguments')
            params = list(sig.parameters.values())
            skip = 0
            for param in params[:len(args)]:
                if param.kind == param.VAR_POSITIONAL:
                    break
                skip += 1
            kwonly = False
            newparams = []
            for param in params[skip:]:
                kind = param.kind
                default = param.default
                if kind == param.VAR_KEYWORD:
                    pass
                elif kind == param.VAR_POSITIONAL:
                    if kwonly:
                        continue
                elif param.name in keywords:
                    default = keywords[param.name]
                    kind = param.KEYWORD_ONLY
                    kwonly = True
                else:
                    if kwonly:
                        kind = param.KEYWORD_ONLY
                    if default is param.empty:
                        default = no_default
                newparams.append(param.replace(default=default, kind=kind))
            return sig.replace(parameters=newparams)
    @instanceproperty
    def args(self):
        return self._partial.args
    @instanceproperty
    def keywords(self):
        return self._partial.keywords
    @instanceproperty
    def func_name(self):
        return self.__name__
    def __str__(self):
        return str(self.func)
    def __repr__(self):
        return repr(self.func)
    def __hash__(self):
        return hash((self.func, self.args,
                     frozenset(self.keywords.items()) if self.keywords
                     else None))
    def __eq__(self, other):
        return (isinstance(other, curry) and self.func == other.func and
                self.args == other.args and self.keywords == other.keywords)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __call__(self, *args, **kwargs):
        try:
            return self._partial(*args, **kwargs)
        except TypeError as exc:
            # A TypeError may just mean "not enough arguments yet": in that
            # case return a further-curried object instead of raising.
            if self._should_curry(args, kwargs, exc):
                return self.bind(*args, **kwargs)
            raise
    def _should_curry(self, args, kwargs, exc=None):
        func = self.func
        args = self.args + args
        if self.keywords:
            kwargs = dict(self.keywords, **kwargs)
        if self._sigspec is None:
            sigspec = self._sigspec = _sigs.signature_or_spec(func)
            self._has_unknown_args = has_varargs(func, sigspec) is not False
        else:
            sigspec = self._sigspec
        if is_partial_args(func, args, kwargs, sigspec) is False:
            # Nothing can make the call valid
            return False
        elif self._has_unknown_args:
            # The call may be valid and raised a TypeError, but we curry
            # anyway because the function may have `*args`.  This is useful
            # for decorators with signature `func(*args, **kwargs)`.
            return True
        elif not is_valid_args(func, args, kwargs, sigspec):
            # Adding more arguments may make the call valid
            return True
        else:
            # There was a genuine TypeError
            return False
    def bind(self, *args, **kwargs):
        # Partially apply more arguments without attempting a call.
        return type(self)(self, *args, **kwargs)
    def call(self, *args, **kwargs):
        # Invoke the underlying function directly (no currying fallback).
        return self._partial(*args, **kwargs)
    def __get__(self, instance, owner):
        # Descriptor protocol: lets ``curry`` wrap methods by binding the
        # instance as the first argument.
        if instance is None:
            return self
        return curry(self, instance)
    # pickle protocol because functools.partial objects can't be pickled
    def __getstate__(self):
        # dictoolz.keyfilter, I miss you!
        userdict = tuple((k, v) for k, v in self.__dict__.items()
                         if k != '_partial')
        return self.func, self.args, self.keywords, userdict
    def __setstate__(self, state):
        func, args, kwargs, userdict = state
        self.__init__(func, *args, **(kwargs or {}))
        self.__dict__.update(userdict)
@curry
def memoize(func, cache=None, key=None):
    """ Cache a function's result for speedy future evaluation
    Considerations:
        Trades memory for speed.
        Only use on pure functions.
    >>> def add(x, y):  return x + y
    >>> add = memoize(add)
    Or use as a decorator
    >>> @memoize
    ... def add(x, y):
    ...     return x + y
    Use the ``cache`` keyword to provide a dict-like object as an initial cache
    >>> @memoize(cache={(1, 2): 3})
    ... def add(x, y):
    ...     return x + y
    Note that the above works as a decorator because ``memoize`` is curried.
    It is also possible to provide a ``key(args, kwargs)`` function that
    calculates keys used for the cache, which receives an ``args`` tuple and
    ``kwargs`` dict as input, and must return a hashable value.  However,
    the default key function should be sufficient most of the time.
    >>> # Use key function that ignores extraneous keyword arguments
    >>> @memoize(key=lambda args, kwargs: args)
    ... def add(x, y, verbose=False):
    ...     if verbose:
    ...         print('Calculating %s + %s' % (x, y))
    ...     return x + y
    """
    if cache is None:
        cache = {}
    try:
        may_have_kwargs = has_keywords(func) is not False
        # Is unary function (single arg, no variadic argument or keywords)?
        is_unary = is_arity(1, func)
    except TypeError:  # pragma: no cover
        may_have_kwargs = True
        is_unary = False
    if key is None:
        # Choose the cheapest key strategy the signature allows.
        if is_unary:
            def key(args, kwargs):
                return args[0]
        elif may_have_kwargs:
            def key(args, kwargs):
                return (
                    args or None,
                    frozenset(kwargs.items()) if kwargs else None,
                )
        else:
            def key(args, kwargs):
                return args
    def memof(*args, **kwargs):
        # Cache lookup first; compute and store only on a miss.
        k = key(args, kwargs)
        try:
            return cache[k]
        except TypeError:
            raise TypeError("Arguments to memoized function must be hashable")
        except KeyError:
            cache[k] = result = func(*args, **kwargs)
            return result
    try:
        memof.__name__ = func.__name__
    except AttributeError:
        pass
    memof.__doc__ = func.__doc__
    memof.__wrapped__ = func
    return memof
class Compose(object):
    """ A composition of functions
    See Also:
        compose
    """
    __slots__ = 'first', 'funcs'
    def __init__(self, funcs):
        # ``funcs`` arrives outermost-first (the order given to ``compose``);
        # reverse it and split off the innermost function, which is the only
        # one allowed to receive arbitrary *args/**kwargs.
        funcs = tuple(reversed(funcs))
        self.first = funcs[0]
        self.funcs = funcs[1:]
    def __call__(self, *args, **kwargs):
        # Apply the innermost function, then feed each result through the
        # remaining functions in order.
        ret = self.first(*args, **kwargs)
        for f in self.funcs:
            ret = f(ret)
        return ret
    def __getstate__(self):
        # Pickle support: __slots__ classes have no instance __dict__.
        return self.first, self.funcs
    def __setstate__(self, state):
        self.first, self.funcs = state
    @instanceproperty(classval=__doc__)
    def __doc__(self):
        # Instance-level docstring: render the composition as a lambda,
        # e.g. ``lambda *args, **kwargs: f(g(*args, **kwargs))``.
        def composed_doc(*fs):
            """Generate a docstring for the composition of fs.
            """
            if not fs:
                # Argument name for the docstring.
                return '*args, **kwargs'
            return '{f}({g})'.format(f=fs[0].__name__, g=composed_doc(*fs[1:]))
        try:
            return (
                'lambda *args, **kwargs: ' +
                composed_doc(*reversed((self.first,) + self.funcs))
            )
        except AttributeError:
            # One of our callables does not have a `__name__`, whatever.
            return 'A composition of functions'
    @property
    def __name__(self):
        # BUG FIX: the generator expression must be wrapped in parentheses.
        # ``join(genexp,)`` -- an unparenthesized generator expression
        # followed by a trailing comma in a call -- is a SyntaxError on
        # Python 3.7+ ("Generator expression must be parenthesized").
        try:
            return '_of_'.join(
                (f.__name__ for f in reversed((self.first,) + self.funcs))
            )
        except AttributeError:
            return type(self).__name__
def compose(*funcs):
    """ Compose functions to operate in series.
    Returns a function that applies other functions in sequence.
    Functions are applied from right to left so that
    ``compose(f, g, h)(x, y)`` is the same as ``f(g(h(x, y)))``.
    If no arguments are provided, the identity function (f(x) = x) is returned.
    >>> inc = lambda i: i + 1
    >>> compose(str, inc)(3)
    '4'
    See Also:
        pipe
    """
    if len(funcs) == 1:
        return funcs[0]
    elif funcs:
        return Compose(funcs)
    return identity
def pipe(data, *funcs):
    """ Pipe a value through a sequence of functions
    ``pipe(data, f, g, h)`` computes ``h(g(f(data)))``, much like a UNIX
    pipeline ``$ cat data | f | g | h`` -- the value flows left to right.
    >>> double = lambda i: 2 * i
    >>> pipe(3, double, str)
    '6'
    See Also:
        compose
        thread_first
        thread_last
    """
    return reduce(lambda value, step: step(value), funcs, data)
def complement(func):
    """ Convert a predicate function to its logical complement.
    In other words, return a function that, for inputs that normally
    yield True, yields False, and vice-versa.
    >>> def iseven(n): return n % 2 == 0
    >>> isodd = complement(iseven)
    >>> iseven(2)
    True
    >>> isodd(2)
    False
    """
    # ``operator.not_`` negates the truthiness of ``func``'s result; the
    # returned object is a ``Compose`` instance, not a plain function.
    return compose(operator.not_, func)
class juxt(object):
    """ Creates a function that calls several functions with the same arguments
    Takes several functions and returns a function that applies its arguments
    to each of those functions then returns a tuple of the results.
    Name comes from juxtaposition: the fact of two things being seen or placed
    close together with contrasting effect.
    >>> inc = lambda x: x + 1
    >>> double = lambda x: x * 2
    >>> juxt(inc, double)(10)
    (11, 20)
    >>> juxt([inc, double])(10)
    (11, 20)
    """
    __slots__ = ['funcs']
    def __init__(self, *funcs):
        if len(funcs) == 1 and not callable(funcs[0]):
            # A single iterable of functions was supplied.
            (funcs,) = funcs
        self.funcs = tuple(funcs)
    def __call__(self, *args, **kwargs):
        return tuple(map(lambda fn: fn(*args, **kwargs), self.funcs))
    def __getstate__(self):
        # Pickle support for a __slots__ class.
        return self.funcs
    def __setstate__(self, state):
        self.funcs = state
def do(func, x):
    """ Runs ``func`` on ``x``, returns ``x``
    ``func`` is invoked purely for its side effects; its return value is
    discarded.  Composing ``do`` with a storage function such as
    ``list.append`` or ``file.write`` yields a simple logging combinator.
    >>> from toolz import compose
    >>> from toolz.curried import do
    >>> log = []
    >>> inc = lambda x: x + 1
    >>> inc = compose(inc, do(log.append))
    >>> inc(1)
    2
    >>> inc(11)
    12
    >>> log
    [1, 11]
    """
    func(x)  # result intentionally ignored; only the side effect matters
    return x
@curry
def flip(func, a, b):
    """ Call the function call with the arguments flipped
    This function is curried.
    >>> def div(a, b):
    ...     return a / b
    ...
    >>> flip(div, 2, 1)
    0.5
    >>> div_by_two = flip(div, 2)
    >>> div_by_two(4)
    2.0
    This is particularly useful for built in functions and functions defined
    in C extensions that accept positional only arguments. For example:
    isinstance, issubclass.
    >>> data = [1, 'a', 'b', 2, 1.5, object(), 3]
    >>> only_ints = list(filter(flip(isinstance, int), data))
    >>> only_ints
    [1, 2, 3]
    """
    # Swap the two positional arguments before delegating to ``func``.
    return func(b, a)
def return_none(exc):
    """ Default exception handler for ``excepts``: ignore *exc*, yield None.
    """
    return None
class excepts(object):
    """A wrapper around a function to catch exceptions and
    dispatch to a handler.
    This is like a functional try/except block, in the same way that
    ifexprs are functional if/else blocks.
    Examples
    --------
    >>> excepting = excepts(
    ...     ValueError,
    ...     lambda a: [1, 2].index(a),
    ...     lambda _: -1,
    ... )
    >>> excepting(1)
    0
    >>> excepting(3)
    -1
    Multiple exceptions and default except clause.
    >>> excepting = excepts((IndexError, KeyError), lambda a: a[0])
    >>> excepting([])
    >>> excepting([1])
    1
    >>> excepting({})
    >>> excepting({0: 1})
    1
    """
    def __init__(self, exc, func, handler=return_none):
        # exc: an exception class or tuple of classes to catch
        # func: the wrapped callable
        # handler: called with the caught exception; defaults to return_none
        self.exc = exc
        self.func = func
        self.handler = handler
    def __call__(self, *args, **kwargs):
        try:
            return self.func(*args, **kwargs)
        except self.exc as e:
            return self.handler(e)
    @instanceproperty(classval=__doc__)
    def __doc__(self):
        # Build an instance-specific docstring describing the wrapped
        # function and handler; fall back to the class docstring when a
        # participant lacks ``__name__``/``__doc__``.
        exc = self.exc
        try:
            if isinstance(exc, tuple):
                exc_name = '(%s)' % ', '.join(
                    map(attrgetter('__name__'), exc),
                )
            else:
                exc_name = exc.__name__
            return dedent(
                """\
                A wrapper around {inst.func.__name__!r} that will except:
                {exc}
                and handle any exceptions with {inst.handler.__name__!r}.
                Docs for {inst.func.__name__!r}:
                {inst.func.__doc__}
                Docs for {inst.handler.__name__!r}:
                {inst.handler.__doc__}
                """
            ).format(
                inst=self,
                exc=exc_name,
            )
        except AttributeError:
            return type(self).__doc__
    @property
    def __name__(self):
        # e.g. ``getitem_excepting_IndexError_or_KeyError``
        exc = self.exc
        try:
            if isinstance(exc, tuple):
                exc_name = '_or_'.join(map(attrgetter('__name__'), exc))
            else:
                exc_name = exc.__name__
            return '%s_excepting_%s' % (self.func.__name__, exc_name)
        except AttributeError:
            return 'excepting'
if PY3:  # pragma: py2 no cover
    def _check_sigspec(sigspec, func, builtin_func, *builtin_args):
        # Resolve a signature for ``func`` (computing it when not supplied).
        # Returns ``(sigspec, None)`` on success, or ``(None, fallback)``
        # when ``builtin_func(*builtin_args)`` (the registry-backed answer)
        # must be used instead.
        if sigspec is None:
            try:
                sigspec = inspect.signature(func)
            except (ValueError, TypeError) as e:
                sigspec = e
        if isinstance(sigspec, ValueError):
            return None, builtin_func(*builtin_args)
        elif not isinstance(sigspec, inspect.Signature):
            if (
                func in _sigs.signatures
                and ((
                    hasattr(func, '__signature__')
                    and hasattr(func.__signature__, '__get__')
                ) or (
                    PY33
                    and hasattr(func, '__wrapped__')
                    and hasattr(func.__wrapped__, '__get__')
                    and not callable(func.__wrapped__)
                ))
            ):  # pragma: no cover (not covered in Python 3.4)
                val = builtin_func(*builtin_args)
                return None, val
            return None, False
        return sigspec, None
else:  # pragma: py3 no cover
    def _check_sigspec(sigspec, func, builtin_func, *builtin_args):
        # Python 2 path: use getargspec; a TypeError means "not a Python
        # function", in which case the builtin registry is consulted.
        if sigspec is None:
            try:
                sigspec = inspect.getargspec(func)
            except TypeError as e:
                sigspec = e
        if isinstance(sigspec, TypeError):
            if not callable(func):
                return None, False
            return None, builtin_func(*builtin_args)
        return sigspec, None
if PY34 or PYPY:  # pragma: no cover
    _check_sigspec_orig = _check_sigspec
    def _check_sigspec(sigspec, func, builtin_func, *builtin_args):
        # Python 3.4 and PyPy may lie, so use our registry for builtins instead
        if func in _sigs.signatures:
            val = builtin_func(*builtin_args)
            return None, val
        return _check_sigspec_orig(sigspec, func, builtin_func, *builtin_args)
_check_sigspec.__doc__ = """ \
Private function to aid in introspection compatibly across Python versions.
If a callable doesn't have a signature (Python 3) or an argspec (Python 2),
the signature registry in toolz._signatures is used.
"""
if PY3: # pragma: py2 no cover
def num_required_args(func, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._num_required_args,
func)
if sigspec is None:
return rv
return sum(1 for p in sigspec.parameters.values()
if p.default is p.empty
and p.kind in (p.POSITIONAL_OR_KEYWORD, p.POSITIONAL_ONLY))
def has_varargs(func, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._has_varargs, func)
if sigspec is None:
return rv
return any(p.kind == p.VAR_POSITIONAL
for p in sigspec.parameters.values())
def has_keywords(func, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._has_keywords, func)
if sigspec is None:
return rv
return any(p.default is not p.empty
or p.kind in (p.KEYWORD_ONLY, p.VAR_KEYWORD)
for p in sigspec.parameters.values())
def is_valid_args(func, args, kwargs, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_valid_args,
func, args, kwargs)
if sigspec is None:
return rv
try:
sigspec.bind(*args, **kwargs)
except TypeError:
return False
return True
def is_partial_args(func, args, kwargs, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_partial_args,
func, args, kwargs)
if sigspec is None:
return rv
try:
sigspec.bind_partial(*args, **kwargs)
except TypeError:
return False
return True
else:  # pragma: py3 no cover
    # Python 2 implementations: introspect via ``inspect.getargspec`` results
    # (obtained through ``_check_sigspec``), with the same registry fallback.
    def num_required_args(func, sigspec=None):
        # Required = all named positionals minus those that carry defaults.
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._num_required_args,
                                     func)
        if sigspec is None:
            return rv
        num_defaults = len(sigspec.defaults) if sigspec.defaults else 0
        return len(sigspec.args) - num_defaults
    def has_varargs(func, sigspec=None):
        # True iff the argspec records a ``*args`` name.
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._has_varargs, func)
        if sigspec is None:
            return rv
        return sigspec.varargs is not None
    def has_keywords(func, sigspec=None):
        # True iff there are defaults or a ``**kwargs`` parameter.
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._has_keywords, func)
        if sigspec is None:
            return rv
        return sigspec.defaults is not None or sigspec.keywords is not None
    def is_valid_args(func, args, kwargs, sigspec=None):
        # Simulate argument binding by hand (getargspec has no ``bind``):
        # normalize the call to purely positional form, then check for
        # leftover/duplicate arguments.
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_valid_args,
                                     func, args, kwargs)
        if sigspec is None:
            return rv
        spec = sigspec
        defaults = spec.defaults or ()
        num_pos = len(spec.args) - len(defaults)
        missing_pos = spec.args[len(args):num_pos]
        if any(arg not in kwargs for arg in missing_pos):
            return False
        if spec.varargs is None:
            num_extra_pos = max(0, len(args) - num_pos)
        else:
            num_extra_pos = 0
        kwargs = dict(kwargs)
        # Add missing keyword arguments (unless already included in `args`)
        missing_kwargs = spec.args[num_pos + num_extra_pos:]
        kwargs.update(zip(missing_kwargs, defaults[num_extra_pos:]))
        # Convert call to use positional arguments
        args = args + tuple(kwargs.pop(key) for key in spec.args[len(args):])
        if (
            not spec.keywords and kwargs
            or not spec.varargs and len(args) > len(spec.args)
            or set(spec.args[:len(args)]) & set(kwargs)
        ):
            return False
        else:
            return True
    def is_partial_args(func, args, kwargs, sigspec=None):
        # Same normalization as ``is_valid_args``, but not-yet-supplied
        # positionals are treated as fillable later (placeholder None).
        sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_partial_args,
                                     func, args, kwargs)
        if sigspec is None:
            return rv
        spec = sigspec
        defaults = spec.defaults or ()
        num_pos = len(spec.args) - len(defaults)
        if spec.varargs is None:
            num_extra_pos = max(0, len(args) - num_pos)
        else:
            num_extra_pos = 0
        kwargs = dict(kwargs)
        # Add missing keyword arguments (unless already included in `args`)
        missing_kwargs = spec.args[num_pos + num_extra_pos:]
        kwargs.update(zip(missing_kwargs, defaults[num_extra_pos:]))
        # Add missing position arguments as keywords (may already be in kwargs)
        missing_args = spec.args[len(args):num_pos + num_extra_pos]
        kwargs.update((x, None) for x in missing_args)
        # Convert call to use positional arguments
        args = args + tuple(kwargs.pop(key) for key in spec.args[len(args):])
        if (
            not spec.keywords and kwargs
            or not spec.varargs and len(args) > len(spec.args)
            or set(spec.args[:len(args)]) & set(kwargs)
        ):
            return False
        else:
            return True
def is_arity(n, func, sigspec=None):
    """ Does a function have only n positional arguments?
    This function relies on introspection and does not call the function.
    Returns None if validity can't be determined.
    >>> def f(x):
    ...     return x
    >>> is_arity(1, f)
    True
    >>> def g(x, y=1):
    ...     return x + y
    >>> is_arity(1, g)
    False
    """
    sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_arity, n, func)
    if sigspec is None:
        return rv
    # Wrong number of required positionals rules the function out immediately.
    required = num_required_args(func, sigspec)
    matches = required
    if required is not None:
        matches = (required == n)
        if not matches:
            return False
    # Any *args or keyword-capable parameter also rules it out.
    varargs = has_varargs(func, sigspec)
    if varargs:
        return False
    keywords = has_keywords(func, sigspec)
    if keywords:
        return False
    # If any probe was inconclusive, the overall answer is unknown.
    if matches is None or varargs is None or keywords is None:  # pragma: no cover
        return None
    return True
num_required_args.__doc__ = """ \
Number of required positional arguments
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(x, y, z=3):
... return x + y + z
>>> num_required_args(f)
2
>>> def g(*args, **kwargs):
... pass
>>> num_required_args(g)
0
"""
has_varargs.__doc__ = """ \
Does a function have variadic positional arguments?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(*args):
... return args
>>> has_varargs(f)
True
>>> def g(**kwargs):
... return kwargs
>>> has_varargs(g)
False
"""
has_keywords.__doc__ = """ \
Does a function have keyword arguments?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(x, y=0):
... return x + y
>>> has_keywords(f)
True
"""
is_valid_args.__doc__ = """ \
Is ``func(*args, **kwargs)`` a valid function call?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def add(x, y):
... return x + y
>>> is_valid_args(add, (1,), {})
False
>>> is_valid_args(add, (1, 2), {})
True
>>> is_valid_args(map, (), {})
False
**Implementation notes**
Python 2 relies on ``inspect.getargspec``, which only works for
user-defined functions. Python 3 uses ``inspect.signature``, which
works for many more types of callables.
Many builtins in the standard library are also supported.
"""
is_partial_args.__doc__ = """ \
Can partial(func, *args, **kwargs)(*args2, **kwargs2) be a valid call?
Returns True *only* if the call is valid or if it is possible for the
call to become valid by adding more positional or keyword arguments.
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def add(x, y):
... return x + y
>>> is_partial_args(add, (1,), {})
True
>>> is_partial_args(add, (1, 2), {})
True
>>> is_partial_args(add, (1, 2, 3), {})
False
>>> is_partial_args(map, (), {})
True
**Implementation notes**
Python 2 relies on ``inspect.getargspec``, which only works for
user-defined functions. Python 3 uses ``inspect.signature``, which
works for many more types of callables.
Many builtins in the standard library are also supported.
"""
from . import _signatures as _sigs
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/toolz/functoolz.py",
"copies": "1",
"size": "30770",
"license": "mit",
"hash": -438882412883073600,
"line_mean": 28.558117195,
"line_max": 79,
"alpha_frac": 0.5485537862,
"autogenerated": false,
"ratio": 3.9413346996285386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49898884858285386,
"avg_score": null,
"num_lines": null
} |
from functools import reduce, partial
import inspect
import operator
from toolz.utils import no_default
def identity(x):
    """ Identity function.  Return ``x`` unchanged.
    >>> identity(3)
    3
    """
    return x
def thread_first(val, *forms):
    """ Thread value through a sequence of functions/forms
    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_first(1, inc, double)
    4
    If a step needs extra inputs, supply it as a tuple; the threaded value
    is inserted as the *first* argument.
    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_first(1, (add, 4), (pow, 2))  # pow(add(1, 4), 2)
    25
    So in general ``thread_first(x, f, (g, y, z))`` expands to
    ``g(f(x), y, z)``.
    See Also:
        thread_last
    """
    acc = val
    for form in forms:
        if callable(form):
            acc = form(acc)
        elif isinstance(form, tuple):
            fn, extra = form[0], form[1:]
            acc = fn(*((acc,) + extra))
        else:
            acc = None  # a form that is neither callable nor tuple yields None
    return acc
def thread_last(val, *forms):
    """ Thread value through a sequence of functions/forms
    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_last(1, inc, double)
    4
    If a step needs extra inputs, supply it as a tuple; the threaded value
    is inserted as the *last* argument.
    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_last(1, (add, 4), (pow, 2))  # pow(2, add(4, 1))
    32
    So in general ``thread_last(x, f, (g, y, z))`` expands to
    ``g(y, z, f(x))``.
    >>> def iseven(x):
    ...     return x % 2 == 0
    >>> list(thread_last([1, 2, 3], (map, inc), (filter, iseven)))
    [2, 4]
    See Also:
        thread_first
    """
    acc = val
    for form in forms:
        if callable(form):
            acc = form(acc)
        elif isinstance(form, tuple):
            fn, leading = form[0], form[1:]
            acc = fn(*(leading + (acc,)))
        else:
            acc = None  # a form that is neither callable nor tuple yields None
    return acc
def memoize(func, cache=None):
    """ Cache a function's result for speedy future evaluation
    Considerations:
        Trades memory for speed.
        Only use on pure functions.
    >>> def add(x, y): return x + y
    >>> add = memoize(add)
    Or use as a decorator
    >>> @memoize
    ... def add(x, y):
    ...     return x + y
    """
    if cache is None:
        cache = {}
    try:
        # BUG FIX: ``inspect.getargspec`` was removed in Python 3.11, raising
        # AttributeError (not the TypeError caught below).  Fall back to the
        # compatible ``getfullargspec`` so introspection keeps working.
        try:
            spec = inspect.getargspec(func)
            varkw = spec.keywords
            kwonly = ()
        except AttributeError:
            spec = inspect.getfullargspec(func)
            varkw = spec.varkw
            kwonly = spec.kwonlyargs
        if spec and not varkw and not spec.defaults and not kwonly:
            # Keyword arguments are impossible: key the cache on args alone.
            may_have_kwargs = False
        else:
            may_have_kwargs = True
    except TypeError:
        # Not introspectable (e.g. some builtins): assume kwargs are possible.
        may_have_kwargs = True

    def memof(*args, **kwargs):
        try:
            if may_have_kwargs:
                key = (args, frozenset(kwargs.items()))
            else:
                key = args
            in_cache = key in cache
        except TypeError:
            raise TypeError("Arguments to memoized function must be hashable")
        if in_cache:
            return cache[key]
        else:
            result = func(*args, **kwargs)
            cache[key] = result
            return result

    try:
        memof.__name__ = func.__name__
    except AttributeError:
        pass
    memof.__doc__ = func.__doc__
    return memof
def _num_required_args(func):
""" Number of args for func
>>> def foo(a, b, c=None):
... return a + b + c
>>> _num_required_args(foo)
2
>>> def bar(*args):
... return sum(args)
>>> print(_num_required_args(bar))
None
"""
try:
spec = inspect.getargspec(func)
if spec.varargs:
return None
num_defaults = len(spec.defaults) if spec.defaults else 0
return len(spec.args) - num_defaults
except TypeError:
return None
class curry(object):
    """ Curry a callable function
    Enables partial application of arguments through calling a function with an
    incomplete set of arguments.
    >>> def mul(x, y):
    ...     return x * y
    >>> mul = curry(mul)
    >>> double = mul(2)
    >>> double(10)
    20
    Also supports keyword arguments
    >>> @curry  # Can use curry as a decorator
    ... def f(x, y, a=10):
    ...     return a * (x + y)
    >>> add = f(a=1)
    >>> add(2, 3)
    5
    See Also:
        toolz.curried - namespace of curried functions
        http://toolz.readthedocs.org/en/latest/curry.html
    """
    def __init__(self, func, *args, **kwargs):
        if not callable(func):
            raise TypeError("Input must be callable")
        # Store the wrapped callable plus the arguments accumulated so far.
        self.func = func
        self.args = args
        # ``None`` (rather than {}) when there are no keywords, so __call__
        # can test it cheaply.
        self.keywords = kwargs if kwargs else None
        self.__doc__ = self.func.__doc__
        try:
            # Python 2 function name attribute; absent on many callables.
            self.func_name = self.func.func_name
        except AttributeError:
            pass
    def __str__(self):
        return str(self.func)
    def __repr__(self):
        return repr(self.func)
    def __call__(self, *args, **_kwargs):
        # Merge previously-stored and newly-supplied arguments.
        args = self.args + args
        if _kwargs:
            kwargs = {}
            if self.keywords:
                kwargs.update(self.keywords)
            kwargs.update(_kwargs)
        elif self.keywords:
            kwargs = self.keywords
        else:
            kwargs = {}
        # Try the call; a TypeError may mean "not enough arguments yet",
        # in which case we curry further instead of raising.
        try:
            return self.func(*args, **kwargs)
        except TypeError:
            required_args = _num_required_args(self.func)
            # If there was a genuine TypeError
            if required_args is not None and len(args) >= required_args:
                raise
            # If we only need one more argument
            if (required_args is not None and required_args - len(args) == 1):
                if kwargs:
                    return partial(self.func, *args, **kwargs)
                else:
                    return partial(self.func, *args)
            return curry(self.func, *args, **kwargs)
class Compose(object):
    """ A composition of functions, applied right to left.
    See Also:
        compose
    """
    __slots__ = ['funcs']

    def __init__(self, *funcs):
        self.funcs = funcs

    def __call__(self, *args, **kwargs):
        # Apply the rightmost function to the raw arguments, then feed each
        # result leftward through the remaining functions.
        ordered = self.funcs[::-1]
        result = ordered[0](*args, **kwargs)
        for fn in ordered[1:]:
            result = fn(result)
        return result

    def __getstate__(self):
        # Pickle support for the __slots__-only class.
        return self.funcs

    def __setstate__(self, state):
        self.funcs = tuple(state)
def compose(*funcs):
    """ Compose functions to operate in series.
    Returns a function that applies other functions in sequence, from right
    to left: ``compose(f, g, h)(x, y)`` is the same as ``f(g(h(x, y)))``.
    With no arguments, the identity function (f(x) = x) is returned.
    >>> inc = lambda i: i + 1
    >>> compose(str, inc)(3)
    '4'
    See Also:
        pipe
    """
    count = len(funcs)
    if count == 0:
        return identity
    if count == 1:
        return funcs[0]
    return Compose(*funcs)
def pipe(data, *funcs):
    """ Pipe a value through a sequence of functions
    ``pipe(data, f, g, h)`` is equivalent to ``h(g(f(data)))``.
    Think of the value as flowing through a UNIX-style pipeline:
    ``$ cat data | f | g | h``
    >>> double = lambda i: 2 * i
    >>> pipe(3, double, str)
    '6'
    See Also:
        compose
        thread_first
        thread_last
    """
    result = data
    for step in funcs:
        result = step(result)
    return result
def complement(func):
    """ Convert a predicate function to its logical complement.
    In other words, return a function that, for inputs that normally
    yield True, yields False, and vice-versa.
    >>> def iseven(n): return n % 2 == 0
    >>> isodd = complement(iseven)
    >>> iseven(2)
    True
    >>> isodd(2)
    False
    """
    # Note: the result is a Compose object (func followed by operator.not_),
    # so it always returns a strict bool regardless of func's return type.
    return compose(operator.not_, func)
def juxt(*funcs):
    """
    Creates a function that calls several functions with the same arguments.
    Takes several functions and returns a function that applies its arguments
    to each of those functions then returns a sequence of the results.
    Name comes from juxtaposition: the fact of two things being seen or placed
    close together with contrasting effect.
    >>> inc = lambda x: x + 1
    >>> double = lambda x: x * 2
    >>> list(juxt(inc, double)(10))
    [11, 20]
    >>> list(juxt([inc, double])(10))
    [11, 20]
    """
    # A single non-callable argument is taken to be an iterable of functions.
    if len(funcs) == 1 and not callable(funcs[0]):
        funcs = funcs[0]

    def run_all(*args, **kwargs):
        # Lazily yield each function's result, matching the generator the
        # original expression form produced.
        for fn in funcs:
            yield fn(*args, **kwargs)
    return run_all
def do(func, x):
    """ Run ``func`` on ``x`` and return ``x`` (discarding ``func``'s result).
    Because the result of ``func`` is thrown away, only its side effects
    matter.  Logging stages can be made by composing ``do`` with a storage
    function like ``list.append`` or ``file.write``.
    >>> from toolz import compose
    >>> from toolz.curried import do
    >>> log = []
    >>> inc = lambda x: x + 1
    >>> inc = compose(inc, do(log.append))
    >>> inc(1)
    2
    >>> inc(11)
    12
    >>> log
    [1, 11]
    """
    func(x)
    return x
| {
"repo_name": "whilo/toolz",
"path": "toolz/functoolz/core.py",
"copies": "1",
"size": "9236",
"license": "bsd-3-clause",
"hash": 7884066073343873000,
"line_mean": 23.3693931398,
"line_max": 79,
"alpha_frac": 0.5436336076,
"autogenerated": false,
"ratio": 3.7914614121510675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4835095019751068,
"avg_score": null,
"num_lines": null
} |
from functools import reduce, partial
import inspect
import operator
import sys
# Public API of this module (controls ``from ... import *`` and documents
# which names are stable for external use).
__all__ = ('identity', 'thread_first', 'thread_last', 'memoize', 'compose',
           'pipe', 'complement', 'juxt', 'do', 'curry', 'flip')
def identity(x):
    """ Identity function.  Return ``x`` unchanged.
    >>> identity(3)
    3
    """
    return x
def thread_first(val, *forms):
    """ Thread value through a sequence of functions/forms
    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_first(1, inc, double)
    4
    If a step needs extra inputs, supply it as a tuple; the threaded value
    is inserted as the *first* argument.
    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_first(1, (add, 4), (pow, 2))  # pow(add(1, 4), 2)
    25
    So in general ``thread_first(x, f, (g, y, z))`` expands to
    ``g(f(x), y, z)``.
    See Also:
        thread_last
    """
    acc = val
    for form in forms:
        if callable(form):
            acc = form(acc)
        elif isinstance(form, tuple):
            fn, extra = form[0], form[1:]
            acc = fn(*((acc,) + extra))
        else:
            acc = None  # a form that is neither callable nor tuple yields None
    return acc
def thread_last(val, *forms):
    """ Thread value through a sequence of functions/forms
    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_last(1, inc, double)
    4
    If a step needs extra inputs, supply it as a tuple; the threaded value
    is inserted as the *last* argument.
    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_last(1, (add, 4), (pow, 2))  # pow(2, add(4, 1))
    32
    So in general ``thread_last(x, f, (g, y, z))`` expands to
    ``g(y, z, f(x))``.
    >>> def iseven(x):
    ...     return x % 2 == 0
    >>> list(thread_last([1, 2, 3], (map, inc), (filter, iseven)))
    [2, 4]
    See Also:
        thread_first
    """
    acc = val
    for form in forms:
        if callable(form):
            acc = form(acc)
        elif isinstance(form, tuple):
            fn, leading = form[0], form[1:]
            acc = fn(*(leading + (acc,)))
        else:
            acc = None  # a form that is neither callable nor tuple yields None
    return acc
# This is a kludge for Python 3.4.0 support
# currently len(inspect.getargspec(map).args) == 0, a wrong result.
# As this is fixed in future versions then hopefully this kludge can be
# removed.
# Hard-coded arities consulted by _num_required_args before introspection.
known_numargs = {map: 2, filter: 2, reduce: 2}
def _num_required_args(func):
    """ Number of args for func
    Returns None when the count cannot be determined (variadic functions or
    callables that cannot be introspected).
    >>> def foo(a, b, c=None):
    ...     return a + b + c
    >>> _num_required_args(foo)
    2
    >>> def bar(*args):
    ...     return sum(args)
    >>> print(_num_required_args(bar))
    None
    """
    # Hard-coded arities first (workaround for builtins that misreport).
    if func in known_numargs:
        return known_numargs[func]
    try:
        # BUG FIX: ``inspect.getargspec`` was removed in Python 3.11, raising
        # AttributeError which the original code did not catch; fall back to
        # the compatible ``getfullargspec`` (same .args/.varargs/.defaults).
        try:
            spec = inspect.getargspec(func)
        except AttributeError:
            spec = inspect.getfullargspec(func)
        if spec.varargs:
            return None
        num_defaults = len(spec.defaults) if spec.defaults else 0
        return len(spec.args) - num_defaults
    except TypeError:
        return None
class curry(object):
    """ Curry a callable function
    Enables partial application of arguments through calling a function with an
    incomplete set of arguments.
    >>> def mul(x, y):
    ...     return x * y
    >>> mul = curry(mul)
    >>> double = mul(2)
    >>> double(10)
    20
    Also supports keyword arguments
    >>> @curry  # Can use curry as a decorator
    ... def f(x, y, a=10):
    ...     return a * (x + y)
    >>> add = f(a=1)
    >>> add(2, 3)
    5
    See Also:
        toolz.curried - namespace of curried functions
        http://toolz.readthedocs.org/en/latest/curry.html
    """
    def __init__(self, *args, **kwargs):
        # Accept (func, *args, **kwargs); taking *args here lets ``func``
        # itself be passed positionally even if a kwarg named 'func' exists.
        if not args:
            raise TypeError('__init__() takes at least 2 arguments (1 given)')
        func, args = args[0], args[1:]
        if not callable(func):
            raise TypeError("Input must be callable")
        # curry- or functools.partial-like object?  Unpack and merge arguments
        if (hasattr(func, 'func')
                and hasattr(func, 'args')
                and hasattr(func, 'keywords')
                and isinstance(func.args, tuple)):
            _kwargs = {}
            if func.keywords:
                _kwargs.update(func.keywords)
            _kwargs.update(kwargs)
            kwargs = _kwargs
            args = func.args + args
            func = func.func
        # All call state is delegated to a functools.partial object.
        if kwargs:
            self._partial = partial(func, *args, **kwargs)
        else:
            self._partial = partial(func, *args)
        self.__doc__ = getattr(func, '__doc__', None)
        self.__name__ = getattr(func, '__name__', '<curry>')
    @property
    def func(self):
        return self._partial.func
    @property
    def args(self):
        return self._partial.args
    @property
    def keywords(self):
        return self._partial.keywords
    @property
    def func_name(self):
        # Python 2 style alias for __name__.
        return self.__name__
    def __str__(self):
        return str(self.func)
    def __repr__(self):
        return repr(self.func)
    def __hash__(self):
        # Hash on (func, args, keywords) so equal curries hash equal;
        # keywords are frozen into a frozenset to be hashable.
        return hash((self.func, self.args,
                     frozenset(self.keywords.items()) if self.keywords
                     else None))
    def __eq__(self, other):
        return (isinstance(other, curry) and self.func == other.func and
                self.args == other.args and self.keywords == other.keywords)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __call__(self, *args, **kwargs):
        # Try the call; a TypeError may mean "not enough arguments yet",
        # in which case we curry further instead of raising.
        try:
            return self._partial(*args, **kwargs)
        except TypeError:
            # If there was a genuine TypeError
            required_args = _num_required_args(self.func)
            if (required_args is not None and
                    len(args) + len(self.args) >= required_args):
                raise
            return curry(self._partial, *args, **kwargs)
    def __get__(self, instance, owner):
        # Descriptor protocol: allow curried functions to act as methods by
        # binding the instance as the first argument.
        if instance is None:
            return self
        return curry(self, instance)
    # pickle protocol because functools.partial objects can't be pickled
    def __getstate__(self):
        # dictoolz.keyfilter, I miss you!
        userdict = tuple((k, v) for k, v in self.__dict__.items()
                         if k != '_partial')
        return self.func, self.args, self.keywords, userdict
    def __setstate__(self, state):
        func, args, kwargs, userdict = state
        self.__init__(func, *args, **(kwargs or {}))
        self.__dict__.update(userdict)
def has_kwargs(f):
    """ Does a function have keyword arguments?
    >>> def f(x, y=0):
    ...     return x + y
    >>> has_kwargs(f)
    True
    """
    if sys.version_info[0] == 2:  # pragma: no cover
        spec = inspect.getargspec(f)
        return bool(spec and (spec.keywords or spec.defaults))
    if sys.version_info[0] == 3:  # pragma: no cover
        spec = inspect.getfullargspec(f)
        # BUG FIX: also count **kwargs (varkw) and keyword-only parameters.
        # Previously only default-valued positionals were detected, so e.g.
        # ``def f(x, **kw)`` wrongly returned False, which made memoize key
        # its cache on positional args alone and conflate distinct calls.
        return bool(spec.defaults or spec.varkw or spec.kwonlyargs)
def isunary(f):
    """ Does a function have only a single argument?
    >>> def f(x):
    ...     return x
    >>> isunary(f)
    True
    >>> isunary(lambda x, y: x + y)
    False
    """
    try:
        # Pick the argspec flavor for the running interpreter.
        if sys.version_info[0] == 2:  # pragma: no cover
            argspec = inspect.getargspec(f)
        if sys.version_info[0] == 3:  # pragma: no cover
            argspec = inspect.getfullargspec(f)
        # Unary means: exactly one named positional, no *args, no keywords.
        single = (bool(argspec)
                  and argspec.varargs is None
                  and not has_kwargs(f)
                  and len(argspec.args) == 1)
        return single
    except TypeError:  # pragma: no cover
        return None  # in Python < 3.4 builtins fail, return None
@curry
def memoize(func, cache=None, key=None):
    """ Cache a function's result for speedy future evaluation
    Considerations:
        Trades memory for speed.
        Only use on pure functions.
    >>> def add(x, y): return x + y
    >>> add = memoize(add)
    Or use as a decorator
    >>> @memoize
    ... def add(x, y):
    ...     return x + y
    Use the ``cache`` keyword to provide a dict-like object as an initial cache
    >>> @memoize(cache={(1, 2): 3})
    ... def add(x, y):
    ...     return x + y
    Note that the above works as a decorator because ``memoize`` is curried.
    It is also possible to provide a ``key(args, kwargs)`` function that
    calculates keys used for the cache, which receives an ``args`` tuple and
    ``kwargs`` dict as input, and must return a hashable value.  However,
    the default key function should be sufficient most of the time.
    >>> # Use key function that ignores extraneous keyword arguments
    >>> @memoize(key=lambda args, kwargs: args)
    ... def add(x, y, verbose=False):
    ...     if verbose:
    ...         print('Calculating %s + %s' % (x, y))
    ...     return x + y
    """
    if cache is None:
        cache = {}
    # Introspect once, up front, to choose the cheapest cache-key shape.
    try:
        may_have_kwargs = has_kwargs(func)
        # Is unary function (single arg, no variadic argument or keywords)?
        is_unary = isunary(func)
    except TypeError:  # pragma: no cover
        may_have_kwargs = True
        is_unary = False
    def memof(*args, **kwargs):
        try:
            if key is not None:
                # User-supplied key function takes precedence.
                k = key(args, kwargs)
            elif is_unary:
                # Fast path: a single argument is its own key.
                k = args[0]
            elif may_have_kwargs:
                k = (args or None,
                     frozenset(kwargs.items()) if kwargs else None)
            else:
                k = args
            in_cache = k in cache
        except TypeError:
            raise TypeError("Arguments to memoized function must be hashable")
        if in_cache:
            return cache[k]
        else:
            result = func(*args, **kwargs)
            cache[k] = result
            return result
    try:
        memof.__name__ = func.__name__
    except AttributeError:
        pass
    memof.__doc__ = func.__doc__
    return memof
class Compose(object):
    """ A composition of functions, applied right to left.
    See Also:
        compose
    """
    __slots__ = 'first', 'funcs'

    def __init__(self, funcs):
        # ``funcs`` arrives outermost-first; store innermost-first so that
        # ``__call__`` can apply them left to right.
        funcs = tuple(reversed(funcs))
        self.first = funcs[0]
        self.funcs = funcs[1:]

    def __call__(self, *args, **kwargs):
        ret = self.first(*args, **kwargs)
        for f in self.funcs:
            ret = f(ret)
        return ret

    def __getstate__(self):
        # Pickle support for the __slots__-only class.
        return self.first, self.funcs

    def __setstate__(self, state):
        self.first, self.funcs = state

    @property
    def __doc__(self):
        def composed_doc(*fs):
            """Generate a docstring for the composition of fs.
            """
            if not fs:
                # Argument name for the docstring.
                return '*args, **kwargs'
            return '{f}({g})'.format(f=fs[0].__name__, g=composed_doc(*fs[1:]))
        try:
            return (
                'lambda *args, **kwargs: ' +
                composed_doc(*reversed((self.first,) + self.funcs))
            )
        except AttributeError:
            # One of our callables does not have a `__name__`, whatever.
            return 'A composition of functions'

    @property
    def __name__(self):
        try:
            # BUG FIX: the generator expression argument previously carried a
            # trailing comma inside the call, which is a SyntaxError
            # ("Generator expression must be parenthesized") and made the
            # whole module unimportable.
            return '_of_'.join(
                f.__name__ for f in reversed((self.first,) + self.funcs)
            )
        except AttributeError:
            return type(self).__name__
def compose(*funcs):
    """ Compose functions to operate in series.
    Returns a function that applies other functions in sequence, from right
    to left: ``compose(f, g, h)(x, y)`` is the same as ``f(g(h(x, y)))``.
    With no arguments, the identity function (f(x) = x) is returned.
    >>> inc = lambda i: i + 1
    >>> compose(str, inc)(3)
    '4'
    See Also:
        pipe
    """
    count = len(funcs)
    if count == 0:
        return identity
    if count == 1:
        return funcs[0]
    return Compose(funcs)
def pipe(data, *funcs):
    """ Pipe a value through a sequence of functions
    ``pipe(data, f, g, h)`` is equivalent to ``h(g(f(data)))``.
    Think of the value as flowing through a UNIX-style pipeline:
    ``$ cat data | f | g | h``
    >>> double = lambda i: 2 * i
    >>> pipe(3, double, str)
    '6'
    See Also:
        compose
        thread_first
        thread_last
    """
    result = data
    for step in funcs:
        result = step(result)
    return result
def complement(func):
    """ Convert a predicate function to its logical complement.
    In other words, return a function that, for inputs that normally
    yield True, yields False, and vice-versa.
    >>> def iseven(n): return n % 2 == 0
    >>> isodd = complement(iseven)
    >>> iseven(2)
    True
    >>> isodd(2)
    False
    """
    # Note: the result is a Compose object (func followed by operator.not_),
    # so it always returns a strict bool regardless of func's return type.
    return compose(operator.not_, func)
class juxt(object):
    """
    Creates a function that calls several functions with the same arguments.
    Takes several functions and returns a function that applies its arguments
    to each of those functions then returns a tuple of the results.
    Name comes from juxtaposition: the fact of two things being seen or placed
    close together with contrasting effect.
    >>> inc = lambda x: x + 1
    >>> double = lambda x: x * 2
    >>> juxt(inc, double)(10)
    (11, 20)
    >>> juxt([inc, double])(10)
    (11, 20)
    """
    __slots__ = ['funcs']

    def __init__(self, *funcs):
        # A single non-callable argument is taken to be an iterable of
        # functions.
        if len(funcs) == 1 and not callable(funcs[0]):
            funcs = funcs[0]
        self.funcs = tuple(funcs)

    def __call__(self, *args, **kwargs):
        results = []
        for fn in self.funcs:
            results.append(fn(*args, **kwargs))
        return tuple(results)

    def __getstate__(self):
        # Pickle support for the __slots__-only class.
        return self.funcs

    def __setstate__(self, state):
        self.funcs = state
def do(func, x):
    """ Run ``func`` on ``x`` and return ``x`` (discarding ``func``'s result).
    Because the result of ``func`` is thrown away, only its side effects
    matter.  Logging stages can be made by composing ``do`` with a storage
    function like ``list.append`` or ``file.write``.
    >>> from toolz import compose
    >>> from toolz.curried import do
    >>> log = []
    >>> inc = lambda x: x + 1
    >>> inc = compose(inc, do(log.append))
    >>> inc(1)
    2
    >>> inc(11)
    12
    >>> log
    [1, 11]
    """
    func(x)
    return x
@curry
def flip(func, a, b):
    """Call ``func`` with its two arguments swapped.
    This function is curried.
    >>> def div(a, b):
    ...     return a / b
    ...
    >>> flip(div, 2, 1)
    0.5
    >>> div_by_two = flip(div, 2)
    >>> div_by_two(4)
    2.0
    This is particularly useful for built in functions and functions defined
    in C extensions that accept positional only arguments.  For example:
    isinstance, issubclass.
    >>> data = [1, 'a', 'b', 2, 1.5, object(), 3]
    >>> only_ints = list(filter(flip(isinstance, int), data))
    >>> only_ints
    [1, 2, 3]
    """
    # Swap the positional order before delegating.
    return func(b, a)
| {
"repo_name": "jcrist/toolz",
"path": "toolz/functoolz.py",
"copies": "5",
"size": "14985",
"license": "bsd-3-clause",
"hash": 6766746920942812000,
"line_mean": 25.2894736842,
"line_max": 79,
"alpha_frac": 0.5468802135,
"autogenerated": false,
"ratio": 3.796554345072207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6843434558572208,
"avg_score": null,
"num_lines": null
} |
from functools import reduce, partial
import inspect
import operator
# Public API of this module (controls ``from ... import *`` and documents
# which names are stable for external use).
__all__ = ('identity', 'thread_first', 'thread_last', 'memoize', 'compose',
           'pipe', 'complement', 'juxt', 'do', 'curry')
def identity(x):
    """ Identity function.  Return ``x`` unchanged.
    >>> identity(3)
    3
    """
    return x
def thread_first(val, *forms):
    """ Thread value through a sequence of functions/forms
    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_first(1, inc, double)
    4
    If a step needs extra inputs, supply it as a tuple; the threaded value
    is inserted as the *first* argument.
    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_first(1, (add, 4), (pow, 2))  # pow(add(1, 4), 2)
    25
    So in general ``thread_first(x, f, (g, y, z))`` expands to
    ``g(f(x), y, z)``.
    See Also:
        thread_last
    """
    acc = val
    for form in forms:
        if callable(form):
            acc = form(acc)
        elif isinstance(form, tuple):
            fn, extra = form[0], form[1:]
            acc = fn(*((acc,) + extra))
        else:
            acc = None  # a form that is neither callable nor tuple yields None
    return acc
def thread_last(val, *forms):
    """ Thread value through a sequence of functions/forms
    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_last(1, inc, double)
    4
    If a step needs extra inputs, supply it as a tuple; the threaded value
    is inserted as the *last* argument.
    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_last(1, (add, 4), (pow, 2))  # pow(2, add(4, 1))
    32
    So in general ``thread_last(x, f, (g, y, z))`` expands to
    ``g(y, z, f(x))``.
    >>> def iseven(x):
    ...     return x % 2 == 0
    >>> list(thread_last([1, 2, 3], (map, inc), (filter, iseven)))
    [2, 4]
    See Also:
        thread_first
    """
    acc = val
    for form in forms:
        if callable(form):
            acc = form(acc)
        elif isinstance(form, tuple):
            fn, leading = form[0], form[1:]
            acc = fn(*(leading + (acc,)))
        else:
            acc = None  # a form that is neither callable nor tuple yields None
    return acc
def _num_required_args(func):
""" Number of args for func
>>> def foo(a, b, c=None):
... return a + b + c
>>> _num_required_args(foo)
2
>>> def bar(*args):
... return sum(args)
>>> print(_num_required_args(bar))
None
"""
try:
spec = inspect.getargspec(func)
if spec.varargs:
return None
num_defaults = len(spec.defaults) if spec.defaults else 0
return len(spec.args) - num_defaults
except TypeError:
return None
class curry(object):
    """ Curry a callable function
    Enables partial application of arguments through calling a function with an
    incomplete set of arguments.
    >>> def mul(x, y):
    ...     return x * y
    >>> mul = curry(mul)
    >>> double = mul(2)
    >>> double(10)
    20
    Also supports keyword arguments
    >>> @curry  # Can use curry as a decorator
    ... def f(x, y, a=10):
    ...     return a * (x + y)
    >>> add = f(a=1)
    >>> add(2, 3)
    5
    See Also:
        toolz.curried - namespace of curried functions
        http://toolz.readthedocs.org/en/latest/curry.html
    """
    def __init__(self, func, *args, **kwargs):
        if not callable(func):
            raise TypeError("Input must be callable")
        # Store the wrapped callable plus the arguments accumulated so far.
        self.func = func
        self.args = args
        # ``None`` (rather than {}) when there are no keywords, so __call__
        # can test it cheaply.
        self.keywords = kwargs if kwargs else None
        self.__doc__ = self.func.__doc__
        try:
            # Python 2 function name attribute; absent on many callables.
            self.func_name = self.func.func_name
        except AttributeError:
            pass
    def __str__(self):
        return str(self.func)
    def __repr__(self):
        return repr(self.func)
    def __call__(self, *args, **_kwargs):
        # Merge previously-stored and newly-supplied arguments.
        args = self.args + args
        if _kwargs:
            kwargs = {}
            if self.keywords:
                kwargs.update(self.keywords)
            kwargs.update(_kwargs)
        elif self.keywords:
            kwargs = self.keywords
        else:
            kwargs = {}
        # Try the call; a TypeError may mean "not enough arguments yet",
        # in which case we curry further instead of raising.
        try:
            return self.func(*args, **kwargs)
        except TypeError:
            required_args = _num_required_args(self.func)
            # If there was a genuine TypeError
            if required_args is not None and len(args) >= required_args:
                raise
            # If we only need one more argument
            if (required_args is not None and required_args - len(args) == 1):
                if kwargs:
                    return partial(self.func, *args, **kwargs)
                else:
                    return partial(self.func, *args)
            return curry(self.func, *args, **kwargs)
@curry
def memoize(func, cache=None, key=None):
    """ Cache a function's result for speedy future evaluation
    Considerations:
        Trades memory for speed.
        Only use on pure functions.
    >>> def add(x, y): return x + y
    >>> add = memoize(add)
    Or use as a decorator
    >>> @memoize
    ... def add(x, y):
    ...     return x + y
    Use the ``cache`` keyword to provide a dict-like object as an initial cache
    >>> @memoize(cache={(1, 2): 3})
    ... def add(x, y):
    ...     return x + y
    Note that the above works as a decorator because ``memoize`` is curried.
    It is also possible to provide a ``key(args, kwargs)`` function that
    calculates keys used for the cache, which receives an ``args`` tuple and
    ``kwargs`` dict as input, and must return a hashable value.  However,
    the default key function should be sufficient most of the time.
    >>> # Use key function that ignores extraneous keyword arguments
    >>> @memoize(key=lambda args, kwargs: args)
    ... def add(x, y, verbose=False):
    ...     if verbose:
    ...         print('Calculating %s + %s' % (x, y))
    ...     return x + y
    """
    if cache is None:
        cache = {}
    # Introspect once, up front, to choose the cheapest cache-key shape.
    try:
        # BUG FIX: ``inspect.getargspec`` was removed in Python 3.11, raising
        # AttributeError (not the TypeError caught below).  Fall back to the
        # compatible ``getfullargspec`` so introspection keeps working.
        try:
            spec = inspect.getargspec(func)
            varkw = spec.keywords
            kwonly = ()
        except AttributeError:
            spec = inspect.getfullargspec(func)
            varkw = spec.varkw
            kwonly = spec.kwonlyargs
        may_have_kwargs = bool(not spec or varkw or spec.defaults or kwonly)
        # Is unary function (single arg, no variadic argument or keywords)?
        is_unary = (spec and spec.varargs is None and not may_have_kwargs
                    and len(spec.args) == 1)
    except TypeError:
        may_have_kwargs = True
        is_unary = False

    def memof(*args, **kwargs):
        try:
            if key is not None:
                # User-supplied key function takes precedence.
                k = key(args, kwargs)
            elif is_unary:
                # Fast path: a single argument is its own key.
                k = args[0]
            elif may_have_kwargs:
                k = (args or None,
                     frozenset(kwargs.items()) if kwargs else None)
            else:
                k = args
            in_cache = k in cache
        except TypeError:
            raise TypeError("Arguments to memoized function must be hashable")
        if in_cache:
            return cache[k]
        else:
            result = func(*args, **kwargs)
            cache[k] = result
            return result

    try:
        memof.__name__ = func.__name__
    except AttributeError:
        pass
    memof.__doc__ = func.__doc__
    return memof
class Compose(object):
    """ A composition of functions, applied right to left.
    See Also:
        compose
    """
    __slots__ = ['funcs']

    def __init__(self, *funcs):
        self.funcs = funcs

    def __call__(self, *args, **kwargs):
        # Apply the rightmost function to the raw arguments, then feed each
        # result leftward through the remaining functions.
        ordered = self.funcs[::-1]
        result = ordered[0](*args, **kwargs)
        for fn in ordered[1:]:
            result = fn(result)
        return result

    def __getstate__(self):
        # Pickle support for the __slots__-only class.
        return self.funcs

    def __setstate__(self, state):
        self.funcs = tuple(state)
def compose(*funcs):
    """ Compose functions to operate in series.
    ``compose(f, g, h)(x, y)`` is equivalent to ``f(g(h(x, y)))`` —
    functions apply right-to-left.  With no arguments the identity
    function is returned; a single function is returned unchanged.
    >>> inc = lambda i: i + 1
    >>> compose(str, inc)(3)
    '4'
    See Also:
        pipe
    """
    count = len(funcs)
    if count == 0:
        return identity
    elif count == 1:
        return funcs[0]
    return Compose(*funcs)
def pipe(data, *funcs):
    """ Pipe a value through a sequence of functions
    ``pipe(data, f, g, h)`` computes ``h(g(f(data)))``, mirroring a UNIX
    pipeline ``$ cat data | f | g | h``.
    >>> double = lambda i: 2 * i
    >>> pipe(3, double, str)
    '6'
    See Also:
        compose
        thread_first
        thread_last
    """
    # Left fold: feed the running value through each stage in order.
    return reduce(lambda acc, step: step(acc), funcs, data)
def complement(func):
    """ Convert a predicate function to its logical complement.
    In other words, return a function that, for inputs that normally
    yield True, yields False, and vice-versa.
    >>> def iseven(n): return n % 2 == 0
    >>> isodd = complement(iseven)
    >>> iseven(2)
    True
    >>> isodd(2)
    False
    """
    # Chains operator.not_ after func; the result is a Compose object,
    # so the returned predicate always yields a bool.
    return compose(operator.not_, func)
def juxt(*funcs):
    """
    Creates a function that calls several functions with the same arguments.
    The returned callable lazily yields each function's result, in order.
    Name comes from juxtaposition: the fact of two things being seen or placed
    close together with contrasting effect.
    >>> inc = lambda x: x + 1
    >>> double = lambda x: x * 2
    >>> list(juxt(inc, double)(10))
    [11, 20]
    >>> list(juxt([inc, double])(10))
    [11, 20]
    """
    # A single non-callable argument is treated as an iterable of
    # functions; materialize it so it can be iterated repeatedly.
    if len(funcs) == 1 and not callable(funcs[0]):
        funcs = tuple(funcs[0])
    def run_all(*args, **kwargs):
        return (fn(*args, **kwargs) for fn in funcs)
    return run_all
def do(func, x):
    """ Call ``func(x)`` for its side effect, then return ``x`` unchanged.
    Useful for inserting logging or storage taps into a composition, e.g.
    with a storage function like ``list.append`` or ``file.write``:
    >>> from toolz import compose
    >>> from toolz.curried import do
    >>> log = []
    >>> inc = lambda x: x + 1
    >>> inc = compose(inc, do(log.append))
    >>> inc(1)
    2
    >>> log
    [1]
    """
    func(x)  # return value intentionally discarded; only the side effect matters
    return x
| {
"repo_name": "larsmans/toolz",
"path": "toolz/functoolz.py",
"copies": "7",
"size": "10471",
"license": "bsd-3-clause",
"hash": 4116973708904466000,
"line_mean": 24.5390243902,
"line_max": 79,
"alpha_frac": 0.5474166746,
"autogenerated": false,
"ratio": 3.7897213174086137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7837137992008613,
"avg_score": null,
"num_lines": null
} |
from functools import reduce, partial
import inspect
import operator
def identity(x):
    """Return ``x`` unchanged (the identity function)."""
    return x
def thread_first(val, *forms):
    """ Thread ``val`` through ``forms``, inserting it as the FIRST argument.
    Each form is either a callable, applied directly, or a tuple
    ``(func, *extra)`` called as ``func(value, *extra)``.
    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_first(1, inc, double)
    4
    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_first(1, (add, 4), (pow, 2))  # pow(add(1, 4), 2)
    25
    So in general ``thread_first(x, f, (g, y, z))`` expands to ``g(f(x), y, z)``.
    See Also:
        thread_last
    """
    def apply_form(acc, form):
        # Plain callable: feed the running value straight in.
        if callable(form):
            return form(acc)
        # Tuple form: (func, *extra) with the value prepended.
        if isinstance(form, tuple):
            fn = form[0]
            return fn(acc, *form[1:])
    return reduce(apply_form, forms, val)
def thread_last(val, *forms):
    """ Thread ``val`` through ``forms``, inserting it as the LAST argument.
    Each form is either a callable, applied directly, or a tuple
    ``(func, *extra)`` called as ``func(*extra, value)``.
    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_last(1, inc, double)
    4
    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_last(1, (add, 4), (pow, 2))  # pow(2, add(4, 1))
    32
    So in general ``thread_last(x, f, (g, y, z))`` expands to ``g(y, z, f(x))``.
    >>> def iseven(x):
    ...     return x % 2 == 0
    >>> list(thread_last([1, 2, 3], (map, inc), (filter, iseven)))
    [2, 4]
    See Also:
        thread_first
    """
    def apply_form(acc, form):
        if callable(form):
            return form(acc)
        # Tuple form: (func, *extra) with the value appended.
        if isinstance(form, tuple):
            fn = form[0]
            return fn(*(form[1:] + (acc,)))
    return reduce(apply_form, forms, val)
def _num_required_args(func):
""" Number of args for func
>>> def foo(a, b, c=None):
... return a + b + c
>>> _num_required_args(foo)
2
>>> def bar(*args):
... return sum(args)
>>> print(_num_required_args(bar))
None
"""
try:
spec = inspect.getargspec(func)
if spec.varargs:
return None
num_defaults = len(spec.defaults) if spec.defaults else 0
return len(spec.args) - num_defaults
except TypeError:
return None
class curry(object):
    """ Curry a callable function
    Enables partial application of arguments through calling a function with an
    incomplete set of arguments.
    >>> def mul(x, y):
    ...     return x * y
    >>> mul = curry(mul)
    >>> double = mul(2)
    >>> double(10)
    20
    Also supports keyword arguments
    >>> @curry # Can use curry as a decorator
    ... def f(x, y, a=10):
    ...     return a * (x + y)
    >>> add = f(a=1)
    >>> add(2, 3)
    5
    See Also:
        toolz.curried - namespace of curried functions
        http://toolz.readthedocs.org/en/latest/curry.html
    """
    def __init__(self, func, *args, **kwargs):
        # Pre-supplied positional/keyword arguments are stored and merged
        # with whatever each later call provides.
        if not callable(func):
            raise TypeError("Input must be callable")
        self.func = func
        self.args = args
        # NOTE: stored as None (not {}) when empty, so callers must guard.
        self.keywords = kwargs if kwargs else None
        self.__doc__ = self.func.__doc__
        try:
            # Python 2 function attribute; absent on other callables.
            self.func_name = self.func.func_name
        except AttributeError:
            pass
    def __str__(self):
        return str(self.func)
    def __repr__(self):
        return repr(self.func)
    def __call__(self, *args, **_kwargs):
        # Merge stored and new arguments, then attempt the real call.
        args = self.args + args
        if _kwargs:
            kwargs = {}
            if self.keywords:
                kwargs.update(self.keywords)
            kwargs.update(_kwargs)
        elif self.keywords:
            kwargs = self.keywords
        else:
            kwargs = {}
        try:
            return self.func(*args, **kwargs)
        except TypeError:
            # Heuristic: a TypeError may mean "not enough arguments yet".
            # NOTE(review): a genuine TypeError raised *inside* func with
            # fewer than the required args supplied is indistinguishable
            # here and will be swallowed into further currying.
            required_args = _num_required_args(self.func)
            # If there was a genuine TypeError
            if required_args is not None and len(args) >= required_args:
                raise
            # If we only need one more argument
            if (required_args is not None and required_args - len(args) == 1):
                # One arg short: a plain partial is cheaper than a curry.
                if kwargs:
                    return partial(self.func, *args, **kwargs)
                else:
                    return partial(self.func, *args)
            return curry(self.func, *args, **kwargs)
@curry
def memoize(func, cache=None):
    """ Cache a function's result for speedy future evaluation
    Considerations:
        Trades memory for speed.
        Only use on pure functions.
    >>> def add(x, y): return x + y
    >>> add = memoize(add)
    Or use as a decorator
    >>> @memoize
    ... def add(x, y):
    ...     return x + y
    Use the ``cache`` keyword to provide a dict-like object as an initial cache
    >>> @memoize(cache={(1, 2): 3})
    ... def add(x, y):
    ...     return x + y
    Note that the above works as a decorator because ``memoize`` is curried.
    """
    if cache is None:
        cache = {}
    # inspect.getargspec was removed in Python 3.11; prefer
    # getfullargspec (Python 3) and fall back on Python 2.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    try:
        spec = getspec(func)
        # Python 2 ArgSpec names the **kwargs slot 'keywords';
        # Python 3 FullArgSpec names it 'varkw'.
        varkw = getattr(spec, 'keywords', None) or getattr(spec, 'varkw', None)
        may_have_kwargs = bool(not spec or varkw or spec.defaults)
        # Is unary function (single arg, no variadic argument or keywords)?
        is_unary = (spec and spec.varargs is None and not may_have_kwargs
                    and len(spec.args) == 1)
    except TypeError:
        # Not introspectable: assume the most general calling convention.
        may_have_kwargs = True
        is_unary = False
    def memof(*args, **kwargs):
        try:
            # Pick the cheapest cache key shape the signature allows.
            if is_unary:
                key = args[0]
            elif may_have_kwargs:
                key = (args, frozenset(kwargs.items()))
            else:
                key = args
            in_cache = key in cache
        except TypeError:
            raise TypeError("Arguments to memoized function must be hashable")
        if in_cache:
            return cache[key]
        else:
            result = func(*args, **kwargs)
            cache[key] = result
            return result
    # Preserve the wrapped function's identity for introspection.
    try:
        memof.__name__ = func.__name__
    except AttributeError:
        pass
    memof.__doc__ = func.__doc__
    return memof
class Compose(object):
    """ A composition of functions
    Applies the stored callables right-to-left: the last function receives
    the raw arguments and each remaining one is applied to the running
    result, so ``Compose(f, g)(x)`` computes ``f(g(x))``.
    See Also:
        compose
    """
    __slots__ = ['funcs']
    def __init__(self, *funcs):
        self.funcs = funcs
    def __call__(self, *args, **kwargs):
        # Reverse once per call; the first function in application order
        # gets the original arguments.
        stack = self.funcs[::-1]
        result = stack[0](*args, **kwargs)
        for fn in stack[1:]:
            result = fn(result)
        return result
    def __getstate__(self):
        # Explicit pickle support is needed because of __slots__.
        return self.funcs
    def __setstate__(self, state):
        self.funcs = tuple(state)
def compose(*funcs):
    """ Compose functions to operate in series.
    ``compose(f, g, h)(x, y)`` is equivalent to ``f(g(h(x, y)))`` —
    functions apply right-to-left.  With no arguments the identity
    function is returned; a single function is returned unchanged.
    >>> inc = lambda i: i + 1
    >>> compose(str, inc)(3)
    '4'
    See Also:
        pipe
    """
    count = len(funcs)
    if count == 0:
        return identity
    elif count == 1:
        return funcs[0]
    return Compose(*funcs)
def pipe(data, *funcs):
    """ Pipe a value through a sequence of functions
    ``pipe(data, f, g, h)`` computes ``h(g(f(data)))``, mirroring a UNIX
    pipeline ``$ cat data | f | g | h``.
    >>> double = lambda i: 2 * i
    >>> pipe(3, double, str)
    '6'
    See Also:
        compose
        thread_first
        thread_last
    """
    # Left fold: feed the running value through each stage in order.
    return reduce(lambda acc, step: step(acc), funcs, data)
def complement(func):
    """ Convert a predicate function to its logical complement.
    In other words, return a function that, for inputs that normally
    yield True, yields False, and vice-versa.
    >>> def iseven(n): return n % 2 == 0
    >>> isodd = complement(iseven)
    >>> iseven(2)
    True
    >>> isodd(2)
    False
    """
    # Chains operator.not_ after func; the result is a Compose object,
    # so the returned predicate always yields a bool.
    return compose(operator.not_, func)
def juxt(*funcs):
    """
    Creates a function that calls several functions with the same arguments.
    Takes several functions and returns a function that applies its arguments
    to each of those functions then returns a sequence of the results.
    Name comes from juxtaposition: the fact of two things being seen or placed
    close together with contrasting effect.
    >>> inc = lambda x: x + 1
    >>> double = lambda x: x * 2
    >>> list(juxt(inc, double)(10))
    [11, 20]
    >>> list(juxt([inc, double])(10))
    [11, 20]
    """
    if len(funcs) == 1 and not callable(funcs[0]):
        # Materialize the iterable: a one-shot iterator (e.g. a generator)
        # would otherwise be exhausted after the first call to juxt_inner.
        funcs = tuple(funcs[0])
    def juxt_inner(*args, **kwargs):
        # Lazily yield each function's result on the shared arguments.
        return (func(*args, **kwargs) for func in funcs)
    return juxt_inner
def do(func, x):
    """ Call ``func(x)`` for its side effect, then return ``x`` unchanged.
    Useful for inserting logging or storage taps into a composition, e.g.
    with a storage function like ``list.append`` or ``file.write``:
    >>> from toolz import compose
    >>> from toolz.curried import do
    >>> log = []
    >>> inc = lambda x: x + 1
    >>> inc = compose(inc, do(log.append))
    >>> inc(1)
    2
    >>> log
    [1]
    """
    func(x)  # return value intentionally discarded; only the side effect matters
    return x
| {
"repo_name": "joyrexus/toolz",
"path": "toolz/functoolz/core.py",
"copies": "1",
"size": "9658",
"license": "bsd-3-clause",
"hash": 3199862913879604000,
"line_mean": 23.7641025641,
"line_max": 79,
"alpha_frac": 0.5447297577,
"autogenerated": false,
"ratio": 3.787450980392157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9832180738092157,
"avg_score": 0,
"num_lines": 390
} |
from functools import reduce, partial, wraps
import inspect
import operator
from operator import attrgetter
from textwrap import dedent
import sys
from .compatibility import PY3, PY34, PYPY
__all__ = ('identity', 'thread_first', 'thread_last', 'memoize', 'compose',
'pipe', 'complement', 'juxt', 'do', 'curry', 'flip', 'excepts')
def identity(x):
    """ Identity function.  Return ``x`` unchanged.
    Useful as a default/no-op function value.
    >>> identity(3)
    3
    """
    return x
def thread_first(val, *forms):
    """ Thread ``val`` through ``forms``, inserting it as the FIRST argument.
    Each form is either a callable, applied directly, or a tuple
    ``(func, *extra)`` called as ``func(value, *extra)``.
    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_first(1, inc, double)
    4
    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_first(1, (add, 4), (pow, 2))  # pow(add(1, 4), 2)
    25
    So in general ``thread_first(x, f, (g, y, z))`` expands to ``g(f(x), y, z)``.
    See Also:
        thread_last
    """
    def apply_form(acc, form):
        # Plain callable: feed the running value straight in.
        if callable(form):
            return form(acc)
        # Tuple form: (func, *extra) with the value prepended.
        if isinstance(form, tuple):
            fn = form[0]
            return fn(acc, *form[1:])
    return reduce(apply_form, forms, val)
def thread_last(val, *forms):
    """ Thread ``val`` through ``forms``, inserting it as the LAST argument.
    Each form is either a callable, applied directly, or a tuple
    ``(func, *extra)`` called as ``func(*extra, value)``.
    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_last(1, inc, double)
    4
    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_last(1, (add, 4), (pow, 2))  # pow(2, add(4, 1))
    32
    So in general ``thread_last(x, f, (g, y, z))`` expands to ``g(y, z, f(x))``.
    >>> def iseven(x):
    ...     return x % 2 == 0
    >>> list(thread_last([1, 2, 3], (map, inc), (filter, iseven)))
    [2, 4]
    See Also:
        thread_first
    """
    def apply_form(acc, form):
        if callable(form):
            return form(acc)
        # Tuple form: (func, *extra) with the value appended.
        if isinstance(form, tuple):
            fn = form[0]
            return fn(*(form[1:] + (acc,)))
    return reduce(apply_form, forms, val)
class curry(object):
    """ Curry a callable function
    Enables partial application of arguments through calling a function with an
    incomplete set of arguments.
    >>> def mul(x, y):
    ...     return x * y
    >>> mul = curry(mul)
    >>> double = mul(2)
    >>> double(10)
    20
    Also supports keyword arguments
    >>> @curry # Can use curry as a decorator
    ... def f(x, y, a=10):
    ...     return a * (x + y)
    >>> add = f(a=1)
    >>> add(2, 3)
    5
    See Also:
        toolz.curried - namespace of curried functions
        http://toolz.readthedocs.org/en/latest/curry.html
    """
    def __init__(self, *args, **kwargs):
        # First positional argument is the callable; the rest are
        # pre-applied positional arguments.
        if not args:
            raise TypeError('__init__() takes at least 2 arguments (1 given)')
        func, args = args[0], args[1:]
        if not callable(func):
            raise TypeError("Input must be callable")
        # curry- or functools.partial-like object?  Unpack and merge arguments
        if (hasattr(func, 'func')
                and hasattr(func, 'args')
                and hasattr(func, 'keywords')
                and isinstance(func.args, tuple)):
            _kwargs = {}
            if func.keywords:
                _kwargs.update(func.keywords)
            _kwargs.update(kwargs)
            kwargs = _kwargs
            args = func.args + args
            func = func.func
        # All stored state lives in a functools.partial.
        if kwargs:
            self._partial = partial(func, *args, **kwargs)
        else:
            self._partial = partial(func, *args)
        self.__doc__ = getattr(func, '__doc__', None)
        self.__name__ = getattr(func, '__name__', '<curry>')
        # Lazily computed signature info; see _should_curry.
        self._sigspec = None
        self._has_unknown_args = None
    @property
    def func(self):
        """The underlying wrapped callable."""
        return self._partial.func
    __wrapped__ = func
    @property
    def args(self):
        """Positional arguments applied so far."""
        return self._partial.args
    @property
    def keywords(self):
        """Keyword arguments applied so far (may be None)."""
        return self._partial.keywords
    @property
    def func_name(self):
        # Python 2 style alias for __name__.
        return self.__name__
    def __str__(self):
        return str(self.func)
    def __repr__(self):
        return repr(self.func)
    def __hash__(self):
        return hash((self.func, self.args,
                     frozenset(self.keywords.items()) if self.keywords
                     else None))
    def __eq__(self, other):
        return (isinstance(other, curry) and self.func == other.func and
                self.args == other.args and self.keywords == other.keywords)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __call__(self, *args, **kwargs):
        # Try the real call; on TypeError decide (via signature
        # introspection) whether to curry further or propagate.
        try:
            return self._partial(*args, **kwargs)
        except TypeError as exc:
            if self._should_curry(args, kwargs, exc):
                return self.bind(*args, **kwargs)
            raise
    def _should_curry(self, args, kwargs, exc=None):
        """Decide whether a TypeError from __call__ means "curry more"."""
        func = self.func
        args = self.args + args
        if self.keywords:
            kwargs = dict(self.keywords, **kwargs)
        if self._sigspec is None:
            sigspec = self._sigspec = _signature_or_spec(func)
            self._has_unknown_args = _has_unknown_args(func, sigspec=sigspec)
        else:
            sigspec = self._sigspec
        if is_partial_args(func, args, kwargs, sigspec=sigspec) is False:
            # Nothing can make the call valid
            return False
        elif self._has_unknown_args:
            # The call may be valid and raised a TypeError, but we curry
            # anyway because the function may have `*args`.  This is useful
            # for decorators with signature `func(*args, **kwargs)`.
            return True
        elif not is_valid_args(func, args, kwargs, sigspec=sigspec):
            # Adding more arguments may make the call valid
            return True
        else:
            # There was a genuine TypeError
            return False
    def bind(self, *args, **kwargs):
        """Return a new curry with additional arguments applied."""
        return type(self)(self, *args, **kwargs)
    def call(self, *args, **kwargs):
        """Invoke the wrapped callable directly, without currying on error."""
        return self._partial(*args, **kwargs)
    def __get__(self, instance, owner):
        # Descriptor protocol: allow a curried function to act as a method.
        if instance is None:
            return self
        return curry(self, instance)
    # pickle protocol because functools.partial objects can't be pickled
    def __getstate__(self):
        # dictoolz.keyfilter, I miss you!
        userdict = tuple((k, v) for k, v in self.__dict__.items()
                         if k != '_partial')
        return self.func, self.args, self.keywords, userdict
    def __setstate__(self, state):
        func, args, kwargs, userdict = state
        self.__init__(func, *args, **(kwargs or {}))
        self.__dict__.update(userdict)
def has_kwargs(f):
    """ Does a function have keyword arguments?
    True when ``f`` declares default values, a ``**kwargs`` catch-all, or
    (Python 3) keyword-only arguments.
    >>> def f(x, y=0):
    ...     return x + y
    >>> has_kwargs(f)
    True
    """
    if sys.version_info[0] == 2:  # pragma: py3 no cover
        spec = inspect.getargspec(f)
        return bool(spec and (spec.keywords or spec.defaults))
    if sys.version_info[0] == 3:  # pragma: py2 no cover
        spec = inspect.getfullargspec(f)
        # Mirror the Python 2 branch: a **kwargs catch-all (varkw) counts
        # too, not just defaults; keyword-only args also accept keywords.
        return bool(spec.varkw or spec.defaults or spec.kwonlyargs)
def isunary(f):
    """ Does a function have only a single argument?
    >>> def f(x):
    ...     return x
    >>> isunary(f)
    True
    >>> isunary(lambda x, y: x + y)
    False
    """
    try:
        if sys.version_info[0] == 2:  # pragma: py3 no cover
            spec = inspect.getargspec(f)
        if sys.version_info[0] == 3:  # pragma: py2 no cover
            spec = inspect.getfullargspec(f)
        # Unary means: exactly one positional arg, no *args, and no
        # keyword-style arguments (as judged by has_kwargs).
        return bool(spec and spec.varargs is None and not has_kwargs(f)
                    and len(spec.args) == 1)
    except TypeError:  # pragma: no cover
        return None  # in Python < 3.4 builtins fail, return None
@curry
def memoize(func, cache=None, key=None):
    """ Cache a function's result for speedy future evaluation
    Considerations:
        Trades memory for speed.
        Only use on pure functions.
    >>> def add(x, y): return x + y
    >>> add = memoize(add)
    Or use as a decorator
    >>> @memoize
    ... def add(x, y):
    ...     return x + y
    Use the ``cache`` keyword to provide a dict-like object as an initial cache
    >>> @memoize(cache={(1, 2): 3})
    ... def add(x, y):
    ...     return x + y
    Note that the above works as a decorator because ``memoize`` is curried.
    It is also possible to provide a ``key(args, kwargs)`` function that
    calculates keys used for the cache, which receives an ``args`` tuple and
    ``kwargs`` dict as input, and must return a hashable value.  However,
    the default key function should be sufficient most of the time.
    >>> # Use key function that ignores extraneous keyword arguments
    >>> @memoize(key=lambda args, kwargs: args)
    ... def add(x, y, verbose=False):
    ...     if verbose:
    ...         print('Calculating %s + %s' % (x, y))
    ...     return x + y
    """
    if cache is None:
        cache = {}
    try:
        may_have_kwargs = has_kwargs(func)
        # Is unary function (single arg, no variadic argument or keywords)?
        is_unary = isunary(func)
    except TypeError:  # pragma: no cover
        # Not introspectable: assume the most general calling convention.
        may_have_kwargs = True
        is_unary = False
    # Choose the cheapest default key shape the signature allows.
    if key is None:
        if is_unary:
            def key(args, kwargs):
                return args[0]
        elif may_have_kwargs:
            def key(args, kwargs):
                return (
                    args or None,
                    frozenset(kwargs.items()) if kwargs else None,
                )
        else:
            def key(args, kwargs):
                return args
    def memof(*args, **kwargs):
        k = key(args, kwargs)
        try:
            # EAFP: cache hit is the common, fast path.
            return cache[k]
        except TypeError:
            raise TypeError("Arguments to memoized function must be hashable")
        except KeyError:
            cache[k] = result = func(*args, **kwargs)
            return result
    # Preserve the wrapped function's identity for introspection.
    try:
        memof.__name__ = func.__name__
    except AttributeError:
        pass
    memof.__doc__ = func.__doc__
    memof.__wrapped__ = func
    return memof
class Compose(object):
    """ A composition of functions
    Applies the given callables right-to-left: ``first`` receives the raw
    arguments and each remaining function is applied to the running result.
    See Also:
        compose
    """
    __slots__ = 'first', 'funcs'
    def __init__(self, funcs):
        # Store in application order: the function to run first, then the rest.
        funcs = tuple(reversed(funcs))
        self.first = funcs[0]
        self.funcs = funcs[1:]
    def __call__(self, *args, **kwargs):
        ret = self.first(*args, **kwargs)
        for f in self.funcs:
            ret = f(ret)
        return ret
    def __getstate__(self):
        # Pickle support (required because of __slots__).
        return self.first, self.funcs
    def __setstate__(self, state):
        self.first, self.funcs = state
    @property
    def __doc__(self):
        def composed_doc(*fs):
            """Generate a docstring for the composition of fs.
            """
            if not fs:
                # Argument name for the docstring.
                return '*args, **kwargs'
            return '{f}({g})'.format(f=fs[0].__name__, g=composed_doc(*fs[1:]))
        try:
            return (
                'lambda *args, **kwargs: ' +
                composed_doc(*reversed((self.first,) + self.funcs))
            )
        except AttributeError:
            # One of our callables does not have a `__name__`, whatever.
            return 'A composition of functions'
    @property
    def __name__(self):
        try:
            # FIX: the original had a trailing comma after an
            # unparenthesized generator expression, a SyntaxError.
            return '_of_'.join(
                f.__name__ for f in reversed((self.first,) + self.funcs)
            )
        except AttributeError:
            return type(self).__name__
def compose(*funcs):
    """ Compose functions to operate in series.
    ``compose(f, g, h)(x, y)`` is equivalent to ``f(g(h(x, y)))`` —
    functions apply right-to-left.  With no arguments the identity
    function is returned; a single function is returned unchanged.
    >>> inc = lambda i: i + 1
    >>> compose(str, inc)(3)
    '4'
    See Also:
        pipe
    """
    count = len(funcs)
    if count == 0:
        return identity
    elif count == 1:
        return funcs[0]
    return Compose(funcs)
def pipe(data, *funcs):
    """ Pipe a value through a sequence of functions
    ``pipe(data, f, g, h)`` computes ``h(g(f(data)))``, mirroring a UNIX
    pipeline ``$ cat data | f | g | h``.
    >>> double = lambda i: 2 * i
    >>> pipe(3, double, str)
    '6'
    See Also:
        compose
        thread_first
        thread_last
    """
    # Left fold: feed the running value through each stage in order.
    return reduce(lambda acc, step: step(acc), funcs, data)
def complement(func):
    """ Convert a predicate function to its logical complement.
    In other words, return a function that, for inputs that normally
    yield True, yields False, and vice-versa.
    >>> def iseven(n): return n % 2 == 0
    >>> isodd = complement(iseven)
    >>> iseven(2)
    True
    >>> isodd(2)
    False
    """
    # Chains operator.not_ after func; the result is a Compose object,
    # so the returned predicate always yields a bool.
    return compose(operator.not_, func)
class juxt(object):
    """
    Creates a function that calls several functions with the same arguments.
    The call applies its arguments to each stored function and returns a
    tuple of the results.
    Name comes from juxtaposition: the fact of two things being seen or placed
    close together with contrasting effect.
    >>> inc = lambda x: x + 1
    >>> double = lambda x: x * 2
    >>> juxt(inc, double)(10)
    (11, 20)
    >>> juxt([inc, double])(10)
    (11, 20)
    """
    __slots__ = ['funcs']
    def __init__(self, *funcs):
        # A single non-callable argument is taken to be an iterable of funcs.
        if len(funcs) == 1 and not callable(funcs[0]):
            funcs = funcs[0]
        self.funcs = tuple(funcs)
    def __call__(self, *args, **kwargs):
        results = []
        for fn in self.funcs:
            results.append(fn(*args, **kwargs))
        return tuple(results)
    def __getstate__(self):
        # Pickle support (required because of __slots__).
        return self.funcs
    def __setstate__(self, state):
        self.funcs = state
def do(func, x):
    """ Call ``func(x)`` for its side effect, then return ``x`` unchanged.
    Useful for inserting logging or storage taps into a composition, e.g.
    with a storage function like ``list.append`` or ``file.write``:
    >>> from toolz import compose
    >>> from toolz.curried import do
    >>> log = []
    >>> inc = lambda x: x + 1
    >>> inc = compose(inc, do(log.append))
    >>> inc(1)
    2
    >>> log
    [1]
    """
    func(x)  # return value intentionally discarded; only the side effect matters
    return x
@curry
def flip(func, a, b):
    """Call the function call with the arguments flipped.
    This function is curried.
    >>> def div(a, b):
    ...     return a / b
    ...
    >>> flip(div, 2, 1)
    0.5
    >>> div_by_two = flip(div, 2)
    >>> div_by_two(4)
    2.0
    This is particularly useful for built in functions and functions defined
    in C extensions that accept positional only arguments. For example:
    isinstance, issubclass.
    >>> data = [1, 'a', 'b', 2, 1.5, object(), 3]
    >>> only_ints = list(filter(flip(isinstance, int), data))
    >>> only_ints
    [1, 2, 3]
    """
    # Swap the two positional arguments; currying makes flip(func, a)
    # a partial application awaiting `b`.
    return func(b, a)
def return_none(exc):
    """Returns None.
    """
    # Default handler for `excepts`: swallow the exception, yield None.
    return None
class _ExceptsDoc(object):
    """A descriptor that allows us to get the docstring for both the
    `excepts` class and generate a custom docstring for the instances of
    excepts.
    Parameters
    ----------
    class_doc : str
        The docstring for the excepts class.
    """
    def __init__(self, class_doc):
        self._class_doc = class_doc
    def __get__(self, instance, owner):
        # Accessed on the class itself: return the class docstring.
        if instance is None:
            return self._class_doc
        exc = instance.exc
        try:
            # Format the exception name(s) for display.
            if isinstance(exc, tuple):
                exc_name = '(%s)' % ', '.join(
                    map(attrgetter('__name__'), exc),
                )
            else:
                exc_name = exc.__name__
            return dedent(
                """\
                A wrapper around {inst.func.__name__!r} that will except:
                {exc}
                and handle any exceptions with {inst.handler.__name__!r}.
                Docs for {inst.func.__name__!r}:
                {inst.func.__doc__}
                Docs for {inst.handler.__name__!r}:
                {inst.handler.__doc__}
                """
            ).format(
                inst=instance,
                exc=exc_name,
            )
        except AttributeError:
            # func/handler/exception lacks __name__ (e.g. a partial);
            # fall back to the generic class docstring.
            return self._class_doc
class excepts(object):
    """A wrapper around a function to catch exceptions and
    dispatch to a handler.
    This is like a functional try/except block, in the same way that
    ifexprs are functional if/else blocks.
    Examples
    --------
    >>> excepting = excepts(
    ...     ValueError,
    ...     lambda a: [1, 2].index(a),
    ...     lambda _: -1,
    ... )
    >>> excepting(1)
    0
    >>> excepting(3)
    -1
    Multiple exceptions and default except clause.
    >>> excepting = excepts((IndexError, KeyError), lambda a: a[0])
    >>> excepting([])
    >>> excepting([1])
    1
    >>> excepting({})
    >>> excepting({0: 1})
    1
    """
    # override the docstring above with a descriptor that can return
    # an instance-specific docstring
    __doc__ = _ExceptsDoc(__doc__)
    def __init__(self, exc, func, handler=return_none):
        # exc: exception class or tuple of classes to catch.
        # func: the wrapped callable.
        # handler: receives the caught exception; its return value becomes
        # the call's result (defaults to returning None).
        self.exc = exc
        self.func = func
        self.handler = handler
    def __call__(self, *args, **kwargs):
        try:
            return self.func(*args, **kwargs)
        except self.exc as e:
            return self.handler(e)
    @property
    def __name__(self):
        exc = self.exc
        try:
            if isinstance(exc, tuple):
                exc_name = '_or_'.join(map(attrgetter('__name__'), exc))
            else:
                exc_name = exc.__name__
            return '%s_excepting_%s' % (self.func.__name__, exc_name)
        except AttributeError:
            # func or an exception class lacks __name__ (e.g. a partial).
            return 'excepting'
# Introspection helper: decide (without calling) whether func(*args, **kwargs)
# would be a valid call.  Python 2 and 3 need different strategies.
if PY3:  # pragma: py2 no cover
    def is_valid_args(func, args, kwargs, sigspec=None):
        # Python 3: defer to inspect.signature().bind(), which models
        # the full calling convention.
        if sigspec is None:
            try:
                sigspec = inspect.signature(func)
            except (ValueError, TypeError) as e:
                sigspec = e
        if isinstance(sigspec, ValueError):
            # signature() couldn't introspect; consult the builtin registry.
            return _is_builtin_valid_args(func, args, kwargs)
        elif isinstance(sigspec, TypeError):
            return False
        try:
            sigspec.bind(*args, **kwargs)
        except (TypeError, AttributeError):
            return False
        return True
else:  # pragma: py3 no cover
    def is_valid_args(func, args, kwargs, sigspec=None):
        # Python 2: emulate the calling convention from getargspec data.
        if sigspec is None:
            try:
                sigspec = inspect.getargspec(func)
            except TypeError as e:
                sigspec = e
        if isinstance(sigspec, TypeError):
            if not callable(func):
                return False
            return _is_builtin_valid_args(func, args, kwargs)
        spec = sigspec
        defaults = spec.defaults or ()
        num_pos = len(spec.args) - len(defaults)
        missing_pos = spec.args[len(args):num_pos]
        if any(arg not in kwargs for arg in missing_pos):
            return False
        if spec.varargs is None:
            num_extra_pos = max(0, len(args) - num_pos)
        else:
            num_extra_pos = 0
        kwargs = dict(kwargs)
        # Add missing keyword arguments (unless already included in `args`)
        missing_kwargs = spec.args[num_pos + num_extra_pos:]
        kwargs.update(zip(missing_kwargs, defaults[num_extra_pos:]))
        # Convert call to use positional arguments
        args = args + tuple(kwargs.pop(key) for key in spec.args[len(args):])
        if (
            not spec.keywords and kwargs or
            not spec.varargs and len(args) > len(spec.args) or
            set(spec.args[:len(args)]) & set(kwargs)
        ):
            return False
        else:
            return True
if PY34 or PYPY:  # pragma: no cover
    _is_valid_args = is_valid_args
    def is_valid_args(func, args, kwargs, sigspec=None):
        # Python 3.4 and PyPy may lie, so use our registry for builtins instead
        val = _is_builtin_valid_args(func, args, kwargs)
        if val is not None:
            return val
        return _is_valid_args(func, args, kwargs, sigspec=sigspec)
is_valid_args.__doc__ = """ \
Is ``func(*args, **kwargs)`` a valid function call?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def add(x, y):
...     return x + y
>>> is_valid_args(add, (1,), {})
False
>>> is_valid_args(add, (1, 2), {})
True
>>> is_valid_args(map, (), {})
False
**Implementation notes**
Python 2 relies on ``inspect.getargspec``, which only works for
user-defined functions. Python 3 uses ``inspect.signature``, which
works for many more types of callables.
Many builtins in the standard library are also supported.
"""
# Introspection helper: decide whether partially applying (args, kwargs)
# could ever lead to a valid call of func.  Mirrors is_valid_args above
# but uses Signature.bind_partial on Python 3.
if PY3:  # pragma: py2 no cover
    def is_partial_args(func, args, kwargs, sigspec=None):
        if sigspec is None:
            try:
                sigspec = inspect.signature(func)
            except (ValueError, TypeError) as e:
                sigspec = e
        if isinstance(sigspec, ValueError):
            # signature() couldn't introspect; consult the builtin registry.
            return _is_builtin_partial_args(func, args, kwargs)
        elif isinstance(sigspec, TypeError):
            return False
        try:
            sigspec.bind_partial(*args, **kwargs)
        except (TypeError, AttributeError):
            return False
        return True
else:  # pragma: py3 no cover
    def is_partial_args(func, args, kwargs, sigspec=None):
        # Python 2: emulate partial binding from getargspec data.
        if sigspec is None:
            try:
                sigspec = inspect.getargspec(func)
            except TypeError as e:
                sigspec = e
        if isinstance(sigspec, TypeError):
            if not callable(func):
                return False
            return _is_builtin_partial_args(func, args, kwargs)
        spec = sigspec
        defaults = spec.defaults or ()
        num_pos = len(spec.args) - len(defaults)
        if spec.varargs is None:
            num_extra_pos = max(0, len(args) - num_pos)
        else:
            num_extra_pos = 0
        kwargs = dict(kwargs)
        # Add missing keyword arguments (unless already included in `args`)
        missing_kwargs = spec.args[num_pos + num_extra_pos:]
        kwargs.update(zip(missing_kwargs, defaults[num_extra_pos:]))
        # Add missing position arguments as keywords (may already be in kwargs)
        missing_args = spec.args[len(args):num_pos + num_extra_pos]
        kwargs.update((x, None) for x in missing_args)
        # Convert call to use positional arguments
        args = args + tuple(kwargs.pop(key) for key in spec.args[len(args):])
        if (
            not spec.keywords and kwargs or
            not spec.varargs and len(args) > len(spec.args) or
            set(spec.args[:len(args)]) & set(kwargs)
        ):
            return False
        else:
            return True
if PY34 or PYPY:  # pragma: no cover
    _is_partial_args = is_partial_args
    def is_partial_args(func, args, kwargs, sigspec=None):
        # Python 3.4 and PyPy may lie, so use our registry for builtins instead
        val = _is_builtin_partial_args(func, args, kwargs)
        if val is not None:
            return val
        return _is_partial_args(func, args, kwargs, sigspec=sigspec)
is_partial_args.__doc__ = """ \
Can partial(func, *args, **kwargs)(*args2, **kwargs2) be a valid call?
Returns True *only* if the call is valid or if it is possible for the
call to become valid by adding more positional or keyword arguments.
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def add(x, y):
...     return x + y
>>> is_partial_args(add, (1,), {})
True
>>> is_partial_args(add, (1, 2), {})
True
>>> is_partial_args(add, (1, 2, 3), {})
False
>>> is_partial_args(map, (), {})
True
**Implementation notes**
Python 2 relies on ``inspect.getargspec``, which only works for
user-defined functions.  Python 3 uses ``inspect.signature``, which
works for many more types of callables.
Many builtins in the standard library are also supported.
"""
from ._signatures import (is_builtin_valid_args as _is_builtin_valid_args,
is_builtin_partial_args as _is_builtin_partial_args,
has_unknown_args as _has_unknown_args,
signature_or_spec as _signature_or_spec)
| {
"repo_name": "pombredanne/toolz",
"path": "toolz/functoolz.py",
"copies": "1",
"size": "25136",
"license": "bsd-3-clause",
"hash": 4965721593215442000,
"line_mean": 27.5636363636,
"line_max": 79,
"alpha_frac": 0.5526336728,
"autogenerated": false,
"ratio": 3.8964501627654626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9949008077989705,
"avg_score": 0.000015151515151515153,
"num_lines": 880
} |
from functools import reduce, partial, wraps
import inspect
import operator
from operator import attrgetter
from textwrap import dedent
import sys
__all__ = ('identity', 'thread_first', 'thread_last', 'memoize', 'compose',
'pipe', 'complement', 'juxt', 'do', 'curry', 'flip', 'excepts')
def identity(x):
    """ Identity function.  Return ``x`` unchanged.
    Useful as a default/no-op function value.
    >>> identity(3)
    3
    """
    return x
def thread_first(val, *forms):
    """ Thread ``val`` through a sequence of functions/forms, left to right.

    Each form is either a callable, applied directly, or a
    ``(func, extra_args...)`` tuple, in which case ``val`` is passed as the
    *first* argument:

    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_first(1, (add, 4), (pow, 2))  # pow(add(1, 4), 2)
    25

    So ``thread_first(x, f, (g, y, z))`` expands to ``g(f(x), y, z)``.

    See Also:
        thread_last
    """
    def _step(acc, form):
        # Bare callable: apply it to the accumulated value.
        if callable(form):
            return form(acc)
        # Tuple form: splice the accumulated value in front of the extras.
        if isinstance(form, tuple):
            fn = form[0]
            return fn(*((acc,) + form[1:]))
    return reduce(_step, forms, val)
def thread_last(val, *forms):
    """ Thread ``val`` through a sequence of functions/forms, left to right.

    Each form is either a callable, applied directly, or a
    ``(func, extra_args...)`` tuple, in which case ``val`` is passed as the
    *last* argument:

    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_last(1, (add, 4), (pow, 2))  # pow(2, add(4, 1))
    32

    So ``thread_last(x, f, (g, y, z))`` expands to ``g(y, z, f(x))``.

    >>> def inc(x): return x + 1
    >>> def iseven(x): return x % 2 == 0
    >>> list(thread_last([1, 2, 3], (map, inc), (filter, iseven)))
    [2, 4]

    See Also:
        thread_first
    """
    def _step(acc, form):
        # Bare callable: apply it to the accumulated value.
        if callable(form):
            return form(acc)
        # Tuple form: append the accumulated value after the extras.
        if isinstance(form, tuple):
            fn = form[0]
            return fn(*(form[1:] + (acc,)))
    return reduce(_step, forms, val)
# This is a kludge for Python 3.4.0 support
# currently len(inspect.getargspec(map).args) == 0, a wrong result.
# As this is fixed in future versions then hopefully this kludge can be
# removed.
# Maps builtin callables to their required arity; consulted first by
# _num_required_args before falling back to introspection.
known_numargs = {map: 2, filter: 2, reduce: 2}
def _num_required_args(func):
    """ Number of required positional args for ``func``.

    Returns ``None`` when the count cannot be determined (``*args``
    functions, or non-introspectable callables such as many builtins).

    >>> def foo(a, b, c=None):
    ...     return a + b + c
    >>> _num_required_args(foo)
    2
    >>> def bar(*args):
    ...     return sum(args)
    >>> print(_num_required_args(bar))
    None
    """
    if func in known_numargs:
        return known_numargs[func]
    try:
        # BUG FIX: ``inspect.getargspec`` was removed in Python 3.11.
        # ``getfullargspec`` exposes the same ``args``/``varargs``/``defaults``
        # fields, so prefer it when available.
        getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
        spec = getspec(func)
        if spec.varargs:
            return None
        num_defaults = len(spec.defaults) if spec.defaults else 0
        return len(spec.args) - num_defaults
    except TypeError:
        return None
class curry(object):
    """ Curry a callable function
    Enables partial application of arguments through calling a function with an
    incomplete set of arguments.
    >>> def mul(x, y):
    ...     return x * y
    >>> mul = curry(mul)
    >>> double = mul(2)
    >>> double(10)
    20
    Also supports keyword arguments
    >>> @curry # Can use curry as a decorator
    ... def f(x, y, a=10):
    ...     return a * (x + y)
    >>> add = f(a=1)
    >>> add(2, 3)
    5
    See Also:
        toolz.curried - namespace of curried functions
        http://toolz.readthedocs.org/en/latest/curry.html
    """
    def __init__(self, *args, **kwargs):
        # Real signature is (self, func, *args, **kwargs); taking *args lets
        # us raise a clearer error when no function was supplied.
        if not args:
            raise TypeError('__init__() takes at least 2 arguments (1 given)')
        func, args = args[0], args[1:]
        if not callable(func):
            raise TypeError("Input must be callable")
        # curry- or functools.partial-like object? Unpack and merge arguments
        # so nested curries/partials collapse into a single partial.
        if (hasattr(func, 'func')
                and hasattr(func, 'args')
                and hasattr(func, 'keywords')
                and isinstance(func.args, tuple)):
            _kwargs = {}
            if func.keywords:
                _kwargs.update(func.keywords)
            # New keyword arguments take precedence over inherited ones.
            _kwargs.update(kwargs)
            kwargs = _kwargs
            args = func.args + args
            func = func.func
        if kwargs:
            self._partial = partial(func, *args, **kwargs)
        else:
            self._partial = partial(func, *args)
        self.__doc__ = getattr(func, '__doc__', None)
        self.__name__ = getattr(func, '__name__', '<curry>')
    @property
    def func(self):
        # Underlying (fully unwrapped) callable.
        return self._partial.func
    __wrapped__ = func
    @property
    def args(self):
        # Positional arguments accumulated so far.
        return self._partial.args
    @property
    def keywords(self):
        # Keyword arguments accumulated so far.
        return self._partial.keywords
    @property
    def func_name(self):
        return self.__name__
    def __str__(self):
        return str(self.func)
    def __repr__(self):
        return repr(self.func)
    def __hash__(self):
        # Hash on (func, args, keywords) so equal curries hash equal;
        # keywords dict is frozen into a hashable frozenset.
        return hash((self.func, self.args,
                     frozenset(self.keywords.items()) if self.keywords
                     else None))
    def __eq__(self, other):
        return (isinstance(other, curry) and self.func == other.func and
                self.args == other.args and self.keywords == other.keywords)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __call__(self, *args, **kwargs):
        # Try a real call first; on TypeError decide whether the call was
        # genuinely wrong (enough args supplied -> re-raise) or merely
        # incomplete (curry further).
        try:
            return self._partial(*args, **kwargs)
        except TypeError:
            # If there was a genuine TypeError
            required_args = _num_required_args(self.func)
            if (required_args is not None and
                    len(args) + len(self.args) >= required_args):
                raise
            return curry(self._partial, *args, **kwargs)
    def __get__(self, instance, owner):
        # Descriptor protocol: makes curried functions usable as methods by
        # pre-binding the instance as the first argument.
        if instance is None:
            return self
        return curry(self, instance)
    # pickle protocol because functools.partial objects can't be pickled
    def __getstate__(self):
        # dictoolz.keyfilter, I miss you!
        userdict = tuple((k, v) for k, v in self.__dict__.items()
                         if k != '_partial')
        return self.func, self.args, self.keywords, userdict
    def __setstate__(self, state):
        func, args, kwargs, userdict = state
        self.__init__(func, *args, **(kwargs or {}))
        self.__dict__.update(userdict)
def has_kwargs(f):
    """ Does a function accept keyword arguments?

    True when *f* has default-valued parameters, keyword-only parameters,
    or a ``**kwargs`` catch-all.

    >>> def f(x, y=0):
    ...     return x + y
    >>> has_kwargs(f)
    True
    """
    if sys.version_info[0] == 2:  # pragma: no cover
        spec = inspect.getargspec(f)
        return bool(spec and (spec.keywords or spec.defaults))
    else:  # pragma: no cover
        spec = inspect.getfullargspec(f)
        # BUG FIX: the original checked only ``spec.defaults``, so functions
        # taking **kwargs or keyword-only parameters reported False (which in
        # turn made memoize build a cache key that ignored keyword args).
        # Also: the original fell off the end (returned None) on any major
        # version other than 2 or 3.
        return bool(spec.defaults or spec.kwonlyargs or spec.varkw)
def isunary(f):
    """ Does *f* take exactly one positional argument?

    True only for callables with a single positional parameter and no
    ``*args``, defaults, or keyword arguments.

    >>> isunary(lambda x: x)
    True
    >>> isunary(lambda x, y: x + y)
    False
    """
    try:
        if sys.version_info[0] == 2:  # pragma: no cover
            spec = inspect.getargspec(f)
        if sys.version_info[0] == 3:  # pragma: no cover
            spec = inspect.getfullargspec(f)
        return bool(spec
                    and spec.varargs is None
                    and not has_kwargs(f)
                    and len(spec.args) == 1)
    except TypeError:  # pragma: no cover
        # Builtins on Python < 3.4 are not introspectable.
        return None
@curry
def memoize(func, cache=None, key=None):
    """ Cache a function's results for speedy future evaluation.

    Trades memory for speed; only use on pure functions.

    >>> @memoize
    ... def add(x, y):
    ...     return x + y

    A pre-populated dict-like cache may be supplied via ``cache``, and a
    custom ``key(args, kwargs) -> hashable`` function via ``key`` (the
    default key function is usually sufficient):

    >>> @memoize(key=lambda args, kwargs: args)
    ... def add(x, y, verbose=False):
    ...     return x + y
    """
    if cache is None:
        cache = {}
    try:
        kwargs_possible = has_kwargs(func)
        unary = isunary(func)
    except TypeError:  # pragma: no cover
        # Non-introspectable callable: assume the most general shape.
        kwargs_possible = True
        unary = False
    if key is None:
        # Pick the cheapest key that still distinguishes all calls.
        if unary:
            key = lambda args, kwargs: args[0]
        elif kwargs_possible:
            key = lambda args, kwargs: (
                args or None,
                frozenset(kwargs.items()) if kwargs else None,
            )
        else:
            key = lambda args, kwargs: args
    def memoized(*args, **kwargs):
        k = key(args, kwargs)
        try:
            return cache[k]
        except TypeError:
            raise TypeError("Arguments to memoized function must be hashable")
        except KeyError:
            result = func(*args, **kwargs)
            cache[k] = result
            return result
    try:
        memoized.__name__ = func.__name__
    except AttributeError:
        pass
    memoized.__doc__ = func.__doc__
    memoized.__wrapped__ = func
    return memoized
class Compose(object):
    """ A composition of functions.

    Functions are applied innermost-first: ``Compose((f, g))(x) == f(g(x))``.

    See Also:
        compose
    """
    __slots__ = 'first', 'funcs'
    def __init__(self, funcs):
        # ``funcs`` arrives outer-to-inner; store in application order.
        funcs = tuple(reversed(funcs))
        self.first = funcs[0]
        self.funcs = funcs[1:]
    def __call__(self, *args, **kwargs):
        ret = self.first(*args, **kwargs)
        for f in self.funcs:
            ret = f(ret)
        return ret
    # __slots__ classes need explicit pickle support.
    def __getstate__(self):
        return self.first, self.funcs
    def __setstate__(self, state):
        self.first, self.funcs = state
    @property
    def __doc__(self):
        def composed_doc(*fs):
            """Generate a docstring for the composition of fs.
            """
            if not fs:
                # Argument name for the docstring.
                return '*args, **kwargs'
            return '{f}({g})'.format(f=fs[0].__name__, g=composed_doc(*fs[1:]))
        try:
            return (
                'lambda *args, **kwargs: ' +
                composed_doc(*reversed((self.first,) + self.funcs))
            )
        except AttributeError:
            # One of our callables does not have a `__name__`, whatever.
            return 'A composition of functions'
    @property
    def __name__(self):
        try:
            # BUG FIX: the original had a trailing comma after the bare
            # generator expression inside join(...), which is a SyntaxError
            # ("Generator expression must be parenthesized").
            return '_of_'.join(
                f.__name__ for f in reversed((self.first,) + self.funcs)
            )
        except AttributeError:
            return type(self).__name__
def compose(*funcs):
    """ Compose functions to operate in series, right to left.

    ``compose(f, g, h)(x, y)`` is the same as ``f(g(h(x, y)))``.
    With no arguments, returns the identity function.

    >>> inc = lambda i: i + 1
    >>> compose(str, inc)(3)
    '4'

    See Also:
        pipe
    """
    count = len(funcs)
    if count == 0:
        return identity
    elif count == 1:
        return funcs[0]
    return Compose(funcs)
def pipe(data, *funcs):
    """ Pipe a value through a sequence of functions, left to right.

    ``pipe(data, f, g, h)`` is equivalent to ``h(g(f(data)))`` — much like
    UNIX pipes: ``$ cat data | f | g | h``.

    >>> double = lambda i: 2 * i
    >>> pipe(3, double, str)
    '6'

    See Also:
        compose
        thread_first
        thread_last
    """
    result = data
    for step in funcs:
        result = step(result)
    return result
def complement(func):
    """ Return a predicate that is the logical negation of *func*.

    >>> def iseven(n): return n % 2 == 0
    >>> isodd = complement(iseven)
    >>> isodd(2)
    False
    >>> isodd(3)
    True
    """
    # Composing with operator.not_ negates the wrapped predicate's result.
    return compose(operator.not_, func)
class juxt(object):
    """
    Call several functions with the same arguments, collecting the results.

    Accepts either several callables or a single iterable of callables, and
    returns a callable that applies its arguments to each of them, yielding
    a tuple of results.

    >>> inc = lambda x: x + 1
    >>> double = lambda x: x * 2
    >>> juxt(inc, double)(10)
    (11, 20)
    >>> juxt([inc, double])(10)
    (11, 20)
    """
    __slots__ = ['funcs']
    def __init__(self, *funcs):
        # A single non-callable argument is treated as an iterable of funcs.
        if len(funcs) == 1 and not callable(funcs[0]):
            funcs = funcs[0]
        self.funcs = tuple(funcs)
    def __call__(self, *args, **kwargs):
        results = []
        for fn in self.funcs:
            results.append(fn(*args, **kwargs))
        return tuple(results)
    # __slots__ classes need explicit pickle support.
    def __getstate__(self):
        return self.funcs
    def __setstate__(self, state):
        self.funcs = state
def do(func, x):
    """ Run ``func`` on ``x`` for its side effects, then return ``x``.

    The result of ``func`` is discarded — only its side effects matter.
    Useful for building logging pipelines by composing ``do`` with a
    storage function such as ``list.append`` or ``file.write``:

    >>> from toolz import compose
    >>> from toolz.curried import do
    >>> log = []
    >>> inc = compose(lambda x: x + 1, do(log.append))
    >>> inc(1)
    2
    >>> log
    [1]
    """
    func(x)
    return x
@curry
def flip(func, a, b):
    """ Call ``func`` with its two arguments reversed.  Curried.

    >>> def div(a, b):
    ...     return a / b
    >>> flip(div, 2, 1)
    0.5
    >>> div_by_two = flip(div, 2)
    >>> div_by_two(4)
    2.0

    Particularly useful for builtins and C functions taking positional-only
    arguments, e.g. ``filter(flip(isinstance, int), data)``.
    """
    return func(b, a)
def return_none(exc):
    """Default ``excepts`` handler: ignore the exception, return None."""
    return None
class _ExceptsDoc(object):
    """A descriptor that allows us to get the docstring for both the
    `excepts` class and generate a custom docstring for the instances of
    excepts.
    Parameters
    ----------
    class_doc : str
        The docstring for the excepts class.
    """
    def __init__(self, class_doc):
        self._class_doc = class_doc
    def __get__(self, instance, owner):
        # Accessed on the class itself -> return the class docstring.
        if instance is None:
            return self._class_doc
        exc = instance.exc
        try:
            # Render "(A, B)" for a tuple of exception types, or the bare
            # type name for a single type.
            if isinstance(exc, tuple):
                exc_name = '(%s)' % ', '.join(
                    map(attrgetter('__name__'), exc),
                )
            else:
                exc_name = exc.__name__
            return dedent(
                """\
                A wrapper around {inst.f.__name__!r} that will except:
                {exc}
                and handle any exceptions with {inst.handler.__name__!r}.
                Docs for {inst.f.__name__!r}:
                {inst.f.__doc__}
                Docs for {inst.handler.__name__!r}:
                {inst.handler.__doc__}
                """
            ).format(
                inst=instance,
                exc=exc_name,
            )
        except AttributeError:
            # Wrapped callables without __name__ (e.g. partials) fall back
            # to the generic class docstring.
            return self._class_doc
class excepts(object):
    """A wrapper around a function to catch exceptions and
    dispatch to a handler.
    This is like a functional try/except block, in the same way that
    ifexprs are functional if/else blocks.
    Examples
    --------
    >>> excepting = excepts(
    ...     ValueError,
    ...     lambda a: [1, 2].index(a),
    ...     lambda _: -1,
    ... )
    >>> excepting(1)
    0
    >>> excepting(3)
    -1
    Multiple exceptions and default except clause.
    >>> excepting = excepts((IndexError, KeyError), lambda a: a[0])
    >>> excepting([])
    >>> excepting([1])
    1
    >>> excepting({})
    >>> excepting({0: 1})
    1
    """
    # override the docstring above with a descriptor that can return
    # an instance-specific docstring
    __doc__ = _ExceptsDoc(__doc__)
    def __init__(self, exc, f, handler=return_none):
        # exc: exception type (or tuple of types) to catch
        # f: the wrapped callable
        # handler: called with the caught exception; its return value
        #     replaces f's result (defaults to returning None)
        self.exc = exc
        self.f = f
        self.handler = handler
    def __call__(self, *args, **kwargs):
        try:
            return self.f(*args, **kwargs)
        except self.exc as e:
            return self.handler(e)
    @property
    def __name__(self):
        # e.g. "getitem_excepting_IndexError_or_KeyError"
        exc = self.exc
        try:
            if isinstance(exc, tuple):
                exc_name = '_or_'.join(map(attrgetter('__name__'), exc))
            else:
                exc_name = exc.__name__
            return '%s_excepting_%s' % (self.f.__name__, exc_name)
        except AttributeError:
            # Wrapped callable has no __name__ (e.g. a partial).
            return 'excepting'
| {
"repo_name": "autorealm/MayoiNeko",
"path": "develop/toolz/functoolz.py",
"copies": "1",
"size": "18001",
"license": "apache-2.0",
"hash": -515901521145279600,
"line_mean": 25.3557833089,
"line_max": 79,
"alpha_frac": 0.540358869,
"autogenerated": false,
"ratio": 3.860390306669526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9900749175669525,
"avg_score": 0,
"num_lines": 683
} |
from functools import reduce, partial, wraps
def const(x):
    """Return a function that ignores its argument and always yields *x*."""
    return lambda _: x
def curry(f):
    """Convert a binary function into nested single-argument calls."""
    return lambda a: lambda b: f(a, b)
def compose(*f):
    """Right-to-left composition: ``compose(f, g)(x) == f(g(x))``."""
    return reduce(lambda outer, inner: lambda x: outer(inner(x)), f)
def tco(func):
    """Trampoline decorator providing tail-call optimisation.

    The decorated function signals a tail call by returning a
    ``(callable, value)`` 2-tuple; the trampoline then re-invokes the
    callable with ``value`` (splatted when it is a tuple) instead of
    growing the call stack.
    """
    @wraps(func)
    def func_run(*args, **kwargs):
        res = func(*args, **kwargs)
        # A (callable, value) pair means "tail-call this next".
        while type(res) == tuple and len(res) == 2 and callable(res[0]):
            fun, val = res
            # Prefer the undecorated function (stored as ``tco_rec``) so the
            # loop keeps control; a callable without that attribute is
            # invoked directly and its result returned as-is.
            try: fun = getattr(fun, 'tco_rec')
            except AttributeError:
                return fun(*val) if type(val) == tuple else fun(val)
            res = fun(*val) if type(val) == tuple else fun(val)
        return res
    # Expose the raw function so nested trampolines can bypass the wrapper.
    func_run.tco_rec = func
    return func_run
@tco
def upTo(n, i=0):
    """Count from ``i`` up to ``n`` inclusive, printing each step (TCO'd)."""
    print(i)
    return i if i >= n else (upTo, (n, i + 1))
def upToNoTCO(n, i=0):
    """Plain recursive counter from ``i`` to ``n``; prints each step."""
    print(i)
    if i >= n:
        return i
    return upToNoTCO(n, i + 1)
@tco
def even(n):
    """Mutually recursive parity test (trampolined via ``odd``)."""
    if n == 0:
        return True
    if n == 1:
        return False
    return (odd, n - 1)
@tco
def odd(n):
    """Mutually recursive parity test (trampolined via ``even``)."""
    if n == 1:
        return True
    if n == 0:
        return False
    return (even, n - 1)
from random import randrange, choice
def randbracks(size, ops):
    """Generator yielding the tokens of a random, possibly-parenthesized
    expression over operators ``ops``; operand values are injected by the
    caller through ``send``.
    """
    # Randomly choose real brackets or padding spaces for this level.
    l, r = ('(', ')') if randrange(2) else (' ', ' ')
    if size <= 1:
        # Leaf: yield the opener, then echo back whatever the caller sends.
        # NOTE(review): no matching ``r`` is yielded here, so a leaf that
        # chose '(' leaves the bracket unclosed — confirm whether intended.
        contents = yield l
        yield str(contents)
    else:
        yield l
        # Emit 1..size-1 sub-expressions joined by random operators.
        for i in range(randrange(1, size)):
            if i: yield ' ' + choice(ops) + ' '
            yield from randbracks(size // 2, ops)
        yield r
def randnums(len):
    """Yield tokens of a random arithmetic expression whose operands are
    random integers in [-50, 49]."""
    gen = randbracks(len, ['+', '-', '*'])
    # Prime the generator, then keep feeding it random operands.
    yield gen.send(None)
    while True:
        try:
            yield gen.send(randrange(100) - 50)
        except StopIteration:
            break
def randbools(len):
    """Yield tokens of a random boolean expression over and/or with random
    True/False operands."""
    gen = randbracks(len, ['and', 'or'])
    # Prime the generator, then keep feeding it random booleans.
    yield gen.send(None)
    while True:
        try:
            yield gen.send(bool(randrange(2)))
        except StopIteration:
            break
| {
"repo_name": "oisdk/PyParse",
"path": "Utils.py",
"copies": "1",
"size": "1749",
"license": "mit",
"hash": -6186365178648242000,
"line_mean": 24.347826087,
"line_max": 72,
"alpha_frac": 0.5557461407,
"autogenerated": false,
"ratio": 3.0738137082601056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8963774708734218,
"avg_score": 0.03315702804517763,
"num_lines": 69
} |
from functools import reduce
import itertools
from tkinter import messagebox
from googleTranslate.Translator import Translator
import re
__author__ = 'Girish'
class Subtitle_translator(Translator):
    """Translates the text portion of an .srt subtitle file, preserving the
    cue numbers and timing lines.

    Builds on googleTranslate.Translator for the actual translation calls.
    """
    def __init__(self,to):
        # 'to' is presumably the target language code -- TODO confirm against
        # the Translator base class.
        Translator.__init__(self,to)
    def set_progress_bar(self,progress_bar):
        # Stored for UI feedback; not currently consulted by translate_file.
        self.progress_bar = progress_bar
    def translate_file(self,from_,to):
        # from_: path of the source .srt file; to: path of the output file.
        try:
            with open(to,'w') as out:
                with open(from_) as file:
                    # Split the file into blank-line-separated subtitle blocks:
                    # [cue number, timing line, *text lines].
                    self.li= [ list(g) for f,g in itertools.groupby(file,key=lambda x:bool(x.strip())) if f]
                    for block in self.li:
                        line,time ,*content = block
                        # Copy number and timing through untranslated.
                        out.writelines([line,time])
                        t2 =""
                        # Join multi-line cues into one string before translating.
                        for line in content:
                            t2+=line.strip()+" "
                        t2=self.translate(t2)
                        t2+="\r\n\n"
                        out.writelines(t2)
        except KeyboardInterrupt:
            # NOTE(review): by the time this handler runs the ``with`` block
            # has already closed ``out``, so flush() on a closed file likely
            # raises ValueError — confirm intent.
            out.flush()
        # self.progress_bar.destroy()
        # Runs on both normal completion and after a handled interrupt.
        messagebox.showinfo("File Done","The file has been stored at "+to)
| {
"repo_name": "girishramnani/collegeProjects",
"path": "googleTranslate/Subtitle_translator.py",
"copies": "3",
"size": "1174",
"license": "mit",
"hash": 3401482940347902000,
"line_mean": 26.6341463415,
"line_max": 108,
"alpha_frac": 0.5161839864,
"autogenerated": false,
"ratio": 4.148409893992933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6164593880392932,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import itertools
import warnings
class Iterable:
def __init__(self, iterable):
iter(iterable)
self.__iterable = list(iterable)
def __iter__(self):
return iter(self.__iterable)
def __len__(self):
return len(list(self.__iterable))
# built-in equivalent data structures
def to_frozenset(self):
""" Equivalent to the built-in type **frozenset(** *iterable* **)**
:return: frozenset
>>> numbers = Iterable([10, 7, 28, 7, 19, 19, 70])
>>> numbers
<pyiterable.iterable.Iterable object at 0x017BA610>
>>> numbers.to_frozenset()
frozenset({10, 19, 28, 70, 7})
"""
return frozenset(self.__iterable)
def to_list(self):
""" Equivalent to the built-in function **list(** *iterable* **)**
:return: list
>>> grades = Iterable([('Alice', 94), ('Bob', 65), ('Charlie', 79), ('Daniel', 70)])
>>> grades
<pyiterable.iterable.Iterable object at 0x017BACB0>
>>> grades.to_list()
[('Alice', 94), ('Bob', 65), ('Charlie', 79), ('Daniel', 70)]
"""
return list(self.__iterable)
def to_set(self):
""" Equivalent to the built-in function **set(** *iterable* **)**
:return: set
>>> numbers = Iterable([10, 7, 28, 7, 19, 19, 70])
>>> numbers
<pyiterable.iterable.Iterable object at 0x017BA610>
>>> numbers.to_set()
{10, 19, 28, 70, 7}
"""
return set(self.__iterable)
def to_tuple(self):
""" Equivalent to the built-in function **tuple(** *iterable* **)**
:return: tuple
>>> numbers = Iterable([10, 7, 28, 7, 19, 19, 70])
>>> numbers
<pyiterable.iterable.Iterable object at 0x0130FE70>
>>> numbers.to_tuple()
(10, 7, 28, 7, 19, 19, 70)
"""
return tuple(self.__iterable)
# built-in equivalent transformations
def all(self):
""" Equivalent to the built-in function **all(** *iterable* **)**
:return: True if all elements in *self* are True, else False
>>> Iterable([True, False, True]).all()
False
>>> Iterable([True, True, True, True]).all()
True
"""
return all(self.__iterable)
def any(self):
""" Equivalent to the built-in function **any(** *iterable* **)**
:return: True if any element in *self* is True, else False
>>> Iterable([True, False, True]).any()
True
>>> Iterable([False, False, False, False]).any()
False
"""
return any(self.__iterable)
    def enumerate(self, start=0):
        """ Equivalent to the built-in function **enumerate(** *sequence, start=0* **)**
        :param start: integer value to start from
        :return: **(index + start, value)**, where **sequence[index] == value**
        >>> grades = Iterable(['a', 'b', 'c', 'd', 'f'])
        >>> grades.enumerate().to_list()
        [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'd'), (4, 'f')]
        >>> grades.enumerate(start=5).to_list()
        [(5, 'a'), (6, 'b'), (7, 'c'), (8, 'd'), (9, 'f')]
        """
        return Iterable(enumerate(self.__iterable, start))
def filter(self, function):
""" Equivalent to the built-in function **filter(** *function, iterable* **)**
:param function: function that returns **False** for items to exclude
:return: *Iterable* object that only contains items filtered by *function*
>>> grades = Iterable(['a', 'b', 'c', 'd', 'f'])
>>> grades.enumerate().filter(lambda i_x: i_x[0] < 3).to_list()
[(0, 'a'), (1, 'b'), (2, 'c')]
"""
return Iterable(filter(function, self.__iterable))
def len(self):
""" Equivalent to the built-in function **len(** *s* **)**
:return: number of items in *self*
>>> grades = Iterable(['a', 'b', 'c', 'd', 'f'])
>>> grades.len()
5
"""
return self.__len__()
def map(self, function):
""" Equivalent to the built-in function **map(** *function, iterable* **)**
:param function: function applied to every item in *self*
:return: *Iterable* of results
>>> numbers = Iterable([1, 3, 10, 4, 8])
>>> numbers.map(lambda x: x * 2).to_list()
[2, 6, 20, 8, 16]
"""
return Iterable(map(function, self.__iterable))
def max(self, **kwargs):
""" Equivalent to the built-in function **max(** *iterable, \*[, key, default]* **)**
:param key: keyword-only; function that returns the value to compare
:param default: keyword-only; value to return if *self* is empty. Only available in Python 3.4 or later
:return: largest item in *self*
>>> grades = Iterable([('Charlie', 79), ('Alice', 94), ('Bob', 65)])
>>> grades.max(key=lambda x: x[1])
('Alice', 94)
"""
return max(self.__iterable, **kwargs)
def min(self, **kwargs):
""" Equivalent to the built-in function **min(** *iterable, \*[, key, default]* **)**
:param key: keyword-only; function that returns the value to compare
:param default: keyword-only; value to return if *self* is empty. Only available in Python 3.4 or later
:return: smallest item in *self*
>>> grades = Iterable([('Charlie', 79), ('Alice', 94), ('Bob', 65)])
>>> grades.min(key=lambda x: x[1])
('Bob', 65)
"""
return min(self.__iterable, **kwargs)
def reversed(self):
""" Equivalent to the built-in function **reversed(** *seq* **)**
:return: *self* in the reversed order
>>> names = Iterable(['Bob', 'Alice', 'Daniel', 'Charlie'])
>>> names.reversed().to_list()
['Charlie', 'Daniel', 'Alice', 'Bob']
"""
return Iterable(reversed(self.__iterable))
def sorted(self, **kwargs):
""" Equivalent to the built-in function **sorted(** *iterable[, cmp[, key[, reverse]]]* **)**
:param cmp: keyword-only; custom comparison function. Only available in Python 2.x
:param key: keyword-only; function that returns the value to compare
:param reverse: keyword-only; boolean; if True, *self* is sorted with the largest value first
:return: a sorted *Iterable*
>>> grades = Iterable([('Charlie', 79), ('Alice', 94), ('Bob', 65)])
>>> grades.sorted().to_list()
[('Alice', 94), ('Bob', 65), ('Charlie', 79)]
>>> grades.sorted(key=lambda x: x[1]).to_list()
[('Bob', 65), ('Charlie', 79), ('Alice', 94)]
>>> grades.sorted(key=lambda x: x[1], reverse=True).to_list()
[('Alice', 94), ('Charlie', 79), ('Bob', 65)]
"""
return Iterable(sorted(self.__iterable, **kwargs))
def sum(self, start=0):
""" Equivalent to the built-in function **sum(** *iterable[, start]* **)**
:param start: starting value; default is 0
:return: sum of all values in *Iterable*
>>> numbers = Iterable([1, 3, 10, 4, 8])
>>> numbers.sum()
26
>>> numbers.sum(10)
36
"""
return sum(self.__iterable, start)
def zip(self, *args):
""" Equivalent to the built-in function **zip(** *[iterable, ...]* **)**
:param args: any number of iterable objects
:return: list of tuples; i-th tuple contains all elements from each i-th element in *self* and *\*args*
>>> left = Iterable(['Alice', 'Bob', 'Charlie', 'Daniel'])
>>> left.zip([94, 65, 79, 70]).to_list()
[('Alice', 94), ('Bob', 65), ('Charlie', 79), ('Daniel', 70)]
"""
return Iterable(zip(self.__iterable, *args))
# functools (Python 3) equivalent transformations
def reduce(self, function, initializer=None):
""" Equivalent to:
* **Python 2.x:** the built-in function **reduce(** *function, iterable[, initializer]* **)**
* **Python 3.x:** **reduce(** *function, iterable[, initializer]* **)** in *functools*
Repeatedly applies *function* to sequence until one value is left
:param function: function that takes two values and returns a single value
:param initializer: initial value combined with the first value in *self*
:return: single value
>>> values = Iterable([1, 2, 5, 9])
>>> values.reduce(lambda a, b: a + b)
17
>>> values.reduce(lambda a, b: a + b, 10)
27
"""
if initializer is None:
return reduce(function, self.__iterable)
else:
return reduce(function, self.__iterable, initializer)
# custom transformations / functions
def contains(self, value):
""" Equivalent to calling **value in** *iterable*
:param value: value to search for inside *iterable*
:return: *True* if value exists inside *iterable*, otherwise false
>>> values = Iterable([1, 2, 5, 9])
>>> values.contains(2)
True
>>> values.contains(4)
False
"""
return value in self.__iterable
def is_empty(self):
""" Equivalent to calling **len( list(** *iterable* **) ) == 0**
:return: *True* if *iterable* does not contain any elements; otherwise *False*
>>> Iterable([1, 2, 5, 9]).is_empty()
False
>>> Iterable([]).is_empty()
True
"""
return len(list(self.__iterable)) == 0
def mapmany(self, function):
""" Equivalent to calling **itertools.chain.from_iterable( map(** *function, iterable* **) )**
:param function: function to be applied to each input; outputs an iterable
:return: *Iterable* comprised of every element returned by **function**
>>> values = Iterable([1, 2, 5, 9])
>>> func = lambda x: [x, x]
>>> values.map(func).to_list()
[[1, 1], [2, 2], [5, 5], [9, 9]]
>>> values.mapmany(func).to_list()
[1, 1, 2, 2, 5, 5, 9, 9]
"""
return Iterable(itertools.chain.from_iterable(map(function, self.__iterable)))
def single(self, filter_by=None, default=None):
""" Equivalent to calling **first()**, except it raises *ValueError* if *iterable* contains more than one element
:param filter_by: keyword-only; function used to filter unwanted values
:param default: keyword-only value to return if *self* is empty after filtered by *filter_by*
:return: value of *self* filtered by *filter_by*
:raises ValueError: *iterable* contains more than one element after being filtered by *filter_by*
>>> values = Iterable([1, 2, 5, 9])
>>> values.single()
ValueError: iterable [1, 2, 5, 9] contains more than one element
>>> values.single(filter_by=lambda x: x > 1)
ValueError: iterable [2, 5, 9] contains more than one element
>>> values.single(filter_by=lambda x: x > 5)
9
>>> values.single(filter_by=lambda x: x > 10) # Returns None
>>> values.single(filter_by=lambda x: x > 10, default=0)
0
"""
if filter_by is None:
filtered_self = self
else:
filtered_self = self.filter(filter_by)
if filtered_self.len() > 1:
raise ValueError("iterable {} contains more than one element".format(filtered_self.__iterable))
return filtered_self.first(default=default)
# List-like transformations / functions
def concat(self, iterable):
""" Equivalent to calling **list(** *left* **) + list(** *right* **)**
:param iterable: iterable to concat with *self*
:return: New *Iterable* containing the elements from *self* and *iterable*
>>> left = [2, 10, 2, 2, 5, 9, 10]
>>> right = [13, -5, 1982, -10, 2384, 1982, 98]
>>> Iterable(left).concat(right).to_list()
[2, 10, 2, 2, 5, 9, 10, 13, -5, 1982, -10, 2384, 1982, 98]
"""
return Iterable(list(self.__iterable) + list(iterable))
def first(self, filter_by=None, default=None, function=None):
""" Equivalent to calling **next( iter( filter(** *filter_by, iterable* **) )** *, default* **)**
:param filter_by: keyword-only; function used to filter unwanted values
:param default: keyword-only; value to return if *self* is empty after filtered by *filter_by*
:param function: deprecated; use *filter_by*
:return: first value of *self* filtered by *filter_by*
>>> values = Iterable([1, 2, 5, 9])
>>> values.first()
1
>>> values.first(filter_by=lambda x: x > 5)
9
>>> values.first(filter_by=lambda x: x > 10) # Returns None
>>> values.first(filter_by=lambda x: x > 10, default=0)
0
"""
if function is not None:
warnings.warn(
"'function' is deprecated; use 'filter_by' instead",
category=DeprecationWarning
)
if filter_by is not None:
raise ValueError("both 'filter_by' and 'function' were provided; please only use 'filter_by', as 'function' is deprecated")
filter_func = filter_by or function
if filter_func:
return next(iter(filter(filter_func, self.__iterable)), default)
else:
return next(iter(self.__iterable), default)
def get(self, index):
""" Equivalent to calling **list(** *iterable* **)[** *index* **]**
* This function will convert the *iterable* to a sequence type before retrieving the value at *index*
* *-1* is not supported to get the last element; use **last()** instead
:param index: element number inside *iterable*
:return: value at *index* from *iterable*
:raises IndexError: *index* is less than 0 or is out of bounds
>>> values = Iterable([1, 2, 5, 9])
>>> values.get(2)
5
>>> values.get(-1)
IndexError: index out of range
>>> values.get(5)
IndexError: index out of range
"""
iterable_as_list = list(self.__iterable)
if index < 0 or index >= len(iterable_as_list):
raise IndexError("index out of range")
return list(self.__iterable)[index]
def last(self, filter_by=None, default=None):
""" Equivalent to calling **next( iter( reversed( list( filter(** *filter_by, iterable* **) ) ) )** *, default* **)**
:param filter_by: keyword-only; function used to filter unwanted values
:param default: keyword-only value to return if *self* is empty after filtered by *filter_by*
:return: last value of *self* filtered by *filter_by*
>>> values = Iterable([1, 2, 5, 9])
>>> values.last()
9
>>> values.last(filter_by=lambda x: x < 5)
2
>>> values.last(filter_by=lambda x: x < 1) # Returns None
>>> values.last(filter_by=lambda x: x < 1, default=0)
0
"""
if filter_by:
reversed_iterable = reversed(list(filter(filter_by, self.__iterable)))
else:
reversed_iterable = reversed(list(self.__iterable))
return next(iter(reversed_iterable), default)
def skip(self, count):
""" Skips the first *count* elements in *iterable*
* This function will convert the *iterable* to a sequence type before retrieving the values
* If *count* is equal to or greater than the length of *iterable*, no elements are taken
:param count: number of values to skip
:return: *Iterable* containing all the elements of *iterable* without the first *count* elements
:raises ValueError: *count* is a negative value
>>> values = Iterable([1, 2, 5, 9])
>>> values.skip(1).to_list()
[2, 5, 9]
>>> values.skip(3).to_list()
[9]
>>> values.skip(10).to_list()
[]
>>> values.take(-1).to_list()
ValueError: 'count' must be greater than 0
"""
if count < 0:
raise ValueError("'count' must be greater than 0")
elif count == 0:
return self
elif count >= len(self.__iterable):
return Iterable([])
else:
return Iterable(list(self.__iterable)[count:])
def take(self, count):
""" Gets the first *count* elements in *iterable*
* This function will convert the *iterable* to a sequence type before retrieving the values
* If *count* is equal to or greater than the length of *iterable*, all elements are taken
:param count: number of values to retrieve
:return: *Iterable* comprised of the first *count* elements
:raises ValueError: *count* is a negative value
>>> values = Iterable([1, 2, 5, 9])
>>> values.take(1).to_list()
[1]
>>> values.take(3).to_list()
[1, 2, 5]
>>> values.take(10).to_list()
[1, 2, 5, 9]
>>> values.take(-1).to_list()
ValueError: 'count' must be greater than 0
"""
if count < 0:
raise ValueError("'count' must be greater than 0")
elif count == 0:
return Iterable([])
elif count >= len(self.__iterable):
return self
else:
return Iterable(list(self.__iterable)[:count])
# Set-like transformations / functions
def difference(self, iterable):
""" Equivalent to calling **set(** *left* **).difference( set (** *iterable* **) )**
:param iterable: iterable to check against for differences
:return: New *Iterable* containing elements found in *self* but not *iterable*
>>> left = [2, 10, 1982, -5, 9, 10]
>>> right = [1982, -10, -5, 1982, 98]
>>> Iterable(left).difference(right).to_list()
[9, 2, 10]
"""
return Iterable(set(self.__iterable).difference(set(iterable)))
def distinct(self):
""" Equivalent to calling **set(** *iterable* **)**
:return: New *Iterable* containing only the distinct elements; order not preserved
>>> values = Iterable([2, 10, 2, 2, 5, 9, 10])
>>> values.distinct().to_list()
[9, 2, 10, 5]
"""
return Iterable(set(self.__iterable))
def intersection(self, iterable):
""" Equivalent to calling **set(** *left* **).intersection( set(** *right* **) )**
:param iterable: iterable to intersect with *self*
:return: *Iterable* with distinct values found in both *self* and *iterable*
>>> left = [2, 10, 1982, -5, 9, 10]
>>> right = [1982, -10, -5, 1982, 98]
>>> Iterable(left).intersection(right).to_list()
[-5, 1982]
"""
return Iterable(set(self.__iterable).intersection(set(iterable)))
def symmetric_difference(self, iterable):
""" Equivalent to calling **set(** *left* **).symmetric_difference( set(** *right* **) )**
:param iterable: iterable to perform symmetric difference against
:return: *Iterable* with distinct values found in either *self* or *iterable* but not both
>>> left = [2, 10, 1982, -5, 9, 10]
>>> right = [1982, -10, -5, 1982, 98]
>>> Iterable(left).symmetric_difference(right).to_list()
[98, 2, 9, 10, -10]
"""
return Iterable(set(self.__iterable).symmetric_difference(set(iterable)))
def union(self, iterable):
""" Equivalent to calling **set(** *left* **).union( set(** *right* **) )**
:param iterable: iterable to union with *self*
:return: *Iterable* with distinct values in either *self* or *iterable*
>>> left = [2, 10, 2, 2, 5, 9, 10]
>>> right = [1982, -10, 5, 1982, 9]
>>> Iterable(left).union(right).to_list()
[2, 5, 9, 10, -10, 1982]
"""
return Iterable(set(self.__iterable).union(set(iterable)))
| {
"repo_name": "neverendingqs/pyiterable",
"path": "pyiterable/iterable.py",
"copies": "1",
"size": "20514",
"license": "mit",
"hash": -745946282186158300,
"line_mean": 35.9186691312,
"line_max": 139,
"alpha_frac": 0.5401189432,
"autogenerated": false,
"ratio": 4.011341415721549,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5051460358921549,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import operator
from django.db import models
from django.db.models import Prefetch, Q, Count
from django.urls import reverse, reverse_lazy
from django.conf import settings
from django.utils.translation import gettext_lazy as _, gettext
from django.contrib.humanize.templatetags.humanize import naturalday
from django.contrib.auth import get_user_model
from cuser.middleware import CuserMiddleware
from .fields import ProtectedFileField
from diventi.feedbacks.models import Survey
from diventi.core.models import (
Element,
DiventiImageModel,
TimeStampedModel,
PublishableModel,
PublishableModelQuerySet,
Category,
Element,
SectionModel,
HighlightedModel,
DiventiColModel,
)
class ProductQuerySet(PublishableModelQuerySet):
    """QuerySet helpers for Product: eager loading, ownership and landing-page filters."""

    # Prefetch all relevant data
    def prefetch(self):
        # Bundle every relation the product pages read, to avoid N+1 queries.
        products = self.prefetch_related('chapters')
        products = products.prefetch_related('authors')
        products = products.prefetch_related('related_products')
        products = products.prefetch_related('customers')
        products = products.prefetch_related('details')
        products = products.select_related('category')
        products = products.prefetch_related('formats')
        products = products.select_related('product_survey')
        return products

    # Fetch the products purchased by the user
    def user_collection(self, user):
        current_user = CuserMiddleware.get_user()  # The user that is operating in the session
        products = self.filter(customers=user)
        if current_user != user:  # Hide non-published products if the user is not visiting his own profile
            products = products.published()
        products = products.prefetch()
        return products

    # Fetch the products authored by the user
    def user_authored(self, user):
        products = self.filter(authors=user)
        products = products.prefetch()
        return products

    # Return true if the user has authored at least one product
    def has_user_authored(self, user):
        return self.user_authored(user).exists()

    # Return the list of products that are pinned.
    def pinned(self):
        products = self.published().filter(pinned=True)
        products = products.prefetch()
        return products
class ProductCategoryQuerySet(models.QuerySet):
    """QuerySet for ProductCategory exposing the publicly visible categories."""

    # Meta categories won't be listed in search results, nor on reporting pages.
    # In addition, we show categories that are related to published projects only.
    def visible(self):
        categories = self.exclude(meta_category=True)
        published_projects = Product.objects.published()
        categories = categories.filter(projects__pk__in=published_projects) # Exclude empty categories
        categories = categories.prefetch_related(Prefetch('projects', queryset=published_projects))
        return categories
class ProductCategory(Element):
    """
    Defines the type of a product.
    """
    # When True the category is hidden from search results and reporting
    # (see ProductCategoryQuerySet.visible and Product.reporting).
    meta_category = models.BooleanField(
        default=False,
        verbose_name=_('meta category'),
        help_text=_('Meta categories won\'t be listed in search results, nor on reporting pages.')
    )

    objects = ProductCategoryQuerySet.as_manager()

    class Meta:
        verbose_name = _('Product category')
        verbose_name_plural = _('Product categories')
class ProductFormat(Element):
    """ A specific format of a product."""
    # Linked from Product.formats (many-to-many); no fields of its own.

    class Meta:
        verbose_name = _('Format')
        verbose_name_plural = _('Formats')
class Product(TimeStampedModel, PublishableModel, DiventiImageModel, Element, SectionModel, DiventiColModel):
    """ An adventure or a module published by Diventi.

    Behaviour mixed in from the bases (timestamps, publishing state, image,
    section/column layout) is combined with pricing (Stripe ids), ownership
    (authors / customers) and feedback (product_survey).
    """
    title = models.CharField(
        max_length=50,
        verbose_name=_('title')
    )
    short_description = models.TextField(
        blank=True,
        max_length=50,
        verbose_name=_('short description')
    )
    abstract = models.TextField(
        blank=True,
        max_length=200,
        verbose_name=_('abstract')
    )
    description = models.TextField(
        blank=True,
        verbose_name=_('description')
    )
    slug = models.SlugField(
        unique=True,
        verbose_name=_('slug')
    )
    authors = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='products',
        verbose_name=_('authors')
    )
    # NOTE(review): `buyers` and `customers` both relate users to products;
    # `customers` goes through the Purchase model while `buyers` does not —
    # confirm whether `buyers` is still in use.
    buyers = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='collection',
        blank=True,
        verbose_name=_('buyers')
    )
    customers = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        through='Purchase',
        blank=True,
        verbose_name=_('customers')
    )
    file = ProtectedFileField(
        upload_to='products/files/',
        blank=True,
        verbose_name=_('file')
    )
    category = models.ForeignKey(
        ProductCategory,
        null=True,
        blank=True,
        default='default',
        related_name='projects',
        on_delete=models.SET_NULL,
        verbose_name=_('category'),
    )
    unfolded = models.BooleanField(
        default = False,
        verbose_name = _('unfolded'),
    ) # Unfolded products can be bought by users
    pinned = models.BooleanField(
        default = True,
        verbose_name = _('pinned')
    ) # Pinned products appear on top of the landing page
    early_access = models.BooleanField(
        default = True,
        verbose_name = _('early access')
    ) # Products in early access activates special messages on their own pages
    courtesy_short_message = models.CharField(
        blank=True,
        max_length=50,
        verbose_name=_('short courtesy messages')
    )
    courtesy_message = models.TextField(
        blank=True,
        verbose_name=_('courtesy message')
    ) # folded products returns this message to users
    related_products = models.ManyToManyField(
        'self',
        related_name='products',
        blank=True,
        verbose_name=_('related products'),
    ) # Connect this product to others
    formats = models.ManyToManyField(
        ProductFormat,
        blank = True,
        related_name = 'products',
        verbose_name = _('formats'),
    )
    # Stripe identifiers; a product is purchasable ("at a premium") only
    # when both are set — see _at_a_premium below.
    stripe_price = models.CharField(
        blank = True,
        max_length = 50,
        verbose_name = _('stripe price')
    )
    stripe_product = models.CharField(
        unique = True,
        null = True,
        blank = True,
        max_length = 50,
        verbose_name = _('stripe product')
    )
    product_survey = models.ForeignKey(
        Survey,
        related_name = 'product',
        on_delete = models.SET_NULL,
        null = True,
        blank = True,
        verbose_name = _('survey')
    )

    objects = ProductQuerySet.as_manager()

    class Meta:
        verbose_name = _('product')
        verbose_name_plural = _('products')

    def __str__(self):
        return '{} ({})'.format(self.title, self.category)

    def class_name(self):
        # Label used to tag this model's rows in site-wide search results.
        return _('application')

    def get_absolute_url(self):
        return reverse('products:detail', args=[str(self.slug)])

    def get_lazy_absolute_url(self):
        return reverse_lazy('products:detail', args=[str(self.slug)])

    def search(self, query, *args, **kwargs):
        # Published products whose title OR description contain every word of *query*.
        results = Product.objects.published()
        query_list = query.split()
        results = results.filter(
            reduce(operator.and_,
                (Q(title__icontains=q) for q in query_list)) |
            reduce(operator.and_,
                (Q(description__icontains=q) for q in query_list))
        )
        return results

    def reporting(self, *args, **kwargs):
        # Build dashboard cards: per product, total customers plus per-language
        # subscriber counts and copy-emails actions.
        queryset = Product.objects.all().exclude(category__meta_category=True)
        queryset = queryset.prefetch()
        results = []
        for product in queryset:
            last_purchase = Purchase.objects.last_purchase(product)
            prefix = _('Last purchase')
            customers_en = Purchase.objects.customers(product, 'en')
            customers_en_emails = Purchase.objects.customers_emails(product, 'en')
            customers_it = Purchase.objects.customers(product, 'it')
            customers_it_emails = Purchase.objects.customers_emails(product, 'it')
            results.append({
                'columns': 6,
                'name': _('%(product)s: total customers') % {
                    'product': product.title,
                },
                'title': product.customers.count(),
                'description1': _('%(en)s english customers, %(it)s italian customers') % {
                    'en': customers_en.count(),
                    'it': customers_it.count(),
                },
                'description2': last_purchase.get_description(prefix) if last_purchase is not None else prefix + ': -',
                'action': '',
            })
            results.append({
                'columns': 3,
                'name': _('%(product)s: english subscribers') % {
                    'product': product.title,
                },
                'title': customers_en_emails.count(),
                'action': {
                    'label': _('copy emails'),
                    'function': 'copy-emails',
                    'parameters': customers_en_emails,
                },
            })
            results.append({
                'columns': 3,
                'name': _('%(product)s: italian subscribers') % {
                    'product': product.title,
                },
                'title': customers_it_emails.count(),
                'action': {
                    'label': _('copy emails'),
                    'function': 'copy-emails',
                    'parameters': customers_it_emails,
                },
            })
        return results

    # Return True if the user has added the product to his collections
    def user_has_already_bought(self, user):
        return user in self.customers.all()

    # Return True if the user has authored this collection
    def user_has_authored(self, user):
        return user in self.authors.all()

    # Returns the default currency of any product
    def get_currency(self):
        return 'EUR'

    # Returns the publishable status of the product
    def get_status(self):
        if self.published:
            return _('published')
        else:
            return _('draft')

    # Returns true if the product is at a premium (i.e. not free):
    # both Stripe identifiers must be configured.
    def _at_a_premium(self):
        if self.stripe_product and self.stripe_price:
            return True
        else:
            return False
    _at_a_premium.boolean = True
    _at_a_premium.short_description = _('at a premium')
    at_a_premium = property(_at_a_premium)
class ProductDetail(Element, HighlightedModel):
    """ A specific detail of a product."""
    # Details are listed on the product page via the `details` related name.
    product = models.ForeignKey(
        Product,
        null = True,
        related_name = 'details',
        verbose_name = _('product'),
        on_delete = models.SET_NULL,
    )

    class Meta:
        verbose_name = _('Detail')
        verbose_name_plural = _('Details')
class ChapterCategory(Category):
    """ Defines the type of a chapter. """
    # All fields come from the Category base; this subclass only renames.

    class Meta:
        verbose_name = _('Chapter category')
        verbose_name_plural = _('Chapter categories')
class Chapter(Element, DiventiImageModel):
    """ A specific chapter of a product."""
    # Accessible from the product via the `chapters` related name.
    product = models.ForeignKey(Product, null=True, related_name='chapters', verbose_name=_('product'), on_delete=models.SET_NULL)
    category = models.ForeignKey(ChapterCategory, null=True, verbose_name=_('category'), on_delete=models.SET_NULL)

    class Meta:
        verbose_name = _('Chapter')
        verbose_name_plural = _('Chapters')
class ImagePreview(Element, DiventiImageModel):
    """A list of cool images of the product."""
    # Accessible from the product via the `imagepreviews` related name.
    product = models.ForeignKey(Product, null=True, related_name='imagepreviews', verbose_name=_('product'), on_delete=models.SET_NULL)

    class Meta:
        verbose_name = _('Image')
        verbose_name_plural = _('Images')
class PurchaseQuerySet(models.QuerySet):
    """QuerySet helpers for Purchase: eager loading and customer reporting."""

    # Prefetch all relevant data
    def related(self):
        purchases = self.select_related('customer')
        purchases = purchases.select_related('product')
        return purchases

    # Returns the users that purchased the product
    # with "lan" as main language (any language when lan is None)
    def customers(self, product, lan=None):
        purchases = self.filter(product=product)
        customers_id = purchases.values_list('customer')
        UserModel = get_user_model()
        customers = UserModel.objects.filter(id__in=customers_id)
        if lan:
            customers = customers.filter(language=lan)
        customers = customers.is_active()
        return customers

    # Returns the emails of users that purchased the product
    # with "lan" as main language; restricted to GDPR-agreed users.
    def customers_emails(self, product, lan):
        customers = self.customers(product, lan)
        customers = customers.has_agreed_gdpr()
        customers = customers.values_list('email', flat=True)
        return customers

    # Returns the last customer that has purchased the product
    # with "lan" as main language (any language when lan is None)
    def last_purchase(self, product, lan=None):
        purchases = self.related()
        purchases = purchases.filter(product=product)
        if lan:
            purchases = purchases.filter(customer__language=lan)
        last_purchase = purchases.order_by('-created').first()
        return last_purchase
class Purchase(TimeStampedModel):
    """Through model linking a customer to a product they own."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE, verbose_name=_('product'))
    customer = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_('customer'))

    objects = PurchaseQuerySet.as_manager()

    class Meta:
        verbose_name = _('Purchase')
        verbose_name_plural = _('Purchases')

    def __str__(self):
        return _('Purchase: %(id)s') % {'id': self.id}

    # Returns the description of the purchase
    # (customer short name plus a humanized creation date)
    def get_description(self, prefix):
        description = _('%(prefix)s: %(last_pur)s on %(created)s') % {
            'prefix': prefix,
            'last_pur': self.customer.get_short_name(),
            'created': naturalday(self.created),
        }
return description | {
"repo_name": "flavoi/diventi",
"path": "diventi/products/models.py",
"copies": "1",
"size": "14787",
"license": "apache-2.0",
"hash": -1274900968654638300,
"line_mean": 32.388372093,
"line_max": 135,
"alpha_frac": 0.5890429489,
"autogenerated": false,
"ratio": 4.375554897898787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008775159863191656,
"num_lines": 430
} |
from functools import reduce
import operator
from django.db import models
from django.db.models import Q, Count, Sum
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from django.contrib.auth.models import AbstractUser, UserManager
from django.utils.text import slugify
from django.contrib.humanize.templatetags.humanize import naturalday
from diventi.core.models import DiventiImageModel, Element
from diventi.products.models import Product
class DiventiUserQuerySet(models.QuerySet):
    """Custom queryset with subscriber, author and GDPR helpers for DiventiUser."""

    # Return active users
    def is_active(self):
        return self.filter(is_active=True)

    # Fetch all active users that agreed to GDPR
    def has_agreed_gdpr(self):
        users = self.is_active()
        # Bug fix: chain the GDPR filter on the already-restricted queryset.
        # The previous `self.filter(...)` silently discarded the is_active()
        # restriction (the first assignment was a dead store).
        users = users.filter(has_agreed_gdpr=True)
        return users

    # Fetch all the achievements related to the user
    def achievements(self):
        user = self.prefetch_related('achievements')
        return user

    # Fetch all users that made at least a product
    def authors(self):
        products = Product.objects.all()
        users = self.filter(products__in=products)
        return users

    # Returns, per language, the number of registered emails
    def emails(self):
        return self.values('language').annotate(total=Count('email')).order_by('language')

    # Returns the active users that set "lan" as main language
    def subscribers(self, lan):
        users = self.is_active()
        users = users.filter(language=lan)
        return users

    # Returns the emails of GDPR-agreed users that set "lan" as main language
    def subscribers_emails(self, lan):
        users = self.subscribers(lan)
        users = users.has_agreed_gdpr()
        emails = users.values_list('email', flat=True)
        return emails

    # Returns the last user that has signed up
    # with "lan" as main language (any language when lan is None)
    def last_subscriber(self, lan=None):
        users = self.is_active()
        if lan:
            users = users.filter(language=lan)
        user = users.order_by('-date_joined').first()
        return user
class DiventiUserManager(UserManager):
    """Default manager exposing the DiventiUserQuerySet helpers."""

    def get_queryset(self):
        return DiventiUserQuerySet(self.model, using=self._db)

    def has_agreed_gdpr(self):
        return self.get_queryset().has_agreed_gdpr()

    def achievements(self):
        return self.get_queryset().achievements()

    def authors(self):
        return self.get_queryset().authors()

    def emails(self):
        return self.get_queryset().emails()

    def subscribers(self, lan):
        return self.get_queryset().subscribers(lan)

    def subscribers_emails(self, lan):
        # Bug fix: the queryset method is `subscribers_emails`; the previous
        # call to `lan_emails(lan)` raised AttributeError (no such method).
        return self.get_queryset().subscribers_emails(lan)

    def last_subscriber(self, lan=None):
        # `lan` is optional to mirror DiventiUserQuerySet.last_subscriber;
        # existing positional callers are unaffected.
        return self.get_queryset().last_subscriber(lan)
class DiventiAvatarQuerySet(models.QuerySet):
    # Fetch all users related to the avatar
    def users(self):
        # NOTE(review): querysets have no `diventiuser` attribute — this reads
        # like it was meant for a single avatar instance
        # (i.e. `avatar.diventiuser.all()`). Confirm intent before relying on it.
        avatar = self.diventiuser.all()
        return avatar
class DiventiAvatar(DiventiImageModel):
    """Selectable user avatar image."""
    # staff_only presumably restricts the avatar to staff accounts — confirm usage.
    staff_only = models.BooleanField(default=False, verbose_name=_('staff_only'))

    objects = DiventiAvatarQuerySet.as_manager()

    class Meta:
        verbose_name = _('Avatar')
        verbose_name_plural = _('Avatars')
class DiventiProfilePic(DiventiImageModel):
    """Profile picture image (referenced by DiventiUser.profilepic, staff use only)."""

    class Meta:
        verbose_name = _('Profile picture')
        verbose_name_plural = _('Profile pictures')
class DiventiCover(DiventiImageModel):
    """Profile cover image."""
    # Reuses DiventiAvatarQuerySet (its `users` helper is avatar-oriented).
    objects = DiventiAvatarQuerySet.as_manager()

    class Meta:
        verbose_name = _('Cover')
        verbose_name_plural = _('Covers')
class Achievement(Element):
    """Badge attached to users via DiventiUser.achievements."""

    class Meta:
        verbose_name = _('Achievement')
        verbose_name_plural = _('Achievements')
class Role(Element):
    """User role label (referenced by DiventiUser.role)."""

    class Meta:
        verbose_name = _('Role')
        verbose_name_plural = _('Roles')
class DiventiUser(AbstractUser):
    """Project user model: AbstractUser plus profile, language and GDPR data."""
    # Unique, URL-safe identifier derived from the short name and pk in save().
    nametag = models.SlugField(max_length=200, unique=True, verbose_name=_('nametag'))
    language = models.CharField(blank=True, max_length=10, choices=settings.LANGUAGES, default=settings.LANGUAGE_CODE, verbose_name=_('language'))
    # NOTE(review): NullBooleanField is deprecated in Django 3.1+;
    # BooleanField(null=True) is the modern equivalent — confirm Django version.
    has_agreed_gdpr = models.NullBooleanField(blank=True, verbose_name=_('subscriber status'))
    avatar = models.ForeignKey(DiventiAvatar, blank=True, null=True, related_name='diventiuser', on_delete=models.SET_NULL, verbose_name=_('avatar'))
    cover = models.ForeignKey(DiventiCover, blank=True, null=True, on_delete=models.SET_NULL, verbose_name=_('cover'))
    profilepic = models.ForeignKey(DiventiProfilePic, blank=True, null=True, on_delete=models.SET_NULL, verbose_name=_('profilepic')) # For staff use only
    bio = models.TextField(blank=True, verbose_name=_('bio'))
    role = models.ForeignKey(Role, blank=True, null=True, on_delete=models.SET_NULL, verbose_name=_('role'))
    achievements = models.ManyToManyField(Achievement, related_name='users')

    objects = DiventiUserManager()

    class Meta:
        verbose_name = _('User')
        verbose_name_plural = _('Users')
        ordering = ('nametag', )

    def save(self, *args, **kwargs):
        # Save twice on purpose: the first save guarantees a pk, which is then
        # baked into the nametag slug and persisted by the second save.
        super().save(*args, **kwargs)
        self.nametag = '-'.join((slugify(self.get_short_name()), slugify(self.pk)))
        super().save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse_lazy('accounts:detail', kwargs={'nametag': self.nametag})

    def class_name(self):
        # Label used to tag this model's rows in site-wide search results.
        return _('account')

    def clean(self):
        # Set the email as username
        self.username = self.email

    def get_diventi_username(self):
        # Prefer the full name when a first name was provided.
        if self.first_name:
            return self.get_full_name()
        else:
            return self.get_username()

    def search(self, query, *args, **kwargs):
        # Users whose first name, role title or bio contain every word of *query*;
        # results are flattened into plain dict rows for the search page.
        results = DiventiUser.objects.all()
        query_list = query.split()
        queryset = results.filter(
            reduce(operator.and_,
                (Q(first_name__icontains=q) for q in query_list)) |
            reduce(operator.and_,
                (Q(role__title__icontains=q) for q in query_list)) |
            reduce(operator.and_,
                (Q(bio__icontains=q) for q in query_list))
        )
        results = []
        for user in queryset:
            row = {
                'class_name': user.class_name(),
                'title': user.first_name,
                'description': user.bio,
                'get_absolute_url': user.get_absolute_url()
            }
            results.append(row)
        return results

    # Returns the description of the subscriber
    # (short name plus a humanized join date)
    def get_description(self, prefix):
        description = _('%(prefix)s: %(last_sub)s on %(date_joined)s') % {
            'prefix': prefix,
            'last_sub': self.get_short_name(),
            'date_joined': naturalday(self.date_joined),
        }
        return description

    def reporting(self, *args, **kwargs):
        # Dashboard cards: totals plus per-language subscriber counts and
        # copy-emails actions.
        queryset = DiventiUser.objects.all().is_active()
        results = []
        last_subscriber = queryset.last_subscriber()
        prefix = _('Last subscriber')
        results.append({
            'columns': 6,
            'name': _("users count"),
            'title': queryset.count(),
            'description1': _('%(en)s english subscribers, %(it)s italian subscribers') % {
                'en': queryset.subscribers('en').count(),
                'it': queryset.subscribers('it').count(),
            },
            'description2': last_subscriber.get_description(prefix) if last_subscriber is not None else prefix + ': -',
            'action': '',
        })
        results.append({
            'columns': 3,
            'name': _("english subscribers"),
            'title': queryset.subscribers_emails('en').count(),
            'action': {'label': _('copy emails'), 'function': 'copy-emails', 'parameters': queryset.subscribers_emails('en')},
        })
        results.append({
            'columns': 3,
            'name': _("italian subscribers"),
            'title': queryset.subscribers_emails('it').count(),
            'action': {'label': _('copy emails'), 'function': 'copy-emails', 'parameters': queryset.subscribers_emails('it')},
        })
        return results

    def __str__(self):
        return u'{0} ({1})'.format(self.get_short_name(), self.username)
| {
"repo_name": "flavoi/diventi",
"path": "diventi/accounts/models.py",
"copies": "1",
"size": "8546",
"license": "apache-2.0",
"hash": -7439312121132650000,
"line_mean": 33.0163934426,
"line_max": 154,
"alpha_frac": 0.6008895131,
"autogenerated": false,
"ratio": 4.026390197926484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0035774993770843443,
"num_lines": 244
} |
from functools import reduce
BASE_COST = 800
discount = [1.0, 1.0, 0.95, 0.9, 0.8, 0.75]
def groupCost(g):
    """Discounted unit count of one group: group size times its discount factor."""
    size = len(g)
    return size * discount[size]
class Grouping:
    """A partition of purchased books into discount groups (sets of distinct titles)."""

    def __init__(self, groups=None):
        self.groups = [set()] if groups is None else groups

    def total(self):
        # Sum of each group's discounted unit count, scaled by the base price.
        return sum(map(groupCost, self.groups)) * BASE_COST

    def dup(self):
        # Copy one level deep: new list of new sets (books themselves shared).
        return Grouping(list(map(set, self.groups)))

    def add_to_valid(self, b):
        """Returns all possible groupings from current grouping adding book b
        """
        other = self.dup()
        other.groups.sort(key=lambda g: len(g))
        results = []
        for i, g in enumerate(other.groups):
            if b not in g:
                o2 = other.dup()
                o2.groups[i].add(b)
                results.append(o2)
        if not results:
            # Book is already in every group: open a new group for it.
            other.groups.append(set([b]))
            return [other]
        return results

    def __lt__(self, other):
        # Ordering by price lets min() select the cheapest grouping.
        return self.total() < other.total()
def step(rs, b):
    """Expand every grouping in *rs* with book *b*, flattening the candidates."""
    expanded = []
    for grouping in rs:
        expanded.extend(grouping.add_to_valid(b))
    return expanded
def calculate_total(books):
    """Cheapest total price (rounded) for the given list of book ids."""
    if not books:
        return 0
    seed = [Grouping([{books[0]}])]
    cheapest = min(reduce(step, books[1:], seed))
    return round(cheapest.total())
| {
"repo_name": "N-Parsons/exercism-python",
"path": "exercises/book-store/example.py",
"copies": "1",
"size": "1293",
"license": "mit",
"hash": 3424081276115114500,
"line_mean": 24.387755102,
"line_max": 77,
"alpha_frac": 0.5367362722,
"autogenerated": false,
"ratio": 3.552197802197802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4588934074397802,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
BASE_COST = 800
discount = [1.0, 1.0, 0.95, 0.9, 0.8, 0.75]
def group_cost(group):
    """Discounted unit count of one group: group size times its discount factor."""
    size = len(group)
    return size * discount[size]
class Grouping:
    """A partition of purchased books into discount groups (sets of distinct titles)."""

    def __init__(self, groups=None):
        self.groups = [set()] if groups is None else groups

    def total(self):
        # Sum of each group's discounted unit count, scaled by the base price.
        return sum(map(group_cost, self.groups)) * BASE_COST

    def dup(self):
        # Copy one level deep: new list of new sets (books themselves shared).
        return Grouping(list(map(set, self.groups)))

    def add_to_valid(self, book):
        """Returns all possible groupings from the
        current grouping adding book
        """
        other = self.dup()
        other.groups.sort(key=lambda g: len(g))
        results = []
        for index, group in enumerate(other.groups):
            if book not in group:
                other2 = other.dup()
                other2.groups[index].add(book)
                results.append(other2)
        if not results:
            # Book is already in every group: open a new group for it.
            other.groups.append(set([book]))
            return [other]
        return results

    def __lt__(self, other):
        # Ordering by price lets min() select the cheapest grouping.
        return self.total() < other.total()
def step(basket, book):
    """Expand every grouping in *basket* with *book*, flattening the candidates."""
    expanded = []
    for grouping in basket:
        expanded.extend(grouping.add_to_valid(book))
    return expanded
def total(basket):
    """Cheapest total price (rounded) for the given basket of book ids."""
    if not basket:
        return 0
    seed = [Grouping([{basket[0]}])]
    cheapest = min(reduce(step, basket[1:], seed))
    return round(cheapest.total())
| {
"repo_name": "smalley/python",
"path": "exercises/book-store/example.py",
"copies": "2",
"size": "1403",
"license": "mit",
"hash": 7596700005181253000,
"line_mean": 25.5098039216,
"line_max": 64,
"alpha_frac": 0.5566642908,
"autogenerated": false,
"ratio": 3.8125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 51
} |
from functools import reduce
def breakup(line):
    """Parse an 'LxWxH' spec such as '2x3x4' into a list of ints."""
    dimensions = list(map(int, line.split("x")))
    print (line + " broke in to " + str(dimensions))
    return dimensions
def double(x):
    """Return twice *x*."""
    return 2 * x
def calculatesides(dimensions):
    """Paper for one present: full surface area plus slack equal to the smallest face."""
    print ("Given dimensions " + str(dimensions))
    areas = buildSortedAreas(dimensions)
    slack = areas[0]
    surface = sum(map(double, areas))
    return surface + slack
def buildSortedAreas(dimensions):
    """Face areas of the box (l*w, l*h, w*h), smallest first."""
    print ("Building Areas from " + str(dimensions))
    length, width, height = dimensions[0], dimensions[1], dimensions[2]
    areas = [length * width, length * height, width * height]
    return sort(areas)
def calculate_wrapping_paper(content):
    """Total square feet of paper for all presents described in *content*."""
    lines = cleanup(content)
    parsed = map(breakup, lines)
    per_present = map(calculatesides, parsed)
    return sum(per_present)
def cleanup(content):
    """Lazily strip surrounding whitespace from every line in *content*."""
    return map(str.strip, content)
def sort(list):
    # In-place ascending sort that returns its argument, so it can be used
    # inside map(). Callers rely on the argument being mutated.
    # NOTE(review): the parameter shadows the builtin `list`.
    list.sort()
    return list
def calculate_ribbon(content):
    """Total ribbon: wrap around each present's smallest face plus a bow per present."""
    lines = cleanup(content)
    parsed = list(map(sort, map(breakup, lines)))
    all_around_presents = sum(ribbon_to_wrap(dims) for dims in parsed)
    print ("all_around_presents")
    print (all_around_presents)
    all_bows = sum(ribbon_for_bow(dims) for dims in parsed)
    print (all_bows)
    return all_around_presents + all_bows
def ribbon_to_wrap(dimensions):
    """Perimeter of the smallest face; assumes *dimensions* is sorted ascending."""
    smallest_sides_dimensions = dimensions[:2]
    print ("Found smallest side of " + str(smallest_sides_dimensions))
    to_wrap = sum(side + side for side in smallest_sides_dimensions)
    print ("ribbon to go around the sides: " + str(to_wrap))
    return to_wrap
def ribbon_for_bow(dimensions):
    """Ribbon for the bow: the product of all sides (the box volume)."""
    return reduce(lambda running, side: running * side, dimensions)
| {
"repo_name": "icbat/adventofcode2015",
"path": "python/day2/wrapping_paper_dimensions.py",
"copies": "1",
"size": "1934",
"license": "mit",
"hash": 3235440595638385000,
"line_mean": 28.6984126984,
"line_max": 92,
"alpha_frac": 0.6530506722,
"autogenerated": false,
"ratio": 3.574861367837338,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9660798855239696,
"avg_score": 0.013422636959528288,
"num_lines": 63
} |
from functools import reduce
def pick(obj, key):
    """Index into a plain dict by *key*; read an attribute from anything else."""
    # Deliberately `type(...) is dict` (not isinstance): dict subclasses fall
    # through to attribute access, matching the original semantics.
    if type(obj) is not dict:
        return getattr(obj, key)
    return obj[key]
def match(part, template):
    """Recursively compare *part* with *template*.

    A dict template must match key-by-key; a callable template is a predicate;
    anything else is compared with ==. Missing keys/attributes mean no match.
    """
    if type(template) is dict:
        try:
            return all(match(pick(part, key), sub) for key, sub in template.items())
        except (KeyError, AttributeError):
            return False
    if callable(template):
        return template(part)
    return part == template
def kmatch(msg, key, template):
    """Match the part of *msg* addressed by *key* against *template*.

    '_' addresses the whole message; '__' in *key* drills down one level per
    segment, e.g. 'chat__id' -> msg['chat']['id'].
    """
    if key == '_':
        part = msg
    else:
        try:
            part = reduce(pick, key.split('__'), msg)
        except (KeyError, AttributeError):
            return False
    # match() is intentionally outside the try block so that errors raised by
    # a callable template propagate instead of being read as a missing key.
    return match(part, template)
def ok(msg, **kwargs):
    """True when *msg* satisfies every key/template pair given as keyword args."""
    return all(kmatch(msg, key, template) for key, template in kwargs.items())
| {
"repo_name": "neerajvashistha/pa-dude",
"path": "lib/python2.7/site-packages/telepot/filtering.py",
"copies": "4",
"size": "1195",
"license": "mit",
"hash": 1546235357802989000,
"line_mean": 26.4523809524,
"line_max": 87,
"alpha_frac": 0.5112970711,
"autogenerated": false,
"ratio": 3.943894389438944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005538309410701063,
"num_lines": 42
} |
from functools import reduce
from dynts.conf import settings
from ...api import timeseries, is_timeseries
class Expr:
    """Base class for abstract syntax tree nodes."""

    def count(self):
        """Number of nodes in this subtree (a plain node counts as 1)."""
        return 1

    def malformed(self):
        """True when the node represents an unparsable expression."""
        return False

    @property
    def type(self):
        """Lower-cased class name, used as a cheap node-type tag."""
        return self.__class__.__name__.lower()

    def info(self):
        """Human readable rendering of the node; overridden by subclasses."""
        return ''

    def __repr__(self):
        return '%s' % self.info()

    def __str__(self):
        return self.__repr__()

    def __eq__(self, other):
        # Nodes are equal when they render to the same expression text.
        return isinstance(other, Expr) and str(other) == str(self)

    def symbols(self):
        """List of symbol names referenced by this subtree, or None."""
        return None

    def variables(self):
        return None

    def removeduplicates(self, entries=None):
        return None

    def unwind(self, values, backend, **kwargs):
        """Evaluate the node against *values*, caching the result after the first call.

        The ``kwargs`` dictionary can contain data which can be used
        to override values.
        """
        try:
            return self._unwind_value
        except AttributeError:
            self._unwind_value = self._unwind(values, backend, **kwargs)
            return self._unwind_value

    def _unwind(self, values, backend, **kwargs):
        raise NotImplementedError("Unwind method missing for %s" % self)
class BaseExpression(Expr):
    '''Base class for single-value expression
    '''
    def __init__(self, value):
        # The wrapped scalar/child; subclasses interpret it.
        self.value = value

    def info(self):
        return str(self.value)
class BadExpression(BaseExpression):
    '''A malformed expression
    '''
    # Marker node produced when parsing fails; only flips malformed().
    def malformed(self):
        return True
class Expression(BaseExpression):
    '''Base class for single expression
    '''
    def symbols(self):
        return self.value.symbols()

    def removeduplicates(self, entries=None):
        # If the wrapped value is a Symbol already present in *entries*,
        # replace it with the first instance and return the displaced node;
        # otherwise register it (or recurse into a composite value).
        if entries is None:
            entries = {}
        if isinstance(self.value, Symbol):
            c = entries.get(str(self.value), None)
            if c:
                ov = [self.value]
                self.value = c
                return ov
            else:
                entries[str(self.value)] = self.value
                return None
        else:
            return self.value.removeduplicates(entries=entries)
class Number(BaseExpression):
    '''
    A simple number.
    This expression is a constant numeric value
    '''
    def _unwind(self, values, backend, **kwargs):
        # Constants evaluate to themselves regardless of the input data.
        return self.value
class String(BaseExpression):
    '''
    A simple string.
    This expression is a constant numeric value
    '''
    def _unwind(self, values, backend, **kwargs):
        # NOTE(review): `unwind` is not defined anywhere in this module's
        # visible scope — this looks like a latent NameError; confirm where
        # `unwind.stringData` is supposed to come from.
        return unwind.stringData(self.value)
class Parameter(BaseExpression):
    # Parameter names are normalised to lower-case strings on construction.
    def __init__(self, value):
        super().__init__(str(value).lower())
class Symbol(BaseExpression):
    '''Timeserie symbol. This expression is replaced by a timeserie value for the symbol
    '''
    def __init__(self, value, field=None):
        # `field` is accepted for interface compatibility but unused here.
        value = settings.symboltransform(value)
        # Back-tick quoted symbols (`name`) are unquoted, with a flag kept so
        # info() can render them back in their original form.
        if len(value) > 2 and value[0] == '`' and value[-1] == '`':
            self.quotes = True
            value = value[1:-1]
        else:
            self.quotes = False
        super().__init__(str(value))

    def info(self):
        if self.quotes:
            return '`{0}`'.format(self.value)
        else:
            return self.value

    def symbols(self):
        return [self.value]

    def _unwind(self, values, backend, **kwargs):
        # *values* maps symbol name -> either a ready timeseries or a raw
        # {'date': ..., 'value': ...} mapping to build one from.
        sdata = values[self.value]
        if is_timeseries(sdata):
            return sdata
        else:
            ts = timeseries(name=str(self),
                            date=sdata['date'],
                            data=sdata['value'],
                            backend=backend)
            # Uses this hack to make sure timeseries are ordered
            # Lots of room for performance improvement
            hash = ts.ashash()
            hash.modified = True
            ts = hash.getts()
            # Cache the built timeseries so the next lookup skips the rebuild.
            values[ts.name] = ts
            return ts

    def lineardecomp(self):
        # NOTE(review): `linearDecomp` is not defined in this module's visible
        # scope — confirm it is imported elsewhere, otherwise this raises NameError.
        return linearDecomp().append(self)
class MultiExpression(Expr):
    '''Base class for expression involving two or more elements
    '''
    def __init__(self, concat_operator, concatenate=True):
        # When *concatenate* is True, appending a node of the same class
        # splices its children in instead of nesting it (see append()).
        self.__concatenate = concatenate
        self.children = []
        self.concat_operator = concat_operator

    def malformed(self):
        # A composite node is malformed as soon as any child is.
        for child in self.children:
            if child.malformed():
                return True
        return False

    def __len__(self):
        return len(self.children)

    def __iter__(self):
        return self.children.__iter__()

    def info(self):
        # Render children joined by the concatenation operator.
        c = self.concat_operator
        return reduce(lambda x, y: '%s%s%s' % (x, c, y), self.children)

    def symbols(self):
        # Union of the children's symbols, preserving first-seen order.
        cs = []
        for c in self.children:
            ns = c.symbols()
            if ns:
                for n in ns:
                    if n not in cs:
                        cs.append(n)
        return cs

    def append(self, el):
        # Same-class nodes are flattened (when concatenation is enabled);
        # any other Expr is appended as a child; everything else is rejected.
        if isinstance(el, self.__class__) and self.__concatenate:
            for c in el:
                self.append(c)
        elif isinstance(el,Expr):
            self.children.append(el)
        else:
            raise ValueError("%s is not a valid grammar expression" % el)
        return el

    def __getitem__(self, idx):
        return self.children[idx]

    def removeduplicates(self, entries = None):
        '''
        Loop over children a remove duplicate entries.
        @return - a list of removed entries
        '''
        # NOTE(review): `entries == None` works here but `is None` is the
        # usual idiom.
        removed = []
        if entries == None:
            entries = {}
        new_children = []
        for c in self.children:
            cs = str(c)
            cp = entries.get(cs,None)
            if cp:
                # Duplicate: keep the first-seen node, record the displaced one.
                new_children.append(cp)
                removed.append(c)
            else:
                dups = c.removeduplicates(entries)
                if dups:
                    removed.extend(dups)
                entries[cs] = c
                new_children.append(c)
        self.children = new_children
        return removed
class ConcatOp(MultiExpression):
    '''
    Refinement of MultiExpression with a new constructor.
    This class simply defines a new __init__ method
    '''
    def __init__(self, left, right, op, concatenate=True):
        # Binary-style constructor: seed the children with the two operands.
        super().__init__(op, concatenate=concatenate)
        self.append(left)
        self.append(right)
class ConcatenationOp(ConcatOp):
    '''Concatenation of expressions using ``settings.concat_operator``.'''
    def __init__(self, left, right):
        super().__init__(left, right, settings.concat_operator)
    def info(self):
        # Unlike the base class, adds a space after each operator.
        c = self.concat_operator
        return reduce(lambda x, y: '%s%s %s' % (x, c, y), self.children)
    def _unwind(self, values, backend, sametype=True, **kwargs):
        # Unwind each child independently and return the list of results.
        result = []
        for child in self:
            result.append(child.unwind(values, backend, **kwargs))
        return result
class SplittingOp(ConcatOp):
    '''Expressions separated by ``settings.separator_operator``; unwinds
    into a listData container holding each child's unwound value.'''
    def __init__(self, left, right):
        super().__init__(left, right, settings.separator_operator)
    def _unwind(self, values, backend, **kwargs):
        ts = unwind.listData(label = str(self))
        for c in self:
            # NOTE(review): **kwargs are not forwarded to child.unwind —
            # confirm this is intentional.
            v = c.unwind(values, backend)
            ts.append(v)
        return ts
class BinOp(ConcatOp):
    '''Binary operator expression with exactly two operands.'''
    def __init__(self, left, right, op):
        if op in settings.special_operators:
            raise ValueError('not a valid binary operator: %s' % op)
        super().__init__(left, right, op, concatenate=False)
        # Disable further appends: a binary op has exactly two children.
        self.append = None
    @property
    def left(self):
        # First operand.
        return self[0]
    @property
    def right(self):
        # Second operand.
        return self[1]
class EqualOp(BinOp):
    '''Equal operator expression. For example
    * ``window = 35``
    * ``param = AMZN``
    The left hand side is the name of the parameter, while the right-hand side
    is the parameter values which can be a :class:`Symbol`.
    The left hand side is **never** a symbol.
    '''
    def __init__(self,left,right):
        if not isinstance(left,Parameter):
            # Coerce a Symbol on the left into a Parameter; anything else
            # is rejected.
            if not isinstance(left,Symbol):
                raise ValueError('Left-hand-side of %s should be a string'
                                 % self)
            left = Parameter(left.value)
        super().__init__(left, right, "=")
    def _unwind(self, values, backend, **kwargs):
        # Produce a one-entry dict {param_name: value}; an explicit keyword
        # argument with the same name takes precedence over the expression.
        name = str(self.left)
        if name in kwargs:
            return {name: kwargs[name]}
        else:
            return {name: self.right.unwind(values, backend, **kwargs)}
class Bracket(Expression):
    '''A :class:`dynts.dsl.Expr` class for enclosing group of
    :class:`dynts.dsl.Expr`.
    It forms the building block of :class:`dynts.dsl.Function`
    and other operators.
    '''
    def __init__(self, value, pl, pr):
        # pl/pr are the literal bracket characters, e.g. '(' and ')'.
        self.__pl = pl
        self.__pr = pr
        super().__init__(value)
    def info(self):
        return '%s%s%s' % (self.__pl, self.value, self.__pr)
    def _unwind(self, *args, **kwargs):
        # Split the unwound child values into positional and keyword
        # arguments: dict items (as produced by EqualOp) become keywords,
        # everything else stays positional.
        data = self.value.unwind(*args, **kwargs)
        if not isinstance(data,list):
            data = [data]
        args = []
        kwargs = {}
        for item in data:
            if isinstance(item,dict):
                kwargs.update(item)
            else:
                args.append(item)
        return args, kwargs
class uMinus(Expression):
    '''Unary minus: renders as ``-value`` and contributes a -1 factor
    to the linear decomposition.'''
    def info(self):
        return '-%s' % self.value
    def lineardecomp(self):
        return linearDecomp().append(self,-1)
| {
"repo_name": "quantmind/dynts",
"path": "dynts/dsl/ast/base.py",
"copies": "1",
"size": "9833",
"license": "bsd-3-clause",
"hash": 1358479623120942000,
"line_mean": 25.9346590909,
"line_max": 88,
"alpha_frac": 0.5324926269,
"autogenerated": false,
"ratio": 4.301399825021872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5333892451921872,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from numpy import array, ndarray, dtype
# Singleton dtype used to detect requests for object (boxed Python) arrays.
object_type = dtype(object)
def crossoperator(func, *args):
    """Apply *func* element-wise across the given sequences.

    Equivalent to ``[func(a0, b0, ...), func(a1, b1, ...), ...]``; the
    result is truncated to the shortest sequence.
    """
    result = []
    for row in zip(*args):
        result.append(func(*row))
    return result
def scalarasiter(x):
    """Return *x* as an iterable.

    ``None`` maps to an empty tuple; an already-iterable value is returned
    unchanged; any other scalar is wrapped in a 1-tuple.
    """
    if x is None:
        return ()
    if hasattr(x, '__iter__'):
        return x
    return (x,)
def asarray(x, dtype=None):
    '''Convert ``x`` into a ``numpy.ndarray``.'''
    seq = scalarasiter(x)
    if isinstance(seq, ndarray):
        return seq
    if not hasattr(seq, '__len__'):
        seq = list(seq)
    if dtype == object_type:
        # Fill element-by-element so nested iterables stay boxed Python
        # objects instead of being broadcast into extra dimensions.
        out = ndarray((len(seq),), dtype=dtype)
        for pos, item in enumerate(seq):
            out[pos] = item
        return out
    return array(seq, dtype=dtype)
def ascolumn(x, dtype = None):
    '''Convert ``x`` into a ``column``-type ``numpy.ndarray``.'''
    arr = asarray(x, dtype)
    if len(arr.shape) >= 2:
        return arr
    return arr.reshape(len(arr), 1)
def assimple(x):
    """Collapse a length-1 iterable to its single element.

    Non-iterables are returned unchanged. Iterables without ``len()``
    (e.g. generators) are materialised into a list first.

    Returns
    -------
    The lone element if ``x`` is an iterable of length one, otherwise
    ``x`` itself (possibly converted to a list).
    """
    if not hasattr(x, '__iter__'):
        return x
    try:
        len(x)
    except TypeError:  # was a bare `except:`; only unsized iterables belong here
        x = list(x)
    return x[0] if len(x) == 1 else x
class cross:
    '''Cross section wrapper class'''
    # Element-wise min/max across several sequences (see crossoperator).
    min = lambda *args : crossoperator(min, *args)
    max = lambda *args : crossoperator(max, *args)
    def __init__(self, elem):
        self.elem = asarray(elem)
    def __iter__(self):
        return iter(self.elem)
    def __eq__(self, other):
        # Element-wise comparison folded with `and`: truthy only if every
        # pair of corresponding elements compares equal.
        return reduce(
            lambda x, y: x and y[0] == y[1],
            zip(self.elem, asarray(other)),
            True
        )
    def __ge__(self, other):
        return reduce(
            lambda x, y: x and y[0] >= y[1],
            zip(self.elem, asarray(other)),
            True
        )
    def __le__(self, other):
        return reduce(
            lambda x, y: x and y[0] <= y[1],
            zip(self.elem, asarray(other)),
            True
        )
    def __gt__(self, other):
        # NOTE(review): defined as the negation of <=, not an element-wise
        # strict comparison — confirm intended partial-order semantics.
        return not (self <= other)
    def __lt__(self, other):
        return not (self >= other)
| {
"repo_name": "quantmind/dynts",
"path": "dynts/utils/section.py",
"copies": "1",
"size": "2249",
"license": "bsd-3-clause",
"hash": 7510346334945448000,
"line_mean": 21.9255319149,
"line_max": 65,
"alpha_frac": 0.4828812806,
"autogenerated": false,
"ratio": 3.904513888888889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9874781579317413,
"avg_score": 0.002522718034295006,
"num_lines": 94
} |
from functools import reduce
import wx
from gooey.gui import formatters, events
from gooey.gui.util import wx_util
from gooey.util.functional import getin, ifPresent
from gooey.gui.validators import runValidator
from gooey.gui.components.util.wrapped_static_text import AutoWrappedStaticText
class BaseWidget(wx.Panel):
    """Abstract interface for Gooey input widgets.

    Subclasses must implement layout (`arrange`, `getSublayout`), signal
    wiring (`connectSignal`, `receiveChange`, `dispatchChange`) and value
    handling (`setValue`, `formatOutput`).
    """
    # Concrete wx control class instantiated by getWidget; set by subclasses.
    widget_class = None
    def arrange(self, label, text):
        raise NotImplementedError
    def getWidget(self, parent, **options):
        # Instantiate the concrete wx control.
        return self.widget_class(parent, **options)
    def connectSignal(self):
        raise NotImplementedError
    def getSublayout(self, *args, **kwargs):
        raise NotImplementedError
    def setValue(self, value):
        raise NotImplementedError
    def receiveChange(self, *args, **kwargs):
        raise NotImplementedError
    def dispatchChange(self, value, **kwargs):
        raise NotImplementedError
    def formatOutput(self, metatdata, value):
        # NOTE(review): `metatdata` is a typo for `metadata`, kept because
        # subclasses repeat the same spelling.
        raise NotImplementedError
class TextContainer(BaseWidget):
    """Standard widget frame: bold label, help text, the input control and
    an (initially hidden) error message, stacked vertically."""
    widget_class = None
    def __init__(self, parent, widgetInfo, *args, **kwargs):
        super(TextContainer, self).__init__(parent, *args, **kwargs)
        # widgetInfo carries 'id', 'data' (display metadata) and 'options'.
        self.info = widgetInfo
        self._id = widgetInfo['id']
        self._meta = widgetInfo['data']
        self._options = widgetInfo['options']
        self.label = wx.StaticText(self, label=widgetInfo['data']['display_name'])
        self.help_text = AutoWrappedStaticText(self, label=widgetInfo['data']['help'] or '')
        self.error = AutoWrappedStaticText(self, label='')
        self.error.Hide()
        self.widget = self.getWidget(self)
        self.layout = self.arrange(*args, **kwargs)
        self.setColors()
        self.SetSizer(self.layout)
        self.Bind(wx.EVT_SIZE, self.onSize)
        if self._meta['default']:
            self.setValue(self._meta['default'])
    def arrange(self, *args, **kwargs):
        """Build the vertical sizer: label, help text, control, error."""
        wx_util.make_bold(self.label)
        wx_util.withColor(self.label, self._options['label_color'])
        wx_util.withColor(self.help_text, self._options['help_color'])
        wx_util.withColor(self.error, self._options['error_color'])
        self.help_text.SetMinSize((0,-1))
        layout = wx.BoxSizer(wx.VERTICAL)
        if self._options.get('show_label', True):
            layout.Add(self.label, 0, wx.EXPAND)
        else:
            # Hidden label: keep vertical rhythm with a stretch spacer.
            self.label.Show(False)
            layout.AddStretchSpacer(1)
        layout.AddSpacer(2)
        if self.help_text and self._options.get('show_help', True):
            layout.Add(self.help_text, 1, wx.EXPAND)
            layout.AddSpacer(2)
        else:
            self.help_text.Show(False)
            layout.AddStretchSpacer(1)
        layout.Add(self.getSublayout(), 0, wx.EXPAND)
        layout.Add(self.error, 1, wx.EXPAND)
        self.error.Hide()
        return layout
    def setColors(self):
        """Apply label/help/error colors (and optional backgrounds)."""
        wx_util.make_bold(self.label)
        wx_util.withColor(self.label, self._options['label_color'])
        wx_util.withColor(self.help_text, self._options['help_color'])
        wx_util.withColor(self.error, self._options['error_color'])
        if self._options.get('label_bg_color'):
            self.label.SetBackgroundColour(self._options.get('label_bg_color'))
        if self._options.get('help_bg_color'):
            self.help_text.SetBackgroundColour(self._options.get('help_bg_color'))
        if self._options.get('error_bg_color'):
            self.error.SetBackgroundColour(self._options.get('error_bg_color'))
    def getWidget(self, *args, **options):
        return self.widget_class(*args, **options)
    def getWidgetValue(self):
        raise NotImplementedError
    def getSublayout(self, *args, **kwargs):
        # Default: the bare control, expanded horizontally.
        layout = wx.BoxSizer(wx.HORIZONTAL)
        layout.Add(self.widget, 1, wx.EXPAND)
        return layout
    def onSize(self, event):
        # print(self.GetSize())
        # self.error.Wrap(self.GetSize().width)
        # self.help_text.Wrap(500)
        # self.Layout()
        event.Skip()
    def getValue(self):
        """Collect this widget's value plus validation outcome.

        Returns a dict with 'id', formatted 'cmd', 'rawValue', validation
        'test'/'error' and the CLI argument type.
        """
        userValidator = getin(self._options, ['validator', 'test'], 'True')
        message = getin(self._options, ['validator', 'message'], '')
        # NOTE(review): the user-supplied validator string is eval'd here —
        # assumes the Gooey config is trusted input.
        testFunc = eval('lambda user_input: bool(%s)' % userValidator)
        satisfies = testFunc if self._meta['required'] else ifPresent(testFunc)
        value = self.getWidgetValue()
        return {
            'id': self._id,
            'cmd': self.formatOutput(self._meta, value),
            'rawValue': value,
            'test': runValidator(satisfies, value),
            'error': None if runValidator(satisfies, value) else message,
            'clitype': 'positional'
                       if self._meta['required'] and not self._meta['commands']
                       else 'optional'
        }
    def setValue(self, value):
        self.widget.SetValue(value)
    def setErrorString(self, message):
        self.error.SetLabel(message)
        self.error.Wrap(self.Size.width)
        self.Layout()
    def showErrorString(self, b):
        self.error.Wrap(self.Size.width)
        self.error.Show(b)
    def setOptions(self, values):
        # No-op by default; subclasses with choices override this.
        return None
    def receiveChange(self, metatdata, value):
        raise NotImplementedError
    def dispatchChange(self, value, **kwargs):
        raise NotImplementedError
    def formatOutput(self, metadata, value):
        raise NotImplementedError
class BaseChooser(TextContainer):
    """ Base Class for the Chooser widget types """
    def setValue(self, value):
        # Chooser controls expose lowercase setValue/getValue.
        self.widget.setValue(value)
    def getWidgetValue(self):
        return self.widget.getValue()
    def formatOutput(self, metatdata, value):
        # Delegate to the generic formatter.
        return formatters.general(metatdata, value)
| {
"repo_name": "partrita/Gooey",
"path": "gooey/gui/components/widgets/bases.py",
"copies": "1",
"size": "5908",
"license": "mit",
"hash": 6502257795734131000,
"line_mean": 31.0055865922,
"line_max": 92,
"alpha_frac": 0.6076506432,
"autogenerated": false,
"ratio": 3.9891964888588793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5096847132058879,
"avg_score": null,
"num_lines": null
} |
from functools import reduce, update_wrapper
def pipe_call(a, b):
    # reduce() helper: feed the accumulated value `a` into callable `b`.
    return b(a)
def and_then(*f):
    """Compose callables left-to-right: ``and_then(f, g)(x) == g(f(x))``.

    With no callables the returned function is the identity.
    """
    def and_then_call(x):
        out = x
        for step in f:
            out = step(out)
        return out
    return and_then_call
class Pipe:
    """Lazy call wrapper composable with the ``|`` operator.

    Operator cheatsheet (each builds a new Pipe):
      ``p / f``    swap in callable (or method-name string) ``f``
      ``p.name``   call method ``name`` on the piped value
      ``p ** d``   merge keyword arguments ``d``
      ``p * args`` append positional args after the piped value
      ``p @ args`` prepend positional args before the piped value
      ``x | p``    evaluate: apply the stored callable to ``x``
    """
    def __init__(self, f, l_args, r_args, kwargs, f_continue: tuple = None):
        # `f` may be a callable or a method-name string (see __call__).
        if not isinstance(f, str):
            update_wrapper(self, f)
        self.f = f
        self.f_continue = f_continue
        self.l_args = l_args
        self.r_args = r_args
        self.kwargs = kwargs
    def __truediv__(self, other):
        # Replace the wrapped callable, keeping the stored arguments.
        return Pipe(other, self.l_args, self.r_args, self.kwargs)
    def __getattr__(self, name):
        # Attribute access builds a pipe that will call that method.
        return Pipe(name, self.l_args, self.r_args, self.kwargs)
    def __pow__(self, kwargs):
        # NOTE(review): the merged dict `kwargs` is discarded — the new Pipe
        # is built from the unmodified self.kwargs. Looks like a bug; the
        # mutation of the caller's dict is also a side effect. Confirm intent.
        kwargs.update(self.kwargs)
        return Pipe(self.f, self.l_args, self.r_args, self.kwargs)
    def __mul__(self, args):
        if not isinstance(args, tuple):
            args = (args,)
        return Pipe(self.f, self.l_args, self.r_args + args, self.kwargs)
    def __matmul__(self, args):
        if not isinstance(args, tuple):
            args = (args,)
        return Pipe(self.f, self.l_args + args, self.r_args, self.kwargs)
    def __call__(self, left):
        if isinstance(self.f, str):
            # String f: invoke the named method on the piped value.
            v = getattr(left, self.f)(*self.l_args, *self.r_args, **self.kwargs)
        else:
            # Callable f: the piped value goes between l_args and r_args.
            v = self.f(*self.l_args, left, *self.r_args, **self.kwargs)
        if not self.f_continue:
            return v
        else:
            # Thread the result through any continuation callables.
            return reduce(pipe_call, self.f_continue, v)
    def __ror__(self, left):
        # ``value | pipe`` evaluates the pipe.
        return self(left)
# Identity pipe: the seed expression for building pipelines
# (e.g. ``5 | e / str`` applies ``str`` to 5 via Pipe.__truediv__).
e = Pipe(lambda x: x, (), (), dict())
| {
"repo_name": "manhong2112/CodeColle",
"path": "Python/waifu/pipe_fn.py",
"copies": "1",
"size": "1563",
"license": "mit",
"hash": 7037716326892806000,
"line_mean": 24.6229508197,
"line_max": 77,
"alpha_frac": 0.5822136916,
"autogenerated": false,
"ratio": 3.132264529058116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42144782206581166,
"avg_score": null,
"num_lines": null
} |
from functools import reduce, wraps
from itertools import chain, tee
from random import choice
from string import digits
from django.utils.functional import curry
def just(*x, **kw):
    """Return the first positional argument if any were given, else the
    keyword-argument dict.

    The original ``len(x) and x[0] or kw`` wrongly fell through to ``kw``
    whenever the first positional argument was falsy (0, '', None, ...);
    the explicit conditional fixes that classic and/or pitfall.
    """
    return x[0] if x else kw
def call(fn):
    # Invoke a zero-argument callable (handy as a decorator or map function).
    return fn()
def swap(a, fn):
    # Reversed application: feed value `a` into callable `fn`.
    return fn(a)
def unpack_args(fn):
    """Adapt *fn* so it can be called with a single tuple of arguments."""
    def apply_unpacked(t):
        return fn(*t)
    return apply_unpacked
def throw(exception):
    # Raise from an expression context (usable inside lambdas).
    raise exception
def unidec(fnx):
    '''
    @unidec
    def render(view, request, flag=True):
        pass
    @render
    def first_view(request):
        pass
    @render(flag=False)
    def second_view(request):
        pass
    '''
    # Universal decorator-maker: bare usage (a single callable, no kwargs)
    # wraps immediately; otherwise the call is treated as configuration and
    # returns the real decorator. Positional configuration arguments are
    # rejected with a TypeError at call time.
    return lambda *ax, **kx: (
        wraps(ax[0])(lambda *ay, **ky: fnx(ax[0], *ay, **ky)) \
            if len(ax) == 1 and not kx and callable(ax[0]) else \
        lambda fny: wraps(fny)(lambda *ay, **ky: \
            fnx(fny, *ay, **dict(kx, **ky)) if not ax else throw(
                TypeError('wrapper get *args'))))
def pluck(l, k):
    """Extract key/attribute *k* from every element of *l*.

    Dict elements are looked up with ``.get``; for other objects a dotted
    path like ``'a.b'`` is followed via ``getattr`` (missing steps yield
    ``None``).
    """
    def extract(obj):
        if isinstance(obj, dict):
            return obj.get(k)
        target = obj
        for part in k.split('.'):
            target = getattr(target, part, None)
        return target
    return [extract(item) for item in l]
def first(seq):
    # First element of any iterable (materialises it; IndexError if empty).
    return list(seq)[0]
def select(i, l):
    """Circular indexing: return ``l[i]`` with wrap-around in both
    directions.

    The original ``l[i - len(l) * int(i / len(l))]`` rebuilt the remainder
    via float division (precision-unsafe for very large ``i``); integer
    modulo computes the same element exactly.
    """
    return l[i % len(l)]
def factual(l):
    # Drop falsy elements from the iterable.
    return list(filter(bool, l))
def separate(fn, lx):
    # Partition iterable `lx` by predicate `fn` using two tee'd copies:
    # yields [items where fn is falsy] then [items where fn is truthy].
    # Returns a lazy map object producing the two lists.
    return map(list, map(
        unpack_args(
            lambda i, ly: filter(
                lambda el: bool(i) == fn(el),
                ly)),
        enumerate(tee(lx, 2))))
def case(k, *dl, **kw):
    # Dict-based switch: look up `k` in an optional dict dl[0] merged with
    # keyword arguments (kw wins); returns None when `k` is absent.
    return dict(len(dl) and dl[0] or {}, **kw).get(k)
@unidec
def dict_copy(fn, d, *a):
    # Copy of dict `d` keeping only items for which fn(key, value, *a) holds.
    return {k: v for k, v in d.items() if fn(k, v, *a)}
@dict_copy
def pick(k, v, *a):
    # Keep only the listed keys.
    return k in a
@dict_copy
def omit(k, v, *a):
    # Drop the listed keys.
    return not k in a
@dict_copy
def truthful(k, v, *a):
    # Keep only items whose value is truthy.
    return bool(v)
def filter_dict(fn, d):
    # Functional alias: apply a (k, v, *a) predicate to dict `d` via dict_copy.
    return dict_copy(fn)(d)
def reduce_dict(fn, d):
    # Map fn(key, value) over the items; returns a lazy map object.
    return map(unpack_args(fn), d.items())
@unidec
def dict_map(fnx, fny, d):
    # Rebuild dict `d`, transforming each item with fnx(k, v, fny).
    return dict(reduce_dict(lambda k, v: fnx(k, v, fny), d))
@dict_map
def map_keys(k, v, fn):
    # Apply fn to each key, keeping values unchanged.
    return (fn(k), v)
@dict_map
def map_values(k, v, fn):
    # Apply fn to each value, keeping keys unchanged.
    return (k, fn(v))
def avg(*args):
    """Arithmetic mean of the given positional arguments.

    Uses the built-in ``sum`` instead of a hand-rolled ``reduce``.
    Calling with no arguments is an error (division by zero).
    """
    return sum(args) / len(args)
def random_str(length=6, source=digits):
    """Random string of *length* characters drawn (with repetition) from
    *source*. Not cryptographically secure (uses ``random.choice``)."""
    return ''.join(choice(source) for _ in range(length))
| {
"repo_name": "doctorzeb8/django-era",
"path": "era/utils/functools.py",
"copies": "1",
"size": "2291",
"license": "mit",
"hash": 5396153454546150000,
"line_mean": 19.6396396396,
"line_max": 75,
"alpha_frac": 0.5722391969,
"autogenerated": false,
"ratio": 2.7939024390243903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38661416359243905,
"avg_score": null,
"num_lines": null
} |
from functools import reduce, wraps
def foo1(*args):
    """Print each positional argument on its own line."""
    for value in args:
        print(value)
def foo2(**kwargs):
    """Print each keyword argument as ``name == value``."""
    for key, value in kwargs.items():
        print('{} == {}'.format(key, value))
def fibon(n):
    """Yield the first *n* Fibonacci numbers, starting 1, 2, 3, 5, ...."""
    a, b = 1, 2
    for _ in range(n):
        yield a
        a, b = b, a + b
def a_new_decorator(a_func):
    """Wrap zero-argument *a_func* so calls are bracketed by 'start'/'end'
    prints; the wrapped function's metadata is preserved via wraps."""
    def wrapTheFuncion():
        print('start')
        a_func()
        print('end')
    return wraps(a_func)(wrapTheFuncion)
def a_func():
    # Plain demo function wrapped below by a_new_decorator.
    print('this is a func')
@a_new_decorator
def a_func_dec():
    # Decorated demo: prints 'start', delegates to a_func, then 'end'.
    a_func()
def func_dec(f):
    """Decorator printing 'start' before and 'end' after the wrapped call.

    Note: the wrapped function's return value is discarded (the wrapper
    returns None), matching the original behaviour.
    """
    def decorated(*args, **kwargs):
        print('start')
        f(*args, **kwargs)
        print('end')
    return wraps(f)(decorated)
@func_dec
@func_dec
def func_be_dec(n):
    # Doubly decorated: 'start' printed twice before n, 'end' twice after.
    print(n)
if __name__ == '__main__':
    # Demo driver exercising the helpers above.
    l = range(4)
    d = dict(a=1, b=2, c=3, d=4)
    foo1(*l)  # unpack positionals
    foo2(**d)  # unpack keywords
    for i, v in enumerate(fibon(5)):
        print(i + 1, v)
    print([i * i for i in range(10)])
    print([i for i in range(-5, 5) if i < 0])
    print(reduce((lambda x, y: x * y), [1, 2, 3, 4]))
    print(1 if True else 2)
    dec = a_new_decorator(a_func)
    dec()
    a_func_dec()
    print(a_func_dec.__name__)  # 'a_func_dec' thanks to functools.wraps
    func_be_dec(1)
| {
"repo_name": "amozie/amozie",
"path": "studzie/interpy_pdf.py",
"copies": "1",
"size": "1238",
"license": "apache-2.0",
"hash": -1695817052362641700,
"line_mean": 16.1944444444,
"line_max": 53,
"alpha_frac": 0.5105008078,
"autogenerated": false,
"ratio": 2.845977011494253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8852392851973991,
"avg_score": 0.0008169934640522876,
"num_lines": 72
} |
from functools import singledispatch
from abc import ABC
import cgen as c
from devito.ir import (DummyEq, Call, Conditional, List, Prodder, ParallelIteration,
ParallelBlock, PointerCast, EntryFunction, LocalExpression)
from devito.mpi.distributed import MPICommObject
from devito.passes.iet.engine import iet_pass
from devito.symbolics import Byref, CondNe
from devito.tools import as_list
from devito.types import DeviceID, Symbol
__all__ = ['LangBB', 'LangTransformer']
class LangMeta(type):

    """
    Metaclass for class-level mappers.

    Classes created with this metaclass expose ``Cls[key]`` lookups into
    their class-level ``mapper`` dict.
    """

    mapper = {}

    def __getitem__(self, k):
        if k in self.mapper:
            return self.mapper[k]
        raise NotImplementedError("Missing required mapping for `%s`" % k)
class LangBB(object, metaclass=LangMeta):

    """
    Abstract base class for Language Building Blocks.

    Subclasses fill in the class-level ``mapper`` (via LangMeta) and
    override the ``_map_*`` hooks with language-specific data-movement
    constructs.
    """

    # NOTE: a subclass may want to override the values below, which represent
    # IET node types used in the various lowering and/or transformation passes
    Region = ParallelBlock
    HostIteration = ParallelIteration
    DeviceIteration = ParallelIteration
    Prodder = Prodder
    PointerCast = PointerCast

    @classmethod
    def _map_to(cls, f, imask=None, queueid=None):
        """
        Allocate and copy Function from host to device memory.
        """
        raise NotImplementedError

    @classmethod
    def _map_to_wait(cls, f, imask=None, queueid=None):
        """
        Allocate and copy Function from host to device memory and explicitly wait.
        """
        raise NotImplementedError

    @classmethod
    def _map_alloc(cls, f, imask=None):
        """
        Allocate Function in device memory.
        """
        raise NotImplementedError

    @classmethod
    def _map_present(cls, f, imask=None):
        """
        Explicitly flag Function as present in device memory.
        """
        raise NotImplementedError

    @classmethod
    def _map_update(cls, f):
        """
        Copy Function from device to host memory.
        """
        raise NotImplementedError

    @classmethod
    def _map_update_host(cls, f, imask=None, queueid=None):
        """
        Copy Function from device to host memory (alternative to _map_update).
        """
        raise NotImplementedError

    @classmethod
    def _map_update_wait_host(cls, f, imask=None, queueid=None):
        """
        Copy Function from device to host memory and explicitly wait.
        """
        raise NotImplementedError

    @classmethod
    def _map_update_device(cls, f, imask=None, queueid=None):
        """
        Copy Function from host to device memory.
        """
        raise NotImplementedError

    @classmethod
    def _map_update_wait_device(cls, f, imask=None, queueid=None):
        """
        Copy Function from host to device memory and explicitly wait.
        """
        raise NotImplementedError

    @classmethod
    def _map_release(cls, f, devicerm=None):
        """
        Release device pointer to a Function.
        """
        raise NotImplementedError

    @classmethod
    def _map_delete(cls, f, imask=None, devicerm=None):
        """
        Delete Function from device memory.
        """
        raise NotImplementedError
class LangTransformer(ABC):

    """
    Abstract base class defining a series of methods capable of specializing
    an IET for a certain target language (e.g., C, C+OpenMP).
    """

    lang = LangBB
    """
    The constructs of the target language. To be specialized by a subclass.
    """

    def __init__(self, key, sregistry, platform):
        """
        Parameters
        ----------
        key : callable, optional
            Return True if an Iteration can and should be parallelized, False otherwise.
        sregistry : SymbolRegistry
            The symbol registry, to access the symbols appearing in an IET.
        platform : Platform
            The underlying platform.
        """
        if key is not None:
            self.key = key
        else:
            # Default: nothing is considered parallelizable.
            self.key = lambda i: False
        self.sregistry = sregistry
        self.platform = platform

    @iet_pass
    def make_parallel(self, iet):
        """
        An `iet_pass` which transforms an IET for shared-memory parallelism.
        """
        # Base class: no-op; subclasses implement the actual transformation.
        return iet, {}

    @iet_pass
    def make_simd(self, iet):
        """
        An `iet_pass` which transforms an IET for SIMD parallelism.
        """
        return iet, {}

    @iet_pass
    def initialize(self, iet):
        """
        An `iet_pass` which transforms an IET such that the target language
        runtime is initialized.
        """
        return iet, {}

    # Convenience accessors re-exporting the language's IET node types.
    @property
    def Region(self):
        return self.lang.Region

    @property
    def HostIteration(self):
        return self.lang.HostIteration

    @property
    def DeviceIteration(self):
        return self.lang.DeviceIteration

    @property
    def Prodder(self):
        return self.lang.Prodder
class DeviceAwareMixin(object):

    @iet_pass
    def initialize(self, iet):
        """
        An `iet_pass` which transforms an IET such that the target language
        runtime is initialized.

        The initialization follows a pattern which is applicable to virtually
        any target language:

            1. Calling the init function (e.g., `acc_init(...)` for OpenACC)
            2. Assignment of the target device to a host thread or an MPI rank
            3. Introduction of user-level symbols (e.g., `deviceid` to allow
               users to select a specific device)

        Despite not all of these are applicable to all target languages, there
        is sufficient reuse to implement the logic as a single method.
        """

        @singledispatch
        def _initialize(iet):
            # Non-entry functions need no runtime initialization.
            return iet, {}

        @_initialize.register(EntryFunction)
        def _(iet):
            # TODO: we need to pick the rank from `comm_shm`, not `comm`,
            # so that we have nranks == ngpus (as long as the user has launched
            # the right number of MPI processes per node given the available
            # number of GPUs per node)

            # Find the MPI communicator among the kernel parameters, if any.
            objcomm = None
            for i in iet.parameters:
                if isinstance(i, MPICommObject):
                    objcomm = i
                    break

            devicetype = as_list(self.lang[self.platform])

            try:
                lang_init = [self.lang['init'](devicetype)]
            except TypeError:
                # Not all target languages need to be explicitly initialized
                lang_init = []

            # User-facing symbol; guarded below with `deviceid != -1`,
            # i.e. -1 means "unset, use the default assignment".
            deviceid = DeviceID()
            if objcomm is not None:
                rank = Symbol(name='rank')
                rank_decl = LocalExpression(DummyEq(rank, 0))
                rank_init = Call('MPI_Comm_rank', [objcomm, Byref(rank)])

                ngpus = Symbol(name='ngpus')
                call = self.lang['num-devices'](devicetype)
                ngpus_init = LocalExpression(DummyEq(ngpus, call))

                # Explicit deviceid wins; otherwise assign devices to MPI
                # ranks round-robin (rank % ngpus).
                osdd_then = self.lang['set-device']([deviceid] + devicetype)
                osdd_else = self.lang['set-device']([rank % ngpus] + devicetype)

                body = lang_init + [Conditional(
                    CondNe(deviceid, -1),
                    osdd_then,
                    List(body=[rank_decl, rank_init, ngpus_init, osdd_else]),
                )]

                header = c.Comment('Begin of %s+MPI setup' % self.lang['name'])
                footer = c.Comment('End of %s+MPI setup' % self.lang['name'])
            else:
                body = lang_init + [Conditional(
                    CondNe(deviceid, -1),
                    self.lang['set-device']([deviceid] + devicetype)
                )]

                header = c.Comment('Begin of %s setup' % self.lang['name'])
                footer = c.Comment('End of %s setup' % self.lang['name'])

            init = List(header=header, body=body, footer=(footer, c.Line()))
            iet = iet._rebuild(body=(init,) + iet.body)

            return iet, {'args': deviceid}

        return _initialize(iet)

    @iet_pass
    def make_gpudirect(self, iet):
        """
        An `iet_pass` which transforms an IET modifying all MPI Callables such
        that device pointers are used in place of host pointers, thus exploiting
        GPU-direct communication.
        """
        return iet, {}
| {
"repo_name": "opesci/devito",
"path": "devito/passes/iet/langbase.py",
"copies": "1",
"size": "8429",
"license": "mit",
"hash": -2506433137338446300,
"line_mean": 28.9964412811,
"line_max": 88,
"alpha_frac": 0.5842923241,
"autogenerated": false,
"ratio": 4.335905349794238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00025547474912951157,
"num_lines": 281
} |
from functools import singledispatch
from llvmlite import ir
from numba.core import types, cgutils
from numba.core.imputils import Registry
from numba.cuda import nvvmutils
# Lowering registry for CUDA print support; `lower` registers implementations.
registry = Registry()
lower = registry.lower

# Generic i8* pointer type used to pass the vprintf argument buffer.
voidptr = ir.PointerType(ir.IntType(8))
# NOTE: we don't use @lower here since print_item() doesn't return a LLVM value
@singledispatch
def print_item(ty, context, builder, val):
    """
    Handle printing of a single value of the given Numba type.

    A (format string, [list of arguments]) is returned that will allow
    forming the final printf()-like call.

    Concrete types are handled by the registered overloads below; any
    unregistered type falls through to this default and raises.
    """
    raise NotImplementedError("printing unimplemented for values of type %s"
                              % (ty,))
@print_item.register(types.Integer)
@print_item.register(types.IntegerLiteral)
def int_print_impl(ty, context, builder, val):
    """Format an integer for vprintf, widening it to 64 bits first."""
    is_unsigned = ty in types.unsigned_domain
    rawfmt = "%llu" if is_unsigned else "%lld"
    dsttype = types.uint64 if is_unsigned else types.int64
    widened = context.cast(builder, val, ty, dsttype)
    return rawfmt, [widened]
@print_item.register(types.Float)
def real_print_impl(ty, context, builder, val):
    # Floats are widened to float64 and printed with %f.
    lld = context.cast(builder, val, ty, types.float64)
    return "%f", [lld]
@print_item.register(types.StringLiteral)
def const_print_impl(ty, context, builder, sigval):
    # Compile-time string literal: emit it as a device-memory constant.
    pyval = ty.literal_value
    assert isinstance(pyval, str)  # Ensured by lowering
    rawfmt = "%s"
    val = context.insert_string_const_addrspace(builder, pyval)
    return rawfmt, [val]
@lower(print, types.VarArg(types.Any))
def print_varargs(context, builder, sig, args):
    """This function is a generic 'print' wrapper for arbitrary types.
    It dispatches to the appropriate 'print' implementations above
    depending on the detected real types in the signature."""
    # Build one printf-style format string and the flat argument list.
    # (The original iterated with enumerate but never used the index, and
    # declared vprint twice — the first declaration was dead.)
    formats = []
    values = []
    for argtype, argval in zip(sig.args, args):
        argfmt, argvals = print_item(argtype, context, builder, argval)
        formats.append(argfmt)
        values.extend(argvals)

    rawfmt = " ".join(formats) + "\n"
    fmt = context.insert_string_const_addrspace(builder, rawfmt)

    # Pack all arguments into an anonymous struct and hand its address to
    # the CUDA vprintf declaration.
    array = cgutils.make_anonymous_struct(builder, values)
    arrayptr = cgutils.alloca_once_value(builder, array)
    vprint = nvvmutils.declare_vprint(builder.module)
    builder.call(vprint, (fmt, builder.bitcast(arrayptr, voidptr)))

    return context.get_dummy_value()
| {
"repo_name": "stonebig/numba",
"path": "numba/cuda/printimpl.py",
"copies": "5",
"size": "2478",
"license": "bsd-2-clause",
"hash": -7359692120917400000,
"line_mean": 30.7692307692,
"line_max": 79,
"alpha_frac": 0.691283293,
"autogenerated": false,
"ratio": 3.504950495049505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 78
} |
from functools import singledispatch
from llvmlite.llvmpy.core import Type
from numba.core import types, cgutils
from numba.core.imputils import Registry
from numba.cuda import nvvmutils
# Lowering registry for CUDA print support; `lower` registers implementations.
registry = Registry()
lower = registry.lower

# Generic i8* pointer type for the vprintf argument buffer.
voidptr = Type.pointer(Type.int(8))
# NOTE: we don't use @lower here since print_item() doesn't return a LLVM value
@singledispatch
def print_item(ty, context, builder, val):
    """
    Handle printing of a single value of the given Numba type.

    A (format string, [list of arguments]) is returned that will allow
    forming the final printf()-like call.

    Concrete types are handled by the registered overloads below; any
    unregistered type falls through here and raises.
    """
    raise NotImplementedError("printing unimplemented for values of type %s"
                              % (ty,))
@print_item.register(types.Integer)
@print_item.register(types.IntegerLiteral)
def int_print_impl(ty, context, builder, val):
    # Widen to 64 bits and pick the signed/unsigned printf conversion.
    if ty in types.unsigned_domain:
        rawfmt = "%llu"
        dsttype = types.uint64
    else:
        rawfmt = "%lld"
        dsttype = types.int64
    lld = context.cast(builder, val, ty, dsttype)
    return rawfmt, [lld]
@print_item.register(types.Float)
def real_print_impl(ty, context, builder, val):
    # Floats are widened to float64 and printed with %f.
    lld = context.cast(builder, val, ty, types.float64)
    return "%f", [lld]
@print_item.register(types.StringLiteral)
def const_print_impl(ty, context, builder, sigval):
    # Compile-time string literal: emit it as a device-memory constant.
    pyval = ty.literal_value
    assert isinstance(pyval, str)  # Ensured by lowering
    rawfmt = "%s"
    val = context.insert_string_const_addrspace(builder, pyval)
    return rawfmt, [val]
@lower(print, types.VarArg(types.Any))
def print_varargs(context, builder, sig, args):
    """This function is a generic 'print' wrapper for arbitrary types.
    It dispatches to the appropriate 'print' implementations above
    depending on the detected real types in the signature."""
    # Build one printf-style format string and the flat argument list.
    # (The original iterated with enumerate but never used the index, and
    # declared vprint twice — the first declaration was dead.)
    formats = []
    values = []
    for argtype, argval in zip(sig.args, args):
        argfmt, argvals = print_item(argtype, context, builder, argval)
        formats.append(argfmt)
        values.extend(argvals)

    rawfmt = " ".join(formats) + "\n"
    fmt = context.insert_string_const_addrspace(builder, rawfmt)

    # Pack all arguments into an anonymous struct and hand its address to
    # the CUDA vprintf declaration.
    array = cgutils.make_anonymous_struct(builder, values)
    arrayptr = cgutils.alloca_once_value(builder, array)
    vprint = nvvmutils.declare_vprint(builder.module)
    builder.call(vprint, (fmt, builder.bitcast(arrayptr, voidptr)))

    return context.get_dummy_value()
| {
"repo_name": "seibert/numba",
"path": "numba/cuda/printimpl.py",
"copies": "1",
"size": "2488",
"license": "bsd-2-clause",
"hash": 3040421818345085400,
"line_mean": 30.8974358974,
"line_max": 79,
"alpha_frac": 0.6917202572,
"autogenerated": false,
"ratio": 3.49438202247191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.468610227967191,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch
from numbers import Integral
import numpy as np
from numba import njit
from scipy import sparse
@singledispatch
def is_constant(a, axis=None) -> np.ndarray:
    """
    Check whether values in array are constant.

    Params
    ------
    a
        Array to check
    axis
        Axis to reduce over.

    Returns
    -------
    Boolean array, True values were constant.

    Example
    -------
    >>> a = np.array([[0, 1], [0, 0]])
    >>> a
    array([[0, 1],
           [0, 0]])
    >>> is_constant(a)
    False
    >>> is_constant(a, axis=0)
    array([ False,  True])
    >>> is_constant(a, axis=1)
    array([ True, False])
    """
    # Dense/sparse implementations are registered below via
    # @is_constant.register; unregistered types land here.
    raise NotImplementedError()
@is_constant.register(np.ndarray)
def _(a, axis=None):
    # Should eventually support nd, not now.
    if axis is None:
        # Whole-array check: every entry equals the first one.
        return np.array_equal(a, a.flat[0])
    if not isinstance(axis, Integral):
        raise TypeError("axis must be integer or None.")
    assert axis in (0, 1)
    # axis=0 reduces over rows (check columns): transpose so the row-wise
    # helper does the work in both cases.
    return _is_constant_rows(a.T if axis == 0 else a)
def _is_constant_rows(a):
b = np.broadcast_to(a[:, 0][:, np.newaxis], a.shape)
return (a == b).all(axis=1)
@is_constant.register(sparse.csr_matrix)
def _(a, axis=None):
    # CSR implementation of `is_constant`.
    if axis is None:
        # Fully dense storage: just check the stored values; otherwise the
        # matrix has implicit zeros and can only be constant if every
        # stored value is 0.
        if len(a.data) == np.multiply(*a.shape):
            return is_constant(a.data)
        else:
            return (a.data == 0).all()
    if not isinstance(axis, Integral):
        raise TypeError("axis must be integer or None.")
    assert axis in (0, 1)
    if axis == 1:
        return _is_constant_csr_rows(a.data, a.indices, a.indptr, a.shape)
    elif axis == 0:
        # Reduce over rows: transpose so columns become CSR rows.
        a = a.T.tocsr()
        return _is_constant_csr_rows(a.data, a.indices, a.indptr, a.shape)
@njit
def _is_constant_csr_rows(data, indices, indptr, shape):
    # Numba-compiled kernel: per-row constancy on raw CSR components.
    # A row with nnz == ncols must have all stored values equal to its
    # first stored value; a partially-stored row has implicit zeros, so it
    # is constant only if every stored value is 0.
    # (`indices` is unused but kept for the standard CSR call signature.)
    N = len(indptr) - 1
    result = np.ones(N, dtype=np.bool_)
    for i in range(N):
        start = indptr[i]
        stop = indptr[i + 1]
        if stop - start == shape[1]:
            val = data[start]
        else:
            val = 0
        for j in range(start, stop):
            if data[j] != val:
                result[i] = False
                break
    return result
@is_constant.register(sparse.csc_matrix)
def _(a, axis=None):
    # CSC implementation of `is_constant`. CSC stores columns contiguously,
    # so the CSR row kernel is reused with the shape reversed.
    if axis is None:
        if len(a.data) == np.multiply(*a.shape):
            return is_constant(a.data)
        else:
            return (a.data == 0).all()
    if not isinstance(axis, Integral):
        raise TypeError("axis must be integer or None.")
    assert axis in (0, 1)
    if axis == 0:
        # Reducing over rows == per-column check == CSC's natural layout.
        return _is_constant_csr_rows(a.data, a.indices, a.indptr, a.shape[::-1])
    elif axis == 1:
        a = a.T.tocsc()
        return _is_constant_csr_rows(a.data, a.indices, a.indptr, a.shape[::-1])
| {
"repo_name": "theislab/scanpy",
"path": "scanpy/_utils/compute/is_constant.py",
"copies": "1",
"size": "2817",
"license": "bsd-3-clause",
"hash": 6770869195300949000,
"line_mean": 24.3783783784,
"line_max": 80,
"alpha_frac": 0.5583954562,
"autogenerated": false,
"ratio": 3.3696172248803826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4428012681080382,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch
from os import PathLike
from pathlib import Path
from typing import Optional, Union, Iterator
import h5py
from . import anndata
from .sparse_dataset import SparseDataset
from ..compat import Literal, ZarrArray
class AnnDataFileManager:
    """Backing file manager for AnnData.

    Wraps an ``h5py.File`` that backs an :class:`AnnData` object, exposing
    dict-like access to the file's groups/datasets and open/close lifecycle
    management.
    """
    def __init__(
        self,
        adata: "anndata.AnnData",
        filename: Optional[PathLike] = None,
        filemode: Optional[Literal["r", "r+"]] = None,
    ):
        # The AnnData object whose data lives in the backing file.
        self._adata = adata
        # Goes through the `filename` property setter below (normalizes to Path).
        self.filename = filename
        self._filemode = filemode
        self._file = None
        # Only open immediately when a filename was actually provided.
        if filename:
            self.open()
    def __repr__(self) -> str:
        if self.filename is None:
            return "Backing file manager: no file is set."
        else:
            return f"Backing file manager of file {self.filename}."
    def __contains__(self, x) -> bool:
        # Delegates membership tests to the underlying h5py file.
        return x in self._file
    def __iter__(self) -> Iterator[str]:
        return iter(self._file)
    def __getitem__(self, key: str) -> Union[h5py.Group, h5py.Dataset, SparseDataset]:
        return self._file[key]
    def __setitem__(
        self, key: str, value: Union[h5py.Group, h5py.Dataset, SparseDataset]
    ):
        self._file[key] = value
    def __delitem__(self, key: str):
        del self._file[key]
    @property
    def filename(self) -> Path:
        return self._filename
    @filename.setter
    def filename(self, filename: Optional[PathLike]):
        # Normalize to pathlib.Path; None means "no backing file".
        self._filename = None if filename is None else Path(filename)
    def open(
        self,
        filename: Optional[PathLike] = None,
        filemode: Optional[Literal["r", "r+"]] = None,
    ):
        """(Re)open the backing file, optionally changing file and/or mode."""
        if filename is not None:
            self.filename = filename
        if filemode is not None:
            self._filemode = filemode
        if self.filename is None:
            raise ValueError("Cannot open backing file if backing not initialized.")
        self._file = h5py.File(self.filename, self._filemode)
    def close(self):
        """Close the backing file, remember filename, do *not* change to memory mode."""
        if self._file is not None:
            self._file.close()
    def _to_memory_mode(self):
        """Close the backing file, forget filename, *do* change to memory mode."""
        # NOTE(review): due to name mangling inside this class, `.__X` here
        # resolves to `_AnnDataFileManager__X` on the adata object, not
        # `_AnnData__X` — confirm this is the intended attribute.
        self._adata.__X = self._adata.X[()]
        self._file.close()
        self._file = None
        self._filename = None
    @property
    def is_open(self) -> bool:
        """State of backing file."""
        if self._file is None:
            return False
        # try accessing the id attribute to see if the file is open
        return bool(self._file.id)
@singledispatch
def to_memory(x):
    """Permissively convert objects to an in-memory representation.

    Objects that are already in memory (or simply unrecognized) are passed
    through as a copy.
    """
    copied = x.copy()
    return copied
@to_memory.register(ZarrArray)
@to_memory.register(h5py.Dataset)
def _(x):
    # A full-slice read materializes the on-disk array in memory.
    loaded = x[...]
    return loaded
@to_memory.register(SparseDataset)
def _(x: SparseDataset):
    # Backed sparse datasets know how to load themselves.
    in_memory = x.to_memory()
    return in_memory
| {
"repo_name": "theislab/anndata",
"path": "anndata/_core/file_backing.py",
"copies": "1",
"size": "3074",
"license": "bsd-3-clause",
"hash": 3508439657661030000,
"line_mean": 26.6936936937,
"line_max": 88,
"alpha_frac": 0.6060507482,
"autogenerated": false,
"ratio": 3.925925925925926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5031976674125926,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch
from typing import Callable, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
@singledispatch
def permutate(y1, y2, cost_func: Optional[Callable] = None, returns_cost: bool = False):
    """Find cost-minimizing permutation
    Parameters
    ----------
    y1 : np.ndarray or torch.Tensor
        (batch_size, num_samples, num_classes_1)
    y2 : np.ndarray or torch.Tensor
        (num_samples, num_classes_2) or (batch_size, num_samples, num_classes_2)
    cost_func : callable
        Takes two (num_samples, num_classes) sequences and returns (num_classes, ) pairwise cost.
        Defaults to computing mean squared error.
    returns_cost : bool, optional
        Whether to return cost matrix. Defaults to False.
    Returns
    -------
    permutated_y2 : np.ndarray or torch.Tensor
        (batch_size, num_samples, num_classes_1)
    permutations : list of tuple
        List of permutations so that permutation[i] == j indicates that jth speaker of y2
        should be mapped to ith speaker of y1.
    cost : np.ndarray or torch.Tensor, optional
        (batch_size, num_classes_1, num_classes_2)
    """
    # Fallback for unregistered input types. Previously this raised a bare
    # TypeError() with no message; include the offending type for debugging.
    raise TypeError(
        f"Expected np.ndarray or torch.Tensor inputs, got {type(y1).__name__}."
    )
def mse_cost_func(Y, y):
    """Per-class mean squared error between two (num_samples, num_classes) tensors."""
    pointwise_error = F.mse_loss(Y, y, reduction="none")
    return pointwise_error.mean(dim=0)
@permutate.register
def permutate_torch(
    y1: torch.Tensor,
    y2: torch.Tensor,
    cost_func: Optional[Callable] = None,
    returns_cost: bool = False,
) -> Tuple[torch.Tensor, List[Tuple[int]]]:
    # Torch implementation: for each batch item, build the pairwise cost
    # matrix between y1's classes and y2's classes, then solve the linear
    # assignment problem to find the cost-minimizing class permutation.
    batch_size, num_samples, num_classes_1 = y1.shape
    if len(y2.shape) == 2:
        # A single (num_samples, num_classes_2) y2 is broadcast over the batch.
        y2 = y2.expand(batch_size, -1, -1)
    if len(y2.shape) != 3:
        msg = "Incorrect shape: should be (batch_size, num_frames, num_classes)."
        raise ValueError(msg)
    batch_size_, num_samples_, num_classes_2 = y2.shape
    if batch_size != batch_size_ or num_samples != num_samples_:
        msg = f"Shape mismatch: {tuple(y1.shape)} vs. {tuple(y2.shape)}."
        raise ValueError(msg)
    if cost_func is None:
        cost_func = mse_cost_func
    permutations = []
    # NOTE: this empty list is immediately replaced by the preallocated
    # tensor two statements below.
    permutated_y2 = []
    if returns_cost:
        costs = []
    permutated_y2 = torch.zeros(y1.shape, device=y2.device, dtype=y2.dtype)
    for b, (y1_, y2_) in enumerate(zip(y1, y2)):
        # cost[i] = per-class cost of matching each y2 class against y1 class i.
        with torch.no_grad():
            cost = torch.stack(
                [
                    cost_func(y2_, y1_[:, i : i + 1].expand(-1, num_classes_2))
                    for i in range(num_classes_1)
                ],
            )
        if num_classes_2 > num_classes_1:
            # Pad with rows costlier than any real entry so the assignment
            # solver can run; assignments to padded rows are discarded below.
            padded_cost = F.pad(
                cost,
                (0, 0, 0, num_classes_2 - num_classes_1),
                "constant",
                torch.max(cost) + 1,
            )
        else:
            padded_cost = cost
        permutation = [None] * num_classes_1
        for k1, k2 in zip(*linear_sum_assignment(padded_cost.cpu())):
            # Drop assignments that landed on padded (dummy) rows.
            if k1 < num_classes_1:
                permutation[k1] = k2
                permutated_y2[b, :, k1] = y2_[:, k2]
        permutations.append(tuple(permutation))
        if returns_cost:
            costs.append(cost)
    if returns_cost:
        return permutated_y2, permutations, torch.stack(costs)
    return permutated_y2, permutations
@permutate.register
def permutate_numpy(
    y1: np.ndarray,
    y2: np.ndarray,
    cost_func: Optional[Callable] = None,
    returns_cost: bool = False,
) -> Tuple[np.ndarray, List[Tuple[int]]]:
    """NumPy front-end: delegate to the torch implementation, convert results back."""
    torch_output = permutate(
        torch.from_numpy(y1),
        torch.from_numpy(y2),
        cost_func=cost_func,
        returns_cost=returns_cost,
    )
    if not returns_cost:
        permutated_y2, permutations = torch_output
        return permutated_y2.numpy(), permutations
    permutated_y2, permutations, costs = torch_output
    return permutated_y2.numpy(), permutations, costs.numpy()
| {
"repo_name": "pyannote/pyannote-audio",
"path": "pyannote/audio/utils/permutation.py",
"copies": "1",
"size": "3909",
"license": "mit",
"hash": 1529482963346174000,
"line_mean": 28.8396946565,
"line_max": 97,
"alpha_frac": 0.6001534919,
"autogenerated": false,
"ratio": 3.45929203539823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45594455272982304,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch
from typing import Dict
from .expressions import (
Expression, Operation, Wildcard, AssociativeOperation, CommutativeOperation, SymbolWildcard, Pattern, OneIdentityOperation
)
__all__ = [
'is_constant', 'is_syntactic', 'get_head', 'match_head', 'preorder_iter', 'preorder_iter_with_position',
'is_anonymous', 'contains_variables_from_set', 'create_operation_expression',
'rename_variables', 'op_iter', 'op_len', 'get_variables'
]
def is_constant(expression):
    """Check if the given expression is constant, i.e. it does not contain Wildcards."""
    if isinstance(expression, Wildcard):
        return False
    if isinstance(expression, Expression):
        return expression.is_constant
    if not isinstance(expression, Operation):
        # Anything else (symbols, native values) has no wildcards.
        return True
    return all(is_constant(operand) for operand in op_iter(expression))
def is_syntactic(expression):
    """
    Check if the given expression is syntactic, i.e. it does not contain sequence wildcards or
    associative/commutative operations.
    """
    if isinstance(expression, Wildcard):
        return expression.fixed_size
    if isinstance(expression, Expression):
        return expression.is_syntactic
    if isinstance(expression, (AssociativeOperation, CommutativeOperation)):
        return False
    if not isinstance(expression, Operation):
        return True
    return all(is_syntactic(operand) for operand in op_iter(expression))
def get_head(expression):
    """Returns the given expression's head."""
    if not isinstance(expression, Wildcard):
        return type(expression)
    # Symbol wildcards constrain the head; plain wildcards match any head.
    if isinstance(expression, SymbolWildcard):
        return expression.symbol_type
    return None
def match_head(subject, pattern):
    """Checks if the head of subject matches the pattern's head."""
    if isinstance(pattern, Pattern):
        pattern = pattern.expression
    head_of_pattern = get_head(pattern)
    # A None head (plain wildcard) matches anything; one-identity operations
    # may collapse, so they match any head as well.
    if head_of_pattern is None or issubclass(head_of_pattern, OneIdentityOperation):
        return True
    head_of_subject = get_head(subject)
    assert head_of_subject is not None
    return issubclass(head_of_subject, head_of_pattern)
def preorder_iter(expression):
    """Iterate over the expression in preorder."""
    yield expression
    if isinstance(expression, Operation):
        for operand in op_iter(expression):
            for subexpression in preorder_iter(operand):
                yield subexpression
def preorder_iter_with_position(expression):
    """Iterate over the expression in preorder.
    Also yields the position of each subexpression.
    """
    yield expression, ()
    if isinstance(expression, Operation):
        for index, operand in enumerate(op_iter(expression)):
            # Prefix each child position with the operand's index.
            for subexpression, position in preorder_iter_with_position(operand):
                yield subexpression, (index, ) + position
def is_anonymous(expression):
    """Returns True iff the expression does not contain any variables."""
    # A truthy variable_name means this node itself is a variable.
    if getattr(expression, 'variable_name', None):
        return False
    if not isinstance(expression, Operation):
        return True
    return all(is_anonymous(operand) for operand in op_iter(expression))
def contains_variables_from_set(expression, variables):
    """Returns True iff the expression contains any of the variables from the given set."""
    if hasattr(expression, 'variable_name') and expression.variable_name in variables:
        return True
    if isinstance(expression, Operation):
        for operand in op_iter(expression):
            if contains_variables_from_set(operand, variables):
                return True
    return False
def get_variables(expression, variables=None):
    """Returns the set of variable names in the given expression."""
    if variables is None:
        variables = set()
    name = getattr(expression, 'variable_name', None)
    if name is not None:
        variables.add(name)
    if isinstance(expression, Operation):
        for operand in op_iter(expression):
            # Accumulate into the shared set rather than merging results.
            get_variables(operand, variables)
    return variables
def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression:
    """Rename the variables in the expression according to the given dictionary.
    Args:
        expression:
            The expression in which the variables are renamed.
        renaming:
            The renaming dictionary. Maps old variable names to new ones.
            Variable names not occurring in the dictionary are left unchanged.
    Returns:
        The expression with renamed variables.
    """
    if isinstance(expression, Operation):
        renamed_operands = [rename_variables(o, renaming) for o in op_iter(expression)]
        if hasattr(expression, 'variable_name'):
            variable_name = renaming.get(expression.variable_name, expression.variable_name)
            return create_operation_expression(
                expression, renamed_operands, variable_name=variable_name
            )
        return create_operation_expression(expression, renamed_operands)
    elif isinstance(expression, Expression):
        expression = expression.__copy__()
        expression.variable_name = renaming.get(expression.variable_name, expression.variable_name)
    # Fix: previously inputs that were neither Operation nor Expression
    # (e.g. native values) fell through and the function returned None;
    # return them unchanged instead.
    return expression
@singledispatch
def create_operation_expression(old_operation, new_operands, variable_name=True):
    """Rebuild an operation of the same type as *old_operation* with *new_operands*.

    If *variable_name* is True (the default), the old operation's variable name
    (if any) is carried over; if False, no variable name is passed to the
    constructor; otherwise the given name is used.
    """
    if variable_name is True:
        variable_name = getattr(old_operation, 'variable_name', None)
    # Fix: `operation` was previously used without being defined, causing a
    # NameError on the variable_name=False path.
    operation = type(old_operation)
    if variable_name is False:
        return operation(*new_operands)
    return operation(*new_operands, variable_name=variable_name)
@create_operation_expression.register(list)
@create_operation_expression.register(tuple)
@create_operation_expression.register(set)
@create_operation_expression.register(frozenset)
@create_operation_expression.register(dict)
def _(old_operation, new_operands, variable_name=True):
    # Native containers carry no variable name: just rebuild the same
    # container type from the new operands.
    container_type = type(old_operation)
    return container_type(new_operands)
@singledispatch
def op_iter(operation):
    """Iterate over an operation's operands."""
    return iter(operation)
@op_iter.register(dict)
def _(operation):
    # Dict-based operations iterate as (key, value) pairs.
    return iter(operation.items())
@singledispatch
def op_len(operation):
    """Return the number of operands of an operation."""
    return len(operation)
| {
"repo_name": "wheerd/patternmatcher",
"path": "matchpy/expressions/functions.py",
"copies": "2",
"size": "6078",
"license": "mit",
"hash": -5422208405158384000,
"line_mean": 34.3372093023,
"line_max": 126,
"alpha_frac": 0.7025337282,
"autogenerated": false,
"ratio": 4.31063829787234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.601317202607234,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch
from typing import List, Optional
import strawberry
from django import forms
from django.core.exceptions import ImproperlyConfigured
from strawberry.types.datetime import Date, DateTime, Time
@singledispatch
def convert_form_field(field):
    """Fallback converter: raised when no handler is registered for the field type."""
    raise ImproperlyConfigured(
        f"Don't know how to convert the Django form field {field} "
        f"({field.__class__}) to type"
    )
def type_or_optional_wrapped(type_, required):
    """Return ``type_`` unchanged for required fields, otherwise ``Optional[type_]``."""
    return type_ if required else Optional[type_]
@convert_form_field.register(forms.fields.BaseTemporalField)
@convert_form_field.register(forms.CharField)
@convert_form_field.register(forms.EmailField)
@convert_form_field.register(forms.SlugField)
@convert_form_field.register(forms.URLField)
@convert_form_field.register(forms.ChoiceField)
@convert_form_field.register(forms.RegexField)
@convert_form_field.register(forms.Field)
def convert_form_field_to_string(field):
    """Map text-like Django form fields to a GraphQL string input field."""
    gql_type = type_or_optional_wrapped(str, field.required)
    meta = strawberry.field(description=field.help_text, is_input=True)
    return gql_type, meta
@convert_form_field.register(forms.UUIDField)
def convert_form_field_to_uuid(field):
    """Map a Django UUIDField to a GraphQL string input field."""
    gql_type = type_or_optional_wrapped(str, field.required)
    meta = strawberry.field(description=field.help_text, is_input=True)
    return gql_type, meta
@convert_form_field.register(forms.IntegerField)
@convert_form_field.register(forms.NumberInput)
def convert_form_field_to_int(field):
    """Map a Django IntegerField (or NumberInput) to a GraphQL int input field."""
    gql_type = type_or_optional_wrapped(int, field.required)
    meta = strawberry.field(description=field.help_text, is_input=True)
    return gql_type, meta
@convert_form_field.register(forms.BooleanField)
def convert_form_field_to_boolean(field):
    """Map a Django BooleanField to a (non-optional) GraphQL boolean input field."""
    meta = strawberry.field(description=field.help_text, is_input=True)
    return bool, meta
@convert_form_field.register(forms.NullBooleanField)
def convert_form_field_to_nullboolean(field):
    """Map a Django NullBooleanField to an always-optional GraphQL boolean."""
    meta = strawberry.field(description=field.help_text, is_input=True)
    return Optional[bool], meta
@convert_form_field.register(forms.DecimalField)
@convert_form_field.register(forms.FloatField)
def convert_form_field_to_float(field):
    """Map Django decimal/float fields to a GraphQL float input field."""
    gql_type = type_or_optional_wrapped(float, field.required)
    meta = strawberry.field(description=field.help_text, is_input=True)
    return gql_type, meta
@convert_form_field.register(forms.ModelMultipleChoiceField)
def convert_form_field_to_list(field):
    """Map a Django ModelMultipleChoiceField to a list of GraphQL IDs."""
    gql_type = type_or_optional_wrapped(List[strawberry.ID], field.required)
    meta = strawberry.field(description=field.help_text, is_input=True)
    return gql_type, meta
@convert_form_field.register(forms.DateField)
def convert_form_field_to_date(field):
    """Map a Django DateField to the GraphQL Date scalar."""
    gql_type = type_or_optional_wrapped(Date, field.required)
    meta = strawberry.field(description=field.help_text, is_input=True)
    return gql_type, meta
@convert_form_field.register(forms.DateTimeField)
def convert_form_field_to_datetime(field):
    """Map a Django DateTimeField to the GraphQL DateTime scalar."""
    gql_type = type_or_optional_wrapped(DateTime, field.required)
    meta = strawberry.field(description=field.help_text, is_input=True)
    return gql_type, meta
@convert_form_field.register(forms.TimeField)
def convert_form_field_to_time(field):
    """Map a Django TimeField to the GraphQL Time scalar."""
    gql_type = type_or_optional_wrapped(Time, field.required)
    meta = strawberry.field(description=field.help_text, is_input=True)
    return gql_type, meta
@convert_form_field.register(forms.ModelChoiceField)
def convert_form_field_to_id(field):
    """Map a Django ModelChoiceField to a GraphQL ID input field."""
    gql_type = type_or_optional_wrapped(strawberry.ID, field.required)
    meta = strawberry.field(description=field.help_text, is_input=True)
    return gql_type, meta
| {
"repo_name": "patrick91/pycon",
"path": "backend/strawberry_forms/converter.py",
"copies": "1",
"size": "3512",
"license": "mit",
"hash": -1175322615337260800,
"line_mean": 29.275862069,
"line_max": 79,
"alpha_frac": 0.7269362187,
"autogenerated": false,
"ratio": 3.38996138996139,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.461689760866139,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch
from typing import Optional, Union
import warnings
from anndata import AnnData
from scanpy.get import _get_obs_rep
import numba
import numpy as np
import pandas as pd
from scipy import sparse
@singledispatch
def gearys_c(
    adata: AnnData,
    *,
    vals: Optional[Union[np.ndarray, sparse.spmatrix]] = None,
    use_graph: Optional[str] = None,
    layer: Optional[str] = None,
    obsm: Optional[str] = None,
    obsp: Optional[str] = None,
    use_raw: bool = False,
) -> Union[np.ndarray, float]:
    r"""
    Calculate `Geary's C <https://en.wikipedia.org/wiki/Geary's_C>`_, as used
    by `VISION <https://doi.org/10.1038/s41467-019-12235-0>`_.
    Geary's C is a measure of autocorrelation for some measure on a graph. This
    can be to whether measures are correlated between neighboring cells. Lower
    values indicate greater correlation.
    .. math::
        C =
        \frac{
            (N - 1)\sum_{i,j} w_{i,j} (x_i - x_j)^2
        }{
            2W \sum_i (x_i - \bar{x})^2
        }
    Params
    ------
    adata
    vals
        Values to calculate Geary's C for. If this is two dimensional, should
        be of shape `(n_features, n_cells)`. Otherwise should be of shape
        `(n_cells,)`. This matrix can be selected from elements of the anndata
        object by using key word arguments: `layer`, `obsm`, `obsp`, or
        `use_raw`.
    use_graph
        Key to use for graph in anndata object. If not provided, default
        neighbors connectivities will be used instead.
    layer
        Key for `adata.layers` to choose `vals`.
    obsm
        Key for `adata.obsm` to choose `vals`.
    obsp
        Key for `adata.obsp` to choose `vals`.
    use_raw
        Whether to use `adata.raw.X` for `vals`.
    This function can also be called on the graph and values directly. In this case
    the signature looks like:
    Params
    ------
    g
        The graph
    vals
        The values
    See the examples for more info.
    Returns
    -------
    If vals is two dimensional, returns a 1 dimensional ndarray array. Returns
    a scalar if `vals` is 1d.
    Examples
    --------
    Calculate Gearys C for each components of a dimensionality reduction:
    .. code:: python
        import scanpy as sc, numpy as np
        pbmc = sc.datasets.pbmc68k_processed()
        pc_c = sc.metrics.gearys_c(pbmc, obsm="X_pca")
    It's equivalent to call the function directly on the underlying arrays:
    .. code:: python
        alt = sc.metrics.gearys_c(pbmc.obsp["connectivities"], pbmc.obsm["X_pca"].T)
        np.testing.assert_array_equal(pc_c, alt)
    """
    if use_graph is None:
        # Fix for anndata<0.7
        if hasattr(adata, "obsp") and "connectivities" in adata.obsp:
            g = adata.obsp["connectivities"]
        elif "neighbors" in adata.uns:
            g = adata.uns["neighbors"]["connectivities"]
        else:
            raise ValueError("Must run neighbors first.")
    else:
        raise NotImplementedError()
    if vals is None:
        # Pull the requested representation and orient it (n_features, n_cells).
        vals = _get_obs_rep(adata, use_raw=use_raw, layer=layer, obsm=obsm, obsp=obsp).T
    # Re-dispatch on the graph type (csr_matrix implementation registered below).
    return gearys_c(g, vals)
###############################################################################
# Calculation
###############################################################################
# Some notes on the implementation:
# * This could be phrased as tensor multiplication. However that does not get
# parallelized, which boosts performance almost linearly with cores.
# * Due to the umap setting the default threading backend, a parallel numba
# function that calls another parallel numba function can get stuck. This
# ends up meaning code re-use will be limited until umap 0.4.
# See: https://github.com/lmcinnes/umap/issues/306
# * There can be a fair amount of numerical instability here (big reductions),
# so data is cast to float64. Removing these casts/ conversion will cause the
# tests to fail.
@numba.njit(cache=True, parallel=True)
def _gearys_c_vec(data, indices, indptr, x):
    # Geary's C of a dense vector x over the CSR graph (data, indices, indptr).
    # Computes the total edge weight W here and delegates to the W-taking kernel.
    W = data.sum()
    return _gearys_c_vec_W(data, indices, indptr, x, W)
@numba.njit(cache=True, parallel=True)
def _gearys_c_vec_W(data, indices, indptr, x, W):
    # Geary's C of a dense vector x over a CSR graph, given precomputed
    # total edge weight W.
    N = len(indptr) - 1
    # Cast to float64: the large reductions below are numerically unstable
    # in lower precision.
    x = x.astype(np.float_)
    x_bar = x.mean()
    total = 0.0
    # Weighted squared differences between each node and its neighbors,
    # accumulated in parallel across nodes.
    for i in numba.prange(N):
        s = slice(indptr[i], indptr[i + 1])
        i_indices = indices[s]
        i_data = data[s]
        total += np.sum(i_data * ((x[i] - x[i_indices]) ** 2))
    numer = (N - 1) * total
    denom = 2 * W * ((x - x_bar) ** 2).sum()
    C = numer / denom
    return C
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Inner functions (per element C)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# For calling gearys_c on collections.
# TODO: These are faster if we can compile them in parallel mode. However,
# `workqueue` does not allow nested functions to be parallelized.
# Additionally, there are currently problems with numba's compiler around
# parallelization of this code:
# https://github.com/numba/numba/issues/6774#issuecomment-788789663
@numba.njit(cache=True)
def _gearys_c_inner_sparse_x_densevec(g_data, g_indices, g_indptr, x, W):
    # Per-feature kernel: Geary's C of a dense vector x over a CSR graph.
    # Compiled without parallel=True so it can be called from a prange loop.
    x_bar = x.mean()
    total = 0.0
    N = len(x)
    for i in numba.prange(N):
        s = slice(g_indptr[i], g_indptr[i + 1])
        i_indices = g_indices[s]
        i_data = g_data[s]
        total += np.sum(i_data * ((x[i] - x[i_indices]) ** 2))
    numer = (N - 1) * total
    denom = 2 * W * ((x - x_bar) ** 2).sum()
    C = numer / denom
    return C
@numba.njit(cache=True)
def _gearys_c_inner_sparse_x_sparsevec(
    g_data, g_indices, g_indptr, x_data, x_indices, N, W
):
    # Per-feature kernel: Geary's C of a sparse vector (x_data, x_indices)
    # of length N over a CSR graph with total edge weight W.
    # Densify the value vector once; the graph traversal below indexes it.
    x = np.zeros(N, dtype=np.float_)
    x[x_indices] = x_data
    # Mean over all N entries (implicit zeros included).
    x_bar = np.sum(x_data) / N
    total = 0.0
    N = len(x)
    for i in numba.prange(N):
        s = slice(g_indptr[i], g_indptr[i + 1])
        i_indices = g_indices[s]
        i_data = g_data[s]
        total += np.sum(i_data * ((x[i] - x[i_indices]) ** 2))
    numer = (N - 1) * total
    # Expanded from 2 * W * ((x_k - x_k_bar) ** 2).sum(), but uses sparsity
    # to skip some calculations
    # fmt: off
    denom = (
        2 * W
        * (
            np.sum(x_data ** 2)
            - np.sum(x_data * x_bar * 2)
            + (x_bar ** 2) * N
        )
    )
    # fmt: on
    C = numer / denom
    return C
@numba.njit(cache=True, parallel=True)
def _gearys_c_mtx(g_data, g_indices, g_indptr, X):
    # Geary's C for each row (feature) of a dense matrix X, in parallel.
    M, N = X.shape
    assert N == len(g_indptr) - 1
    W = g_data.sum()
    out = np.zeros(M, dtype=np.float_)
    for k in numba.prange(M):
        # Cast each feature vector to float64 for numerical stability.
        x = X[k, :].astype(np.float_)
        out[k] = _gearys_c_inner_sparse_x_densevec(g_data, g_indices, g_indptr, x, W)
    return out
@numba.njit(cache=True, parallel=True)
def _gearys_c_mtx_csr(
    g_data, g_indices, g_indptr, x_data, x_indices, x_indptr, x_shape
):
    # Geary's C for each row of a CSR matrix of values, in parallel.
    M, N = x_shape
    W = g_data.sum()
    out = np.zeros(M, dtype=np.float_)
    # Pre-split the CSR buffers into per-row views so each parallel
    # iteration only touches its own row.
    x_data_list = np.split(x_data, x_indptr[1:-1])
    x_indices_list = np.split(x_indices, x_indptr[1:-1])
    for k in numba.prange(M):
        out[k] = _gearys_c_inner_sparse_x_sparsevec(
            g_data,
            g_indices,
            g_indptr,
            x_data_list[k],
            x_indices_list[k],
            N,
            W,
        )
    return out
###############################################################################
# Interface
###############################################################################
@singledispatch
def _resolve_vals(val):
return np.asarray(val)
@_resolve_vals.register(np.ndarray)
@_resolve_vals.register(sparse.csr_matrix)
def _(val):
    # Already in a supported format: pass through untouched.
    return val
@_resolve_vals.register(sparse.spmatrix)
def _(val):
    # Convert any other sparse format to CSR, which the kernels expect.
    return sparse.csr_matrix(val)
@_resolve_vals.register(pd.DataFrame)
@_resolve_vals.register(pd.Series)
def _(val):
    # Unwrap pandas containers to their underlying ndarray.
    return val.to_numpy()
def _check_vals(vals):
"""\
Checks that values wont cause issues in computation.
Returns new set of vals, and indexer to put values back into result.
For details on why this is neccesary, see:
https://github.com/theislab/scanpy/issues/1806
"""
from scanpy._utils import is_constant
full_result = np.empty(vals.shape[0], dtype=np.float64)
full_result.fill(np.nan)
idxer = ~is_constant(vals, axis=1)
if idxer.all():
idxer = slice(None)
else:
warnings.warn(
UserWarning(
f"{len(idxer) - idxer.sum()} variables were constant, will return nan for these.",
)
)
return vals[idxer], idxer, full_result
@gearys_c.register(sparse.csr_matrix)
def _gearys_c(g, vals) -> np.ndarray:
    """Compute Geary's C over a CSR graph, dispatching on the shape/type of vals."""
    assert g.shape[0] == g.shape[1], "`g` should be a square adjacency matrix"
    vals = _resolve_vals(vals)
    g_data = g.data.astype(np.float_, copy=False)
    if isinstance(vals, sparse.csr_matrix):
        # Sparse (n_features, n_cells) values: use the CSR kernel per row.
        assert g.shape[0] == vals.shape[1]
        new_vals, idxer, full_result = _check_vals(vals)
        full_result[idxer] = _gearys_c_mtx_csr(
            g_data,
            g.indices,
            g.indptr,
            new_vals.data.astype(np.float_, copy=False),
            new_vals.indices,
            new_vals.indptr,
            new_vals.shape,
        )
        return full_result
    if isinstance(vals, np.ndarray):
        if vals.ndim == 1:
            # Single feature vector: scalar result.
            assert g.shape[0] == vals.shape[0]
            return _gearys_c_vec(g_data, g.indices, g.indptr, vals)
        if vals.ndim == 2:
            # Dense (n_features, n_cells) values: one result per row.
            assert g.shape[0] == vals.shape[1]
            new_vals, idxer, full_result = _check_vals(vals)
            full_result[idxer] = _gearys_c_mtx(g_data, g.indices, g.indptr, new_vals)
            return full_result
    raise NotImplementedError()
| {
"repo_name": "theislab/scanpy",
"path": "scanpy/metrics/_gearys_c.py",
"copies": "1",
"size": "9804",
"license": "bsd-3-clause",
"hash": 735643843749345300,
"line_mean": 28.7090909091,
"line_max": 98,
"alpha_frac": 0.5784373725,
"autogenerated": false,
"ratio": 3.2778335005015045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4356270873001505,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch
import numbers
from typing import Tuple
import math
from .transferfunction import TransferFunction
from .statespace import StateSpace
from .exception import WrongSampleTime, UnknownDiscretizationMethod
from .model_conversion import *
import numpy as np
__all__ = ['c2d']
def _zoh(sys_: StateSpace, sample_time: numbers.Real) -> Tuple[np.ndarray, ...]:
AT = sys_.A * sample_time
AT_K = [np.eye(sys_.A.shape[0]), AT]
for i in range(18):
AT_K.append(AT_K[-1] * AT)
G = 0
for k in range(20):
G += AT_K[k] / math.factorial(k)
H = 0
for k in range(20):
H += AT_K[k] / math.factorial(k + 1)
H *= sample_time
H = H * sys_.B
return G, H, sys_.C.copy(), sys_.D.copy()
def _tustin(sys_: StateSpace, sample_time: numbers.Real) -> Tuple[np.ndarray, ...]:
alpha = 2 / sample_time
eye = np.eye(sys_.A.shape[0])
P = eye - 1 / alpha * sys_.A
Q = eye + 1 / alpha * sys_.A
A = P.I * Q
B = P.I * sys_.B
C = 2 / alpha * sys_.C * P.I
D = sys_.D + sys_.C * B / alpha
return A, B, C, D
def _matched(sys_: TransferFunction, sample_time: numbers.Real) -> Tuple[np.ndarray, ...]:
poles = sys_.pole()
zeros = sys_.zero()
num = np.poly(np.exp(zeros * sample_time))
den = np.poly(np.exp(poles * sample_time))
root_number_delta = np.roots(den).shape[0] - np.roots(num).shape[0]
while root_number_delta > 0:
num = np.polymul(num, np.array([1, 1]))
root_number_delta -= 1
nump = np.poly1d(num)
denp = np.poly1d(den)
ds = np.polyval(sys_.num, 0) / np.polyval(sys_.den, 0)
d1z = nump(1) / denp(1)
dz_gain = ds / d1z
return num * dz_gain, den
_methods = {'matched': _matched, 'Tustin': _tustin, 'tustin': _tustin,
'bilinear': _tustin, 'zoh': _zoh}
@singledispatch
def c2d(sys_, sample_time, method='zoh'):
"""
Convert a continuous system to a discrete system.
:param sys_: the system to be transformed
:type sys_: TransferFunction | StateSpace
:param sample_time: sample time
:type sample_time: numbers.Real
:param method: method used in transformation(default: zoh)
:type method: str
:return: a discrete system
:rtype: TransferFunction | StateSpace
:raises TypeError: Raised when the type of sys_ is wrong
:Example:
>>> system = TransferFunction([1], [1, 1])
>>> c2d(system, 0.5, 'matched')
0.393469340287367
-------------------------
1.0*z - 0.606530659712633
sample time:0.5s
"""
raise TypeError(f'TransferFunction or StateSpace expected, got{type(sys_)}')
@c2d.register(TransferFunction)
def fn(sys_, sample_time, method='zoh'):
if sample_time <= 0:
raise WrongSampleTime(f'The sample time must be larger than 0. got {sample_time}')
try:
f = _methods[method]
except KeyError as e:
raise UnknownDiscretizationMethod from e
if method != 'matched':
A, B, C, D = f(tf2ss(sys_), sample_time)
return ss2tf(StateSpace(A, B, C, D, dt=sample_time))
else:
num, den = f(sys_, sample_time)
return TransferFunction(num, den, dt=sample_time)
@c2d.register(StateSpace)
def fn(sys_, sample_time, method='zoh'):
if sample_time <= 0:
raise WrongSampleTime(f'The sample time must be larger than 0. got {sample_time}')
try:
f = _methods[method]
except KeyError as e:
raise UnknownDiscretizationMethod from e
A, B, C, D = f(sys_, sample_time)
return StateSpace(A, B, C, D, dt=sample_time)
| {
"repo_name": "DaivdZhang/tinyControl",
"path": "tcontrol/discretization.py",
"copies": "1",
"size": "3604",
"license": "bsd-3-clause",
"hash": -6438048848176653000,
"line_mean": 28.3008130081,
"line_max": 90,
"alpha_frac": 0.6040510544,
"autogenerated": false,
"ratio": 3.1042204995693368,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42082715539693366,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch
import typing
from typing import Callable, Any, Dict, TypeVar, Type
from amino.util.string import snake_case
from amino.algebra import Algebra
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
Alg = TypeVar('Alg', bound=Algebra)
def dispatch(obj: B, tpes: typing.List[A], prefix: str, default: Callable[[A], R]=None) -> Callable[[A], R]:
def error(o: A) -> None:
msg = 'no dispatcher defined for {} on {} {}'
raise TypeError(msg.format(o, obj.__class__.__name__, prefix))
@singledispatch
def main(o: A, *a: Any, **kw: Any) -> R:
if default is None:
error(o)
else:
return default(o, *a, **kw)
for tpe in tpes:
fun = getattr(obj, '{}{}'.format(prefix, snake_case(tpe.__name__)), None)
if fun is None:
error(tpe)
main.register(tpe)(fun)
return main
def dispatch_alg(obj: B, alg: Type[Alg], prefix: str='', default: Callable[[Alg], R]=None) -> Callable[[Alg], R]:
return dispatch(obj, alg.__algebra_variants__, prefix, default)
def dispatch_with(rules: Dict[type, Callable], default: Callable=None):
@singledispatch
def main(o, *a, **kw):
if default is None:
msg = 'no dispatcher defined for {} {} ({})'
raise TypeError(msg.format(type(o), o, rules))
else:
default(o, *a, **kw)
for tpe, fun in rules.items():
main.register(tpe)(fun)
return main
__all__ = ('dispatch', 'dispatch_alg', 'dispatch_with')
| {
"repo_name": "tek/amino",
"path": "amino/dispatch.py",
"copies": "1",
"size": "1537",
"license": "mit",
"hash": -2206986871171945000,
"line_mean": 30.3673469388,
"line_max": 113,
"alpha_frac": 0.589459987,
"autogenerated": false,
"ratio": 3.341304347826087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4430764334826087,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch
import warnings
import traceback
from werkzeug.exceptions import HTTPException
from werkzeug.routing import Rule
from werkzeug.wrappers import Response, BaseResponse
from findig.content import ErrorHandler, Formatter, Parser
from findig.context import ctx
from findig.resource import Resource, AbstractResource
from findig.utils import DataPipe
class Dispatcher:
    """
    A :class:`Dispatcher` creates resources and routes requests to them.
    :param formatter: A function that converts resource data to a string
        suitable for output. It returns a 2-tuple:
        *(mime_type, output)*. If not given, a generic
        :class:`findig.content.Formatter` is used.
    :param parser: A function that parses request input and returns a
        2-tuple: *(mime_type, data)*. If not given, a generic
        :class:`findig.content.Parser`.
    :param error_handler: A function that converts an exception to a
        :class:`Response <werkzeug.wrappers.BaseResponse>`. If not given,
        a generic :class:`findig.content.ErrorHandler` is used.
    :param pre_processor: A function that is called on request data just
        after it is parsed.
    :param post_processor: A function that is called on resource data
        just before it is formatted.
    This class is fairly low-level and shouldn't be instantiated directly in
    application code. It does however serve as a base for :class:`findig.App`.
    """
    #: A class that is used to construct responses after they're
    #: returned from formatters.
    response_class = Response
    def __init__(self, formatter=None, parser=None, error_handler=None,
                 pre_processor=None, post_processor=None):
        # `route` doubles as a method and a decorator factory: wrapping it in
        # singledispatch sends route("/rule", ...) to route_decorator while
        # route(resource, "/rule", ...) registers the rule directly.
        self.route = singledispatch(self.route)
        self.route.register(str, self.route_decorator)
        if error_handler is None:
            error_handler = ErrorHandler()
            error_handler.register(BaseException, self._handle_exception)
            error_handler.register(HTTPException, self._handle_http_exception)
        if parser is None:
            parser = Parser()
        if formatter is None:
            formatter = Formatter()
            # Default to plain-text rendering via str().
            formatter.register('text/plain', str, default=True)
        self.formatter = formatter
        self.parser = parser
        self.error_handler = error_handler
        self.pre_processor = DataPipe() \
            if pre_processor is None \
            else pre_processor
        self.post_processor = DataPipe() \
            if post_processor is None \
            else post_processor
        self.resources = {}
        self.routes = []
        self.endpoints = {}
    def _handle_exception(self, err):
        # Last-resort handler: print the traceback and return an opaque 500
        # so internals are not leaked to the client.
        # TODO: log error
        traceback.print_exc()
        return Response("An internal application error has been logged.",
                        status=500)
    def _handle_http_exception(self, http_err):
        # Rebuild the werkzeug HTTPException response, dropping its
        # Content-Type/Length headers so our own formatting applies.
        response = http_err.get_response(ctx.request)
        headers = response.headers
        del headers['Content-Type']
        del headers['Content-Length']
        return Response(http_err.description, status=response.status,
                        headers=response.headers)
    def resource(self, wrapped=None, **args):
        """
        resource(wrapped, **args)
        Create a :class:`findig.resource.Resource` instance.
        :param wrapped: A wrapped function for the resource. In most cases,
            this should be a function that takes named
            route arguments for the resource and returns a
            dictionary with the resource's data.
        The keyword arguments are passed on directly to the constructor
        for :class:`Resource`, with the exception that *name* will default to
        {module}.{name} of the wrapped function if not given.
        This method may also be used as a decorator factory::
            @dispatcher.resource(name='my-very-special-resource')
            def my_resource(route, param):
                return {'id': 10, ... }
        In this case the decorated function will be replaced by a
        :class:`Resource` instance that wraps it. Any keyword arguments
        passed to the decorator factory will be handed over to the
        :class:`Resource` constructor. If no keyword arguments
        are required, then ``@resource`` may be used instead of
        ``@resource()``.
        .. note:: If this function is used as a decorator factory, then
            a keyword parameter for *wrapped* must not be used.
        """
        def decorator(wrapped):
            args['wrapped'] = wrapped
            # Default the resource name to the wrapped function's
            # fully-qualified name.
            args.setdefault(
                'name', "{0.__module__}.{0.__qualname__}".format(wrapped))
            resource = Resource(**args)
            self.resources[resource.name] = resource
            return resource
        if wrapped is not None:
            return decorator(wrapped)
        else:
            return decorator
    def route(self, resource, rulestr, **ruleargs):
        """
        Add a route to a resource.
        Adding a URL route to a resource allows Findig to dispatch
        incoming requests to it.
        :param resource: The resource that the route will be created for.
        :type resource: :class:`Resource` or function
        :param rulestr: A URL rule, according to
            :ref:`werkzeug's specification <werkzeug:routing>`.
        :type rulestr: str
        See :py:class:`werkzeug.routing.Rule` for valid rule parameters.
        This method can also be used as a decorator factory to assign
        route to resources using declarative syntax::
            @route("/index")
            @resource(name='index')
            def index_generator():
                return ( ... )
        """
        # Plain functions are implicitly promoted to Resource objects.
        if not isinstance(resource, AbstractResource):
            resource = self.resource(resource)
        self.routes.append((resource, rulestr, ruleargs))
        return resource
    def route_decorator(self, rulestr, **ruleargs):
        # See :meth:`route`. Invoked via singledispatch when route() is
        # called with a rule string as first argument.
        def decorator(resource):
            # Collect the rule
            resource = self.route(resource, rulestr, **ruleargs)
            # return the resource
            return resource
        return decorator
    def build_rules(self):
        """
        Return a generator for all of the url rules collected by the
        :class:`Dispatcher`.
        :rtype: Iterable of :class:`werkzeug.routing.Rule`
        .. note:: This method will 'freeze' resource names; do not change
            resource names after this function is invoked.
        """
        self.endpoints.clear()
        # Refresh the resource dict so that up-to-date resource names
        # are used in dictionaries
        self.resources = dict((r.name, r) for r in self.resources.values())
        # Build the URL rules
        for resource, string, args in self.routes:
            # Set up the callback endpoint
            args.setdefault('endpoint', resource.name)
            self.endpoints[args['endpoint']] = resource
            # And the supported methods: the rule only exposes methods that
            # both the data model supports and the route declaration allows.
            supported_methods = resource.get_supported_methods()
            restricted_methods = set(
                map(str.upper, args.get('methods', supported_methods)))
            args['methods'] = supported_methods.intersection(
                restricted_methods)
            # warn about unsupported methods
            unsupported_methods = list(
                set(restricted_methods) - supported_methods
            )
            if unsupported_methods:
                warnings.warn(
                    "Error building rule: {string}\n"
                    "The following HTTP methods have been declared, but "
                    "are not supported by the data model for {resource.name}: "
                    "{unsupported_methods}.".format(**locals())
                )
            # Initialize the rule, and yield it
            yield Rule(string, **args)
    def get_resource(self, rule):
        # Map a matched werkzeug rule back to the resource registered for
        # its endpoint in build_rules().
        return self.endpoints[rule.endpoint]
    def dispatch(self):
        """
        Dispatch the current request to the appropriate resource, based on
        which resource the rule applies to.
        This function requires an active request context in order to work.
        """
        # TODO: document request context variables.
        request = ctx.request
        url_values = ctx.url_values
        resource = ctx.resource
        ctx.response = response = {'headers': {}} # response arguments
        try:
            data = resource.handle_request(request, url_values)
            # Keep only the response arguments the handler may influence.
            response = {
                k: v for k, v in response.items()
                if k in ('status', 'headers')
            }
            if isinstance(data, (self.response_class, BaseResponse)):
                # The handler produced a full response itself; pass through.
                return data
            elif data is not None:
                process = DataPipe(
                    getattr(resource, 'post_processor', None),
                    self.post_processor
                )
                data = process(data)
            format = Formatter.compose(
                getattr(resource, 'formatter', Formatter()),
                self.formatter
            )
            mime_type, data = format(data)
            response['mimetype'] = mime_type
            response['response'] = data
            return self.response_class(**response)
        except BaseException as err:
            # All errors, including HTTPExceptions, are converted to
            # responses by the registered error handler.
            return self.error_handler(err)
    @property
    def unrouted_resources(self):
        """
        A list of resources created by the dispatcher which have no
        routes to them.
        """
        routed = set()
        for resource in self.endpoints.values():
            if resource.name in self.resources:
                routed.add(resource.name)
        # NOTE(review): this is a for-else; the loop never breaks, so the
        # else clause always runs — equivalent to a plain return after it.
        else:
            return list(map(self.resources.get,
                            set(self.resources) - routed))
| {
"repo_name": "geniphi/findig",
"path": "findig/dispatcher.py",
"copies": "1",
"size": "10003",
"license": "mit",
"hash": 7133685226032348000,
"line_mean": 34.725,
"line_max": 79,
"alpha_frac": 0.5956213136,
"autogenerated": false,
"ratio": 4.896231032794909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5991852346394909,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch
from llvmlite.llvmpy.core import Type, Constant
from numba.core import types, typing, cgutils
from numba.core.imputils import Registry
from numba.cuda import nvvmutils
# Registry of CUDA lowering implementations defined in this module.
registry = Registry()
lower = registry.lower
# i8* (char pointer) type; vprint takes one pointing at the packed arguments.
voidptr = Type.pointer(Type.int(8))
# NOTE: we don't use @lower here since print_item() doesn't return a LLVM value
@singledispatch
def print_item(ty, context, builder, val):
    """
    Handle printing of a single value of the given Numba type.

    Returns a ``(format string, [list of arguments])`` pair that is later
    combined into the final printf()-like vprint call.  Types without a
    registered overload are rejected here.
    """
    raise NotImplementedError("printing unimplemented for values of type %s"
                              % (ty,))
@print_item.register(types.Integer)
@print_item.register(types.IntegerLiteral)
def int_print_impl(ty, context, builder, val):
    """Print an integer: widen to 64 bits and use %llu/%lld accordingly.

    Returns the (format, [args]) pair consumed by print_varargs; the joined
    format string is inserted into the module by the caller, so no string
    constant is created here (the previous dead insert_string_const_addrspace
    call added an unused global).
    """
    if ty in types.unsigned_domain:
        rawfmt = "%llu"
        dsttype = types.uint64
    else:
        rawfmt = "%lld"
        dsttype = types.int64
    lld = context.cast(builder, val, ty, dsttype)
    return rawfmt, [lld]
@print_item.register(types.Float)
def real_print_impl(ty, context, builder, val):
    """Print a float: promote to float64 and format with %f."""
    as_double = context.cast(builder, val, ty, types.float64)
    return "%f", [as_double]
@print_item.register(types.StringLiteral)
def const_print_impl(ty, context, builder, sigval):
    """Print a compile-time string literal via a %s argument."""
    pyval = ty.literal_value
    assert isinstance(pyval, str)  # Ensured by lowering
    # The literal's bytes live in the module; pass a pointer as the %s arg.
    val = context.insert_string_const_addrspace(builder, pyval)
    return "%s", [val]
@lower(print, types.VarArg(types.Any))
def print_varargs(context, builder, sig, args):
    """Generic 'print' wrapper for arbitrary types.

    Dispatches to the print_item() implementations above for each argument,
    joins the per-argument format strings, packs the argument values into an
    anonymous struct and emits a single vprint call.
    """
    formats = []
    values = []
    # (The previous version also declared vprint here, unused, and carried an
    # unused enumerate() index.)
    for argtype, argval in zip(sig.args, args):
        argfmt, argvals = print_item(argtype, context, builder, argval)
        formats.append(argfmt)
        values.extend(argvals)
    rawfmt = " ".join(formats) + "\n"
    fmt = context.insert_string_const_addrspace(builder, rawfmt)
    # vprint expects (format, void* -> packed argument buffer).
    array = cgutils.make_anonymous_struct(builder, values)
    arrayptr = cgutils.alloca_once_value(builder, array)
    vprint = nvvmutils.declare_vprint(builder.module)
    builder.call(vprint, (fmt, builder.bitcast(arrayptr, voidptr)))
    return context.get_dummy_value()
| {
"repo_name": "sklam/numba",
"path": "numba/cuda/printimpl.py",
"copies": "1",
"size": "2571",
"license": "bsd-2-clause",
"hash": 136926388538165970,
"line_mean": 31.5443037975,
"line_max": 79,
"alpha_frac": 0.6938934267,
"autogenerated": false,
"ratio": 3.4884667571234735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46823601838234735,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch
import sympy
from devito.logger import warning
from devito.finite_differences.differentiable import Add, Mul, EvalDerivative
from devito.finite_differences.derivative import Derivative
from devito.tools import as_tuple
# Public API of this module.
__all__ = ['solve', 'linsolve']
class SolveError(Exception):
    """Base exception type for errors raised by this module."""
def solve(eq, target, **kwargs):
    """
    Algebraically rearrange an Eq w.r.t. a given symbol.
    This is a wrapper around ``sympy.solve``.
    Parameters
    ----------
    eq : expr-like
        The equation to be rearranged.
    target : symbol
        The symbol w.r.t. which the equation is rearranged. May be a `Function`
        or any other symbolic object.
    **kwargs
        Symbolic optimizations applied while rearranging the equation. For more
        information, refer to ``sympy.solve.__doc__``.
    """
    try:
        # Normalise an Eq into a single expression equated to zero
        eq = eq.lhs - eq.rhs if eq.rhs != 0 else eq.lhs
    except AttributeError:
        pass
    eqs, targets = as_tuple(eq), as_tuple(target)
    if not eqs:
        warning("Empty input equation, returning `None`")
        return None
    sols = []
    for e, t in zip(eqs, targets):
        # Try first linear solver
        try:
            sols.append(linsolve(eval_time_derivatives(e), t))
        except SolveError:
            # Bug fix: the original adjacent literals rendered as
            # "...standardsympy.solve..." — a space was missing.
            warning("Equation is not affine w.r.t the target, falling back to "
                    "standard sympy.solve that may be slow")
            kwargs['rational'] = False  # Avoid float indices
            kwargs['simplify'] = False  # Do not attempt premature optimisation
            sols.append(sympy.solve(e.evaluate, t, **kwargs)[0])
    # We need to rebuild the vector/tensor as sympy.solve outputs a tuple of solutions
    if len(sols) > 1:
        return target.new_from_mat(sols)
    else:
        return sols[0]
def linsolve(expr, target, **kwargs):
    """
    Linear solve for the target in a single equation.
    Parameters
    ----------
    expr : expr-like
        The expr to be rearranged.
    target : symbol
        The symbol w.r.t. which the equation is rearranged. May be a `Function`
        or any other symbolic object.
    """
    coeff = factorize_target(expr, target)
    if coeff == 0:
        raise SolveError("No linear solution found")
    # expr == coeff*target + rest, so target == -rest/coeff
    return -expr.xreplace({target: 0})/coeff
@singledispatch
def eval_time_derivatives(expr):
    """
    Evaluate all time derivatives in the expression.
    Generic fallback: leaf expressions carry no derivatives and are
    returned unchanged.
    """
    return expr
@eval_time_derivatives.register(Derivative)
def _(expr):
    # Only derivatives taken along a time dimension are evaluated.
    return expr.evaluate if any(d.is_Time for d in expr.dims) else expr
@eval_time_derivatives.register(Add)
@eval_time_derivatives.register(Mul)
def _(expr):
    # Rebuild the node with every operand recursively evaluated.
    evaluated = [eval_time_derivatives(a) for a in expr.args]
    return expr.func(*evaluated)
@singledispatch
def factorize_target(expr, target):
    """Coefficient of ``target`` in ``expr`` — generic leaf case."""
    return int(expr is target)
@factorize_target.register(Add)
@factorize_target.register(EvalDerivative)
def _(expr, target):
    # A sum's coefficient is the sum of its terms' coefficients.
    if not expr.has(target):
        return 0
    return sum(factorize_target(term, target) for term in expr.args)
@factorize_target.register(Mul)
def _(expr, target):
    # Multiply target-free factors verbatim; recurse into the factor that
    # actually contains the target.
    if not expr.has(target):
        return 0
    coeff = 1
    for factor in expr.args:
        coeff *= factorize_target(factor, target) if factor.has(target) else factor
    return coeff
| {
"repo_name": "opesci/devito",
"path": "devito/operations/solve.py",
"copies": "1",
"size": "3353",
"license": "mit",
"hash": -2548138694532712000,
"line_mean": 25.1953125,
"line_max": 87,
"alpha_frac": 0.6376379362,
"autogenerated": false,
"ratio": 3.69273127753304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.483036921373304,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch
class BaseWriter:
    """Base class for recursive writers.
    Usage:
    - Create an instance of this class.
    - Use :meth:`register` in the same manner as Python's built-in
      :func:`functools.singledispatch` to decorate functions that certain types
      of :mod:`pandasdmx.model` or :mod:`pandasdmx.message` objects.
    - Call :meth:`recurse` to kick off recursive writing of objects, including
      from inside other functions.
    Example
    -------
    MyWriter = BaseWriter('my')
    @MyWriter.register
    def _(obj: sdmx.model.ItemScheme):
        ... code to write an ItemScheme ...
        return result
    @MyWriter.register
    def _(obj: sdmx.model.Codelist):
        ... code to write a Codelist ...
        return result
    """
    def __init__(self, format_name):
        # Create the single-dispatch function; the base implementation only
        # signals that no writer is registered for the object's type.
        @singledispatch
        def func(obj, *args, **kwargs):
            raise NotImplementedError(
                f"write {obj.__class__.__name__} to " f"{format_name}"
            )
        self._dispatcher = func
    def recurse(self, obj, *args, **kwargs):
        """Recursively write *obj*.
        If there is no :meth:`register` 'ed function to write the class of
        `obj`, then the parent class of `obj` is used to find a method.
        """
        # TODO use a cache to speed up the MRO does not need to be traversed
        # for every object instance
        # (Cleanup: the original used getattr(self, "_dispatcher") — a
        # pointless indirection for a plain attribute access.)
        try:
            # Let the single dispatch function choose the overload
            return self._dispatcher(obj, *args, **kwargs)
        except NotImplementedError as exc:
            try:
                # Use the object's parent class to get a different overload
                func = self._dispatcher.registry[obj.__class__.mro()[1]]
            except KeyError:
                # Overload for the parent class did not exist
                raise exc
            return func(obj, *args, **kwargs)
    def __call__(self, func):
        """Register *func* as a writer for a particular object type."""
        self._dispatcher.register(func)
        return func
| {
"repo_name": "dr-leo/pandaSDMX",
"path": "pandasdmx/writer/base.py",
"copies": "1",
"size": "2221",
"license": "apache-2.0",
"hash": 6569591053051041000,
"line_mean": 31.6617647059,
"line_max": 79,
"alpha_frac": 0.5938766321,
"autogenerated": false,
"ratio": 4.505070993914807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 68
} |
from functools import singledispatch
class cached_property:
    """Descriptor computing a value once and caching it on the instance.

    Being a non-data descriptor, the cached value stored in the instance
    ``__dict__`` shadows the descriptor on subsequent accesses.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:  # pragma: no cover
            return self
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
def prepare_query_params(**kwargs):
    """Flatten keyword arguments into (key, value) querystring pairs.

    Each value is run through ``expand`` and pairs with a ``None`` value are
    dropped.
    """
    pairs = []
    for key, value in kwargs.items():
        pairs.extend(
            (sub_key, sub_value)
            for sub_key, sub_value in expand(value, key)
            if sub_value is not None
        )
    return pairs
class Filter(dict):
    """Dict subtype carrying a logical connector ("AND"/"OR") for filters."""

    def __init__(self, connector="AND", **kwargs):
        super().__init__(**kwargs)
        self.connector = connector
class Sort(list):
    """Normalises sort arguments into ``[-]Model.field`` strings."""

    def __init__(self, args, model):
        if not isinstance(args, (list, tuple, set)):
            args = [args]
        super().__init__(self.prepend_model(item, model) for item in args)

    def prepend_model(self, value, model):
        """Prepend the model name unless one is already present.
        For example, with model "Offer":
            key -> Offer.key
            -key -> -Offer.key
            Offer.key -> Offer.key
            -Offer.key -> -Offer.key
        """
        if "." in value:
            return value
        direction = ""
        if value.startswith("-"):
            direction, value = "-", value[1:]
        return "{}{}.{}".format(direction, model, value)
# Maps Django-style filter suffixes (``field__<op>``) to the operator names
# the HasOffers API expects in querystring filters (see expand_filter).
OPERATORS = {
    "ne": "NOT_EQUAL_TO",
    "lt": "LESS_THAN",
    "lte": "LESS_THAN_OR_EQUAL_TO",
    "gt": "GREATER_THAN",
    "gte": "GREATER_THAN_OR_EQUAL_TO",
    "like": "LIKE",
    "not_like": "NOT_LIKE",
    "null": "NULL",
    "not_null": "NOT_NULL",
    "true": "TRUE",
    "false": "FALSE",
}
@singledispatch
def expand(value, key):
    """Default expansion: emit the (key, value) pair unchanged."""
    yield key, value
@expand.register(dict)
def expand_dict(value, key):
    """Expand a plain dict into ``key[subkey]`` pairs."""
    for sub_key, sub_value in value.items():
        yield "{}[{}]".format(key, sub_key), sub_value
@expand.register(Filter)
def expand_filter(value, key):
    """Expand a Filter into querystring pairs.

    Sequence values become repeated ``key[field][]`` pairs.  Keys shaped like
    ``field__op`` are translated through OPERATORS; anything else is emitted
    as a plain ``key[field]`` pair (grouped under the connector for OR
    filters).
    """
    for dict_key, dict_value in value.items():
        if isinstance(dict_value, (list, tuple, set)):
            for sub_value in dict_value:
                yield "{}[{}][]".format(key, dict_key), sub_value
        else:
            try:
                field_name, operator = dict_key.split("__")
                param_name = "{}[{}][{}]".format(key, field_name, OPERATORS[operator])
            except (ValueError, KeyError):
                # ValueError: no (or too many) "__" separators.
                # KeyError: unknown operator suffix — treat as a plain field
                # name instead of crashing (previously uncaught).
                if value.connector == "OR":
                    param_name = "{}[{}][{}]".format(key, value.connector, dict_key)
                else:
                    param_name = "{}[{}]".format(key, dict_key)
            yield param_name, dict_value
@expand.register(Sort)
def expand_sort(value, key):
    """Expand a Sort into ``sort[field] = asc|desc`` pairs."""
    for field in value:
        if field.startswith("-"):
            yield "{}[{}]".format(key, field[1:]), "desc"
        else:
            yield "{}[{}]".format(key, field), "asc"
@expand.register(list)
@expand.register(tuple)
@expand.register(set)
def expand_lists(value, key):
    """Expand a sequence into repeated ``key[]`` pairs."""
    suffixed = "%s[]" % key
    for item in value:
        yield suffixed, item
| {
"repo_name": "Stranger6667/pyoffers",
"path": "src/pyoffers/utils.py",
"copies": "1",
"size": "3256",
"license": "mit",
"hash": -11806878575648148,
"line_mean": 26.3613445378,
"line_max": 86,
"alpha_frac": 0.5399262899,
"autogenerated": false,
"ratio": 3.7168949771689497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47568212670689497,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch, total_ordering
@singledispatch
def function(arg):
    """Fallback implementation: report the argument's type."""
    arg_type = type(arg)
    return "Type of argument: {}".format(arg_type)
@function.register(list)
def function_list(arg):
    """Overload for lists: report the length."""
    size = len(arg)
    return "Size of list: {}".format(size)
@function.register(int)
def function_int(arg):
    """Overload for ints: report the value itself."""
    return "Value of int: {}".format(arg)
@function.register(set)
def function_set(arg):
    """Overload for sets: report the cardinality."""
    size = len(arg)
    return "Size of set: {}".format(size)
def example_with_singledispatch():
    """
    As of Python 3.4 you can use functools.singledispatch to
    call an appropriate function depending on the type of the argument.
    You can learn more about this in PEP 443.
    """
    samples = [3, [1, 2, 3], set([1, 2, 3]), object()]
    for sample in samples:
        print(function(sample))
def example_with_total_ordering():
    """
    As of Python 3.2 you can use functools.total_ordering to
    compare instances of any class as long as you've implemented
    the __eq__() method and one of __lt__(), __le__(), __gt__(),
    or __ge__().
    """
    @total_ordering
    class Person:
        def __init__(self, age):
            self.age = age

        def __eq__(self, other):
            return self.age == other.age

        def __lt__(self, other):
            return self.age < other.age

        def __repr__(self):
            return "Person (age: {})".format(self.age)

    younger, older = Person(age=20), Person(age=30)
    print(younger < older)
    print(younger > older)
    print(younger <= older)
    print(younger >= older)
    print(younger == older)
| {
"repo_name": "svisser/python-3-examples",
"path": "examples/functools.py",
"copies": "1",
"size": "1591",
"license": "mit",
"hash": -5021913701355044000,
"line_mean": 24.253968254,
"line_max": 71,
"alpha_frac": 0.6096794469,
"autogenerated": false,
"ratio": 3.5752808988764047,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46849603457764044,
"avg_score": null,
"num_lines": null
} |
from functools import singledispatch, wraps
import stl
from optlang import Constraint, Variable
from magnum.constraint_kinds import Kind as K
# Small epsilon used to turn strict inequalities into MILP-friendly form.
eps = 1e-7
M = 1000  # TODO: big-M constant — should ideally be derived from problem bounds
def counter(func):
    """Decorator passing a 1-based call counter to ``func`` as keyword ``i``."""
    count = 0

    @wraps(func)
    def wrapper(*args, **kwargs):
        nonlocal count
        count += 1
        return func(*args, i=count, **kwargs)
    return wrapper
@counter
def z(x: "SL", i: int):
    # TODO: come up with better function name
    """Create a fresh solver variable for constraint tuple ``x``.

    LinEq predicates get a binary indicator ("q<i>"); everything else gets a
    continuous variable ("z<i>").  ``i`` is supplied by the @counter wrapper.
    (Cleanup: the original evaluated the same isinstance check twice and
    built a one-entry kwargs dict for no reason.)
    """
    if isinstance(x[0], stl.LinEq):
        cat, prefix = 'binary', 'q'
    else:
        cat, prefix = 'continuous', 'z'
    return Variable(name="{}{}".format(prefix, i), type=cat)
@singledispatch
def encode(psi, s, t, within_or=False):
    """Dispatch-based STL constraint encoder; unknown node types raise."""
    raise NotImplementedError(psi)
@encode.register(stl.LinEq)
def encode_lineq(psi, s, t, within_or=False):
    # Linear combination of the signal variables at time t for this predicate.
    x = sum(float(term.coeff) * s[(term.id, t)][0] for term in psi.terms)
    if not within_or:
        # Hard constraint: bound x directly according to the predicate's op.
        if psi.op == "=":
            lb = ub = psi.const
        elif psi.op in ("<", "<="):
            lb, ub = None, psi.const
        elif psi.op in (">", ">="):
            lb, ub = psi.const, None
        yield Constraint(x, lb=lb, ub=ub), psi
    else:
        # Inside a disjunction: introduce an indicator z_phi and use a big-M
        # encoding so the predicate is enforced only via the indicator.
        # NOTE(review): looks like x is normalised so that a negative value
        # means "satisfied" — confirm sign convention against the solver.
        z_phi = z((psi, t))
        s[psi, t, 'or'] = z_phi
        x = x - psi.const if psi.op in (">", ">=") else psi.const - x
        yield Constraint(x - M * z_phi + eps, ub=0), psi
        yield Constraint(-x - M * (1 - z_phi) + eps, ub=0), psi
@encode.register(stl.Next)
def encode_next(phi, s, t, within_or=False):
    """Encode the next-step operator by shifting the subformula to t+1."""
    step = t + 1
    yield from encode(phi.arg, s, step, within_or)
    if within_or:
        # Reuse the child's indicator variable for this node.
        s[phi, t, 'or'] = s[phi.arg, step, 'or']
@encode.register(stl.And)
def encode_and(phi, s, t, within_or=False):
    """Encode conjunction: every child's constraints must hold."""
    if within_or:
        raise NotImplementedError
    for child in phi.args:
        yield from encode(child, s, t, within_or)
@encode.register(stl.Or)
def encode_or(phi, s, t, within_or=False):
    """Encode disjunction: at least one child must be satisfied.

    Children are encoded shallowly with indicator variables; their sum is
    then forced to be at least one (lb=0.5 on integer indicators).
    """
    if within_or:
        raise NotImplementedError
    for child in phi.args:
        yield from encode(child, s, t, within_or=True)
    indicators = [s[child, t, 'or'] for child in phi.args]
    yield Constraint(sum(indicators), lb=0.5), K.OR_TOTAL
| {
"repo_name": "mvcisback/py-blustl",
"path": "magnum/solvers/milp/boolean_encoding.py",
"copies": "2",
"size": "2238",
"license": "bsd-3-clause",
"hash": 4541561260336413000,
"line_mean": 24.4318181818,
"line_max": 73,
"alpha_frac": 0.5786416443,
"autogenerated": false,
"ratio": 3.0366350067842607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.461527665108426,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering, lru_cache
import logging
import re
import requests
from eva_cttv_pipeline.trait_mapping.ols import get_ontology_label_from_ols, is_in_efo
from eva_cttv_pipeline.trait_mapping.ols import is_current_and_in_efo
from eva_cttv_pipeline.trait_mapping.utils import json_request
logger = logging.getLogger(__package__)
class OntologyUri:
    """An ontology term reference rendered as its full URI.

    E.g. ``OntologyUri("0000001", "efo")`` -> http://www.ebi.ac.uk/efo/EFO_0000001
    """

    db_to_uri_dict = {
        "orphanet": "http://www.orpha.net/ORDO/Orphanet_{}",
        "omim": "http://identifiers.org/omim/{}",
        "efo": "http://www.ebi.ac.uk/efo/EFO_{}",
        "mesh": "http://identifiers.org/mesh/{}",
        "medgen": "http://identifiers.org/medgen/{}",
        "hp": "http://purl.obolibrary.org/obo/HP_{}",
        "doid": "http://purl.obolibrary.org/obo/DOID_{}",
        "mondo": "http://purl.obolibrary.org/obo/MONDO_{}",
    }

    def __init__(self, id_, db):
        self.id_ = id_
        self.db = db
        template = self.db_to_uri_dict[self.db.lower()]
        self.uri = template.format(self.id_)

    def __str__(self):
        return self.uri
@total_ordering
class OxOMapping:
    """
    Individual mapping for an ontology ID mapped to one other ontology ID. An OxO result can consist
    of multiple mappings.
    """
    def __init__(self, label, curie, distance, query_id):
        self.label = label
        self.curie = curie
        self.db, self.id_ = curie.split(":")
        self.uri = OntologyUri(self.id_, self.db)
        self.distance = distance
        self.query_id = query_id
        self.in_efo = False
        # For non-EFO mappings, `is_current` property does not make sense and is not used
        self.is_current = False
        self.ontology_label = ""
    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        # Bug fix: all field comparisons must be joined with `and`.  The
        # original returned a tuple of booleans, which is always truthy, so
        # any two OxOMapping instances compared equal.
        return (self.label == other.label and self.db == other.db
                and self.id_ == other.id_ and self.distance == other.distance
                and self.in_efo == other.in_efo
                and self.is_current == other.is_current
                and self.ontology_label == other.ontology_label)
    def __lt__(self, other):
        # Shorter distance ranks higher; ties broken by EFO membership and
        # currency (total_ordering derives the remaining comparisons).
        return ((other.distance, self.in_efo, self.is_current) <
                (self.distance, other.in_efo, other.is_current))
    def __str__(self):
        return "{}, {}, {}, {}".format(self.label, self.curie, self.distance, self.query_id)
class OxOResult:
    """
    A single result from querying OxO for one ID. A result can contain multiple mappings. A response
    from OxO can contain multiple results- one per queried ID.
    """
    def __init__(self, query_id, label, curie):
        self.query_id = query_id
        self.label = label
        self.curie = curie
        self.db, self.id_ = curie.split(":")
        self.uri = OntologyUri(self.id_, self.db)
        self.mapping_list = []
    def __str__(self):
        return "{}, {}, {}".format(self.query_id, self.label, self.curie)
    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        # Bug fix: comparisons joined with `and` (the original returned an
        # always-truthy tuple).  db, id_ and uri are derived from curie, so
        # comparing curie covers them (uri has no value-based __eq__).
        return (self.query_id == other.query_id and self.label == other.label
                and self.curie == other.curie
                and self.mapping_list == other.mapping_list)
# Maps the database fragment found in a term URI (lowercased) to the
# canonical datasource name expected by OxO.
URI_DB_TO_DB_DICT = {
    "ordo": "Orphanet",
    "orphanet": "Orphanet",
    "omim": "OMIM",
    "efo": "EFO",
    "mesh": "MeSH",
    "hp": "HP",
    "doid": "DOID",
    "mondo": "MONDO",
}
# Matches runs of non-digit characters.
NON_NUMERIC_RE = re.compile(r'[^\d]+')
@lru_cache(maxsize=16384)
def uri_to_oxo_format(uri: str) -> str:
    """
    Convert an ontology term URI into the ``DB:ID`` form used by OxO queries.
    :param uri: Ontology uri for a term
    :return: "DB:ID" string, or None when the URI belongs to no known database
    """
    lowered = uri.lower()
    if not any(known in lowered for known in URI_DB_TO_DB_DICT):
        return None
    parts = uri.rstrip("/").split("/")
    if "identifiers.org" in uri:
        # identifiers.org URIs carry the db and id as the last two segments.
        db, id_ = parts[-2], parts[-1]
    else:
        # OBO-style URIs end in "DB_ID".
        db, id_ = parts[-1].split("_")
    return "{}:{}".format(URI_DB_TO_DB_DICT[db.lower()], id_)
def uris_to_oxo_format(uri_set: set) -> list:
    """Convert each URI in a set to OxO's ``DB:ID`` form, dropping unknowns."""
    converted = (uri_to_oxo_format(uri) for uri in uri_set)
    return [oxo_id for oxo_id in converted if oxo_id is not None]
def build_oxo_payload(id_list: list, target_list: list, distance: int) -> dict:
    """
    Build the POST payload for an OxO xref search.
    :param id_list: List of IDs with which to find xrefs using OxO
    :param target_list: List of ontology datasources to include
    :param distance: Number of steps to take through xrefs to find mappings
    :return: dict containing payload to be used in POST request with OxO
    """
    return {
        "ids": id_list,
        "mappingTarget": target_list,
        "distance": distance,
    }
def get_oxo_results_from_response(oxo_response: dict) -> list:
    """
    For a json(/dict) response from an OxO request, parse the data into a list of OxOResults
    :param oxo_response: Response from OxO request
    :return: List of OxOResults based upon the response from OxO
    """
    oxo_result_list = []
    results = oxo_response["_embedded"]["searchResults"]
    for result in results:
        # Skip queried IDs for which OxO returned no mappings at all.
        if len(result["mappingResponseList"]) == 0:
            continue
        query_id = result["queryId"]
        label = result["label"]
        curie = result["curie"]
        oxo_result = OxOResult(query_id, label, curie)
        for mapping_response in result["mappingResponseList"]:
            mapping_label = mapping_response["label"]
            mapping_curie = mapping_response["curie"]
            mapping_distance = mapping_response["distance"]
            oxo_mapping = OxOMapping(mapping_label, mapping_curie, mapping_distance, query_id)
            uri = str(oxo_mapping.uri)
            # Enrich the mapping with its label and EFO status from OLS.
            ontology_label = get_ontology_label_from_ols(uri)
            if ontology_label is not None:
                oxo_mapping.ontology_label = ontology_label
            # A term that is current-and-in-EFO is necessarily in EFO;
            # otherwise fall back to the plain membership check.
            uri_is_current_and_in_efo = is_current_and_in_efo(uri)
            if not uri_is_current_and_in_efo:
                uri_is_in_efo = is_in_efo(uri)
                oxo_mapping.in_efo = uri_is_in_efo
            else:
                oxo_mapping.in_efo = uri_is_current_and_in_efo
            oxo_mapping.is_current = uri_is_current_and_in_efo
            oxo_result.mapping_list.append(oxo_mapping)
        oxo_result_list.append(oxo_result)
    return oxo_result_list
def get_oxo_results(id_list: list, target_list: list, distance: int) -> list:
    """
    Use list of ontology IDs, datasource targets and distance call function to query OxO and return
    a list of OxOResults.
    :param id_list: List of ontology IDs with which to find xrefs using OxO
    :param target_list: List of ontology datasources to include
    :param distance: Number of steps to take through xrefs to find mappings
    :return: List of OxOResults based upon results from request made to OxO
    """
    url = "https://www.ebi.ac.uk/spot/oxo/api/search?size=5000"
    payload = build_oxo_payload(id_list, target_list, distance)
    try:
        oxo_response = json_request(url, payload, requests.post)
    except requests.HTTPError:
        # Sometimes, OxO fails to process a completely valid request even after several attempts.
        # See https://github.com/EBISPOT/OXO/issues/26 for details
        logger.error('OxO failed to process request for id_list {} (probably a known bug in OxO)'.format(id_list))
        return []
    # An empty list tells the caller to skip OxO mappings for these IDs.
    if oxo_response is None:
        return []
    if "_embedded" not in oxo_response:
        logger.warning("Cannot parse the response from OxO for the following identifiers: {}".format(','.join(id_list)))
        return []
    return get_oxo_results_from_response(oxo_response)
| {
"repo_name": "EBIvariation/eva-cttv-pipeline",
"path": "eva_cttv_pipeline/trait_mapping/oxo.py",
"copies": "1",
"size": "8085",
"license": "apache-2.0",
"hash": 3301269777967984600,
"line_mean": 34.9333333333,
"line_max": 120,
"alpha_frac": 0.6180581323,
"autogenerated": false,
"ratio": 3.3422902025630425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44603483348630424,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
class Room:
    """A named rectangular room; the area is computed once at construction."""

    def __init__(self, name, length, width):
        self.name = name
        self.length = length
        self.width = width
        self.square_feet = length * width
@total_ordering
class House:
    """A named house whose ordering compares total living space.

    total_ordering derives >, >=, <= from __eq__ and __lt__.
    """

    def __init__(self, name, style):
        self.name = name
        self.style = style
        self.rooms = []

    @property
    def living_space_footage(self):
        return sum(room.square_feet for room in self.rooms)

    def add_room(self, room):
        self.rooms.append(room)

    def __str__(self):
        footage = self.living_space_footage
        return '{}: {} square foot {}'.format(self.name, footage, self.style)

    def __eq__(self, other):
        return self.living_space_footage == other.living_space_footage

    def __lt__(self, other):
        return self.living_space_footage < other.living_space_footage
# Build a few houses, and add rooms to them.
h1 = House('h1', 'Cape')
h1.add_room(Room('Master Bedroom', 14, 21))
h1.add_room(Room('Living Room', 18, 20))
h1.add_room(Room('Kitchen', 12, 16))
h1.add_room(Room('Office', 12, 12))
h2 = House('h2', 'Ranch')
h2.add_room(Room('Master Bedroom', 14, 21))
h2.add_room(Room('Living Room', 18, 20))
h2.add_room(Room('Kitchen', 12, 16))
h3 = House('h3', 'Split')
h3.add_room(Room('Master Bedroom', 14, 21))
h3.add_room(Room('Living Room', 18, 20))
h3.add_room(Room('Office', 12, 16))
h3.add_room(Room('Kitchen', 15, 17))
houses = [h1, h2, h3]
# total_ordering derives >, >=, <= (and min/max support) from House.__eq__
# and House.__lt__, which compare total living space.
print("Is h1 bigger than h2?", h1 > h2) # prints True
print("Is h2 smaller than h3?", h2 < h3) # prints True
print("Is h2 greater than or equal to h1?", h2 >= h1) # prints False
print("Which one is biggest?", max(houses)) # prints 'h3: 1101 square foot Split'
print("Which is smallest?", min(houses)) # prints 'h2: 846 square foot Ranch'
| {
"repo_name": "hyller/CodeLibrary",
"path": "python-cookbook-master/src/8/making_classes_support_comparison_operations/example.py",
"copies": "2",
"size": "1884",
"license": "unlicense",
"hash": 6044864278287940000,
"line_mean": 30.9322033898,
"line_max": 81,
"alpha_frac": 0.601910828,
"autogenerated": false,
"ratio": 2.94375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9516737200672192,
"avg_score": 0.00578472546556177,
"num_lines": 59
} |
from functools import total_ordering
from collections import Hashable
@total_ordering
class CaseClass(object):
    """
    Scala-style case class base: equality, ordering and repr are derived
    from a declared list of attribute names.
    """
    def __init__(self, keys):
        """
        :param keys: list of attribute names
        """
        self.__keys = keys

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # Fall back to comparing class names for foreign types.
            return self.__class__.__name__ == other.__class__.__name__
        return all(
            not (getattr(self, key) != getattr(other, key))
            for key in self.__keys
        )

    def __lt__(self, other):
        if not isinstance(other, self.__class__):
            # Fall back to comparing class names for foreign types.
            return self.__class__.__name__ < other.__class__.__name__
        for key in self.__keys:
            mine, theirs = getattr(self, key), getattr(other, key)
            try:
                # None sorts before any other value.
                if mine is None and theirs is not None:
                    return True
                if mine is not None and theirs is None:
                    return False
                if mine < theirs:
                    return True
                if mine > theirs:
                    return False
            except TypeError:
                # Incomparable values: compare via hash/repr surrogates.
                mine = self.__to_comparable(mine)
                theirs = self.__to_comparable(theirs)
                if mine < theirs:
                    return True
                if mine > theirs:
                    return False
        return False

    def __repr__(self):
        rendered = ', '.join('%s=%r' % (key, getattr(self, key)) for key in self.__keys)
        return '%s(%s)' % (self.__class__.__name__, rendered)

    def __to_comparable(self, value):
        return hash(value) if isinstance(value, Hashable) else repr(value)
| {
"repo_name": "mogproject/artifact-cli",
"path": "src/artifactcli/util/caseclass.py",
"copies": "1",
"size": "1800",
"license": "apache-2.0",
"hash": -2029655216058803500,
"line_mean": 29.5084745763,
"line_max": 116,
"alpha_frac": 0.475,
"autogenerated": false,
"ratio": 4.545454545454546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00014486455164421268,
"num_lines": 59
} |
from functools import total_ordering
from datetime import datetime
from CMi.utils import title_sort_key
from django.db import models
class Category(models.Model):
    # Simple named grouping that a Show may belong to.
    name = models.CharField(max_length=255)

    def __unicode__(self):
        # Display the category by its name.
        return '%s' % self.name
@total_ordering
class Show(models.Model):
    """A tracked TV show; episodes hang off it via the `episodes` reverse name.

    Equality is by name; ordering (completed by @total_ordering) is by
    title sort key.
    """
    name = models.CharField(max_length=200, unique=True)
    description = models.TextField(blank=True)
    canonical_name = models.CharField(max_length=200)
    auto_erase = models.BooleanField(default=True)
    source = models.CharField(max_length=255, blank=True)
    ended = models.BooleanField(default=False)
    category = models.ForeignKey(Category, null=True)

    def __unicode__(self):
        return '%s' % self.name

    def unwatched_episodes(self):
        # Episodes that exist on disk and have not been watched yet.
        return self.episodes.exclude(filepath='').filter(watched=False)

    def watchable_episodes(self):
        # With auto_erase set, already-watched episodes are hidden.
        eps = self.episodes.exclude(filepath='')
        if self.auto_erase:
            return eps.filter(watched=False)
        else:
            return eps

    def __eq__(self, other):
        if not isinstance(other, Show):
            return False
        return self.name == other.name

    def __lt__(self, other):
        # BUGFIX: this previously compared the sort keys with `==`, so no
        # show ever sorted before another and @total_ordering's derived
        # comparisons were nonsense.
        return title_sort_key(self.name) < title_sort_key(other.name)

    class Meta:
        ordering = ['name']
class SuggestedShow(models.Model):
    """A show name suggested for tracking; may be marked as ignored."""
    name = models.CharField(max_length=200, unique=True)
    ignored = models.BooleanField(default=False)

    def __unicode__(self):
        # Ignored suggestions are flagged in their display string.
        if not self.ignored:
            return self.name
        return 'ignored: %s' % self.name

    class Meta:
        ordering = ['name']
class Episode(models.Model):
    """A single episode of a Show, possibly present on disk (`filepath`)."""
    show = models.ForeignKey(Show, related_name='episodes')
    name = models.CharField(max_length=200)
    season = models.IntegerField()
    episode = models.IntegerField(default=0)
    aired = models.DateField(blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    watched = models.BooleanField(default=False)
    position = models.FloatField(default=0)  # playback position
    filepath = models.TextField(blank=True)
    # BUGFIX: default was the boolean False on an IntegerField; use 0
    # (same stored value, correct type).
    watched_count = models.IntegerField(default=0)
    watched_at = models.DateTimeField(default=datetime(1970, 1, 1))

    def __unicode__(self):
        if self.aired:
            return '%s %s' % (self.show, self.aired)
        else:
            return '%s s%se%s %s' % (self.show, self.season, self.episode, self.name)

    class Meta:
        ordering = ['show__name', 'season', 'episode', 'aired']

    def watched_range(self): # used by template
        # Template-friendly iterable with one item per completed watch.
        return xrange(self.watched_count)
"repo_name": "boxed/CMi",
"path": "web_frontend/CMi/tvshows/models.py",
"copies": "1",
"size": "2611",
"license": "mit",
"hash": -370314084388931800,
"line_mean": 29.7294117647,
"line_max": 85,
"alpha_frac": 0.6491765607,
"autogenerated": false,
"ratio": 3.7895500725689404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.493872663326894,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from dateutil.parser import parse as parse_date
from collections import defaultdict
def parse_data(data):
    """Pull the value out of a linked-data JSON fragment.

    Accepts a fragment dict, or a list of fragments (only the first is
    used). Typed fragments carry `_datatype`/`_value`; dateTime values are
    parsed into datetime objects, everything else is returned raw.
    Fragments without a `_datatype` yield None (previously the function
    just fell off the end; now explicit).
    """
    if isinstance(data, list):
        # A fragment may arrive wrapped in a list; use the first element.
        data = data[0]
    if "_datatype" not in data:
        return None
    if data["_datatype"] == "dateTime":
        return parse_date(data["_value"])
    return data["_value"]
def parse_date_element(el):
    """Parse an XML element's text as a date; None for empty/missing text."""
    text = el.text
    if not text:
        return None
    return parse_date(text)
class Resource(object):
    """Base class for API resources identified by a trailing numeric id
    in their resource URI."""

    def __init__(self):
        self.resource = None

    @property
    def resource_id(self):
        """Numeric id: the final path segment of the resource URI."""
        last_segment = self.resource.rsplit("/", 1)[-1]
        return int(last_segment)
@total_ordering
class Division(Resource):
    # A recorded vote (division) in one house. Vote data is fetched lazily:
    # touching an attribute __init__ did not set triggers __getattr__, which
    # performs the API call once.
    def __init__(self, house):
        super().__init__()
        self.house = house
        self.parl = house.parl
        self._data_fetched = False
        # NOTE: because these are pre-set, normal attribute lookup succeeds
        # for them and the lazy fetch never fires on title/uin/date access.
        self.title = None
        self.uin = None
        self.date = None

    def _fetch_data(self):
        # One request per division; fills vote counts and per-member lists.
        res = self.parl.get(
            "%sdivisions/id/%s.json" % (self.house.name.lower(), self.resource_id)
        )
        data = res["primaryTopic"]
        if self.house.name == "Commons":
            self.abstain = int(parse_data(data["AbstainCount"]))
            self.ayes = int(parse_data(data["AyesCount"]))
            self.did_not_vote = int(parse_data(data["Didnotvotecount"]))
            self.error_vote = int(parse_data(data["Errorvotecount"]))
            self.margin = int(parse_data(data["Margin"]))
            self.noes = int(parse_data(data["Noesvotecount"]))
            self.non_eligible = int(parse_data(data["Noneligiblecount"]))
            self.suspended_expelled = int(
                parse_data(data["Suspendedorexpelledvotescount"])
            )
            self.votes = {"aye": MemberList(), "no": MemberList()}
            for vote in data["vote"]:
                member = self.house.members.from_vote(vote)
                if vote["type"] == "http://data.parliament.uk/schema/parl#AyeVote":
                    self.votes["aye"].append(member)
                elif vote["type"] == "http://data.parliament.uk/schema/parl#NoVote":
                    self.votes["no"].append(member)
        elif self.house.name == "Lords":
            self.contents = int(data["officialContentsCount"])
            self.not_contents = int(data["officialNotContentsCount"])
            self.votes = {"content": MemberList(), "not_content": MemberList()}
            for vote in data["vote"]:
                member = self.house.members.from_vote(vote)
                if vote["type"] == "http://data.parliament.uk/schema/parl#ContentVote":
                    self.votes["content"].append(member)
                elif (
                    vote["type"]
                    == "http://data.parliament.uk/schema/parl#NotContentVote"
                ):
                    self.votes["not_content"].append(member)
        self._data_fetched = True

    @property
    def passed(self):
        # True when the motion carried; None for an unknown house name.
        if self.house.name == "Commons":
            return self.ayes > self.noes
        elif self.house.name == "Lords":
            return self.contents > self.not_contents

    def __eq__(self, other):
        return type(other) == type(self) and other.uin == self.uin

    def __gt__(self, other):
        # Deliberately(?) inverted: an OLDER division compares greater, so
        # sorted() lists newest first; ties broken by uin. @total_ordering
        # derives the remaining comparisons from __eq__ + __gt__.
        if self.date == other.date:
            return other.uin > self.uin
        return other.date > self.date

    def __repr__(self):
        return '<%s division: "%s" on %s>' % (self.house.name, self.title, self.date)

    def __getattr__(self, name: str):
        # Runs only when normal lookup fails: fetch once, retry the lookup,
        # and turn a still-None value into AttributeError.
        if not self._data_fetched:
            self._fetch_data()
            res = getattr(self, name)
            if res is None:
                raise AttributeError()
            return res
        raise AttributeError()
class EDM(Resource):
    # Early Day Motion. `number` and `title` are populated externally —
    # presumably by whatever constructs EDM instances; confirm upstream.
    def __repr__(self):
        return '<EDM %s: "%s">' % (self.number, self.title)
class Bill(Resource):
    """A parliamentary bill; headline fields are filled in by the caller,
    the long description on demand via fetch_data()."""

    def __init__(self, parl):
        self.parl = parl
        self.date = None
        self.type = None
        self.title = None
        self.home_page = None

    def fetch_data(self):
        # Single API round-trip to retrieve the bill's description.
        payload = self.parl.get("bills/%s.json" % self.resource_id)["primaryTopic"]
        self.description = payload["description"]

    def __repr__(self):
        return '<Bill "%s" (%s)>' % (self.title, self.date)
class Member(Resource):
    # A member of either house. Detail fields are lazily fetched on first
    # access to an attribute that normal lookup cannot resolve (__getattr__).
    def __init__(self, parl, house, member_id):
        self.house = house
        self.id = member_id
        self.parl = parl
        # NOTE: pre-set to None so plain lookup succeeds for these two; the
        # lazy fetch therefore never fires for display_name/party themselves.
        self.display_name = None
        self.party = None
        self._data_fetched = False

    def _fetch_data(self):
        # Fetch this member's record; raises if the API returns no match.
        data = self.parl.get_members(id=self.id)
        mem = data.find("Member")
        if mem is None:
            raise ValueError("Unable to load data for member with id %s!" % self.id)
        self._populate_data(mem)

    def _populate_data(self, data):
        # `data` is an XML element (accessed via .get/.find/.text).
        self.dods_id = int(data.get("Dods_Id"))
        self.pims_id = int(data.get("Pims_Id"))
        self.display_name = data.find("DisplayAs").text
        self.party = self.parl.parties.from_name(data.find("Party").text)
        self.full_name = data.find("FullTitle").text
        self.date_of_birth = parse_date_element(data.find("DateOfBirth"))
        self.start_date = parse_date_element(data.find("HouseStartDate"))
        self.end_date = parse_date_element(data.find("HouseEndDate"))
        self.gender = data.find("Gender").text
        if self.house.name == "Commons":
            self.constituency = data.find("MemberFrom").text
        else:
            # The type of peer is in the MemberFrom field here..
            self.member_type = data.find("MemberFrom").text
        self._data_fetched = True

    def __repr__(self):
        return "<Member ({}) #{} {} ({})>".format(
            self.house.name, self.id, self.display_name, self.party
        )

    def __getattr__(self, name: str):
        # Runs only when normal lookup fails: fetch once, retry the lookup,
        # and turn a still-None value into AttributeError.
        if not self._data_fetched:
            self._fetch_data()
            res = getattr(self, name)
            if res is None:
                raise AttributeError()
            return res
        raise AttributeError()
class MemberList(list):
    """A list of Member objects with small aggregation helpers."""

    def by_party(self):
        """Return a plain dict mapping party -> number of members."""
        counts = {}
        for member in self:
            counts[member.party] = counts.get(member.party, 0) + 1
        return counts
| {
"repo_name": "russss/ukparliament",
"path": "ukparliament/resource.py",
"copies": "1",
"size": "6194",
"license": "mit",
"hash": -1596084794488172300,
"line_mean": 32.4810810811,
"line_max": 87,
"alpha_frac": 0.5561834033,
"autogenerated": false,
"ratio": 3.7178871548619448,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47740705581619447,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from ipaddr import IPAddress
from itertools import ifilter
def is_rangestmt(x):
    # Predicate used to pick the range statement out of a pool's contents.
    return isinstance(x, RangeStmt)
def join_p(xs, indent=1, prefix=''):
    """Stringify and concatenate xs, then re-emit each resulting line with
    `prefix` plus `indent` spaces prepended (and a trailing newline).
    Empty/falsy xs yields ''."""
    if not xs:
        return ''
    full_prefix = prefix + ' ' * indent
    text = "".join(str(item) for item in xs)
    out = []
    for line in text.splitlines():
        out.append(full_prefix + line + '\n')
    return "".join(out)
@total_ordering
class DHCPMixin(object):
    # Shared comparison and rendering behavior for dhcpd.conf objects.
    # Ordering is by class-level TYPE first, then by _sort_key, which each
    # subclass computes in set_sort_key() (must be called before sorting).
    side = ''

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        return self.TYPE < other.TYPE or (self.TYPE == other.TYPE and
                                          self._sort_key < other._sort_key)

    def __str__(self):
        s = ''
        if hasattr(self, 'contents') and self.contents:
            # NOTE(review): relies on Python 2's eager map() for the side
            # effect of computing sort keys; under Python 3 this lazy map
            # would never run. The ifilter import confirms a Python 2 file.
            map(lambda x: x.set_sort_key(), self.contents)
        if hasattr(self, 'comment') and self.comment:
            comment = ' # ' + self.comment
        else:
            comment = ''
        # Opening line, indented body, closing brace.
        s += self.side + self.firstline + ' {' + comment + '\n'
        s += join_p(sorted(self.contents), prefix=self.side)
        s += self.side + '}\n'
        if hasattr(self, 'related') and self.related:
            map(lambda x: x.set_sort_key(), self.related)
            s += join_p(sorted(self.related), indent=0)
            # they print their own side
        return s
class Statement(DHCPMixin):
    """A single configuration statement, rendered as `<statement>;`."""
    TYPE = 1

    def __init__(self, statement):
        self.statement = statement

    def set_sort_key(self):
        # Plain statements sort alphabetically by their text.
        self._sort_key = self.statement

    def __eq__(self, other):
        if not isinstance(other, Statement):
            return False
        return self.statement == other.statement

    def __hash__(self):
        return hash(self.statement)

    def __str__(self):
        pieces = [self.side, self.statement, ';\n']
        return ''.join(pieces)
class RangeStmt(Statement):
    """`range <start> <end>` — sorts ahead of all other statements (TYPE 0)."""
    TYPE = 0

    def __init__(self, start, end):
        self.start = start
        self.end = end
        self.statement = 'range {0} {1}'.format(start, end)

    def set_sort_key(self):
        # Sort numerically by the IP addresses, not their string forms.
        self._sort_key = (int(IPAddress(self.start)),
                          int(IPAddress(self.end)))

    def __eq__(self, other):
        if not isinstance(other, RangeStmt):
            return False
        return self.start == other.start and self.end == other.end
class Pool(DHCPMixin):
    """A `pool { ... }` block, identified by its first range statement."""
    TYPE = 2

    def __init__(self, contents=None):
        self.contents = set(contents or [])
        self.firstline = 'pool'
        # The pool's identity (and sort key) is its first range statement.
        first_range = next(ifilter(is_rangestmt, contents))
        self.start = first_range.start
        self.end = first_range.end

    def set_sort_key(self):
        self._sort_key = (int(IPAddress(self.start)),
                          int(IPAddress(self.end)))

    def __eq__(self, other):
        if not isinstance(other, Pool):
            return False
        return self.start == other.start and self.end == other.end

    def __hash__(self):
        return hash(self.start + self.end)
class Subnet(DHCPMixin):
    """A `subnet <addr> netmask <mask> { ... }` block."""
    TYPE = 3

    def __init__(self, netaddr, netmask, contents=None):
        self.netaddr = netaddr
        self.netmask = netmask
        self.contents = set(contents or [])
        self.firstline = 'subnet {0} netmask {1}'.format(self.netaddr,
                                                         self.netmask)

    def set_sort_key(self):
        # Numeric ordering by (network address, mask).
        self._sort_key = (int(IPAddress(self.netaddr)),
                          int(IPAddress(self.netmask)))

    def __eq__(self, other):
        if not isinstance(other, Subnet):
            return False
        return (self.netaddr == other.netaddr
                and self.netmask == other.netmask)

    def __hash__(self):
        return hash(self.netaddr + self.netmask)
class Subclass(DHCPMixin):
    """A `subclass "<class>" <match>` entry, with or without a body."""
    TYPE = 4

    def __init__(self, classname, match, contents=None):
        self.classname = classname
        self.match = match
        self.contents = set(contents or [])
        self.firstline = 'subclass "{0}" {1}'.format(self.classname,
                                                     self.match)

    def set_sort_key(self):
        self._sort_key = self.classname + self.match

    def __eq__(self, other):
        if not isinstance(other, Subclass):
            return False
        return (self.classname == other.classname
                and self.match == other.match)

    def __hash__(self):
        return hash(self.classname + self.match)

    def __str__(self):
        # Bodyless subclasses render as a single statement line.
        if not self.contents:
            return self.side + self.firstline + ';\n'
        return super(Subclass, self).__str__()
class Class(DHCPMixin):
    """A `class "<name>" { ... }` block; its subclasses live in `related`."""
    TYPE = 5

    def __init__(self, name, contents=None, related=None):
        self.name = name
        self.contents = set(contents or [])
        self.related = set(related or [])
        self.firstline = 'class "{0}"'.format(self.name)

    def set_sort_key(self):
        self._sort_key = self.name

    def __eq__(self, other):
        if not isinstance(other, Class):
            return False
        return self.name == other.name

    def __hash__(self):
        return hash(self.name)

    def add_subclass(self, match, contents):
        # Convenience: attach a subclass referencing this class by name.
        self.related.add(Subclass(self.name, match, contents))
class Group(DHCPMixin):
    """A `group { ... }` block; its name is emitted as a trailing comment."""
    TYPE = 6

    def __init__(self, name, contents=None):
        self.name = name
        self.contents = set(contents or [])
        self.firstline = 'group'
        self.comment = self.name

    def set_sort_key(self):
        self._sort_key = self.name

    def __eq__(self, other):
        if not isinstance(other, Group):
            return False
        return self.name == other.name

    def __hash__(self):
        return hash(self.name)
class Host(DHCPMixin):
    """A `host <name> { ... }` block."""
    TYPE = 7

    def __init__(self, name, contents=None):
        self.name = name
        self.contents = set(contents or [])
        self.firstline = 'host ' + self.name

    def set_sort_key(self):
        self._sort_key = self.name

    def __eq__(self, other):
        if not isinstance(other, Host):
            return False
        return self.name == other.name

    def __hash__(self):
        return hash(self.name)
class ConfigFile(DHCPMixin):
    """Top-level container; every parsed top-level object hangs off `related`."""

    def __init__(self, related=None):
        self.related = set(related or [])

    def add(self, obj):
        # Ignore falsy parse results.
        if obj:
            self.related.add(obj)

    def get_class(self, name):
        # First Class object with a matching name; raises StopIteration when
        # absent (same behavior as the original next() on an exhausted iterator).
        for candidate in self.related:
            if isinstance(candidate, Class) and candidate.name == name:
                return candidate
        raise StopIteration
| {
"repo_name": "zeeman/cyder",
"path": "cyder/management/commands/lib/dhcpd_compare2/dhcp_objects.py",
"copies": "2",
"size": "6248",
"license": "bsd-3-clause",
"hash": 9148551211715906000,
"line_mean": 26.6460176991,
"line_max": 76,
"alpha_frac": 0.5523367478,
"autogenerated": false,
"ratio": 3.8759305210918114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 226
} |
from functools import total_ordering
from iso4217 import Currency as ISO4217Currency
# Cache of interned Currency singletons, keyed by ISO-4217 code
# (None is a valid key for the "no currency" sentinel).
_ALL_CURRENCIES = {}
@total_ordering
class Currency(object):
    """A currency identifier, as defined by ISO-4217.

    Instances are interned in _ALL_CURRENCIES, so constructing the same
    code twice returns the same object.

    Parameters
    ----------
    code : str
        ISO-4217 code for the currency.

    Attributes
    ----------
    code : str
        ISO-4217 currency code for the currency, e.g., 'USD'.
    name : str
        Plain english name for the currency, e.g., 'US Dollar'.
    """
    def __new__(cls, code):
        try:
            return _ALL_CURRENCIES[code]
        except KeyError:
            # First construction of this code: resolve the display name.
            if code is None:
                name = "NO CURRENCY"
            else:
                try:
                    name = ISO4217Currency(code).currency_name
                except ValueError:
                    raise ValueError(
                        "{!r} is not a valid currency code.".format(code)
                    )
            obj = _ALL_CURRENCIES[code] = super(Currency, cls).__new__(cls)
            obj._code = code
            obj._name = name
            return obj

    @property
    def code(self):
        """ISO-4217 currency code for the currency.

        Returns
        -------
        code : str
        """
        return self._code

    @property
    def name(self):
        """Plain english name for the currency.

        Returns
        -------
        name : str
        """
        return self._name

    def __eq__(self, other):
        if type(self) != type(other):
            return NotImplemented
        return self.code == other.code

    def __hash__(self):
        return hash(self.code)

    def __lt__(self, other):
        # BUGFIX: previously assumed `other` had a .code attribute and
        # raised AttributeError for foreign types; now mirrors __eq__ by
        # returning NotImplemented so Python can try the reflected op.
        if type(self) != type(other):
            return NotImplemented
        return self.code < other.code

    def __repr__(self):
        return "{}({!r})".format(
            type(self).__name__,
            self.code
        )
| {
"repo_name": "quantopian/zipline",
"path": "zipline/currency.py",
"copies": "1",
"size": "1825",
"license": "apache-2.0",
"hash": 3223705074458690000,
"line_mean": 22.7012987013,
"line_max": 75,
"alpha_frac": 0.4942465753,
"autogenerated": false,
"ratio": 4.366028708133971,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5360275283433971,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from typing import Optional
@total_ordering
class Record:
    """Representation of the season record of a team. Ordering is
    provided by win percentage, with ties considered 0.5 win.

    Args:
        wins: Starting number of wins, default 0
        losses: Starting number of losses, default 0
        ties: Starting number of ties, default 0

    Attributes:
        wins: Wins thus far, should be in [0,16]
        losses: Losses thus far, should be in [0,16]
        ties: Ties thus far, should be in [0,16]
    """
    def __init__(self, wins: int=0, losses: int=0, ties: int=0):
        self.wins = wins
        self.losses = losses
        self.ties = ties
        self.Verify()

    def __lt__(self, other: 'Record') -> bool:
        """A record is considered less than another if it has a lower
        win percentage.
        """
        return self.WinPercent() < other.WinPercent()

    def __eq__(self, other: 'Record') -> bool:
        # BUGFIX: guard the type so comparing against a non-Record returns
        # NotImplemented instead of raising on a missing __dict__.
        if not isinstance(other, Record):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __str__(self):
        return f'{self.wins}-{self.losses}-{self.ties}'

    def __repr__(self):
        return f'Record(wins={self.wins}, losses={self.losses}, ties={self.ties})'

    @property
    def games(self) -> int:
        """Total number of games played thus far"""
        return self.wins + self.losses + self.ties

    def Standings(self) -> str:
        """Create a string representing the standings for fixed width output

        Returns:
            `"wins losses ties"` with each one having leading zeroes

        Examples:
            >>> r = Record(9, 6, 1)
            >>> print(r.Standings())
            09 06 01
        """
        return f'{self.wins:02d} {self.losses:02d} {self.ties:02d}'

    def Verify(self):
        """Checks if the current record is valid.

        Raises:
            ValueError: if wins, losses, ties, or their sum is less than 0 or greater than 16
        """
        if self.wins not in range(17):
            raise ValueError(f"Wins is not between 0 and 16, found {self.wins}")
        if self.losses not in range(17):
            raise ValueError(f"Losses is not between 0 and 16, found {self.losses}")
        if self.ties not in range(17):
            raise ValueError(f"Ties is not between 0 and 16, found {self.ties}")
        if self.wins + self.losses + self.ties not in range(17):
            raise ValueError(f"Total games is not between 0 and 16, found {self.wins + self.losses + self.ties}")

    def WinPercent(self) -> float:
        """Determines the win percentage for the current standings.

        Returns 0.0 when no games have been played yet (previously a fresh
        0-0-0 record raised ZeroDivisionError here).
        """
        if self.games == 0:
            return 0.0
        return (self.wins + 0.5 * self.ties) / self.games

    def AddWin(self):
        """Mutates the Record to add a win"""
        self.wins += 1

    def AddLoss(self):
        """Mutates the Record to add a loss"""
        self.losses += 1

    def AddTie(self):
        """Mutates the Record to add a tie"""
        self.ties += 1

    def IsUndefeated(self) -> bool:
        """Indicates whether the record corresponds to an undefeated (16-0-0) season"""
        return self.wins == 16 and self.losses == self.ties == 0
@total_ordering
class ELO:
    """Class used to represent an ELO standing. Requires a team name and a starting ELO.
    Optionally takes either wins, losses, and tries to build a `Record` or directly takes
    a `Record`. If both are provided, only the `record` is used. ELO objects are ordered
    based on the team's records; if the records compare equal then the ordering is based
    on the current ELO rankings.

    Args:
        name: Team name
        starting_elo: initial ELO ranking
        wins (optional): Wins used to construct a `Record`, default 0
        losses (optional): Losses used to construct a `Record`, default 0
        ties (optional): Ties used to construct a `Record`, default 0
        record (optional): Record to use, takes priority over `wins`, `losses`, `ties`

    Attributes:
        name (str): Team name
        elo (int): current ELO ranking
        record (Record): current team standings
    """
    def __init__(self, name: str, starting_elo: int,
                 wins: int=0, losses: int=0, ties: int=0,
                 record: Optional[Record]=None):
        # strip("*") drops asterisks from the team name — presumably
        # footnote markers in the source data; confirm against the ingest.
        self.name = name.strip("*")
        self.elo = starting_elo
        self.record = record or Record(wins, losses, ties)

    @property
    def wins(self) -> int:
        return self.record.wins

    @property
    def losses(self) -> int:
        return self.record.losses

    @property
    def ties(self) -> int:
        return self.record.ties

    def __lt__(self, other: 'ELO') -> bool:
        """Enable sorting by default from teams with highest to lowest win percentage.
        If teams have the same win percentage, sorting follows by ELO rank.
        """
        if self.record < other.record:
            return True
        if self.record > other.record:
            return False
        return self.elo < other.elo

    def __eq__(self, other: 'ELO'):
        # BUGFIX: return NotImplemented for non-ELO operands instead of
        # assuming `other` has a comparable __dict__.
        if not isinstance(other, ELO):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return f"ELO(name='{self.name}', starting_elo={self.elo}, record={self.record!r})"

    def __str__(self):
        return f"{self.name} ({self.record}) ELO: {self.elo}"

    def UpdateWin(self, points: float):
        """Updates the ELO and record for the current team assuming a win.

        Args:
            points: The number of ELO points the victory is worth, calculated elsewhere
        """
        self.elo += rounded_int(points)
        self.record.AddWin()

    def UpdateLoss(self, points: float):
        """Updates the ELO and record for the current team assuming a loss.

        Args:
            points: The number of ELO points the loss is worth, calculated elsewhere
        """
        self.elo -= rounded_int(points)
        self.record.AddLoss()

    def UpdateTies(self):
        """Updates the record for the current team assuming a tie. No ELO points are exchanged."""
        self.record.AddTie()

    def IsUndefeated(self) -> bool:
        return self.record.IsUndefeated()
def rounded_int(value: float) -> int:
    """Round to the nearest integer (Python's banker's rounding) and
    return it as an int.

    Args:
        value: arbitrary number, should be non-inf and non-NaN

    Returns:
        Nearest integer to input
    """
    nearest = round(value)
    return int(nearest)
def probability(elo_margin: int) -> float:
    """Win probability implied by a pre-game ELO difference.

    Uses the logistic formula 1 / (1 + 10^(-d/400)).

    Args:
        elo_margin: The pre-game ELO difference between the two teams,
            allowing for home team advantage or other modifiers

    Returns:
        Probability of a win

    Examples:
        >>> p = probability(75)
        >>> print("{:0.3f}".format(p))
        0.606
    """
    return 1.0 / (1.0 + 10.0 ** (-elo_margin / 400.0))
def probability_points(pt_margin: float) -> float:
    """Win probability implied by an expected point differential.

    One point of expected margin corresponds to 25 ELO points.

    Args:
        pt_margin: Expected margin of victory in NFL game points

    Returns:
        Probability of winning

    Examples:
        >>> p = probability_points(3.)
        >>> print("{:0.3f}".format(p))
        0.606
    """
    elo_margin = 25 * pt_margin
    return probability(elo_margin)
| {
"repo_name": "lbianch/nfl_elo",
"path": "elo.py",
"copies": "1",
"size": "7426",
"license": "mit",
"hash": -3045999061073095700,
"line_mean": 29.8132780083,
"line_max": 113,
"alpha_frac": 0.6019391328,
"autogenerated": false,
"ratio": 3.8636836628511966,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49656227956511967,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from util.config import BaseConfig, IncorrectFieldType, IncorrectFieldFormat
@total_ordering
class SymVer(BaseConfig):
    """Semantic version (major, minor, patch) config node, ordered by
    its version tuple and serialized as 'vN.N.N'."""

    def __init__(self, major=0, minor=0, patch=0, **kwargs):
        super().__init__(**kwargs)
        if isinstance(major, int) and isinstance(minor, int) and isinstance(patch, int):
            self.version = (major, minor, patch)
        else:
            raise AttributeError('SymVer should be constructed from 3 int args')

    def __repr__(self):
        return 'v{}.{}.{}'.format(*self.version)

    def __eq__(self, other):
        # BUGFIX: @total_ordering derives __le__/__ge__ from __lt__ AND
        # __eq__; without a value-based __eq__, two equal versions compared
        # unequal (identity only) and the derived operators were inconsistent.
        if not isinstance(other, SymVer):
            return NotImplemented
        return self.version == other.version

    def __hash__(self):
        # Keep instances hashable now that __eq__ is defined. NOTE(review):
        # from_json mutates self.version, so avoid hashing before parsing.
        return hash(self.version)

    def __lt__(self, other):
        return self.version < other.version

    def to_json(self):
        return repr(self)

    def from_json(self, json_doc: str, skip_unknown_fields=False):
        """Parse 'vN.N.N' (leading 'v' optional) into self.version.

        Raises:
            IncorrectFieldType: when json_doc is not a str.
            IncorrectFieldFormat: when it does not match vN.N.N.
        """
        if not isinstance(json_doc, str):
            raise IncorrectFieldType(
                '{}: SymVer can be constructed only from str - {} passed.'.format(self.path_to_node,
                                                                                  json_doc.__class__.__name__)
            )
        parts = json_doc.lstrip('v').split('.')
        if len(parts) != 3 or not all(x.isdigit() for x in parts):
            raise IncorrectFieldFormat(
                '{}: SymVer field have vN.N.N format - got {}'.format(self.path_to_node, json_doc)
            )
        self.version = tuple(int(x) for x in parts)
| {
"repo_name": "LuckyGeck/dedalus",
"path": "util/symver.py",
"copies": "1",
"size": "1379",
"license": "mit",
"hash": 6322217794603593000,
"line_mean": 39.5588235294,
"line_max": 110,
"alpha_frac": 0.5656272661,
"autogenerated": false,
"ratio": 4.079881656804734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5145508922904733,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
import itertools as it
# Consequence terms treated as exonic; used by the Effect subclasses below
# to compute their `exonic` flag.
EXONIC_IMPACTS = set(["stop_gained",
                      "stop_lost",
                      "frameshift_variant",
                      "initiator_codon_variant",
                      "inframe_deletion",
                      "inframe_insertion",
                      "missense_variant",
                      "incomplete_terminal_codon_variant",
                      "stop_retained_variant",
                      "synonymous_variant",
                      "coding_sequence_variant",
                      "5_prime_UTR_variant",
                      "3_prime_UTR_variant",
                      "transcript_ablation",
                      "transcript_amplification",
                      "feature_elongation",
                      "feature_truncation"])
# Base severity tier for each consequence term. The snpEff/custom/VEP tables
# below are layered on top via the .update() calls at the end of this section.
IMPACT_SEVERITY = dict([
    ('non_coding_exon_variant', 'LOW'),
    ('incomplete_terminal_codon_variant', 'LOW'),
    ('stop_retained_variant', 'LOW'),
    ('synonymous_variant', 'LOW'),
    ('coding_sequence_variant', 'LOW'),
    ('5_prime_UTR_variant', 'LOW'),
    ('3_prime_UTR_variant', 'LOW'),
    ('intron_variant', 'LOW'),
    ('NMD_transcript_variant', 'LOW'),
    ('nc_transcript_variant', 'LOW'),
    ('upstream_gene_variant', 'LOW'),
    ('downstream_gene_variant', 'LOW'),
    ('intergenic_variant', 'LOW'),
    ('transcript_ablation', 'LOW'),
    ('transcript_amplification', 'LOW'),
    ('feature_elongation', 'LOW'),
    ('feature_truncation', 'LOW'),
    ('inframe_deletion', 'MED'),
    ('inframe_insertion', 'MED'),
    ('missense_variant', 'MED'),
    ('splice_region_variant', 'MED'),
    ('mature_miRNA_variant', 'MED'),
    ('regulatory_region_variant', 'MED'),
    ('TF_binding_site_variant', 'MED'),
    ('regulatory_region_ablation', 'MED'),
    ('regulatory_region_amplification', 'MED'),
    ('TFBS_ablation', 'MED'),
    ('TFBS_amplification', 'MED'),
    ('splice_acceptor_variant', 'HIGH'),
    ('splice_donor_variant', 'HIGH'),
    ('stop_gained', 'HIGH'),
    ('stop_lost', 'HIGH'),
    ('frameshift_variant', 'HIGH'),
    ('initiator_codon_variant', 'HIGH'),
    ])
# these are taken from the snpeff manual.
# with MODIFIER => LOW and MEDIUM => MED
IMPACT_SEVERITY_SNPEFF = dict([
    ('chromosome_number_variation', 'HIGH'),
    ('exon_loss_variant', 'HIGH'),
    ('rare_amino_acid_variant', 'HIGH'),
    ('start_lost', 'HIGH'),
    ('5_prime_UTR_premature_start_codon_gain_variant', 'LOW'),
    ('conserved_intergenic_variant', 'LOW'),
    ('conserved_intron_variant', 'LOW'),
    ('exon_variant', 'LOW'),
    ('gene_variant', 'LOW'),
    ('intergenic_region', 'LOW'),
    ('intragenic_variant', 'LOW'),
    ('miRNA', 'LOW'),
    ('non_coding_transcript_exon_variant', 'LOW'),
    ('non_coding_transcript_variant', 'LOW'),
    ('start_retained', 'LOW'),
    ('transcript_variant', 'LOW'),
    ('3_prime_UTR_truncation+exon_loss', 'MED'),
    ('5_prime_UTR_truncation+exon_loss_variant', 'MED'),
    ('disruptive_inframe_deletion', 'MED'),
    ('disruptive_inframe_insertion', 'MED')
    ])
# http://uswest.ensembl.org/info/genome/variation/predicted_data.html#consequences
IMPACT_SEVERITY_VEP = dict([
    ('transcript_ablation', 'HIGH'),
    ('splice_acceptor_variant', 'HIGH'),
    ('splice_donor_variant', 'HIGH'),
    ('stop_gained', 'HIGH'),
    ('frameshift_variant', 'HIGH'),
    ('stop_lost', 'HIGH'),
    ('start_lost', 'HIGH'),
    ('transcript_amplification', 'HIGH'),
    ('inframe_insertion', 'MED'),
    ('inframe_deletion', 'MED'),
    ('missense_variant', 'MED'),
    ('protein_altering_variant', 'MED'),
    ('regulatory_region_ablation', 'MED'),
    ('splice_region_variant', 'LOW'),
    ('incomplete_terminal_codon_variant', 'LOW'),
    ('stop_retained_variant', 'LOW'),
    ('synonymous_variant', 'LOW'),
    ('coding_sequence_variant', 'LOW'),
    ('mature_miRNA_variant', 'LOW'),
    ('5_prime_UTR_variant', 'LOW'),
    ('3_prime_UTR_variant', 'LOW'),
    ('non_coding_transcript_exon_variant', 'LOW'),
    ('intron_variant', 'LOW'),
    ('NMD_transcript_variant', 'LOW'),
    ('non_coding_transcript_variant', 'LOW'),
    ('upstream_gene_variant', 'LOW'),
    ('downstream_gene_variant', 'LOW'),
    # NOTE(review): 'MODERATE' is not a key of the {'HIGH','MED','LOW'}
    # lookup used by Effect.severity — a TFBS_ablation consequence would
    # raise KeyError there; probably intended to be 'MED'.
    ('TFBS_ablation', 'MODERATE'),
    ('TFBS_amplification', 'LOW'),
    ('TF_binding_site_variant', 'LOW'),
    ('regulatory_region_amplification', 'LOW'),
    ('feature_elongation', 'LOW'),
    ('regulatory_region_variant', 'LOW'),
    ('feature_truncation', 'LOW'),
    ('intergenic_variant', 'LOW'),
    ])
# I decided these myself.
IMPACT_SEVERITY_CUSTOM = dict([
    ('sequence_feature', 'LOW'),
    ('transcript', 'LOW'), # ? snpEff
    # occurs with 'exon_loss' in snpEff
    ('3_prime_UTR_truncation', 'MED'),
    ('3_prime_UTR_truncation+exon_loss', 'MED'),
    ('3_prime_UTR_truncation+exon_loss_variant', 'MED'),
    ('exon_loss', 'MED'),
    ('5_prime_UTR_truncation', 'MED'),
    ('5_prime_UTR_truncation+exon_loss_variant', 'MED'),
    ('non_canonical_start_codon', 'LOW'),
    ('initiator_codon_variant', 'LOW'),
    ])
# Later updates win: snpEff, then custom, then VEP override the base table
# (e.g. 'initiator_codon_variant' ends LOW via the custom table, and
# 'transcript_ablation' ends HIGH via the VEP table).
IMPACT_SEVERITY.update(IMPACT_SEVERITY_SNPEFF)
IMPACT_SEVERITY.update(IMPACT_SEVERITY_CUSTOM)
IMPACT_SEVERITY.update(IMPACT_SEVERITY_VEP)
@total_ordering
class Effect(object):
    # Abstract base for one annotated consequence of a variant. Subclasses
    # (SnpEff, VEP) parse a single annotation string and expose a common set
    # of properties; the ordering below ranks effects so the "top" one can
    # be selected.
    def __init__(self, effect_dict):
        # or maybe arg should be a dict for Effect()
        raise NotImplementedError

    def __le__(self, other):
        # Ranking: non-pseudogene beats pseudogene, coding beats non-coding,
        # then numeric severity, then SIFT/PolyPhen values.
        if self.is_pseudogene and not other.is_pseudogene:
            return True
        elif other.is_pseudogene and not self.is_pseudogene:
            return False
        if self.coding and not other.coding:
            return False
        elif other.coding and not self.coding:
            return True
        if self.severity != other.severity:
            return self.severity <= other.severity
        if self.sift_value < other.sift_value:
            return True
        if self.polyphen_value < other.polyphen_value:
            return True
        # NOTE(review): unconditionally True here means equal-tier effects
        # compare <= in both directions (a weak, tie-heavy ordering).
        return True
        #raise NotImplementedError
        # TODO: look at transcript length?

    @classmethod
    def top_severity(cls, effects):
        # Accepts raw annotation strings or Effect instances. Returns the
        # single most severe effect, or a list of effects tied for the top.
        for i, e in enumerate(effects):
            if isinstance(e, basestring):  # basestring: Python 2 source
                effects[i] = cls(e)
        if len(effects) == 1:
            return effects[0]
        effects = sorted(effects)
        if effects[-1] > effects[-2]:
            return effects[-1]
        ret = [effects[-1], effects[-2]]
        for i in range(-3, -(len(effects) - 1), -1):
            if effects[-1] > effects[i]: break
            ret.append(effects[i])
        return ret

    def __eq__(self, other):
        if not isinstance(other, Effect): return False
        return self.effect_string == other.effect_string

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return "%s(%s-%s, %s)" % (self.__class__.__name__, self.gene,
                                  self.consequence, self.impact_severity)

    @property
    def gene(self):
        raise NotImplementedError

    @property
    def transcript(self):
        raise NotImplementedError

    @property
    def exonic(self):
        raise NotImplementedError

    @property
    def coding(self):
        raise NotImplementedError
        return True  # unreachable

    @property
    def lof(self):
        # Loss-of-function: highest severity tier on a protein-coding biotype.
        return self.impact_severity == "HIGH" and self.biotype == "protein_coding"
        raise NotImplementedError  # unreachable
        return True  # unreachable

    @property
    def aa_change(self):
        raise NotImplementedError

    @property
    def codon_change(self):
        raise NotImplementedError

    @property
    def severity(self, lookup={'HIGH': 3, 'MED': 2, 'LOW': 1}, sev=IMPACT_SEVERITY):
        # higher is more severe. used for ordering.
        # NOTE(review): the `sev` default is never used — the body reads the
        # module-level IMPACT_SEVERITY directly.
        return max(lookup[IMPACT_SEVERITY[csq]] for csq in self.consequences)

    @property
    def impact_severity(self):
        # Map the numeric severity back to its label ('xxx' pads index 0).
        return ['xxx', 'LOW', 'MED', 'HIGH'][self.severity]

    @property
    def consequence(self):
        raise NotImplementedError

    @property
    def biotype(self):
        raise NotImplementedError

    @property
    def is_pseudogene(self): #bool
        return 'pseudogene' in self.biotype
class SnpEff(Effect):
    # Effect parsed from one snpEff ANN= annotation (pipe-separated fields).
    __slots__ = ('effects', 'effect_string')
    # Column names of the ANN format, in order.
    keys = [x.strip() for x in 'Allele | Annotation | Annotation_Impact | Gene_Name | Gene_ID | Feature_Type | Feature_ID | Transcript_BioType | Rank | HGVS.c | HGVS.p | cDNA.pos / cDNA.length | CDS.pos / CDS.length | AA.pos / AA.length | Distance | ERRORS / WARNINGS / INFO'.split("|")]

    def __init__(self, effect_string):
        # Expects a single annotation: no comma-joined list, and not still
        # carrying a 'XXX=' prefix (index 3 would be '=' for 'ANN=...').
        assert not "," in effect_string
        assert not "=" == effect_string[3]
        self.effect_string = effect_string
        # it.izip: Python 2 source (see also basestring below).
        self.effects = dict(it.izip(self.keys, (x.strip() for x in effect_string.split("|", len(self.keys)))))

    @property
    def gene(self):
        return self.effects['Gene_Name'] or None

    @property
    def transcript(self):
        return self.effects['Feature_ID'] or None

    @property
    def exon(self):
        return self.effects['Rank']

    @property
    def consequence(self):
        # A single term as str, or a list when several are '&'-joined.
        if '&' in self.effects['Annotation']:
            return self.effects['Annotation'].split('&')
        return self.effects['Annotation']

    @property
    def consequences(self):
        # Always a list, even for a single term.
        return self.effects['Annotation'].split('&')

    @property
    def biotype(self):
        return self.effects['Transcript_BioType']

    @property
    def alt(self):
        return self.effects['Allele']

    @property
    def coding(self):
        # TODO: check start_gained and utr
        return self.exonic and not "utr" in self.consequence and not "start_gained" in self.consequence

    @property
    def exonic(self):
        csqs = self.consequence
        if isinstance(csqs, basestring):
            csqs = [csqs]
        return any(csq in EXONIC_IMPACTS for csq in csqs) and self.effects['Transcript_BioType'] == 'protein_coding'

    # not defined in ANN field.
    aa_change = None
    sift = None
    sift_value = None
    sift_class = None
    polyphen = None
    polyphen_value = None
    polyphen_class = None
class VEP(Effect):
    """Effect parsed from one alternative of a VEP ``CSQ=`` VCF annotation.

    The CSQ field order is configurable per VCF; pass ``keys`` (taken from
    the VCF header's CSQ description) when it differs from the default.
    """
    keys = "Consequence|Codons|Amino_acids|Gene|SYMBOL|Feature|EXON|PolyPhen|SIFT|Protein_position|BIOTYPE".split("|")
    def __init__(self, effect_string, keys=None):
        """:param effect_string: a single (comma-free, '='-free) CSQ entry."""
        assert not "," in effect_string
        assert not "=" in effect_string
        # (a duplicated `self.effect_string = effect_string` assignment was removed)
        self.effect_string = effect_string
        if keys is not None: self.keys = keys
        self.effects = dict(it.izip(self.keys, (x.strip() for x in effect_string.split("|"))))
    @property
    def gene(self):
        """Gene symbol, falling back to the gene id."""
        return self.effects['SYMBOL'] or self.effects['Gene']
    @property
    def transcript(self):
        return self.effects['Feature']
    @property
    def exon(self):
        return self.effects['EXON']
    @property
    def consequence(self):
        """Consequence term; a list when several terms are joined with '&'."""
        if '&' in self.effects['Consequence']:
            return self.effects['Consequence'].split('&')
        return self.effects['Consequence']
    @property
    def consequences(self):
        """Always a list of consequence terms."""
        return self.effects['Consequence'].split('&')
    @property
    def biotype(self):
        return self.effects['BIOTYPE']
    @property
    def alt(self):
        # NOTE(review): 'ALLELE' is not in the default keys, so this is None
        # unless custom keys supply it -- confirm against the VCF header.
        return self.effects.get('ALLELE')
    @property
    def coding(self):
        # what about start/stop_gained?
        return self.exonic and any(csq[1:] != "_prime_UTR_variant" for csq in self.consequences)
    @property
    def exonic(self):
        # BUG FIX: @property was missing here, so `self.exonic` evaluated to
        # the (always truthy) bound method and silently broke `coding`.
        return any(csq in EXONIC_IMPACTS for csq in self.consequences) and self.effects['BIOTYPE'] == 'protein_coding'
    @property
    def sift(self):
        """Raw SIFT annotation, e.g. 'deleterious(0.01)'."""
        return self.effects['SIFT']
    @property
    def sift_value(self):
        """Numeric part of the SIFT annotation, or None when absent."""
        try:
            return float(self.effects['SIFT'].split("(")[1][:-1])
        except IndexError:
            return None
    @property
    def sift_class(self):
        """Textual part of the SIFT annotation, or None when absent."""
        try:
            return self.effects['SIFT'].split("(")[0]
        except IndexError:
            return None
    @property
    def polyphen(self):
        """Raw PolyPhen annotation, e.g. 'benign(0.1)'."""
        return self.effects['PolyPhen']
    @property
    def polyphen_value(self):
        """Numeric part of the PolyPhen annotation, or None when absent."""
        try:
            return float(self.effects['PolyPhen'].split('(')[1][:-1])
        except IndexError:
            return None
    @property
    def polyphen_class(self):
        """Textual part of the PolyPhen annotation, or None when absent."""
        try:
            return self.effects['PolyPhen'].split('(')[0]
        except (KeyError, IndexError):
            # was a bare `except:`; narrowed to the failures seen in practice
            return None
    @property
    def aa_change(self):
        return self.effects['Amino_acids']
if __name__ == "__main__":
    # Ad-hoc demonstration (Python 2 print statements): parse example SnpEff
    # ANN entries and show the derived fields.
    s = SnpEff("A|stop_gained|HIGH|C1orf170|ENSG00000187642|transcript|ENST00000433179|protein_coding|3/5|c.262C>T|p.Arg88*|262/3064|262/2091|88/696||")
    print s.effects
    print s.gene, s.transcript, s.consequence, s.is_pseudogene
    s = SnpEff("G|splice_donor_variant&intron_variant|HIGH|WASH7P|ENSG00000227232|transcript|ENST00000423562|unprocessed_pseudogene|6/9|n.822+2T>C||||||")
    print s.is_pseudogene
    s = SnpEff("G|missense_variant|MODERATE|OR4F5|ENSG00000186092|transcript|ENST00000335137|protein_coding|1/1|c.338T>G|p.Phe113Cys|338/918|338/918|113/305||")
    print s.coding, s.consequence, s.aa_change
| {
"repo_name": "seandavi/effects",
"path": "effects/effect.py",
"copies": "1",
"size": "13090",
"license": "mit",
"hash": 2113110052866051000,
"line_mean": 29.8,
"line_max": 287,
"alpha_frac": 0.5950343774,
"autogenerated": false,
"ratio": 3.443830570902394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9531221022760845,
"avg_score": 0.001528785108309674,
"num_lines": 425
} |
from functools import total_ordering
import json
import warnings
from ._util import cheap_repr, for_json
@total_ordering
class CardSet:
    """A single card set/expansion.

    Any extra keyword arguments are attached verbatim as attributes.

    :ivar name: set name
    :ivar release_date: release date, or None when unknown
    :ivar fetch: whether this set should be fetched
    :ivar abbreviations: mapping of source name -> abbreviation
    """
    def __init__(self, name, release_date=None, fetch=False, abbreviations=None,
                 **data):
        self.name = name
        self.release_date = release_date
        self.fetch = fetch
        self.abbreviations = abbreviations or {}
        for k, v in data.items():
            setattr(self, k, v)

    def __str__(self):
        return self.name

    def __repr__(self):
        return cheap_repr(self)

    def __eq__(self, other):
        return type(self) is type(other) and vars(self) == vars(other)

    def __le__(self, other):
        # Sets without release dates are sorted after those with dates.
        if type(self) is type(other):
            selfdate = self.release_date
            otherdate = other.release_date
            if (selfdate is None) ^ (otherdate is None):
                return otherdate is None
            elif selfdate != otherdate:
                return selfdate < otherdate
            elif self.name != other.name:
                return self.name < other.name
            else:
                # Tie-break on the remaining attributes.  BUG FIX: dicts are
                # not orderable in Python 3 (`vars(self) <= vars(other)`
                # raised TypeError); use a deterministic textual fallback.
                self_vars, other_vars = vars(self), vars(other)
                if self_vars == other_vars:
                    return True
                return sorted(map(repr, self_vars.items())) <= sorted(map(repr, other_vars.items()))
        else:
            return NotImplemented

    def for_json(self):
        return for_json(vars(self), trim=True)
class CardSetDB:
    """Database of CardSet objects loaded from a JSON data file.

    Provides lookup by set name (``byName``) and by Gatherer abbreviation
    (``byGatherer``); duplicate names/abbreviations are skipped with a
    warning.
    """
    DEFAULT_DATAFILE = 'data/sets.json'
    def __init__(self, infile=None):
        """:param infile: open file of JSON set data; defaults to DEFAULT_DATAFILE.
        Must expose a ``.name`` attribute (used for ``sourcefile`` and warnings)."""
        if infile is None:
            infile = open(self.DEFAULT_DATAFILE)
        with infile:
            self.sets = [CardSet(**d) for d in json.load(infile)]
        self.sourcefile = infile.name
        self.byName = {}
        self.byGatherer = {}
        for cs in self.sets:
            if cs.name is None:
                warnings.warn('%s: set with unset name' % (infile.name,))
            elif cs.name in self.byName:
                warnings.warn('%s: name %r used for more than one set;'
                              ' subsequent appearance ignored'
                              % (infile.name, cs.name))
            else:
                self.byName[cs.name] = cs
            # Index by the set's Gatherer abbreviation, when present.
            gath = cs.abbreviations.get("Gatherer")
            if gath is not None:
                if gath in self.byGatherer:
                    warnings.warn('%s: Gatherer abbreviation %r already used'
                                  ' for set %r; subsequent use for set %r'
                                  ' ignored'
                                  % (infile.name, gath,
                                     self.byGatherer[gath].name, cs.name))
                else:
                    self.byGatherer[gath] = cs
    def toFetch(self):
        """Iterator (lazy filter object) over the sets flagged for fetching."""
        return filter(lambda s: s.fetch, self.sets)
    def __len__(self):
        return len(self.sets)
    def __iter__(self):
        return iter(self.sets)
    def __repr__(self):
        return cheap_repr(self)
    def for_json(self):
        return for_json(vars(self), trim=True)
| {
"repo_name": "jwodder/envec",
"path": "envec/cardset.py",
"copies": "1",
"size": "2967",
"license": "mit",
"hash": -2096027354381589800,
"line_mean": 31.9666666667,
"line_max": 80,
"alpha_frac": 0.5214020897,
"autogenerated": false,
"ratio": 4.086776859504132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019470464377871785,
"num_lines": 90
} |
from functools import total_ordering
import re
from .color import Color
from ._util import cheap_repr, split_mana, for_json
@total_ordering
class Content:
    """One face/half of a card: name, cost, type line, text and statistics.

    Note: defining __eq__ without __hash__ makes instances unhashable in
    Python 3.
    """
    def __init__(self, name, types, cost=None, supertypes=(), subtypes=(),
                 text=None, power=None, toughness=None, loyalty=None,
                 hand=None, life=None, color_indicator=None):
        self.name = name                        # string
        self.types = tuple(types)               # tuple of strings
        self.cost = cost                        # string or None
        self.supertypes = tuple(supertypes)     # tuple of strings
        self.subtypes = tuple(subtypes)         # tuple of strings
        self.text = text                        # string or None
        self.power = power                      # string or None
        self.toughness = toughness              # string or None
        self.loyalty = loyalty                  # string or None
        self.hand = hand                        # string or None
        self.life = life                        # string or None
        self.color_indicator = color_indicator  # Color or None

    @property
    def color(self):
        """The card's color, honoring devoid/Ghostfire and color indicators."""
        if self.name == 'Ghostfire' or \
                (self.text and 'devoid' in self.baseText.lower()):
            return Color.COLORLESS
        if self.color_indicator is not None:
            return self.color_indicator
        else:
            return Color.fromString(self.cost or '')

    @property
    def colorID(self):
        """Color identity: color plus mana symbols in rules text and basic land types."""
        # Since Innistrad, cards that formerly said "[This card] is [color]"
        # now have color indicators instead, so there's no need to check for
        # such strings.
        colors = self.color
        txt = self.baseText or ''
        # Reminder text is supposed to be ignored for the purposes of
        # establishing color identity, though (as of Dark Ascension) Charmed
        # Pendant and Trinisphere appear to be the only cards for which this
        # makes a difference.
        for c in Color.WUBRG:
            if re.search(r'\{(./)?' + c.name + r'(/.)?\}', txt):
                colors |= c
        if self.isType('Land'):
            # Basic land types aren't _de jure_ part of color identity, but
            # rule 903.5d makes them a part _de facto_ anyway.
            if self.isSubtype('Plains'):
                colors |= Color.WHITE
            if self.isSubtype('Island'):
                colors |= Color.BLUE
            if self.isSubtype('Swamp'):
                colors |= Color.BLACK
            if self.isSubtype('Mountain'):
                colors |= Color.RED
            if self.isSubtype('Forest'):
                colors |= Color.GREEN
        return colors

    @property
    def cmc(self):
        """Converted mana cost; 0 when there is no cost."""
        if not self.cost:
            return 0
        cost = 0
        for c in split_mana(self.cost)[0]:
            m = re.search(r'(\d+)', c)
            if m:
                cost += int(m.group(1))
            elif any(ch in c for ch in 'WUBRGSwubrgs'):
                # This weeds out {X}, {Y}, etc.
                cost += 1
        return cost

    @property
    def type(self):
        """The full type line, e.g. 'Legendary Creature — Goblin'."""
        return ' '.join(self.supertypes + self.types +
                        (('—',) + self.subtypes if self.subtypes else ()))

    def isSupertype(self, type_):
        return type_ in self.supertypes

    def isType(self, type_):
        return type_ in self.types

    def isSubtype(self, type_):
        return type_ in self.subtypes

    def hasType(self, type_):
        """True when type_ appears anywhere on the type line."""
        return self.isType(type_) or self.isSubtype(type_) \
            or self.isSupertype(type_)

    def isNontraditional(self):
        """True for Vanguard/Plane/Phenomenon/Scheme cards."""
        return self.isType('Vanguard') or self.isType('Plane') \
            or self.isType('Phenomenon') or self.isType('Scheme')

    @property
    def PT(self):
        """'power/toughness' string, or None when there is no power."""
        if self.power is not None:
            return '%s/%s' % (self.power, self.toughness)
        else:
            return None

    @property
    def HandLife(self):
        """'hand/life' (Vanguard) string, or None when there is no hand value."""
        if self.hand is not None:
            return '%s/%s' % (self.hand, self.life)
        else:
            return None

    def __eq__(self, other):
        return type(self) is type(other) and vars(self) == vars(other)

    def __le__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        # BUG FIX: dicts are not orderable in Python 3, so the old
        # `vars(self) <= vars(other)` raised TypeError; use a deterministic
        # textual fallback instead.
        self_vars, other_vars = vars(self), vars(other)
        if self_vars == other_vars:
            return True
        return sorted(map(repr, self_vars.items())) <= sorted(map(repr, other_vars.items()))

    @classmethod
    def fromDict(cls, obj):
        """Build a Content from a dict of fields, or pass through an instance."""
        if isinstance(obj, cls):
            # NOTE(review): Content defines no copy() method; this branch
            # would raise AttributeError -- confirm intended behaviour.
            return obj.copy()
        else:
            return cls(**obj)

    @property
    def baseText(self):  # Returns rules text without reminder text
        if self.text is None:
            return None
        txt = re.sub(r'\([^()]+\)', '', self.text)
        # It is assumed that text is reminder text if & only if it's enclosed
        # in parentheses.
        return '\n'.join(filter(None, map(str.strip, txt.splitlines())))

    def __repr__(self):
        return cheap_repr(self)

    def for_json(self):
        return for_json(vars(self), trim=True)

    def devotion(self, to_color):
        """Number of mana symbols in the cost that share a color with to_color."""
        if not self.cost:
            return 0
        devot = 0
        for c in split_mana(self.cost)[0]:
            c = Color.fromString(c)
            if any(to_color & c):
                devot += 1
        return devot
| {
"repo_name": "jwodder/envec",
"path": "envec/content.py",
"copies": "1",
"size": "5306",
"license": "mit",
"hash": 4786414736169528000,
"line_mean": 33.8947368421,
"line_max": 77,
"alpha_frac": 0.5288461538,
"autogenerated": false,
"ratio": 3.9760119940029983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5004858147802997,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
import string
import math
# Keys a layout may place: the letters plus a block of punctuation keys.
symbols = string.ascii_uppercase + "[]\;',./"
# Bits of the genome used to encode a key's x / y coordinate.
x_digits = 4
y_digits = 2
# Keyboard grid dimensions (presumably rows x columns -- confirm).
kb_height = 3
kb_width = 11
# Characters that cost nothing to type (take_step returns 0 for these).
costless = " \t\n" + string.digits + "`~!@#$%^&*()_+-="
# Flat penalty when a finger would have to travel more than 3 units.
rule_3_penalty = 20
# Cost multiplier for keys that need a modifier (shifted variants).
mod_factor = 2
def equivalent_keys(k1, k2):
    """Return True when k1 and k2 are the unshifted/shifted faces of one key."""
    multi = "[];',./|"
    equiv = "{}:\"<>?\\"
    # The two strings are parallel: position i of `multi` is the unshifted
    # face of position i of `equiv`.  Check both orderings of the arguments.
    for a, b in ((k1, k2), (k2, k1)):
        if a in multi and b in equiv:
            return multi.index(a) == equiv.index(b)
    return False
def ch_swap(s1, s2, index):
    """Return (s1, s2) with the characters at `index` exchanged."""
    head1, ch1, tail1 = s1[:index], s1[index], s1[index + 1:]
    head2, ch2, tail2 = s2[:index], s2[index], s2[index + 1:]
    return (head1 + ch2 + tail1, head2 + ch1 + tail2)
def bin2(n, num_bits):
    """Binary string of n, left-padded with zeros to at least num_bits digits."""
    digits = bin(n).replace("0b", '')
    return digits.rjust(num_bits, '0')
def bin_to_int(bs):
    """Integer value of the binary digit string bs (most significant first)."""
    value = 0
    for digit in bs:
        value = value * 2 + int(digit)
    return value
@total_ordering
class KBCoord(object):
    """A keyboard grid coordinate: column `x`, row `y`."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __le__(self, other):
        # NOTE(review): despite its name this computes a *strict* '<' on the
        # row-major rank y*10 + x (so a <= a is False), and with 4-bit x
        # values (up to 15) distinct coords can alias to one rank -- confirm.
        return (self.y * 10 + self.x) < (other.y * 10 + other.x)
        #return (1 if (p1.y * 10 + p1.x) > (p2.y * 10 + p2.x) else -1)
    def __hash__(self):
        # Deterministic hash derived from x and y; collisions are tolerable.
        return hash(float(self.x * self.y) / float(self.x + self.y + 1))
    def __repr__(self):
        return "({0}, {1})".format(self.x, self.y)
    def __str__(self):
        return self.__repr__()
    def __eq__(self, other):
        return self.x == other.x and self.y == other.y
    def distance(self, other):
        """Manhattan distance between the two coordinates."""
        return abs(self.x - other.x) + abs(self.y - other.y)
class Finger(object):
    """One typing finger: its home coordinate and its current position."""

    # Home-row fingers per hand.
    num_typing_fingers = 4

    def __init__(self, homeCoord, rh):
        """homeCoord: resting KBCoord; rh: True for a right-hand finger."""
        self.home = homeCoord
        self.position = homeCoord
        self.right_hand = rh
        self.left_hand = not rh

    def __eq__(self, other):
        """Fingers are identified by their home position."""
        return self.home == other.home

    @staticmethod
    def home_position(right_hand, index):
        """Home-row coordinate of finger `index` on the given hand."""
        column = index + 6 if right_hand else index
        return KBCoord(column, 1)
class TSM(object):
    """Typing-state machine: scores text against a layout decoded from a genome."""
    def __init__(self):
        self.keyboard = {}
        # Four home-row fingers per hand, left then right.
        self.fingers = [Finger(Finger.home_position(False, i), False)
                        for i in range(Finger.num_typing_fingers)]
        self.fingers += [Finger(Finger.home_position(True, i), True)
                         for i in range(Finger.num_typing_fingers)]
        self.gene_seq = None
    def initialize(self, bitstr):
        """Decode the genome bit-string into key coordinates; returns self."""
        self.gene_seq = bitstr
        for symbol in symbols:
            # Each key consumes x_digits + y_digits bits of the genome.
            x = bin_to_int(bitstr[:x_digits])
            y = bin_to_int(bitstr[x_digits : x_digits + y_digits])
            bitstr = bitstr[x_digits + y_digits:]
            self.keyboard[symbol] = KBCoord(x, y)
        return self
    def find_key(self, key):
        """Locate `key`, returning (coordinate, needs_modifier).

        NOTE(review): returns None for unknown keys, which take_step unpacks
        unchecked -- confirm inputs are always mapped.  Also, the slice below
        starts at len(symbols) - 26 == 8, scanning 'I'..'Z' plus the
        punctuation; probably only the punctuation tail was intended -- confirm.
        """
        if key in self.keyboard:
            return (self.keyboard[key], False)
        for symbol in symbols[len(symbols) - len(string.ascii_uppercase):]:
            if equivalent_keys(symbol, key):
                return (self.keyboard[symbol], True)
        return None
    def find_nearest_finger(self, coord):
        """The finger currently closest (Manhattan distance) to coord."""
        distances = [coord.distance(finger.position) for finger in self.fingers]
        return self.fingers[distances.index(min(distances))]
    def take_step(self, next_key):
        """Cost of typing one character; moves the chosen finger, homes the rest."""
        next_key = next_key.upper()
        if next_key in costless:
            return 0
        coord, mod = self.find_key(next_key)
        finger = self.find_nearest_finger(coord)
        dist = finger.position.distance(coord)
        for f in self.fingers:
            f.position = f.home
        finger.position = coord
        if dist > 3:
            # Long reaches get a flat penalty instead of the distance cost.
            return rule_3_penalty
        return (1 if not mod else mod_factor) * dist
    def score(self, source):
        """Mean per-character typing cost of `source` (lower is better)."""
        numerator = sum(self.take_step(char) for char in source)
        denominator = float(len(source))
        return numerator / denominator
    def formatted(self):
        """Printable layout: keys in row-major order, one line per row."""
        rev = {self.keyboard[k] : k for k in self.keyboard.keys()}
        coords = sorted(list(rev.keys()))
        last_y = 0
        fstr = ""
        for coord in coords:
            if coord.y > last_y:
                last_y = coord.y
                fstr += '\n'
            fstr += rev[coord] + ' '
        return fstr
| {
"repo_name": "zsck/Evokey",
"path": "src/util.py",
"copies": "2",
"size": "4231",
"license": "mit",
"hash": -6760279072195880000,
"line_mean": 29.8832116788,
"line_max": 80,
"alpha_frac": 0.5554242496,
"autogenerated": false,
"ratio": 3.210166919575114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9734029800840642,
"avg_score": 0.00631227366689411,
"num_lines": 137
} |
from functools import total_ordering
import sys
from PyFBA import log_and_message
# A compound appearing in more than this many reactions is considered common.
COMMON_REACTION_LIMIT = 5


class Compound:
    """
    A compound is the essential metabolic compound that is involved in a reaction.

    The compound by itself does not have a location. See
    PyFBA.metabolism.CompoundWithLocation for that detail.

    :ivar id: the compound id
    :ivar name: the name of the compound
    :ivar reactions: a set of reaction objects that this compound is connected to
    :ivar model_seed_id: the compound id from the model seed
    :ivar abbreviation: a short name for the compound
    :ivar formula: the compound's formula
    :ivar mw: the molecular weight of the compound
    :ivar common: True when the compound is in > COMMON_REACTION_LIMIT reactions
    :ivar charge: the charge associated with the compound
    """

    def __init__(self, cpd_id, name, verbose=False):
        """
        Initiate the object.

        :param cpd_id: The id of the compound
        :type cpd_id: str
        :param name: The name of the compound
        :type name: str
        :param verbose: also echo iron-name normalisation warnings to stderr
        :type verbose: bool
        """
        self.id = cpd_id
        # Normalise deprecated iron nomenclature to Fe2+ / Fe3+.
        if name.lower() == 'fe2' or name.lower() == 'fe+2' or name == 'fe2+':
            log_and_message(f"Warning: {name} is deprecated. We changed {cpd_id} {name} to {cpd_id} Fe2+", stderr=verbose)
            name = 'Fe2+'
        if name.lower() == 'fe3' or name == 'fe3+' or name.lower() == 'fe+3':
            log_and_message(f"Warning: {name} is deprecated. We changed {name} to Fe3+", stderr=verbose)
            name = 'Fe3+'
        elif 'fe3' in name.lower() and verbose:
            log_and_message(f"Warning: {name} might be deprecated, we prefer Fe3+", stderr=verbose)
        self.name = name
        self.reactions = set()
        self.model_seed_id = self.id
        self.alternate_seed_ids = set()
        self.abbreviation = None
        self.aliases = None
        self.formula = None
        self.mw = 0
        self.common = False
        self.charge = 0
        self.is_cofactor = False
        self.linked_compound = False
        self.pka = 0
        self.pkb = 0
        self.is_obsolete = False
        self.abstract_compound = False
        self.uptake_secretion = False
        self.is_core = False
        self.inchikey = 0

    def __eq__(self, other):
        """
        Two compounds are equal if they have the same id or the same name.

        :raises NotImplementedError: when other is not a Compound
        """
        if isinstance(other, Compound):
            return self.id == other.id or self.name == other.name
        else:
            raise NotImplementedError(f"Comparing a Compound with {type(other)} has not been implemented")

    def __cmp__(self, other):
        """
        Return 0 when the two compounds are the same, 1 otherwise.
        """
        if isinstance(other, Compound):
            # BUG FIX: this previously called a bare __eq__(other), which
            # raised NameError at runtime; it must be self.__eq__(other).
            if self.__eq__(other):
                return 0
            else:
                return 1
        else:
            raise NotImplementedError(f"Comparing a Compound with {type(other)} has not been implemented")

    def __ne__(self, other):
        """
        Inverse of __eq__; non-comparable objects are considered unequal.
        """
        try:
            result = self.__eq__(other)
        except NotImplementedError:
            return True
        return not result

    def __hash__(self):
        """
        Hash on the (id, name) pair.
        """
        return hash((self.id, self.name))

    def __str__(self):
        """
        'id: name' string form.
        """
        return f"{self.id}: {self.name}"

    def __iter__(self):
        # Yield (attribute, value) pairs so that dict(compound) works.
        for i in self.__dict__.items():
            yield i

    def add_reactions(self, rxns):
        """
        Add reactions that this compound is involved in.

        :param rxns: A set of reactions
        :type rxns: set
        :raises TypeError: when rxns is not a set
        """
        if isinstance(rxns, set):
            self.reactions.update(rxns)
        else:
            raise TypeError("You need to add a set of reactions to a compound")

    def has_reaction(self, rxn):
        """
        Is this compound involved in this reaction?
        """
        return rxn in self.reactions

    def number_of_reactions(self):
        """
        How many reactions is this compound involved in?
        """
        return len(self.reactions)

    def all_reactions(self):
        """
        The set of all reactions that this compound is involved in.
        """
        return self.reactions

    def is_common(self, rct_limit=COMMON_REACTION_LIMIT):
        """
        Is this a common compound, i.e. involved in more than rct_limit
        reactions?  Updates and returns self.common.

        :param rct_limit: the reaction-count threshold
        :type rct_limit: int
        """
        if self.number_of_reactions() > rct_limit:
            self.common = True
        else:
            self.common = False
        return self.common

    def calculate_molecular_weight(self):
        """
        Calculate and return the molecular weight of this compound.

        :raises NotImplementedError: not yet implemented
        """
        raise NotImplementedError("Sorry. Calculate molecular weight has not yet been implemented.")

    def add_attribute(self, key, value):
        """
        Add an arbitrary attribute to this compound.
        """
        setattr(self, key, value)

    def get_attribute(self, key):
        """
        Retrieve an attribute.
        """
        return getattr(self, key)
@total_ordering
class CompoundWithLocation(Compound):
    """
    A Compound plus a cellular location, as required for FBA.

    Typical locations: e (extracellular), c (cytoplasmic), h (chloroplast),
    p (periplasm).

    :ivar location: the location of the compound.
    """

    def __init__(self, id=None, name=None, location=None, *args, **kwargs):
        """
        Initiate the object.

        :param id: the compound id
        :param name: the compound name
        :param location: the location of the compound
        :type location: str
        """
        super(CompoundWithLocation, self).__init__(id, name, *args, **kwargs)
        self.id = id
        self.name = name
        self.location = location

    @classmethod
    def from_compound(cls, compound, location):
        """Initialize this object from an existing Compound plus a location."""
        cpd = cls(compound.id, compound.name, location)
        for it in compound:
            cpd.add_attribute(*it)
        cpd.location = location
        return cpd

    def __eq__(self, other):
        """
        Two located compounds are equal when the base compounds match and the
        locations match.

        :raises NotImplementedError: when other is not a CompoundWithLocation
        """
        if isinstance(other, CompoundWithLocation):
            return super().__eq__(other) and self.location == other.location
        else:
            raise NotImplementedError(f"Comparing a Compound with {type(other)} has not been implemented")

    def __lt__(self, other):
        """
        Order by compound id; @total_ordering supplies the other comparators.
        """
        return self.id < other.id

    def __cmp__(self, other):
        """
        Return 0 when the two compounds are the same, 1 otherwise.
        """
        if isinstance(other, CompoundWithLocation):
            # BUG FIX: this previously called a bare __eq__(other), which
            # raised NameError at runtime; it must be self.__eq__(other).
            if self.__eq__(other):
                return 0
            else:
                return 1
        else:
            raise NotImplementedError(f"Comparing a Compound with {type(other)} has not been implemented")

    def __ne__(self, other):
        """
        Inverse of __eq__; non-comparable objects are considered unequal.
        """
        try:
            result = self.__eq__(other)
        except NotImplementedError:
            return True
        return not result

    def __hash__(self):
        """
        Hash combines the base compound hash with the location.
        """
        return hash((super().__hash__(), self.location))

    def __str__(self):
        """
        'id: name (location: x)' string form.
        """
        return f"{self.id}: {self.name} (location: {self.location})"

    def __getstate__(self):
        # Pickling support: plain attribute dict.
        state = self.__dict__.copy()
        # sys.stderr.write(f"Set {state}\n")
        return state

    def __setstate__(self, state):
        # correctly handle unpickling
        # sys.stderr.write(f"Read {state}\n")
        self.__dict__.update(state)

    def calculate_molecular_weight(self):
        # this is here because the subclass should implement unimplemented
        # methods, otherwise it is abstract
        raise NotImplementedError("Sorry. Calculate molecular weight has not yet been implemented.")
| {
"repo_name": "linsalrob/PyFBA",
"path": "PyFBA/metabolism/compound.py",
"copies": "1",
"size": "10361",
"license": "mit",
"hash": -5854328644900477000,
"line_mean": 29.3841642229,
"line_max": 122,
"alpha_frac": 0.5802528713,
"autogenerated": false,
"ratio": 4.2884933774834435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5368746248783444,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from beautiful_date import BeautifulDate
from tzlocal import get_localzone
from datetime import datetime, date, timedelta
from .attachment import Attachment
from .attendee import Attendee
from .reminders import PopupReminder, EmailReminder
from .util.date_time_util import insure_localisation
class Visibility:
    """ Possible values of the event visibility.

    DEFAULT - Uses the default visibility for events on the calendar. This is the default value.
    PUBLIC - The event is public and event details are visible to all readers of the calendar.
    PRIVATE - The event is private and only event attendees may view event details.
    """
    # String constants assigned to Event.visibility.
    DEFAULT = "default"
    PUBLIC = "public"
    PRIVATE = "private"
@total_ordering
class Event:
    """A calendar event: summary, start/end, attendees, reminders, etc.

    Events order by (start, end) after normalising dates to localised
    datetimes (see __lt__).
    """
    def __init__(self,
                 summary,
                 start,
                 end=None,
                 timezone=str(get_localzone()),
                 event_id=None,
                 description=None,
                 location=None,
                 recurrence=None,
                 color=None,
                 visibility=Visibility.DEFAULT,
                 attendees=None,
                 gadget=None,
                 attachments=None,
                 reminders=None,
                 default_reminders=False,
                 minutes_before_popup_reminder=None,
                 minutes_before_email_reminder=None,
                 **other):
        """
        :param summary:
                title of the event.
        :param start:
                starting date/datetime.
        :param end:
                ending date/datetime. If 'end' is not specified, event is considered as a 1-day or 1-hour event
                if 'start' is date or datetime respectively.
        :param timezone:
                timezone formatted as an IANA Time Zone Database name, e.g. "Europe/Zurich". By default,
                the computers local timezone is used if it is configured. UTC is used otherwise.
        :param event_id:
                opaque identifier of the event. By default is generated by the server. You can specify id as a
                5-1024 long string of characters used in base32hex ([a-vA-V0-9]). The ID must be unique per
                calendar.
        :param description:
                description of the event.
        :param location:
                geographic location of the event as free-form text.
        :param recurrence:
                RRULE/RDATE/EXRULE/EXDATE string or list of such strings. See :py:mod:`~gcsa.recurrence`
        :param color:
                color id referring to an entry from colors endpoint (list_event_colors)
        :param visibility:
                visibility of the event. Default is default visibility for events on the calendar.
        :param attendees:
                attendee or list of attendees. See :py:class:`~gcsa.attendee.Attendee`.
                Each attendee may be given as email string or :py:class:`~gcsa.attendee.Attendee` object.
        :param gadget:
                a gadget that extends the event. See :py:class:`~gcsa.gadget.Gadget`
        :param attachments:
                attachment or list of attachments. See :py:class:`~gcsa.attachment.Attachment`
        :param reminders:
                reminder or list of reminder objects. See :py:mod:`~gcsa.reminders`
        :param default_reminders:
                whether the default reminders of the calendar apply to the event.
        :param minutes_before_popup_reminder:
                minutes before popup reminder or None if reminder is not needed.
        :param minutes_before_email_reminder:
                minutes before email reminder or None if reminder is not needed.
        :param other:
                Other fields that should be included in request json. Will be included as they are.
        """
        def assure_list(obj):
            # Normalise None -> [], scalar -> [scalar], list -> list.
            return [] if obj is None else obj if isinstance(obj, list) else [obj]

        self.timezone = timezone
        self.start = start
        # datetime must be tested before date: datetime is a date subclass.
        # NOTE(review): if start is neither date nor datetime and end is
        # falsy, self.end is never set -- confirm callers always pass one.
        if end:
            self.end = end
        elif isinstance(start, datetime):
            self.end = start + timedelta(hours=1)
        elif isinstance(start, date):
            self.end = start + timedelta(days=1)

        if isinstance(self.start, datetime) and isinstance(self.end, datetime):
            self.start = insure_localisation(self.start, timezone)
            self.end = insure_localisation(self.end, timezone)
        elif isinstance(self.start, datetime) or isinstance(self.end, datetime):
            raise TypeError('Start and end must either both be date or both be datetime.')

        def insure_date(d):
            """Converts d to date if it is of type BeautifulDate."""
            if isinstance(d, BeautifulDate):
                return date(year=d.year, month=d.month, day=d.day)
            else:
                return d

        self.start = insure_date(self.start)
        self.end = insure_date(self.end)

        attendees = [self._ensure_attendee_from_email(a) for a in assure_list(attendees)]
        reminders = assure_list(reminders)

        if len(reminders) > 5:
            raise ValueError('The maximum number of override reminders is 5.')

        if default_reminders and reminders:
            raise ValueError('Cannot specify both default reminders and overrides at the same time.')

        self.event_id = event_id and event_id.lower()
        self.summary = summary
        self.description = description
        self.location = location
        self.recurrence = assure_list(recurrence)
        self.color_id = color
        self.visibility = visibility
        self.attendees = attendees
        self.gadget = gadget
        self.attachments = assure_list(attachments)
        self.reminders = reminders
        self.default_reminders = default_reminders
        self.other = other

        # Convenience shortcuts for single popup/email reminders.
        if minutes_before_popup_reminder is not None:
            self.add_popup_reminder(minutes_before_popup_reminder)
        if minutes_before_email_reminder is not None:
            self.add_email_reminder(minutes_before_email_reminder)

    @property
    def id(self):
        """Alias for event_id."""
        return self.event_id

    def add_attendee(self, attendee):
        """Adds attendee to an event. See :py:class:`~gcsa.attendee.Attendee`.
        Attendee may be given as email string or :py:class:`~gcsa.attendee.Attendee` object."""
        self.attendees.append(self._ensure_attendee_from_email(attendee))

    def add_attachment(self, file_url, title, mime_type):
        """Adds attachment to an event. See :py:class:`~gcsa.attachment.Attachment`"""
        self.attachments.append(Attachment(title=title, file_url=file_url, mime_type=mime_type))

    def add_email_reminder(self, minutes_before_start=60):
        """Adds email reminder to an event. See :py:class:`~gcsa.reminders.EmailReminder`"""
        self.add_reminder(EmailReminder(minutes_before_start))

    def add_popup_reminder(self, minutes_before_start=30):
        """Adds popup reminder to an event. See :py:class:`~gcsa.reminders.PopupReminder`"""
        self.add_reminder(PopupReminder(minutes_before_start))

    def add_reminder(self, reminder):
        """Adds reminder to an event (at most 5 in total). See :py:mod:`~gcsa.reminders`"""
        if len(self.reminders) > 4:
            raise ValueError('The maximum number of override reminders is 5.')
        self.reminders.append(reminder)

    @staticmethod
    def _ensure_attendee_from_email(attendee_or_email):
        """If attendee_or_email is email string, returns created :py:class:`~gcsa.attendee.Attendee`
        object with the given email."""
        if isinstance(attendee_or_email, str):
            return Attendee(email=attendee_or_email)
        else:
            return attendee_or_email

    def __str__(self):
        return '{} - {}'.format(self.start, self.summary)

    def __repr__(self):
        return '<Event {} - {}>'.format(self.start, self.summary)

    def __lt__(self, other):
        """Order events by (start, end) after localising plain dates."""
        def insure_datetime(d, timezone):
            # type() check (not isinstance) deliberately excludes datetime,
            # which is a subclass of date.
            if type(d) == date:
                return insure_localisation(datetime(year=d.year, month=d.month, day=d.day), timezone)
            else:
                return d

        start = insure_datetime(self.start, self.timezone)
        end = insure_datetime(self.end, self.timezone)

        other_start = insure_datetime(other.start, other.timezone)
        other_end = insure_datetime(other.end, other.timezone)

        return (start, end) < (other_start, other_end)

    def __eq__(self, other):
        # Field-by-field equality over every stored attribute.
        return isinstance(other, Event) \
               and self.start == other.start \
               and self.end == other.end \
               and self.event_id == other.event_id \
               and self.summary == other.summary \
               and self.description == other.description \
               and self.location == other.location \
               and self.recurrence == other.recurrence \
               and self.color_id == other.color_id \
               and self.visibility == other.visibility \
               and self.attendees == other.attendees \
               and self.gadget == other.gadget \
               and self.attachments == other.attachments \
               and self.reminders == other.reminders \
               and self.default_reminders == other.default_reminders \
               and self.other == other.other
| {
"repo_name": "kuzmoyev/Google-Calendar-Simple-API",
"path": "gcsa/event.py",
"copies": "1",
"size": "9390",
"license": "mit",
"hash": 3154361507424304600,
"line_mean": 41.6818181818,
"line_max": 111,
"alpha_frac": 0.6121405751,
"autogenerated": false,
"ratio": 4.153029632905794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5265170208005794,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from cffi import FFI
# Declarations of the C shim functions exposed to Python.
ffi = FFI()
ffi.cdef("""
void long_store(long *, long *);
long long_add_and_fetch(long *, long);
long long_sub_and_fetch(long *, long);
long long_get_and_set(long *, long);
long long_compare_and_set(long *, long *, long);
""")
# Compile the shim: thin wrappers around the GCC/Clang __atomic built-ins,
# all using sequentially-consistent memory ordering (__ATOMIC_SEQ_CST).
atomic = ffi.verify("""
void long_store(long *v, long *n) {
__atomic_store(v, n, __ATOMIC_SEQ_CST);
};
long long_add_and_fetch(long *v, long i) {
return __atomic_add_fetch(v, i, __ATOMIC_SEQ_CST);
};
long long_sub_and_fetch(long *v, long i) {
return __atomic_sub_fetch(v, i, __ATOMIC_SEQ_CST);
};
long long_get_and_set(long *v, long n) {
return __atomic_exchange_n(v, n, __ATOMIC_SEQ_CST);
};
long long_compare_and_set(long *v, long *e, long n) {
return __atomic_compare_exchange_n(v, e, n, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
};
""")
@total_ordering
class AtomicLong(object):
    """
    An atomic class that guarantees atomic updates to its contained integer value.
    """
    def __init__(self, value=None):
        """
        Creates a new AtomicLong with the given initial value.

        :param value: initial value
        """
        self._value = ffi.new('long *', value)
    def __repr__(self):
        return '<{0} at 0x{1:x}: {2!r}>'.format(
            self.__class__.__name__, id(self), self.value)
    @property
    def value(self):
        # Current value, read from the underlying C long.
        return self._value[0]
    @value.setter
    def value(self, new):
        # Atomic store of the new value.
        atomic.long_store(self._value, ffi.new('long *', new))
    def __iadd__(self, inc):
        # Atomic `+= inc`.
        atomic.long_add_and_fetch(self._value, inc)
        return self
    def __isub__(self, dec):
        # Atomic `-= dec`.
        atomic.long_sub_and_fetch(self._value, dec)
        return self
    def get_and_set(self, new_value):
        """Atomically sets to the given value and returns the old value

        :param new_value: the new value
        """
        return atomic.long_get_and_set(self._value, new_value)
    def swap(self, new_value):
        # Alias for get_and_set.
        return self.get_and_set(new_value)
    def compare_and_set(self, expect_value, new_value):
        """
        Atomically sets the value to the given value if the current value is
        equal to the expected value.

        :param expect_value: the expected value
        :param new_value: the new value
        """
        return bool(atomic.long_compare_and_set(self._value, ffi.new('long *', expect_value), new_value))
    def compare_and_swap(self, expect_value, new_value):
        # Alias for compare_and_set.
        return self.compare_and_set(expect_value, new_value)
    def __eq__(self, a):
        # Equal to another AtomicLong by value, or directly to a plain number.
        if self is a:
            return True
        elif isinstance(a, AtomicLong):
            return self.value == a.value
        else:
            return self.value == a
    def __ne__(self, a):
        return not (self == a)
    def __lt__(self, a):
        # Less-than by value; @total_ordering supplies the other comparators.
        if self is a:
            return False
        elif isinstance(a, AtomicLong):
            return self.value < a.value
        else:
            return self.value < a
class AtomicLongArray(object):
    """
    An atomic class that guarantees atomic updates to its contained integer
    values. Each slot is an :class:`AtomicLong`.
    """
    def __init__(self, array=()):
        """
        Creates a new AtomicLongArray with the given initial integers.

        The default is an immutable empty tuple; the previous ``array=[]``
        default was a single shared mutable list (a classic Python pitfall).

        :param array: initial values
        """
        self._array = [AtomicLong(x) for x in array]

    def __repr__(self):
        return '<{0} at 0x{1:x}: {2!r}>'.format(
            self.__class__.__name__, id(self), self.value)

    def __len__(self):
        return len(self._array)

    def __getitem__(self, key):
        return self._array[key]

    def __setitem__(self, key, value):
        # Accept either a ready-made AtomicLong (replaces the slot) or a
        # plain integer (stored atomically into the existing slot).
        if isinstance(value, AtomicLong):
            self._array[key] = value
        else:
            self._array[key].value = value

    def __iter__(self):
        for a in self._array:
            yield a.value

    @property
    def value(self):
        """The current contents as a plain list of ints."""
        return [a.value for a in self._array]

    @value.setter
    def value(self, new=()):
        # Immutable default here too (was ``new=[]``). Setters are always
        # invoked with a value, so the default is only kept for API parity.
        self._array = [AtomicLong(int(x)) for x in new]
| {
"repo_name": "cyberdelia/atomic",
"path": "atomic/__init__.py",
"copies": "1",
"size": "4040",
"license": "mit",
"hash": 4070450434608986600,
"line_mean": 26.1140939597,
"line_max": 105,
"alpha_frac": 0.5767326733,
"autogenerated": false,
"ratio": 3.467811158798283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4544543832098283,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from dark.score import HigherIsBetterScore, LowerIsBetterScore
@total_ordering
class _Base(object):
    """
    Details of one matching region from a read alignment.

    This class is not meant to be used directly: instantiate one of its
    subclasses, HSP (numerically higher scores are better) or LSP
    (numerically lower scores are better).

    All six offsets are zero-based and the 'end' offsets are exclusive,
    following the Python slicing convention. Four of the offsets locate
    things in the subject (subjectStart, subjectEnd, readStartInSubject,
    readEndInSubject) and two locate the match within the read (readStart,
    readEnd). Matched sequences may contain gap characters ('-'), a read
    may extend past either end of the subject, and the subject may contain
    gaps too; the tests in test/blast/test_hsp.py exercise the many
    possible alignment layouts. This class just holds the values for one
    alignment and makes instances easy to compare by score.

    @param readStart: The offset in the read where the match begins.
    @param readEnd: The offset in the read where the match ends.
    @param readStartInSubject: The offset in the subject where the match of
        the read starts.
    @param readEndInSubject: The offset in the subject where the match of
        the read ends.
    @param readFrame: The reading frame for the read, a value from
        {-3, -2, -1, 1, 2, 3} where the sign indicates negative or positive
        sense.
    @param subjectStart: The offset in the subject where the match begins.
    @param subjectEnd: The offset in the subject where the match ends.
    @param subjectFrame: The reading frame for the subject, a value from
        {-3, -2, -1, 1, 2, 3} where the sign indicates negative or positive
        sense.
    @param readMatchedSequence: The matched part of the read. Note that
        this may contain gaps (marked with '-').
    @param subjectMatchedSequence: The matched part of the subject. Note
        that this may contain gaps (marked with '-').
    @param identicalCount: The C{int} number of positions at which the
        subject and query were identical.
    @param positiveCount: The C{int} number of positions at which the
        subject and query had a positive score in the scoring matrix used
        during matching (this is probably only different from the
        C{identicalCount} when matching amino acids, i.e., not nucleotides).
    """
    def __init__(self, readStart=None, readEnd=None, readStartInSubject=None,
                 readEndInSubject=None, readFrame=None, subjectStart=None,
                 subjectEnd=None, subjectFrame=None, readMatchedSequence=None,
                 subjectMatchedSequence=None, identicalCount=None,
                 positiveCount=None):
        # Location of the match within the read.
        self.readStart = readStart
        self.readEnd = readEnd
        self.readFrame = readFrame
        # Location of the read's match within the subject.
        self.readStartInSubject = readStartInSubject
        self.readEndInSubject = readEndInSubject
        # Location of the match within the subject.
        self.subjectStart = subjectStart
        self.subjectEnd = subjectEnd
        self.subjectFrame = subjectFrame
        # The (possibly gapped) matched sequence fragments.
        self.readMatchedSequence = readMatchedSequence
        self.subjectMatchedSequence = subjectMatchedSequence
        # Match-quality counts.
        self.identicalCount = identicalCount
        self.positiveCount = positiveCount

    def __lt__(self, other):
        # Score objects encode their own better/worse polarity, so the
        # ordinary comparison operators do the right thing for HSPs and LSPs.
        return self.score < other.score

    def __eq__(self, other):
        return self.score == other.score

    def betterThan(self, score):
        """
        Compare this instance's score with another score.

        @param score: A C{float} score.
        @return: A C{bool}, C{True} if this score is the better.
        """
        return self.score.betterThan(score)
class HSP(_Base):
    """
    Hold details of a single high-scoring pair from a read alignment.

    Scores compare as BLAST or DIAMOND bit scores do: numerically higher is
    better.

    @param score: The numeric score of this HSP.
    """
    def __init__(self, score, **kwargs):
        super(HSP, self).__init__(**kwargs)
        self.score = HigherIsBetterScore(score)
class LSP(_Base):
    """
    Hold details of a single low-scoring pair from a read alignment.

    Scores compare as BLAST or DIAMOND e-values do: numerically smaller is
    better.

    @param score: The numeric score of this LSP.
    """
    def __init__(self, score, **kwargs):
        super(LSP, self).__init__(**kwargs)
        self.score = LowerIsBetterScore(score)
| {
"repo_name": "bamueh/dark-matter",
"path": "dark/hsp.py",
"copies": "1",
"size": "5426",
"license": "mit",
"hash": -6430252448699756000,
"line_mean": 43.1138211382,
"line_max": 79,
"alpha_frac": 0.6249539255,
"autogenerated": false,
"ratio": 4.411382113821138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 123
} |
from functools import total_ordering
from django.db import models
from django.db.models.query import QuerySet
from django.utils.formats import date_format
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
@total_ordering
class AbstractBaseModel(models.Model):
    """Common ancestor of every model in this project."""

    class Meta:
        abstract = True

    def __lt__(self, other):
        """Order same-typed instances by pk, anything else by string form."""
        # https://github.com/hnec-vr/libya-elections/issues/1130
        if not isinstance(other, type(self)):
            return str(self) < str(other)
        return self.pk < other.pk
class TrashBinManager(models.Manager):
    # Queryset class used for every query this manager builds.
    queryset = QuerySet

    def get_queryset(self):
        """Default queries return only undeleted objects."""
        qs = self.queryset(self.model, using=self._db)
        return qs.filter(deleted=False)

    # Legacy alias for any old code that still calls `get_query_set`:
    get_query_set = get_queryset

    def unfiltered(self, using=None):
        """Return a qs of all objects, deleted and undeleted."""
        return self.queryset(self.model, using=using or self._db).all()

    def deleted(self):
        """Return a qs of all deleted objects."""
        return self.unfiltered(using=self._db).filter(deleted=True)
class AbstractTrashBinModel(AbstractBaseModel):
    # Soft-delete flag: rows are hidden by the default manager, never removed.
    deleted = models.BooleanField(_('deleted'), default=False)

    objects = TrashBinManager()

    class Meta:
        abstract = True

    def soft_delete(self):
        """Set deleted=True. Does not explicitly change any other fields,
        though AbstractTimestampModel's save() will update modification_date too."""
        self.deleted = True
        self.save(update_fields=['deleted'])

    def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
        """
        Override superclass method to force use of the unfiltered queryset when
        trying to update records. Otherwise, Django uses the default manager
        (TrashBinManager) which filters out deleted records, making it
        impossible to update deleted records.
        """
        # Discard the queryset Django passed in and rebuild it without the
        # deleted=False filter, so saves to soft-deleted rows still hit the DB.
        base_qs = self.__class__.objects.unfiltered(using=using)
        return super(AbstractTrashBinModel, self)._do_update(
            base_qs, using, pk_val, values, update_fields, forced_update)
class AbstractTimestampModel(AbstractBaseModel):
    """Adds automatically maintained creation/modification timestamps."""
    creation_date = models.DateTimeField(_('creation date'), default=now, editable=False)
    modification_date = models.DateTimeField(_('modification date'), default=now, editable=False)

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        """Stamp modification_date on every save, even partial-field saves."""
        self.modification_date = now()
        # Make sure we save the updated modification_date, even if the save
        # call is using `update_fields`. Build a new list rather than
        # appending in place: mutating the caller's sequence was a surprising
        # side effect, and it failed outright when a tuple was passed.
        update_fields = kwargs.get('update_fields')
        if update_fields is not None and 'modification_date' not in update_fields:
            kwargs['update_fields'] = list(update_fields) + ['modification_date']
        super(AbstractTimestampModel, self).save(*args, **kwargs)

    @property
    def formatted_creation_date(self):
        """creation_date rendered with Django's SHORT_DATETIME_FORMAT."""
        return date_format(self.creation_date, "SHORT_DATETIME_FORMAT")

    @property
    def formatted_modification_date(self):
        """modification_date rendered with Django's SHORT_DATETIME_FORMAT."""
        return date_format(self.modification_date, "SHORT_DATETIME_FORMAT")
class AbstractTimestampTrashBinModel(AbstractTimestampModel, AbstractTrashBinModel):
    # Convenience base combining automatic timestamps with soft deletion.
    class Meta:
        abstract = True
class ArchivableTrashBinManager(TrashBinManager):
    def get_queryset(self):
        """Default queries return only undeleted, unarchived objects."""
        qs = self.queryset(self.model, using=self._db)
        return qs.filter(deleted=False, archive_time=None)

    # Legacy alias for any old code that still calls `get_query_set`
    get_query_set = get_queryset

    def unfiltered(self, using=None):
        """Return a qs of all objects, deleted and undeleted, archived and not archived."""
        return self.queryset(self.model, using=using or self._db).all()

    def archived(self, using=None):
        """Return a qs of all archived objects, deleted or not."""
        return self.unfiltered(using=using).exclude(archive_time=None)
class AbstractArchivableTimestampTrashBinModel(AbstractTimestampTrashBinModel):
    # NULL means "currently in effect"; a timestamp marks when this version
    # stopped being the live record.
    archive_time = models.DateTimeField(
        _('archive time'),
        default=None,
        null=True,
        blank=True,
        help_text=_("If non-NULL, from this time on, this record is no longer in effect.")
    )
    objects = ArchivableTrashBinManager()

    class Meta:
        abstract = True

    def save_with_archive_version(self):
        """
        Make an archive copy of what's currently in the database for this record,
        then save this one, updating the creation_date to indicate this version starts
        being valid now.
        """
        # Re-fetch the stored row (not `self`, which may hold unsaved edits).
        archive = type(self).objects.get(pk=self.pk)
        # Clearing the pk makes the following save() INSERT a fresh row.
        archive.pk = None
        archive.archive_time = now()
        archive.save()
        # Update this one's creation time as the archive record's archive time,
        # so there's a continuous time when one record or the other was valid.
        self.creation_date = archive.archive_time
        self.save()

    @property
    def formatted_archive_time(self):
        # Empty string (not 'None') when the record has never been archived.
        return date_format(self.archive_time, "SHORT_DATETIME_FORMAT") if self.archive_time else ''
| {
"repo_name": "SmartElect/SmartElect",
"path": "libya_elections/abstract.py",
"copies": "1",
"size": "5388",
"license": "apache-2.0",
"hash": -1950568174897603800,
"line_mean": 34.2156862745,
"line_max": 99,
"alpha_frac": 0.6690794358,
"autogenerated": false,
"ratio": 4.199532346063913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008674049784835783,
"num_lines": 153
} |
from functools import total_ordering
from django.db.migrations.state import ProjectState
from .exceptions import CircularDependencyError, NodeNotFoundError
@total_ordering
class Node:
    """
    A single node in the migration graph, identified by an
    (app_label, migration_name) key, with direct links to adjacent nodes
    in both directions.

    Equality, ordering, hashing and indexing all delegate to the key, so a
    Node compares equal to (and hashes like) its plain tuple key.
    """
    def __init__(self, key):
        self.key = key
        self.children = set()
        self.parents = set()

    def __eq__(self, other):
        # Delegating to the key lets a Node compare equal to a raw key tuple.
        return self.key == other

    def __lt__(self, other):
        return self.key < other

    def __hash__(self):
        return hash(self.key)

    def __getitem__(self, item):
        return self.key[item]

    def __str__(self):
        return str(self.key)

    def __repr__(self):
        return '<{}: ({!r}, {!r})>'.format(
            self.__class__.__name__, self.key[0], self.key[1])

    def add_child(self, child):
        self.children.add(child)

    def add_parent(self, parent):
        self.parents.add(parent)
class DummyNode(Node):
    """
    Placeholder node for a migration that has no file on disk (for example,
    a squashed migration that was since removed).

    All dummy nodes must be gone once the graph has been processed; any
    survivor means a dependency points at a nonexistent migration, which
    raise_error() reports.
    """
    def __init__(self, key, origin, error_message):
        super().__init__(key)
        # Who referenced this nonexistent node, and what to say about it.
        self.origin = origin
        self.error_message = error_message

    def raise_error(self):
        raise NodeNotFoundError(self.error_message, self.key, origin=self.origin)
class MigrationGraph:
    """
    Represent the digraph of all migrations in a project.

    Each migration is a node, and each dependency is an edge. There are
    no implicit dependencies between numbered migrations - the numbering is
    merely a convention to aid file listing. Every new numbered migration
    has a declared dependency to the previous number, meaning that VCS
    branch merges can be detected and resolved.

    Migrations files can be marked as replacing another set of migrations -
    this is to support the "squash" feature. The graph handler isn't responsible
    for these; instead, the code to load them in here should examine the
    migration files and if the replaced migrations are all either unapplied
    or not present, it should ignore the replaced ones, load in just the
    replacing migration, and repoint any dependencies that pointed to the
    replaced migrations to point to the replacing one.

    A node should be a tuple: (app_path, migration_name). The tree special-cases
    things within an app - namely, root nodes and leaf nodes ignore dependencies
    to other apps.
    """

    def __init__(self):
        # key -> Node: the graph structure (edges live on the Node objects).
        self.node_map = {}
        # key -> Migration instance (None for dummy nodes).
        self.nodes = {}

    def add_node(self, key, migration):
        # Re-adding a key would silently discard the existing node's edges.
        assert key not in self.node_map
        node = Node(key)
        self.node_map[key] = node
        self.nodes[key] = migration

    def add_dummy_node(self, key, origin, error_message):
        # Stand-in for a missing migration; must be purged (or reported by
        # validate_consistency) before the graph is used.
        node = DummyNode(key, origin, error_message)
        self.node_map[key] = node
        self.nodes[key] = None

    def add_dependency(self, migration, child, parent, skip_validation=False):
        """
        This may create dummy nodes if they don't yet exist. If
        `skip_validation=True`, validate_consistency() should be called
        afterwards.
        """
        if child not in self.nodes:
            error_message = (
                "Migration %s dependencies reference nonexistent"
                " child node %r" % (migration, child)
            )
            self.add_dummy_node(child, migration, error_message)
        if parent not in self.nodes:
            error_message = (
                "Migration %s dependencies reference nonexistent"
                " parent node %r" % (migration, parent)
            )
            self.add_dummy_node(parent, migration, error_message)
        # Record the edge in both directions.
        self.node_map[child].add_parent(self.node_map[parent])
        self.node_map[parent].add_child(self.node_map[child])
        if not skip_validation:
            self.validate_consistency()

    def remove_replaced_nodes(self, replacement, replaced):
        """
        Remove each of the `replaced` nodes (when they exist). Any
        dependencies that were referencing them are changed to reference the
        `replacement` node instead.
        """
        # Cast list of replaced keys to set to speed up lookup later.
        replaced = set(replaced)
        try:
            replacement_node = self.node_map[replacement]
        except KeyError as err:
            raise NodeNotFoundError(
                "Unable to find replacement node %r. It was either never added"
                " to the migration graph, or has been removed." % (replacement,),
                replacement
            ) from err
        for replaced_key in replaced:
            self.nodes.pop(replaced_key, None)
            replaced_node = self.node_map.pop(replaced_key, None)
            if replaced_node:
                # Rewire every edge that touched the removed node onto the
                # replacement, skipping edges internal to the replaced set.
                for child in replaced_node.children:
                    child.parents.remove(replaced_node)
                    # We don't want to create dependencies between the replaced
                    # node and the replacement node as this would lead to
                    # self-referencing on the replacement node at a later iteration.
                    if child.key not in replaced:
                        replacement_node.add_child(child)
                        child.add_parent(replacement_node)
                for parent in replaced_node.parents:
                    parent.children.remove(replaced_node)
                    # Again, to avoid self-referencing.
                    if parent.key not in replaced:
                        replacement_node.add_parent(parent)
                        parent.add_child(replacement_node)

    def remove_replacement_node(self, replacement, replaced):
        """
        The inverse operation to `remove_replaced_nodes`. Almost. Remove the
        replacement node `replacement` and remap its child nodes to `replaced`
        - the list of nodes it would have replaced. Don't remap its parent
        nodes as they are expected to be correct already.
        """
        self.nodes.pop(replacement, None)
        try:
            replacement_node = self.node_map.pop(replacement)
        except KeyError as err:
            raise NodeNotFoundError(
                "Unable to remove replacement node %r. It was either never added"
                " to the migration graph, or has been removed already." % (replacement,),
                replacement
            ) from err
        replaced_nodes = set()
        replaced_nodes_parents = set()
        for key in replaced:
            replaced_node = self.node_map.get(key)
            if replaced_node:
                replaced_nodes.add(replaced_node)
                replaced_nodes_parents |= replaced_node.parents
        # We're only interested in the latest replaced node, so filter out
        # replaced nodes that are parents of other replaced nodes.
        replaced_nodes -= replaced_nodes_parents
        for child in replacement_node.children:
            child.parents.remove(replacement_node)
            for replaced_node in replaced_nodes:
                replaced_node.add_child(child)
                child.add_parent(replaced_node)
        for parent in replacement_node.parents:
            parent.children.remove(replacement_node)
            # NOTE: There is no need to remap parent dependencies as we can
            # assume the replaced nodes already have the correct ancestry.

    def validate_consistency(self):
        """Ensure there are no dummy nodes remaining in the graph."""
        [n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)]

    def forwards_plan(self, target):
        """
        Given a node, return a list of which previous nodes (dependencies) must
        be applied, ending with the node itself. This is the list you would
        follow if applying the migrations to a database.
        """
        if target not in self.nodes:
            raise NodeNotFoundError("Node %r not a valid node" % (target,), target)
        return self.iterative_dfs(self.node_map[target])

    def backwards_plan(self, target):
        """
        Given a node, return a list of which dependent nodes (dependencies)
        must be unapplied, ending with the node itself. This is the list you
        would follow if removing the migrations from a database.
        """
        if target not in self.nodes:
            raise NodeNotFoundError("Node %r not a valid node" % (target,), target)
        return self.iterative_dfs(self.node_map[target], forwards=False)

    def iterative_dfs(self, start, forwards=True):
        """Iterative depth-first search for finding dependencies."""
        # Two-phase stack: a node is pushed once unprocessed, pushed again as
        # "processed" after its dependencies, so the result is a post-order
        # (dependencies always appear before dependents).
        visited = []
        visited_set = set()
        stack = [(start, False)]
        while stack:
            node, processed = stack.pop()
            if node in visited_set:
                pass
            elif processed:
                visited_set.add(node)
                visited.append(node.key)
            else:
                stack.append((node, True))
                stack += [(n, False) for n in sorted(node.parents if forwards else node.children)]
        return visited

    def root_nodes(self, app=None):
        """
        Return all root nodes - that is, nodes with no dependencies inside
        their app. These are the starting point for an app.
        """
        roots = set()
        for node in self.nodes:
            # Parents in *other* apps don't disqualify a node from being a root.
            if all(key[0] != node[0] for key in self.node_map[node].parents) and (not app or app == node[0]):
                roots.add(node)
        return sorted(roots)

    def leaf_nodes(self, app=None):
        """
        Return all leaf nodes - that is, nodes with no dependents in their app.
        These are the "most current" version of an app's schema.
        Having more than one per app is technically an error, but one that
        gets handled further up, in the interactive command - it's usually the
        result of a VCS merge and needs some user input.
        """
        leaves = set()
        for node in self.nodes:
            if all(key[0] != node[0] for key in self.node_map[node].children) and (not app or app == node[0]):
                leaves.add(node)
        return sorted(leaves)

    def ensure_not_cyclic(self):
        # Algo from GvR:
        # https://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html
        todo = set(self.nodes)
        while todo:
            node = todo.pop()
            stack = [node]
            while stack:
                top = stack[-1]
                for child in self.node_map[top].children:
                    # Use child.key instead of child to speed up the frequent
                    # hashing.
                    node = child.key
                    if node in stack:
                        # The current DFS path revisits itself: report the cycle.
                        cycle = stack[stack.index(node):]
                        raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle))
                    if node in todo:
                        stack.append(node)
                        todo.remove(node)
                        break
                else:
                    node = stack.pop()

    def __str__(self):
        return 'Graph: %s nodes, %s edges' % self._nodes_and_edges()

    def __repr__(self):
        nodes, edges = self._nodes_and_edges()
        return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges)

    def _nodes_and_edges(self):
        # Every edge is stored once as a parent link, so summing parents
        # counts each edge exactly once.
        return len(self.nodes), sum(len(node.parents) for node in self.node_map.values())

    def _generate_plan(self, nodes, at_end):
        """Collect the forwards plans for ``nodes`` into one de-duplicated
        list; with ``at_end=False`` the target nodes themselves are omitted."""
        plan = []
        for node in nodes:
            for migration in self.forwards_plan(node):
                if migration not in plan and (at_end or migration not in nodes):
                    plan.append(migration)
        return plan

    def make_state(self, nodes=None, at_end=True, real_apps=None):
        """
        Given a migration node or nodes, return a complete ProjectState for it.
        If at_end is False, return the state before the migration has run.
        If nodes is not provided, return the overall most current project state.
        """
        if nodes is None:
            nodes = list(self.leaf_nodes())
        if not nodes:
            return ProjectState()
        if not isinstance(nodes[0], tuple):
            # A single (app, name) node was passed; normalize to a list.
            nodes = [nodes]
        plan = self._generate_plan(nodes, at_end)
        project_state = ProjectState(real_apps=real_apps)
        for node in plan:
            project_state = self.nodes[node].mutate_state(project_state, preserve=False)
        return project_state

    def __contains__(self, node):
        return node in self.nodes
| {
"repo_name": "wkschwartz/django",
"path": "django/db/migrations/graph.py",
"copies": "69",
"size": "12841",
"license": "bsd-3-clause",
"hash": 8419318291669255000,
"line_mean": 39.2539184953,
"line_max": 110,
"alpha_frac": 0.5967603769,
"autogenerated": false,
"ratio": 4.440179806362379,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from django.db.models import signals
from django.db.models.fields import BLANK_CHOICE_DASH
from django.conf import settings
from django.forms import fields
from django.db.models.options import Options
from django.core.exceptions import ValidationError
from neomodel import RequiredProperty, DeflateError, StructuredNode
# Package metadata.
__author__ = 'Robin Edwards'
__email__ = 'robin.ge@gmail.com'
__license__ = 'MIT'
__package__ = 'django_neomodel'
__version__ = '0.0.4'

# Lets Django locate the AppConfig automatically when the package is added
# to INSTALLED_APPS.
default_app_config = 'django_neomodel.apps.NeomodelConfig'
def classproperty(f):
    """Descriptor factory exposing ``f`` as a read-only property of the class.

    Unlike the builtin ``property``, the getter is handed the owning class
    (not an instance), so the value is available on the class itself as well
    as on instances.
    """
    class _ClassProperty(object):
        def __init__(self, getter):
            self.getter = getter

        def __get__(self, obj, type=None):
            # ``type`` is the owning class; the instance (obj) is ignored.
            return self.getter(type)

    return _ClassProperty(f)
@total_ordering
class DjangoField(object):
    """
    Fake Django model field object which wraps a neomodel Property
    """
    # Flags Django's forms/admin machinery inspects on real model fields.
    is_relation = False
    concrete = True
    editable = True
    # Class-wide counter giving each field a stable creation order
    # (mirrors django.db.models.Field.creation_counter).
    creation_counter = 0

    def __init__(self, prop, name):
        self.prop = prop
        self.name = name
        self.help_text = getattr(prop, 'help_text', '')
        self.primary_key = getattr(prop, 'primary_key', False)
        self.label = prop.label if prop.label else name

        form_cls = getattr(prop, 'form_field_class', 'Field')  # get field string
        # Resolve the form-field class by name, falling back to CharField.
        self.form_class = getattr(fields, form_cls, fields.CharField)

        self._has_default = prop.has_default
        self.required = prop.required
        self.blank = not self.required
        self.choices = getattr(prop, 'choices', None)

        self.creation_counter = DjangoField.creation_counter
        DjangoField.creation_counter += 1

    def __eq__(self, other):
        # Needed for @total_ordering
        if isinstance(other, DjangoField):
            return self.creation_counter == other.creation_counter
        return NotImplemented

    def __lt__(self, other):
        # This is needed because bisect does not take a comparison function.
        if isinstance(other, DjangoField):
            return self.creation_counter < other.creation_counter
        return NotImplemented

    def has_default(self):
        # Method (not bool) to match Django's Field API.
        return self._has_default

    def save_form_data(self, instance, data):
        setattr(instance, self.name, data)

    def value_from_object(self, instance):
        return getattr(instance, self.name)

    def formfield(self, **kwargs):
        """
        Returns a django.forms.Field instance for this database Property.
        """
        defaults = {'required': self.required,
                    'label': self.label or self.name,
                    'help_text': self.help_text}

        if self.has_default():
            defaults['initial'] = self.prop.default_value()

        if self.choices:
            # Fields with choices get special treatment.
            include_blank = (not self.required or
                             not (self.has_default() or 'initial' in kwargs))
            defaults['choices'] = self.get_choices(include_blank=include_blank)
            defaults['coerce'] = self.to_python

            # Many of the subclass-specific formfield arguments (min_value,
            # max_value) don't apply for choice fields, so be sure to only pass
            # the values that TypedChoiceField will understand.
            for k in list(kwargs):
                if k not in ('coerce', 'empty_value', 'choices', 'required',
                             'widget', 'label', 'initial', 'help_text',
                             'error_messages', 'show_hidden_initial'):
                    del kwargs[k]

        defaults.update(kwargs)
        return self.form_class(**defaults)

    def to_python(self, value):
        # Identity coercion; subclasses/instances may rely on the form field.
        return value

    def get_choices(self, include_blank=True):
        # Build a Django-style choices list, optionally prefixed with the
        # standard "---------" blank choice.
        blank_defined = False
        blank_choice = BLANK_CHOICE_DASH
        choices = list(self.choices) if self.choices else []
        if issubclass(type(self.choices), dict):
            # NOTE(review): enumerate() yields (index, key) pairs here, not
            # (value, display) pairs as Django choice lists normally expect;
            # list(self.choices.items()) may have been intended — confirm
            # against neomodel's choices format before changing.
            choices = list(enumerate(self.choices))
        for choice, __ in choices:
            if choice in ('', None):
                blank_defined = True
                break

        first_choice = (blank_choice if include_blank and
                        not blank_defined else [])
        return first_choice + choices
class DjangoNode(StructuredNode):
    """
    A neomodel StructuredNode with enough Django glue (a fake ``_meta``,
    validation hooks and model signals) to be usable with Django forms.
    """
    __abstract_node__ = True

    @classproperty
    def _meta(self):
        """Build a Django Options object from this node's neomodel properties."""
        if hasattr(self.Meta, 'unique_together'):
            raise NotImplementedError('unique_together property not supported by neomodel')

        opts = Options(self.Meta, app_label=self.Meta.app_label)
        opts.contribute_to_class(self.__class__, self.__class__.__name__)

        # Wrap every neomodel property in a fake Django field so Django's
        # forms machinery can introspect it.
        for key, prop in self.__all_properties__:
            opts.add_field(DjangoField(prop, key), getattr(prop, 'private', False))

        return opts

    def full_clean(self, exclude, validate_unique=False):
        """
        Validate node, on error raising ValidationErrors which can be handled by django forms

        :param exclude: unused here; present for Django API compatibility
        :param validate_unique: Check if conflicting node exists in the labels indexes
        :return:
        """
        # validate against neomodel, translating its errors into the
        # field -> message mapping Django forms expect
        try:
            self.deflate(self.__properties__, self)
        except DeflateError as e:
            raise ValidationError({e.property_name: e.msg})
        except RequiredProperty as e:
            raise ValidationError({e.property_name: 'is required'})

    def validate_unique(self, exclude):
        """Raise ValidationError if another node already holds one of this
        node's unique-indexed property values."""
        # get unique indexed properties
        unique_props = []
        for k, p in self.__class__.defined_properties(aliases=False, rels=False).items():
            if k not in exclude and p.unique_index:
                unique_props.append(k)
        cls = self.__class__
        props = self.__properties__

        # see if any nodes already exist with each property
        for key in unique_props:
            val = getattr(self.__class__, key).deflate(props[key])
            node = cls.nodes.get_or_none(**{key: val})

            # if exists and not this node
            if node and node.id != getattr(self, 'id', None):
                # Bug fix: this previously raised with a *set* literal
                # ({key, 'already exists'}); ValidationError expects a
                # field -> message mapping like the ones in full_clean().
                raise ValidationError({key: 'already exists'})

    def pre_save(self):
        # Fire Django's pre_save signal (unless disabled via settings) and
        # remember whether this save is creating a brand-new node.
        if getattr(settings, 'NEOMODEL_SIGNALS', True):
            self._creating_node = getattr(self, 'id', None) is None
            signals.pre_save.send(sender=self.__class__, instance=self)

    def post_save(self):
        if getattr(settings, 'NEOMODEL_SIGNALS', True):
            created = self._creating_node
            delattr(self, '_creating_node')
            signals.post_save.send(sender=self.__class__, instance=self, created=created)

    def pre_delete(self):
        if getattr(settings, 'NEOMODEL_SIGNALS', True):
            signals.pre_delete.send(sender=self.__class__, instance=self)

    def post_delete(self):
        if getattr(settings, 'NEOMODEL_SIGNALS', True):
            signals.post_delete.send(sender=self.__class__, instance=self)
| {
"repo_name": "robinedwards/django-neomodel",
"path": "django_neomodel/__init__.py",
"copies": "1",
"size": "6988",
"license": "mit",
"hash": -4953945009222363000,
"line_mean": 32.9223300971,
"line_max": 93,
"alpha_frac": 0.6074699485,
"autogenerated": false,
"ratio": 4.207104154124021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5314574102624021,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from django.forms.utils import flatatt
from django.template.loader import render_to_string
from django.utils.functional import cached_property
from django.utils.html import format_html
from wagtail.core import hooks
@total_ordering
class Button:
    """A single action button rendered as an anchor in the admin UI.

    Buttons sort by (priority, label); equality compares all fields.
    """
    show = True

    def __init__(self, label, url, classes=None, attrs=None, priority=1000):
        """
        :param label: visible button text
        :param url: href target
        :param classes: iterable of extra CSS class names
        :param attrs: extra HTML attributes (copied, so the caller's dict
            is never mutated)
        :param priority: sort key; lower-priority buttons render first
        """
        self.label = label
        self.url = url
        # Use None sentinels instead of mutable default arguments: the
        # previous ``classes=set()`` default was one shared set, stored
        # directly on every Button created without explicit classes, so
        # mutating one button's ``classes`` leaked into all of them.
        self.classes = classes if classes is not None else set()
        self.attrs = attrs.copy() if attrs is not None else {}
        self.priority = priority

    def render(self):
        """Render the button as an ``<a>`` element."""
        attrs = {'href': self.url, 'class': ' '.join(sorted(self.classes))}
        attrs.update(self.attrs)
        return format_html('<a{}>{}</a>', flatatt(attrs), self.label)

    def __str__(self):
        return self.render()

    def __repr__(self):
        return '<Button: {}>'.format(self.label)

    def __lt__(self, other):
        if not isinstance(other, Button):
            return NotImplemented
        return (self.priority, self.label) < (other.priority, other.label)

    def __eq__(self, other):
        if not isinstance(other, Button):
            return NotImplemented
        return (self.label == other.label
                and self.url == other.url
                and self.classes == other.classes
                and self.attrs == other.attrs
                and self.priority == other.priority)
# Base class for all listing buttons
# This is also used by SnippetListingButton defined in wagtail.snippets.widgets
class ListingButton(Button):
    def __init__(self, label, url, classes=(), **kwargs):
        """Button preset with the standard listing-row CSS classes.

        ``classes`` defaults to an immutable empty tuple (the old
        ``classes=set()`` was a shared mutable default argument) and may be
        any iterable of extra class names; it is merged into a fresh set.
        """
        classes = {'button', 'button-small', 'button-secondary'} | set(classes)
        super().__init__(label, url, classes=classes, **kwargs)
class PageListingButton(ListingButton):
    # Marker subclass: lets hooks and templates distinguish page-listing
    # buttons from other ListingButtons without changing behaviour.
    pass
class BaseDropdownMenuButton(Button):
    """A button that opens a dropdown menu instead of linking anywhere.

    Subclasses must provide ``template_name`` and implement
    ``dropdown_buttons``.
    """
    def __init__(self, *args, **kwargs):
        # Dropdown toggles never navigate, so the URL is forced to None.
        super().__init__(*args, url=None, **kwargs)

    @cached_property
    def dropdown_buttons(self):
        raise NotImplementedError

    def render(self):
        context = {
            'buttons': self.dropdown_buttons,
            'label': self.label,
            'title': self.attrs.get('title'),
            'is_parent': self.is_parent,
        }
        return render_to_string(self.template_name, context)
class ButtonWithDropdownFromHook(BaseDropdownMenuButton):
    """Dropdown button whose menu entries are collected from a wagtail hook."""
    template_name = 'wagtailadmin/pages/listing/_button_with_dropdown.html'

    def __init__(self, label, hook_name, page, page_perms, is_parent, next_url=None, **kwargs):
        # Context handed to every hook when the menu entries are gathered.
        self.hook_name = hook_name
        self.page = page
        self.page_perms = page_perms
        self.is_parent = is_parent
        self.next_url = next_url
        super().__init__(label, **kwargs)

    @property
    def show(self):
        # Hide the dropdown entirely when no hook contributed a button.
        return bool(self.dropdown_buttons)

    @cached_property
    def dropdown_buttons(self):
        collected = []
        for hook in hooks.get_hooks(self.hook_name):
            collected.extend(hook(self.page, self.page_perms, self.is_parent, self.next_url))
        return sorted(collected)
| {
"repo_name": "kaedroho/wagtail",
"path": "wagtail/admin/widgets/button.py",
"copies": "7",
"size": "3069",
"license": "bsd-3-clause",
"hash": 1509065836035939800,
"line_mean": 29.3861386139,
"line_max": 95,
"alpha_frac": 0.6217008798,
"autogenerated": false,
"ratio": 3.9498069498069497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.807150782960695,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from ._funcs import astuple
from ._make import attrib, attrs
@total_ordering
@attrs(eq=False, order=False, slots=True, frozen=True)
class VersionInfo:
    """
    A version object that can be compared to tuple of length 1--4:

    >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2)
    True
    >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
    True
    >>> vi = attr.VersionInfo(19, 2, 0, "final")
    >>> vi < (19, 1, 1)
    False
    >>> vi < (19,)
    False
    >>> vi == (19, 2,)
    True
    >>> vi == (19, 2, 1)
    False

    .. versionadded:: 19.2
    """
    year = attrib(type=int)
    minor = attrib(type=int)
    micro = attrib(type=int)
    releaselevel = attrib(type=str)

    @classmethod
    def _from_version_string(cls, s):
        """
        Parse *s* and return a _VersionInfo.
        """
        parts = s.split(".")
        if len(parts) == 3:
            # No release level in the string -- treat it as a final release.
            parts.append("final")
        return cls(
            year=int(parts[0]),
            minor=int(parts[1]),
            micro=int(parts[2]),
            releaselevel=parts[3],
        )

    def _ensure_tuple(self, other):
        """
        Ensure *other* is a tuple of a valid length.

        Returns a possibly transformed *other* and ourselves as a tuple of
        the same length as *other*.
        """
        if self.__class__ is other.__class__:
            other = astuple(other)
        if not isinstance(other, tuple):
            raise NotImplementedError
        if not (1 <= len(other) <= 4):
            raise NotImplementedError
        # Truncate ourselves so both sides compare element-for-element.
        return astuple(self)[: len(other)], other

    def __eq__(self, other):
        try:
            ours, theirs = self._ensure_tuple(other)
        except NotImplementedError:
            return NotImplemented
        return ours == theirs

    def __lt__(self, other):
        try:
            ours, theirs = self._ensure_tuple(other)
        except NotImplementedError:
            return NotImplemented
        # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
        # have to do anything special with releaselevel for now.
        return ours < theirs
| {
"repo_name": "pegasus-isi/pegasus",
"path": "packages/pegasus-common/src/Pegasus/vendor/attr/_version_info.py",
"copies": "1",
"size": "2066",
"license": "apache-2.0",
"hash": 165714767652856130,
"line_mean": 24.5061728395,
"line_max": 87,
"alpha_frac": 0.5498547919,
"autogenerated": false,
"ratio": 3.7908256880733946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4840680479973395,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from hashids import Hashids
def _is_uint(number):
    """Return whether *number* is (or numerically equals) a non-negative int.

    Numeric strings such as ``"5"`` are rejected: ``"5" == int("5")`` is
    False, which lets callers distinguish real ints from hashid strings.
    """
    try:
        return number == int(number) and number >= 0
    except (ValueError, TypeError):
        # int() raises TypeError for non-numeric types (None, list, ...);
        # previously that exception propagated instead of answering "no".
        return False
def _is_str(candidate):
    """Return whether *candidate* is a text (unicode) string."""
    return isinstance(candidate, str)
@total_ordering
class Hashid(object):
    """Wraps an integer id together with its Hashids-encoded string form.

    Accepts either a non-negative integer or a (possibly prefixed) hashids
    string; both representations are kept in sync (``_id`` / ``_hashid``).
    Equality and ordering compare the underlying value.
    """
    def __init__(self, value, salt="", min_length=0, alphabet=Hashids.ALPHABET, prefix="", hashids=None):
        # A pre-built Hashids codec may be supplied to avoid re-deriving it;
        # otherwise one is constructed from salt/min_length/alphabet.
        if hashids is None:
            self._salt = salt
            self._min_length = min_length
            self._alphabet = alphabet
            self._hashids = Hashids(salt=self._salt, min_length=self._min_length, alphabet=self._alphabet)
        else:
            self._hashids = hashids
            self._salt = hashids._salt
            self._min_length = hashids._min_length
            self._alphabet = hashids._alphabet
        self._prefix = str(prefix)
        if value is None:
            raise ValueError("id must be a positive integer or a valid Hashid string")
        # Check if `value` is an integer first as it is much faster than checking if a string is a valid hashid
        if _is_uint(value):
            self._id = value
            self._hashid = self.encode(value)
        elif _is_str(value):
            # `value` could be a string representation of an integer and not a hashid, but since the Hashids algorithm
            # requires a minimum of 16 characters in the alphabet, `int(value, base=10)` will always throw a ValueError
            # for a hashids string, as it's impossible to represent a hashids string with only chars [0-9].
            try:
                value = int(value, base=10)
            except (TypeError, ValueError):
                # We must assume that this string is a hashids representation.
                # Verify that it begins with the prefix, which could be the default ""
                if not value.startswith(self._prefix):
                    raise ValueError("value must begin with prefix {}".format(self._prefix))
                without_prefix = value[len(self._prefix):]
                _id = self.decode(without_prefix)
                if _id is None:
                    # decode() yields None when the string is not a valid hashid.
                    raise ValueError("id must be a positive integer or a valid Hashid string")
                else:
                    self._id = _id
                    self._hashid = without_prefix
            else:
                # The string parsed as a base-10 integer; validate its range.
                if not _is_uint(value):
                    raise ValueError("value must be a positive integer")
                # Finally, set our internal values
                self._id = value
                self._hashid = self.encode(value)
        elif isinstance(value, int) and value < 0:
            # Negative ints fall through _is_uint above and land here.
            raise ValueError("value must be a positive integer")
        else:
            raise ValueError("value must be a positive integer or a valid Hashid string")
    @property
    def id(self):
        # The plain integer value.
        return self._id
    @property
    def hashid(self):
        # The encoded string WITHOUT the prefix (see __str__ for prefixed form).
        return self._hashid
    @property
    def prefix(self):
        return self._prefix
    @property
    def hashids(self):
        # The underlying Hashids codec instance.
        return self._hashids
    def encode(self, id):
        """Encode an integer into its hashids string form."""
        return self._hashids.encode(id)
    def decode(self, hashid):
        """Decode a hashids string; returns the int, or None when invalid."""
        ret = self._hashids.decode(hashid)
        if len(ret) == 1:
            return ret[0]
        else:
            # Empty or multi-value decodes are treated as invalid input.
            return None
    def __repr__(self):
        return "Hashid({}): {}".format(self._id, str(self))
    def __str__(self):
        # The prefixed, encoded form -- what gets stored/displayed.
        return self._prefix + self._hashid
    def __int__(self):
        return self._id
    def __long__(self):
        # Python 2 compatibility shim.
        return int(self._id)
    def __eq__(self, other):
        # Hashids compare equal to other Hashids (full state), to their
        # prefixed string form, and to their integer value.
        if isinstance(other, self.__class__):
            return (
                self._id == other._id and
                self._hashid == other._hashid and
                self._prefix == other._prefix
            )
        if isinstance(other, str):
            return str(self) == other
        if isinstance(other, int):
            return int(self) == other
        return NotImplemented
    def __lt__(self, other):
        # total_ordering derives the remaining comparisons from __lt__/__eq__.
        if isinstance(other, self.__class__):
            return self._id < other._id
        if isinstance(other, type(self._id)):
            return self._id < other
        return NotImplemented
    def __len__(self):
        return len(str(self))
    def __hash__(self):
        # Hash by the prefixed string so it matches string-equality above.
        return hash(str(self))
    def __reduce__(self):
        # Pickle support: rebuild from the raw constructor arguments.
        return (self.__class__, (self._id, self._salt, self._min_length, self._alphabet, self._prefix, None))
| {
"repo_name": "nshafer/django-hashid-field",
"path": "hashid_field/hashid.py",
"copies": "1",
"size": "4573",
"license": "mit",
"hash": 5599622811383390000,
"line_mean": 32.1376811594,
"line_max": 119,
"alpha_frac": 0.560026241,
"autogenerated": false,
"ratio": 4.384467881112176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5444494122112176,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from swimlane.core.cursor import Cursor
from swimlane.core.resolver import SwimlaneResolver
from swimlane.core.resources.base import APIResource
# pylint: disable=abstract-method
@total_ordering
class UserGroup(APIResource):
    """Base class for Users and Groups

    Notes:
        Returned in some places where determining whether object is a User or Group is not possible without additional
        requests. Use appropriate adapter on `swimlane` client to retrieve more specific instance using `id` as needed
        Can be compared to User or Group instances directly without ensuring the classes are the same

    Attributes:
        id (str): Full user/group ID
        name (str): User/group name
    """

    def __init__(self, swimlane, raw):
        super(UserGroup, self).__init__(swimlane, raw)
        self.id = self._raw['id']
        self.name = self._raw['name']

    def __str__(self):
        return self.name

    def __hash__(self):
        return hash((self.id, self.name))

    def __eq__(self, other):
        """Override to allow equality comparisons across UserGroup, User, and Group instances"""
        if not isinstance(other, UserGroup):
            return False
        return hash(self) == hash(other)

    def __lt__(self, other):
        if not isinstance(other, UserGroup):
            message = "Comparisons not supported between instances of '{}' and '{}'".format(
                other.__class__.__name__,
                self.__class__.__name__
            )
            raise TypeError(message)
        return self.name < other.name

    def resolve(self):
        """Retrieve and return correct User or Group instance from UserGroup

        .. versionadded:: 2.16.1

        Returns:
            User | Group: Resolved User or Group instance
        """
        if self.__class__ is not UserGroup:
            # Already a concrete User/Group -- nothing to resolve.
            return self
        try:
            return self._swimlane.users.get(id=self.id)
        except ValueError:
            # Not a user id, so it must identify a group.
            return self._swimlane.groups.get(id=self.id)

    def as_usergroup_selection(self):
        """Converts UserGroup to raw UserGroupSelection for populating record

        Returns:
            dict: Formatted UserGroup data as used by selection fields
        """
        return {
            '$type': 'Core.Models.Utilities.UserGroupSelection, Core',
            'id': self.id,
            'name': self.name
        }

    def for_json(self):
        """Get JSON-compatible representation"""
        return {
            'id': self.id,
            'name': self.name
        }
class Group(UserGroup):
    """Swimlane group record

    Attributes:
        description (str): Group description
        users (GroupUsersCursor): List of users belonging to group.
    """

    _type = 'Core.Models.Groups.Group, Core'

    def __init__(self, swimlane, raw):
        super(Group, self).__init__(swimlane, raw)
        self.description = self._raw.get('description')
        self.__user_ids = [member['id'] for member in self._raw.get('users')]
        self.__users = None

    @property
    def users(self):
        """Returns a GroupUsersCursor with list of User instances for this Group

        .. versionadded:: 2.16.2
        """
        if self.__users is None:
            # Built lazily on first access and cached afterwards.
            self.__users = GroupUsersCursor(swimlane=self._swimlane, user_ids=self.__user_ids)
        return self.__users

    def get_cache_index_keys(self):
        # Groups are cacheable/locatable by id or name.
        return {'id': self.id, 'name': self.name}
class User(UserGroup):
    """Swimlane user record

    Attributes:
        username (str): Unique username
        display_name (str): User display name
        email (str): User email
    """

    _type = 'Core.Models.Identity.ApplicationUser, Core'

    def __init__(self, swimlane, raw):
        super(User, self).__init__(swimlane, raw)
        raw_data = self._raw
        self.username = raw_data.get('userName')
        self.display_name = raw_data.get('displayName')
        self.email = raw_data.get('email')

    def get_cache_index_keys(self):
        # Users are cacheable/locatable by any of these identifiers.
        return {
            'id': self.id,
            'username': self.username,
            'display_name': self.display_name
        }
class GroupUsersCursor(SwimlaneResolver, Cursor):
    """Handles retrieval for user endpoint"""

    def __init__(self, swimlane, user_ids):
        SwimlaneResolver.__init__(self, swimlane)
        Cursor.__init__(self)
        self.__user_ids = user_ids

    def _evaluate(self):
        """Lazily retrieve and build User instances from returned data"""
        if self._elements:
            # Already fetched once -- replay the cached elements.
            for cached in self._elements:
                yield cached
            return
        for user_id in self.__user_ids:
            user = self._swimlane.users.get(id=user_id)
            self._elements.append(user)
            yield user
| {
"repo_name": "Swimlane/sw-python-client",
"path": "swimlane/core/resources/usergroup.py",
"copies": "1",
"size": "4863",
"license": "mit",
"hash": -26246746674710340,
"line_mean": 28.8343558282,
"line_max": 118,
"alpha_frac": 0.5909932141,
"autogenerated": false,
"ratio": 4.167095115681234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5258088329781234,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
import base
from fito.specs.utils import is_iterable, general_iterator
# Sentinel object: identity-distinct from every value a user could pass, so
# it can mark "no default supplied" even when None is a legitimate default.
_no_default = object()
class MockIterable(object):
    """No-op implementations of the container dunder protocol.

    Mixing this in makes a Field *look* iterable/subscriptable to IDEs and
    static analysis; every method deliberately returns None.
    """
    def __len__(self): return
    def __getitem__(self, _): return
    def __setitem__(self, _, __): return
    def __delitem__(self, _): return
    def __reversed__(self): return
    def __contains__(self, _): return
    def __setslice__(self, _, __, ___): return
    def __delslice__(self, _, __): return
    def iteritems(self): return
class Field(object):
    """
    Base class for field definition on an :py:class:`Spec`
    """
    def __init__(self, pos=None, default=_no_default, serialize=True, *args, **kwargs):
        """
        :param pos: The position on the argument list
        :param default: The default value
        :param serialize: Whether to include this field in the serialization. A side effect of this field is
            that when set to False, this field is not considered when comparing two specs
        :param args: Helps having them to create on the fly subclasses of field. See :py:func:Spec:
        :param kwargs:
        """
        self.pos = pos
        self.default = default
        self.serialize = serialize

    @property
    def allowed_types(self):
        """Sequence of accepted types; subclasses must override."""
        raise NotImplementedError()

    def check_valid_value(self, value):
        """True when *value* is an instance of any allowed type."""
        # Generator expression: no need to build a throwaway list for any().
        return any(isinstance(value, t) for t in self.allowed_types)

    def __eq__(self, other):
        # Fields compare by identity only.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 -- confirm Field objects are never hashed.
        return self is other

    def __repr__(self):
        args = []
        if self.pos is not None: args.append('{}'.format(self.pos))
        if self.default is not _no_default: args.append('default={}'.format(self.default))
        if not self.serialize: args.append('serialize={}'.format(self.serialize))
        return '{}({})'.format(type(self).__name__, ', '.join(args))

    def has_default_value(self):
        """Whether an explicit default was supplied for this field."""
        return self.default is not _no_default
class PrimitiveField(Field):
    """
    Specifies a Field whose value is going to be a python object
    """
    @property
    def allowed_types(self):
        # Any python object is acceptable.
        return [object]
class CollectionField(PrimitiveField, MockIterable):
    """Field whose value is a built-in container (list, dict or tuple)."""
    @property
    def allowed_types(self):
        return (list, dict, tuple)
@total_ordering
class NumericField(PrimitiveField):
    """Field restricted to int/float values.

    The arithmetic/comparison stubs below exist so instances *look* numeric
    to static analysis tools; each one deliberately returns None.
    """
    def __lt__(self, _): return
    def __add__(self, _): return
    def __sub__(self, other): return
    def __mul__(self, other): return
    def __floordiv__(self, other): return
    def __mod__(self, other): return
    def __divmod__(self, other): return
    def __pow__(self, _, modulo=None): return
    def __lshift__(self, other): return
    def __rshift__(self, other): return
    def __and__(self, other): return
    def __xor__(self, other): return
    def __or__(self, other): return
    @property
    def allowed_types(self):
        # Only real numbers (no complex) are accepted.
        return int, float
class BaseSpecField(Field):
    """
    Specifies a Field whose value will be an Spec
    """
    def __init__(self, pos=None, default=_no_default, base_type=None, serialize=True, *args, **kwargs):
        super(BaseSpecField, self).__init__(pos=pos, default=default, serialize=serialize, *args, **kwargs)
        # Default to the most general spec type when none was given.
        self.base_type = base_type or base.Spec
        # Note: the redundant ``self.serialize = serialize`` was removed --
        # Field.__init__ (via super() above) already stores it.

    @property
    def allowed_types(self):
        return [self.base_type]
def SpecField(pos=None, default=_no_default, base_type=None, serialize=True, spec_field_subclass=None):
    """
    Builds a SpecField

    :param pos: Position on *args
    :param default: Default value
    :param base_type: Base type, it does some type checkig + avoids some warnings from IntelliJ
    :param serialize: Whether this spec field should be included in the serialization of the object
    :param spec_field_subclass: Sublcass of BaseSpecField, useful to extend the lib
    :return:
    """
    if not serialize and default is _no_default:
        raise RuntimeError("If serialize == False, the field should have a default value")

    field_cls = spec_field_subclass or BaseSpecField

    if base_type is None:
        # No type constraint: use the (sub)class as-is.
        return field_cls(pos=pos, default=default, base_type=base_type, serialize=serialize)

    assert issubclass(base_type, base.Spec)
    # Create an on-the-fly subclass so isinstance checks against base_type work.
    specialized_cls = type(
        'SpecFieldFor{}'.format(base_type.__name__),
        (field_cls, base_type),
        {}
    )
    return specialized_cls(pos=pos, default=default, base_type=base_type, serialize=serialize)
class SpecCollection(Field, MockIterable):
    """
    Specifies a Field whose value is going to be a collection of specs
    """
    @property
    def allowed_types(self):
        return list, dict, tuple

    def check_valid_value(self, value):
        if not is_iterable(value):
            return False
        # Every (key, value) pair must involve at least one Spec.
        return all(
            isinstance(k, base.Spec) or isinstance(v, base.Spec)
            for k, v in general_iterator(value)
        )
class KwargsField(SpecCollection):
    """Spec collection capturing keyword arguments (a dict)."""
    def __init__(self):
        super(KwargsField, self).__init__(default={})

    @property
    def allowed_types(self):
        return [dict]
class ArgsField(SpecCollection):
    """Spec collection capturing positional arguments (a tuple or list)."""
    def __init__(self):
        super(ArgsField, self).__init__(default=())

    @property
    def allowed_types(self):
        return [tuple, list]
class UnboundField(object):
    """Marker mixin for fields whose value is bound at a later stage."""
    pass
class BaseUnboundSpec(BaseSpecField, UnboundField):
    """A SpecField variant that is resolved/bound later (see UnboundSpecField)."""
    pass
def UnboundSpecField(pos=None, default=_no_default, base_type=None, spec_field_subclass=None):
    """Build a SpecField whose subclass must derive from BaseUnboundSpec."""
    field_cls = spec_field_subclass or BaseUnboundSpec
    assert issubclass(field_cls, BaseUnboundSpec)
    return SpecField(pos=pos, default=default, base_type=base_type, spec_field_subclass=field_cls)
class UnboundPrimitiveField(PrimitiveField, UnboundField):
    """A PrimitiveField whose value is supplied later (unbound)."""
    pass
| {
"repo_name": "elsonidoq/fito",
"path": "fito/specs/fields.py",
"copies": "1",
"size": "5795",
"license": "mit",
"hash": -8404944358858981000,
"line_mean": 26.0794392523,
"line_max": 108,
"alpha_frac": 0.6436583261,
"autogenerated": false,
"ratio": 3.969178082191781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5112836408291781,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
import phonenumbers
from django.conf import settings
from django.core import validators
@total_ordering
class PhoneNumber(phonenumbers.PhoneNumber):
    """
    A extended version of phonenumbers.PhoneNumber that provides
    some neat and more pythonic, easy to access methods. This makes using a
    PhoneNumber instance much easier, especially in templates and such.
    """
    # Maps the setting names (PHONENUMBER_DEFAULT_FORMAT / _DB_FORMAT) to
    # the phonenumbers format constants.
    format_map = {
        "E164": phonenumbers.PhoneNumberFormat.E164,
        "INTERNATIONAL": phonenumbers.PhoneNumberFormat.INTERNATIONAL,
        "NATIONAL": phonenumbers.PhoneNumberFormat.NATIONAL,
        "RFC3966": phonenumbers.PhoneNumberFormat.RFC3966,
    }
    @classmethod
    def from_string(cls, phone_number, region=None):
        # Parse *phone_number* (a string) into a PhoneNumber instance;
        # falls back to settings.PHONENUMBER_DEFAULT_REGION when no region
        # is given. Raises phonenumbers.NumberParseException on bad input.
        phone_number_obj = cls()
        if region is None:
            region = getattr(settings, "PHONENUMBER_DEFAULT_REGION", None)
        phonenumbers.parse(
            number=phone_number,
            region=region,
            keep_raw_input=True,
            numobj=phone_number_obj,
        )
        return phone_number_obj
    def __str__(self):
        # Valid numbers render in the configured display format; invalid
        # ones fall back to exactly what the user typed.
        if self.is_valid():
            format_string = getattr(settings, "PHONENUMBER_DEFAULT_FORMAT", "E164")
            fmt = self.format_map[format_string]
            return self.format_as(fmt)
        else:
            return self.raw_input
    def __repr__(self):
        if not self.is_valid():
            return str(
                "Invalid{}(raw_input={})".format(type(self).__name__, self.raw_input)
            )
        return super().__repr__()
    def is_valid(self):
        """
        checks whether the number supplied is actually valid
        """
        return phonenumbers.is_valid_number(self)
    def format_as(self, format):
        # *format* is a phonenumbers.PhoneNumberFormat constant.
        return phonenumbers.format_number(self, format)
    @property
    def as_international(self):
        return self.format_as(phonenumbers.PhoneNumberFormat.INTERNATIONAL)
    @property
    def as_e164(self):
        return self.format_as(phonenumbers.PhoneNumberFormat.E164)
    @property
    def as_national(self):
        return self.format_as(phonenumbers.PhoneNumberFormat.NATIONAL)
    @property
    def as_rfc3966(self):
        return self.format_as(phonenumbers.PhoneNumberFormat.RFC3966)
    def __len__(self):
        return len(str(self))
    def __eq__(self, other):
        """
        Override parent equality because we store only string representation
        of phone number, so we must compare only this string representation
        """
        if other in validators.EMPTY_VALUES:
            return False
        elif isinstance(other, str):
            default_region = getattr(settings, "PHONENUMBER_DEFAULT_REGION", None)
            other = to_python(other, region=default_region)
        elif isinstance(other, type(self)):
            # Nothing to do. Good to compare.
            pass
        elif isinstance(other, phonenumbers.PhoneNumber):
            # The parent class of PhoneNumber does not have .is_valid().
            # We need to make it match ours.
            old_other = other
            other = type(self)()
            other.merge_from(old_other)
        else:
            return False
        # Both sides are compared in the DB storage format; invalid numbers
        # compare by their raw input instead.
        format_string = getattr(settings, "PHONENUMBER_DB_FORMAT", "E164")
        fmt = self.format_map[format_string]
        self_str = self.format_as(fmt) if self.is_valid() else self.raw_input
        other_str = other.format_as(fmt) if other.is_valid() else other.raw_input
        return self_str == other_str
    def __lt__(self, other):
        # Ordering is only defined between (valid) phone numbers; invalid
        # operands raise ValueError, non-number operands raise TypeError.
        if isinstance(other, phonenumbers.PhoneNumber):
            old_other = other
            other = type(self)()
            other.merge_from(old_other)
        elif not isinstance(other, type(self)):
            raise TypeError(
                "'<' not supported between instances of "
                "'%s' and '%s'" % (type(self).__name__, type(other).__name__)
            )
        invalid = None
        if not self.is_valid():
            invalid = self
        elif not other.is_valid():
            invalid = other
        if invalid is not None:
            raise ValueError("Invalid phone number: %r" % invalid)
        format_string = getattr(settings, "PHONENUMBER_DB_FORMAT", "E164")
        fmt = self.format_map[format_string]
        return self.format_as(fmt) < other.format_as(fmt)
    def __hash__(self):
        # Hash by the same string used for display so it stays consistent
        # with string equality above.
        return hash(str(self))
def to_python(value, region=None):
    """Coerce *value* into a PhoneNumber; empty values pass through as-is."""
    if value in validators.EMPTY_VALUES:  # None or ''
        return value
    if isinstance(value, str):
        try:
            return PhoneNumber.from_string(phone_number=value, region=region)
        except phonenumbers.NumberParseException:
            # the string provided is not a valid PhoneNumber.
            return PhoneNumber(raw_input=value)
    # Order matters: our subclass must be checked before the parent type.
    if isinstance(value, PhoneNumber):
        return value
    if isinstance(value, phonenumbers.PhoneNumber):
        converted = PhoneNumber()
        converted.merge_from(value)
        return converted
    raise TypeError("Can't convert %s to PhoneNumber." % type(value).__name__)
def validate_region(region):
    """Raise ValueError when *region* is not a known region code (None is OK)."""
    if region is None:
        return
    valid_codes = phonenumbers.shortdata._AVAILABLE_REGION_CODES
    if region not in valid_codes:
        raise ValueError(
            "“%s” is not a valid region code. Choices are %r"
            % (region, valid_codes)
        )
| {
"repo_name": "stefanfoulis/django-phonenumber-field",
"path": "phonenumber_field/phonenumber.py",
"copies": "1",
"size": "5491",
"license": "mit",
"hash": -767148670768474500,
"line_mean": 33.0807453416,
"line_max": 85,
"alpha_frac": 0.6125387279,
"autogenerated": false,
"ratio": 4.02863436123348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.514117308913348,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
import pytest
from ..functional import keysorted, sliding_window
def test_sliding_window():
    # (input sequence, window size) -> expected list of window tuples
    cases = [
        (([], 2), []),
        (([1, 2, 3], 2), [(1, 2), (2, 3)]),
        (([1, 2, 3, 4], 2), [(1, 2), (2, 3), (3, 4)]),
        (([1, 2, 3, 4], 3), [(1, 2, 3), (2, 3, 4)]),
    ]
    for (seq, size), expected in cases:
        assert list(sliding_window(seq, size)) == expected
def test_keysorted():
    # keysorted must sort dict items by KEY only, never comparing values --
    # Unorderable raises if any equality/ordering on a value is attempted.
    @total_ordering
    class Unorderable(object):
        def __init__(self, obj):
            self.obj = obj
        def __eq__(self, other):
            raise AssertionError("Can't compare this.")
        __ne__ = __lt__ = __eq__
    # Sanity check: sorting Unorderable values directly does blow up.
    with pytest.raises(AssertionError):
        sorted([Unorderable(0), Unorderable(0)])
    d = {"c": Unorderable(3), "b": Unorderable(2), "a": Unorderable(1)}
    items = keysorted(d)
    # Items come back ordered by key with their original values attached.
    assert items[0][0] == "a"
    assert items[0][1].obj == 1
    assert items[1][0] == "b"
    assert items[1][1].obj == 2
    assert items[2][0] == "c"
    assert items[2][1].obj == 3
| {
"repo_name": "ssanderson/interface",
"path": "interface/tests/test_functional.py",
"copies": "1",
"size": "1032",
"license": "apache-2.0",
"hash": 690704683987189900,
"line_mean": 25.4615384615,
"line_max": 76,
"alpha_frac": 0.5484496124,
"autogenerated": false,
"ratio": 3.0352941176470587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40837437300470586,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
import six
from swimlane.exceptions import UnknownField
from .base import APIResource
@total_ordering
class App(APIResource):
    """A single App record instance

    Used lookup field definitions and retrieve/create child Record instances

    Attributes:
        name (str): App name
        acronym (str): App acronym
        description (str): App description
        id (str): Full App ID
        tracking_id (str): App tracking ID
        records (RecordAdapter): :class:`~swimlane.core.adapters.record.RecordAdapter` configured for current App
        reports (ReportAdapter): :class:`~swimlane.core.adapters.report.ReportAdapter` configured for current App
    """
    _type = 'Core.Models.Application.Application, Core'
    def __init__(self, swimlane, raw):
        super(App, self).__init__(swimlane, raw)
        self.acronym = self._raw['acronym']
        self.name = self._raw['name']
        self.description = self._raw.get('description', '')
        self.id = self._raw['id']
        self.tracking_id = self._raw.get('trackingFieldId')
        # Index field definitions by both id and name for fast lookup.
        self._fields_by_id = dict()
        self._fields_by_name = dict()
        # Pre-selected values of valuesList fields: field name -> default
        # (a single value for 'single' selection, a list otherwise).
        self._defaults = dict()
        for field in self._raw['fields']:
            self._fields_by_id[field['id']] = field
            self._fields_by_name[field['name']] = field
            if 'fieldType' in field and field['fieldType'] == "valuesList":
                selection_type = field['selectionType']
                for value in field['values']:
                    if 'selected' in value and value['selected']:
                        if selection_type == 'single':
                            self._defaults[field['name']] = value['name']
                            break
                        else:
                            default = self._defaults.get(field['name'], list())
                            default.extend([value['name']])
                            self._defaults[field['name']] = default
        # Map both field keys and field names to the canonical field name.
        self._keys_to_field_names = {}
        for name, field_def in six.iteritems(self._fields_by_name):
            # Include original name to simplify name resolution
            self._keys_to_field_names[name] = name
            key = field_def.get('key')
            if key:
                self._keys_to_field_names[key] = name
        # Avoid circular import
        from swimlane.core.adapters import RecordAdapter, ReportAdapter, AppRevisionAdapter
        self.records = RecordAdapter(self)
        self.reports = ReportAdapter(self)
        self.revisions = AppRevisionAdapter(self)
    def __str__(self):
        return '{self.name} ({self.acronym})'.format(self=self)
    def __hash__(self):
        return hash((self.id, self.name))
    def __lt__(self, other):
        # Apps only order against other Apps, alphabetically by name.
        if not isinstance(other, self.__class__):
            raise TypeError("Comparisons not supported between instances of '{}' and '{}'".format(
                other.__class__.__name__,
                self.__class__.__name__
            ))
        return self.name < other.name
    def get_cache_index_keys(self):
        """Return all fields available when retrieving apps"""
        return {
            'id': self.id,
            'name': self.name,
            # NOTE(review): 'acroynm' looks like a typo for 'acronym' --
            # cache lookups keyed on 'acronym' would miss. Confirm against
            # the adapter code before changing this runtime key.
            'acroynm': self.acronym
        }
    def resolve_field_name(self, field_key):
        """Return the field name matching the given key or None. Searches field keys first, falls back to field names"""
        return self._keys_to_field_names.get(field_key)
    def get_field_definition_by_name(self, field_name):
        """Get JSON field definition for field matching provided name or key

        .. versionchanged:: 4.1.0
            Added support for field keys

        Args:
            field_name (str): Target field name or key to get definition for

        Raises:
            swimlane.exceptions.UnknownField: Raised when given a field name not found in App

        Returns:
            dict: Field metadata definition
        """
        try:
            return self._fields_by_name[self.resolve_field_name(field_name)]
        except KeyError:
            raise UnknownField(self, field_name, self._fields_by_name.keys())
    def get_field_definition_by_id(self, field_id):
        """Get JSON field definition for field matching provided id

        Args:
            field_id (str): Target field ID to get definition for

        Raises:
            swimlane.exceptions.UnknownField: Raised when given a field ID not found in App

        Returns:
            dict: Field metadata definition
        """
        try:
            return self._fields_by_id[field_id]
        except KeyError:
            raise UnknownField(self, field_id, self._fields_by_id.keys())
| {
"repo_name": "Swimlane/sw-python-client",
"path": "swimlane/core/resources/app.py",
"copies": "1",
"size": "4736",
"license": "mit",
"hash": -4936046008415276000,
"line_mean": 35.1526717557,
"line_max": 120,
"alpha_frac": 0.5821368243,
"autogenerated": false,
"ratio": 4.333028362305581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005212729837188803,
"num_lines": 131
} |
from functools import total_ordering
class InvalidPeriodError(Exception):
    """Raised when a year/month value falls outside the supported range."""
    pass
def validate_year(year):
    """Validate that *year* lies in the supported range.

    :raises InvalidPeriodError: when ``year`` is outside 1970..9999.
    """
    # Chained comparison replaces the equivalent `year < 1970 or year > 9999`.
    if not 1970 <= year <= 9999:
        raise InvalidPeriodError("Year must be between 1970 and 9999")
def validate_month(month):
    """Validate that *month* is a calendar month number.

    :raises InvalidPeriodError: when ``month`` is outside 1..12.
    """
    if not 1 <= month <= 12:
        raise InvalidPeriodError("Month must be between 1 and 12")
@total_ordering
class Period(object):
    """Abstract period identified by a token stored in ``self.period``.

    Subclasses supply the arithmetic (``__add__``/``__lt__``) and the
    calendar/formatting methods below.
    """

    def __init__(self, period):
        self.period = period

    def first_day(self):
        raise NotImplementedError('needs implementation')

    def last_day(self):
        raise NotImplementedError('needs implementation')

    def format_long(self):
        raise NotImplementedError('needs implementation')

    def next(self):
        """The period immediately following this one."""
        return self + 1

    def previous(self):
        """The period immediately preceding this one."""
        return self - 1

    def is_current(self):
        # type(self)() builds "the current period" in subclasses.
        return type(self)() == self

    def is_future(self):
        return int(type(self)().period) < int(self.period)

    def is_past(self):
        return int(type(self)().period) > int(self.period)

    def __repr__(self):
        return str(self.period)

    def __str__(self):
        return self.__repr__()

    def __unicode__(self):
        return self.__repr__()

    def __len__(self):
        return len(self.period)

    def __eq__(self, other):
        # Strings are promoted to a period of the same class; the empty
        # string (falsy) deliberately compares unequal.
        if isinstance(other, str):
            other = self.__class__(other) if other else None
        if other is None or not isinstance(other, self.__class__):
            return False
        return self.period == other.period

    def __hash__(self):
        return self.period.__hash__()

    def __sub__(self, other):
        """
        Subtracts a certain number of months from this period.
        """
        return self + (-other)

    def __add__(self, other):
        raise NotImplementedError('needs implementation')

    def __lt__(self, other):
        raise NotImplementedError('needs implementation')
| {
"repo_name": "davidmarquis/pyperiods",
"path": "pyperiods/period.py",
"copies": "1",
"size": "2056",
"license": "mit",
"hash": 8126752476400411000,
"line_mean": 23.4761904762,
"line_max": 70,
"alpha_frac": 0.5992217899,
"autogenerated": false,
"ratio": 4.3559322033898304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 84
} |
from functools import total_ordering
@total_ordering
class HigherIsBetterScore(object):
    """
    Provide comparison functions for scores where numerically higher values
    are considered better.

    @param score: The numeric score of this HSP.
    """

    def __init__(self, score):
        self.score = score

    def __eq__(self, other):
        return self.score == other.score

    def __lt__(self, other):
        # total_ordering derives <=, >, >= from __lt__ and __eq__.
        return self.score < other.score

    def betterThan(self, score):
        """
        Compare this score with another score.

        @param score: A C{float} score.

        @return: A C{bool}, C{True} if this score is the better.
        """
        # "Better" means strictly greater for this score type.
        return score < self.score
@total_ordering
class LowerIsBetterScore(object):
    """
    Provide comparison functions for scores where numerically lower values
    are considered better.

    @param score: The numeric score of this LSP.
    """

    def __init__(self, score):
        self.score = score

    def __eq__(self, other):
        return self.score == other.score

    def __lt__(self, other):
        # Inverted on purpose: a numerically SMALLER score sorts as greater,
        # so "best" scores come last, mirroring HigherIsBetterScore ordering.
        return other.score < self.score

    def betterThan(self, score):
        """
        Compare this score with another score.

        @param score: A C{float} score.

        @return: A C{bool}, C{True} if this score is the better.
        """
        # "Better" means strictly lower for this score type.
        return score > self.score
| {
"repo_name": "terrycojones/dark-matter",
"path": "dark/score.py",
"copies": "3",
"size": "1367",
"license": "mit",
"hash": -840696383248295300,
"line_mean": 23.8545454545,
"line_max": 75,
"alpha_frac": 0.6100950988,
"autogenerated": false,
"ratio": 4.1676829268292686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6277778025629268,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
@total_ordering
class Match(object):
    """Object representing a match in a search index.

    Attributes:
        matched_object (object): the object that was matched
        matched_string (string): the string representation of the object
        score (float): the score of the match. Higher scores indicate a better
            match.
        substrings (list of tuples): optional list of substrings to mark in
            the string representation of the object. Each tuple in the list
            is a pair of the start and end indices of the substring.
    """

    def __init__(self):
        self.matched_object = None
        self.matched_string = None
        self.score = 0.0
        self.substrings = []

    def __lt__(self, other):
        # Primarily ordered by score; falls back to the matched string.
        if self.score < other.score:
            return True
        return self.matched_string < other.matched_string

    def canonicalize(self):
        """Canonicalizes the match by ensuring that the ranges in the map
        do not overlap with each other and are sorted by the start index."""
        self.substrings = canonical_ranges(self.substrings)
def canonical_ranges(ranges):
    """Given a list of ranges of the form ``(start, end)``, returns
    another list that ensures that:

    - For any number *x*, *x* will be included in at most one of the returned
      ranges.
    - For any number *x*, *x* will be included in one of the returned ranges
      if and only if *x* was included in at least one of the input ranges.
    - The returned ranges are sorted by the start index.
    - There exist no pairs of ranges in the returned list such that the end
      of one of the ranges is the start of the other.

    Args:
        ranges (list of tuples): list of ranges of the form ``(start, end)``

    Returns:
        list of tuples: the canonical representation of the input list, as
            defined by the rules above.
    """
    if len(ranges) < 2:
        return ranges

    ordered = sorted(ranges)
    result = []
    prev_start, prev_end = ordered[0]
    for curr_start, curr_end in ordered[1:]:
        if prev_end >= curr_start:
            # Overlapping or adjacent: merge.  Bug fix: use max() so that a
            # range fully contained in the previous one does not shrink it --
            # the old code assigned curr_end unconditionally, truncating e.g.
            # [(0, 10), (2, 3)] to [(0, 3)].
            prev_end = max(prev_end, curr_end)
        else:
            result.append((prev_start, prev_end))
            prev_start, prev_end = curr_start, curr_end
    result.append((prev_start, prev_end))
    # A single pass suffices because the ranges are sorted by start index,
    # so the old repeat-until-fixed-point loop is unnecessary.
    return result
| {
"repo_name": "ntamas/python-selecta",
"path": "selecta/matches.py",
"copies": "1",
"size": "2701",
"license": "mit",
"hash": 9098674901230635000,
"line_mean": 32.7625,
"line_max": 78,
"alpha_frac": 0.6138467234,
"autogenerated": false,
"ratio": 4.435139573070607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5548986296470607,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
@total_ordering
class Node(object):
    """Doubly linked list node holding a single ``data`` payload.

    Nodes compare by their ``data`` values; every comparison against a falsy
    ``other`` (e.g. ``None``) yields ``False`` instead of raising.
    """
    # Class-level defaults, shadowed per instance in __init__ / on linking.
    data = None
    prev_node = None
    next_node = None

    def __init__(self, data = None):
        self.data = data

    def __lt__(self, other):
        if other:
            return self.data < other.data
        else:
            return False

    def __eq__(self, other):
        if other:
            return self.data == other.data
        else:
            # NOTE(review): ``self`` is always an instance here, so this is
            # always False -- node == None deliberately yields False.
            return self is None

    def __gt__(self, other):
        if other:
            return self.data > other.data
        else:
            return False

    def __le__(self, other):
        if other:
            return self.data <= other.data
        else:
            return False

    def __ge__(self, other):
        if other:
            return self.data >= other.data
        else:
            return False

    def __str__(self):
        if(self):
            return str(self.data)
        else:
            # NOTE(review): unreachable -- with no __bool__/__len__ an
            # instance is always truthy; returning None from __str__ would
            # raise TypeError anyway.
            return None
class LList(object):
    """Linked list of Node objects (head pointer only).

    NOTE(review): this is Python 2-era scratch code; several methods have
    latent defects which are flagged inline rather than fixed.
    """
    # Class-level defaults; 'tail' is never maintained by any method here.
    head = None
    tail = None

    def __init__(self):
        pass

    def append(self, node):
        """Walk to the end and link *node* there (O(n))."""
        if (self.head):
            cur_node = self.head
            while cur_node.next_node:
                cur_node = cur_node.next_node
            cur_node.next_node = node
            node.prev_node = cur_node
        else:
            self.head = node

    def prepend(self, node):
        """Make *node* the new head.

        NOTE(review): mutates node.next_node, so prepending a node that is
        still linked into another list severs that list.
        """
        if (self.head):
            self.head.prev_node = node
            node.next_node = self.head
            self.head = node
        else:
            self.head = node
            self.head.next_node = None

    def to_array(self):
        """Return the node data values as a plain list."""
        ll = []
        c_node = self.head
        while c_node:
            ll.append(c_node.data)
            c_node = c_node.next_node
        return ll

    def remove_dups(self):
        """Drop nodes whose data duplicates an earlier node (O(n^2), no extra buffer)."""
        s = self.head
        while s:
            sp = s
            sn = s.next_node
            while sn:
                if s == sn:
                    sp.next_node = sn.next_node
                    sn = sp.next_node
                else:
                    sp = sn
                    sn = sn.next_node
            s = s.next_node

    def return_kth_last_itr(self, k):
        """Return the kth-from-last node using a lead/lag pointer pair, or None if too short."""
        cur = self.head
        lead = None
        lead_count = 0
        lead = cur
        while lead and lead_count < (k-1):
            lead = lead.next_node
            lead_count += 1
        if(lead_count < (k-1)):
            # List has fewer than k elements.
            return None
        while lead:
            lead = lead.next_node
            if(lead):
                cur = cur.next_node
        return cur

    def return_kth_last_rec(self, k, node, kth_last_node):
        """Recursive variant: writes the answer into kth_last_node.data and returns the count from the tail."""
        print(kth_last_node)
        if(node.next_node):
            count = 1 + self.return_kth_last_rec(k, node.next_node, kth_last_node)
            if(count == k):
                kth_last_node.data = node.data
            return count
        else:
            return 1

    def pop(self):
        """Detach and return the head node (None when empty)."""
        if(self.head):
            node = self.head
            self.head = self.head.next_node
        else:
            node = None
        return node

    def partition(self, pivot):
        """Stably partition into nodes < pivot followed by nodes >= pivot.

        NOTE(review): raises AttributeError on an empty list or when no node
        is < pivot, because lt_tail stays None at the final relink.
        """
        node = self.pop()
        lt_head = None
        lt_tail = None
        ge_head = None
        ge_tail = None
        while node:
            if(node.data < pivot):
                if lt_head:
                    lt_tail.next_node = node
                    lt_tail = node
                else:
                    lt_head = node
                    lt_tail = node
                lt_tail.next_node = None
            elif(node.data >= pivot):
                if ge_head:
                    ge_tail.next_node = node
                    ge_tail = node
                else:
                    ge_head = node
                    ge_tail = node
                ge_tail.next_node = None
            node = self.pop()
        lt_tail.next_node = ge_head
        self.head = lt_head

    def make_loopy(self, p=0):
        #loop to the pth element of the list
        node = self.head
        if (node):
            c = 0
            tar = None
            tail = None
            while node:
                if(c == p):
                    tar = node
                if(node.next_node == None):
                    node.next_node = tar
                    break
                node = node.next_node
                c += 1

    def find_loop_start(self):
        """Floyd cycle detection: return the index where slow/fast meet again, or -1 if acyclic."""
        fr = sr = self.head
        while sr and fr and fr.next_node:
            #print(sr, fr)
            sr = sr.next_node
            fr = fr.next_node.next_node
            if(sr is fr):
                #print("collision")
                #raw_input()
                break
        if(sr is not fr):
            return -1
        else:
            sr = self.head
            #print(sr)
            #print(fr)
            count = 0
            while sr and fr and sr is not fr:
                sr = sr.next_node
                fr = fr.next_node
                count += 1
            if sr is not fr:
                return -1
            else:
                #print("----")
                #print(sr)
                #print(fr)
                return count

    def is_palindrome(self):
        """Check whether the data sequence reads the same in both directions.

        NOTE(review): t.prepend() severs this list's own links (see prepend),
        so the fast pointer stops after one step and the method effectively
        always returns True; 'pr' below is also an undefined name ('pt' was
        presumably intended) -- the second loop would raise NameError.
        """
        sr = self.head
        fr = self.head
        t = LList()
        count = 0
        while sr and fr:
            count += 1
            node = sr
            t.prepend(node)
            if(fr.next_node):
                sr = sr.next_node
                fr = fr.next_node.next_node
            else: #even numbers of elements
                break
        if(fr is sr):
            return True
        sr = sr.next_node
        pt = t.head
        while sr and pt:
            if(sr.data != pt.data):
                return False
            sr = sr.next_node
            pt = pr.next_node
        return True
def get_length_simple_ll(node):
    """Return the number of nodes in the chain starting at *node* (0 for None)."""
    length = 0
    current = node
    while current:
        length += 1
        current = current.next_node
    return length
def pad_zero_simple_ll(node, p):
    """Insert ``p`` zero digits at the front of the number headed by *node*.

    The head node keeps its identity: each new zero is spliced in right after
    the head and the head's value is shifted into it, so callers holding a
    reference to *node* still see the (now zero-padded) number.

    NOTE(review): uses the Python 2-only ``xrange`` builtin and the ``Node``
    class defined above.
    """
    if node:
        for i in xrange(p):
            pad = Node(0)
            pad.next_node = node.next_node
            node.next_node = pad
            pad.data = node.data
            node.data = 0
def prep_addition(num1, num2):
    """Zero-pad the shorter of two linked-list numbers so both have equal length."""
    diff = get_length_simple_ll(num1) - get_length_simple_ll(num2)
    if diff > 0:
        pad_zero_simple_ll(num2, diff)
    elif diff < 0:
        pad_zero_simple_ll(num1, -diff)
def add_ll_numbers_rec_lsd_first(num1, num2, carry):
    """Recursively add two numbers stored least-significant-digit-first.

    Returns the head of a new digit list (also LSD-first), or None once both
    inputs and the carry are exhausted.  The inputs may have different
    lengths; a missing side contributes 0.
    """
    if num1 == None and num2 == None and carry == 0:
        return None
    val = carry
    if num1:
        val += num1.data
        next_num1 = num1.next_node
    else:
        next_num1 = None
    if num2:
        val += num2.data
        next_num2 = num2.next_node
    else:
        next_num2 = None
    # Carry one into the next (more significant) digit when the sum overflows.
    carry = 1 if val > 9 else 0
    #print(val, next_num1, next_num2, carry)
    val = val % 10
    new_digit = Node(val)
    #raw_input()
    part_sum = add_ll_numbers_rec_lsd_first(next_num1, next_num2, carry)
    new_digit.next_node = part_sum
    part_sum = new_digit
    return part_sum
def _add_ll_numbers_rec_lsd_last(num1, num2, carry):
    """Recursive helper: add two equal-length MSD-first digit lists.

    ``carry`` is a single-element list used as a mutable out-parameter so
    the carry can propagate back up the recursion from the least significant
    digit.  Callers must pre-pad the inputs to equal length (prep_addition).
    """
    if(num1 == None and num2 == None):
        carry[0] = 0
        return None
    #equal length asssumes the above statement holds... num2 check is redundant above
    part_sum = _add_ll_numbers_rec_lsd_last(num1.next_node, num2.next_node, carry)
    val = num1.data + num2.data + carry[0]
    carry[0] = 1 if val > 9 else 0
    val = val % 10
    new_digit = Node(val)
    new_digit.next_node = part_sum
    part_sum = new_digit
    return part_sum
def add_ll_numbers_rec_lsd_last(num1, num2):
    """Add two MSD-first linked-list numbers, padding them to equal length first.

    NOTE(review): prints both operands for debugging on every call.
    """
    carry = [0]
    prep_addition(num1, num2)
    print_simple_ll(num1, False)
    print_simple_ll(num2, False)
    result = _add_ll_numbers_rec_lsd_last(num1,num2, carry)
    if carry[0]:
        # A leftover carry becomes a new most-significant digit.
        msd = Node(carry[0])
        msd.next_node = result
        result = msd
    return result
def print_simple_ll(node, LSD_head = True, max_nodes = None):
    """Print a digit list as a Python list.

    Args:
        node: head of the chain (may be None).
        LSD_head: if True, the head holds the least-significant digit and
            the printed list is reversed so the MSD comes first.
        max_nodes: optional cap on how many nodes to visit; None means all.
    """
    arr = []
    if max_nodes == None:
        count = False
        # node_count never advances when count is False, so the
        # 'node_count < max_nodes' guard below stays permanently true.
        max_nodes = 1
    else:
        count = True
    node_count = 0
    while node and node_count < max_nodes:
        if(LSD_head):
            arr.insert(0, node.data)
        else:
            arr.append(node.data)
        node = node.next_node
        node_count += 1 if count else 0
    # Bug fix: the original used the Python 2-only statement 'print arr';
    # the call form works identically under both Python 2 and 3.
    print(arr)
def main():
    """Ad-hoc driver: builds a (one-element) palindrome list and prints the check result.

    NOTE(review): uses the Python 2-only ``xrange`` builtin; most of the
    addition exercise is commented out.
    """
    import random
    num1 = LList()
    num2 = LList()
    #for x in xrange(1):
    #    num1.append(Node(random.randrange(0,10)))
    #print_simple_ll(num1.head, False)
    middle = 0
    for x in xrange(middle):
        num2.append(Node(x))
    for y in xrange(middle, -1, -1):
        num2.append(Node(y))
    print(num2.to_array())
    print(num2.is_palindrome())
    #result = add_ll_numbers_rec_lsd_first(num1.head, num2.head, 0)
    #print_simple_ll(result)
    #result = add_ll_numbers_rec_lsd_last(num1.head, num2.head)
    #print_simple_ll(result, False)
if __name__ == '__main__':
main() | {
"repo_name": "mehmetg/python_scraps",
"path": "linked_lists.py",
"copies": "1",
"size": "6886",
"license": "unlicense",
"hash": -6984120095234640000,
"line_mean": 18.6210826211,
"line_max": 82,
"alpha_frac": 0.6171943073,
"autogenerated": false,
"ratio": 2.513138686131387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8224959446967901,
"avg_score": 0.08107470929269717,
"num_lines": 351
} |
from functools import total_ordering
@total_ordering
class OrderedType:
    """Base class whose instances carry a global, monotonically increasing
    creation counter, allowing them to be sorted by definition order."""

    creation_counter = 1

    def __init__(self, _creation_counter=None):
        # A falsy explicit counter (None or 0) falls back to a generated one.
        self.creation_counter = _creation_counter or self.gen_counter()

    @staticmethod
    def gen_counter():
        current = OrderedType.creation_counter
        OrderedType.creation_counter = current + 1
        return current

    def reset_counter(self):
        self.creation_counter = self.gen_counter()

    def __eq__(self, other):
        # Needed for @total_ordering
        if not isinstance(self, type(other)):
            return NotImplemented
        return self.creation_counter == other.creation_counter

    def __lt__(self, other):
        # bisect does not accept a comparison function, so ordering lives here.
        if not isinstance(other, OrderedType):
            return NotImplemented
        return self.creation_counter < other.creation_counter

    def __gt__(self, other):
        # bisect does not accept a comparison function, so ordering lives here.
        if not isinstance(other, OrderedType):
            return NotImplemented
        return self.creation_counter > other.creation_counter

    def __hash__(self):
        return hash(self.creation_counter)
| {
"repo_name": "graphql-python/graphene",
"path": "graphene/utils/orderedtype.py",
"copies": "1",
"size": "1223",
"license": "mit",
"hash": 5428951344424451000,
"line_mean": 30.358974359,
"line_max": 76,
"alpha_frac": 0.6516762061,
"autogenerated": false,
"ratio": 4.650190114068441,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5801866320168442,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
@total_ordering
class OrderedType(object):
    """Base class whose instances carry a global, monotonically increasing
    creation counter so they can be sorted by creation (definition) order."""

    creation_counter = 1

    def __init__(self, _creation_counter=None):
        # A falsy explicit counter (None or 0) is replaced by a generated one.
        self.creation_counter = _creation_counter or self.gen_counter()

    @staticmethod
    def gen_counter():
        # Post-increment the shared class-level counter.
        counter = OrderedType.creation_counter
        OrderedType.creation_counter += 1
        return counter

    def reset_counter(self):
        self.creation_counter = self.gen_counter()

    def __eq__(self, other):
        # Needed for @total_ordering
        if isinstance(self, type(other)):
            return self.creation_counter == other.creation_counter
        return NotImplemented

    def __lt__(self, other):
        # This is needed because bisect does not take a comparison function.
        if isinstance(other, OrderedType):
            return self.creation_counter < other.creation_counter
        return NotImplemented

    def __gt__(self, other):
        # This is needed because bisect does not take a comparison function.
        if isinstance(other, OrderedType):
            return self.creation_counter > other.creation_counter
        return NotImplemented

    def __hash__(self):
        return hash((self.creation_counter))
| {
"repo_name": "Globegitter/graphene",
"path": "graphene/utils/orderedtype.py",
"copies": "4",
"size": "1231",
"license": "mit",
"hash": -3582236010326647000,
"line_mean": 30.5641025641,
"line_max": 76,
"alpha_frac": 0.6523151909,
"autogenerated": false,
"ratio": 4.645283018867924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7297598209767925,
"avg_score": null,
"num_lines": null
} |
# from functools import total_ordering
# Sadly not present under py2.6
# @total_ordering
class Node(object):
    """A node in the language graph, wrapping a token value
    (Keyword/Variable/Literal) or, for EmptyNode, an empty tuple.

    Nodes hold bidirectional child/parent links and compare/hash by
    (value, children).
    """
    # Match-quality constants returned by best_match_type().
    NoMatch = 0
    PartialMatch = 1
    FullMatch = 2

    def __init__(self, value, children=None, tag=None):
        from .tokens import Keyword, Variable, Literal
        assert type(value) in [Keyword, Variable, Literal, tuple]
        self.value = value
        # TODO: allow weights for the children???
        self._children = []
        self._parents = []
        for child in children or []:
            self.add_child(child)
        self.tag = tag

    @property
    def children(self):
        return tuple(self._children)

    @property
    def parents(self):
        return tuple(self._parents)

    @property
    def key(self):
        "Key for this node, used for hashing and establishing ordering."
        return (self.value, self.children)

    def is_sink(self):
        " Is this node the lowermost node in its subgraph? "
        return len(self.children) == 0

    def add_child(self, node):
        # Link both directions: the child records us as a parent.
        self._children.append(node)
        node.add_parent(self)

    def add_parent(self, node):
        self._parents.append(node)

    # Matching
    def match(self, word, evaluator):
        assert not isinstance(self, EmptyNode)
        return self.value.match(word, evaluator)

    def possible_values(self, word, evaluator):
        assert not isinstance(self, EmptyNode)
        return self.value.possible_values(word, evaluator)

    @staticmethod
    def best_match_type(word, matches):
        # An exact match wins immediately; otherwise any candidate that has
        # `word` as a prefix counts as a partial match.
        best_match = Node.NoMatch
        for w in matches:
            if w == word:
                return Node.FullMatch
            elif w.startswith(word):
                best_match = Node.PartialMatch
        return best_match

    # Make the node type hashable
    def __hash__(self):
        return hash(self.key)

    def __eq__(self, other):
        return self.key == other.key

    def __lt__(self, other):
        return self.key < other.key

    # @recursive_repr()
    def __repr__(self):
        format_ = "Node({value})"
        if self.tag:
            format_ = "Node({value}, tag={tag})"
        return format_.format(value=self.value, tag=self.tag)
# Used for groupings
#
# An Optional is represented as an Empty -> content -> Empty, where the first
# Empty points to the second.
#
# An Either is represented by Empty -> content_nodes -> Empty, where the first
# content has a edge to each of the content nodes and each of the content nodes
# point to the last Empty.
class EmptyNode(Node):
    """A structural node with no token value (an empty tuple), used to open
    and close groupings such as Optional and Either."""

    def __init__(self, *args, **kw):
        super(EmptyNode, self).__init__(tuple(), *args, **kw)

    def __repr__(self):
        if self.tag:
            return "Node(tag={tag})".format(tag=self.tag)
        return "Node()"
# TODO: merge two graphs
def create_subgraph(syntax_element):
    """ Takes as an argument an Keyword, Variable, Optional, Either or ManyTimes
        and returns the root and leaf of the subgraph. """
    from .tokens import Keyword, Variable, Literal, Either, Optional, ManyTimes
    if type(syntax_element) in [Keyword, Variable, Literal]:
        # TODO: parse the Variable somehow?
        # A plain token becomes a single node that is both root and leaf.
        node = Node(syntax_element)
        return node, node
    elif isinstance(syntax_element, Optional):
        # Optional: Empty -> content -> Empty, with a skip edge from the
        # opening Empty straight to the closing one.
        sub_node_start, sub_node_end = transform_syntax_list(syntax_element.things, empty_end=False)
        end = EmptyNode([])
        start = EmptyNode([sub_node_start, end], tag="Opt")
        sub_node_end.add_child(end)
        return start, end
    elif isinstance(syntax_element, Either):
        # Either: one Empty fans out to each alternative, and every
        # alternative rejoins at a shared closing Empty.
        start, end = EmptyNode(tag="Either"), EmptyNode()
        for child in syntax_element.things:
            # nope - this should be something like parse_syntax_list!
            sub_start, sub_end = transform_syntax_list(child, root_node=start, empty_end=False)
            sub_end.add_child(end)
        return start, end
    else:
        # Make the subgraph end point to the start, requiring a comma in between!
        # if not isinstance(syntax_element, ManyTimes): from IPython import embed; embed()
        assert isinstance(syntax_element, ManyTimes), type(syntax_element)
        sub_node_start, sub_node_end = create_subgraph(syntax_element.thing)
        comma_node = Node(Literal(','))  # TODO: replace with a node matching ','
        sub_node_end.add_child(comma_node)
        comma_node.add_child(sub_node_start)
        return sub_node_start, sub_node_end
def transform_syntax_list(syntax_list, root_node=None, empty_end=True):
    """ Transform a tuple of syntax elements to a language graph.
        Returns the root and end node of the created graph """
    iterator = iter(syntax_list)
    if root_node is None:
        # No root supplied: the first element's subgraph provides it.
        element = next(iterator)
        root_node, end_node = create_subgraph(element)
    else:
        end_node = root_node
    for element in iterator:
        # Chain each subsequent element's subgraph onto the current end.
        start, new_end = create_subgraph(element)
        end_node.add_child(start)
        end_node = new_end
    # Recursion matching logic assumes that a tree starts and ends with
    # empty nodes
    if empty_end and not isinstance(end_node, EmptyNode):
        node = EmptyNode()
        end_node.add_child(node)
        end_node = node
    return root_node, end_node
| {
"repo_name": "macobo/sqlcomplete",
"path": "sqlcomplete/language/graph.py",
"copies": "1",
"size": "5252",
"license": "mit",
"hash": 7899975330909258000,
"line_mean": 32.0314465409,
"line_max": 100,
"alpha_frac": 0.6245239909,
"autogenerated": false,
"ratio": 3.9135618479880776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5038085838888078,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
@total_ordering
class Cliente:
    """Client record; equality is case-insensitive on the textual fields,
    and ordering is by saldo first, then the remaining fields."""

    def __init__(self, numero, nome, sobrenome, endereco, telefone, saldo):
        self.numero = numero
        self.nome = nome
        self.sobrenome = sobrenome
        self.endereco = endereco
        self.telefone = telefone
        self.saldo = saldo

    def __str__(self):
        return ("[numero: {}, nome: {}, sobrenome: {}, endereco: {}, "
                "telefone: {}, saldo: {}]").format(
                    self.numero, self.nome, self.sobrenome,
                    self.endereco, self.telefone, self.saldo)

    def _eq_key(self):
        # Case-insensitive identity tuple used by __eq__.
        return (self.numero, self.nome.lower(), self.sobrenome.lower(),
                self.endereco.lower(), self.telefone.lower(), self.saldo)

    def _order_key(self):
        # Ordering puts saldo first so clients sort primarily by balance.
        return (self.saldo, self.numero, self.nome.lower(),
                self.sobrenome.lower(), self.endereco.lower(),
                self.telefone.lower())

    def __eq__(self, other):
        return self._eq_key() == other._eq_key()

    def __lt__(self, other):
        return self._order_key() < other._order_key()
| {
"repo_name": "possatti/kawaii",
"path": "kawaii/cliente.py",
"copies": "1",
"size": "1203",
"license": "mit",
"hash": -8802569511192374000,
"line_mean": 22.5882352941,
"line_max": 72,
"alpha_frac": 0.6034912718,
"autogenerated": false,
"ratio": 2.4753086419753085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35787999137753085,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
@total_ordering
class Clock:
    """Immutable clock counting days, hours, and 5-minute blocks.

    An hour holds 12 blocks (0-11); advancing past block 11 of hour 23
    rolls over into a new day.
    """

    def __init__(self, hour, block = 0, day = 0):
        self._hour = hour
        self._block = block
        self._day = day

    def hour(self):
        """Current hour (0-23)."""
        return self._hour

    def minute(self):
        """Minute of the hour; each block spans 5 minutes."""
        return self._block * 5

    def day(self):
        """Number of whole days elapsed."""
        return self._day

    def next_block(self):
        """Return a new Clock advanced by one 5-minute block."""
        if self._block == 11:
            if self._hour == 23:
                return Clock(0, 0, self._day + 1)
            return Clock(self._hour + 1, 0, self._day)
        return Clock(self._hour, self._block + 1, self._day)

    def jump_forward(self, blocks):
        """Return a new Clock advanced by *blocks* 5-minute blocks.

        Bug fix: the previous implementation called the bare builtin
        ``reduce``, which does not exist under Python 3 (it moved to
        ``functools``) and raised NameError.  A plain loop is equivalent
        and portable.
        """
        clock = self
        for _ in range(blocks):
            clock = clock.next_block()
        return clock

    def str(self):
        """``HH:MM`` rendering of the current time."""
        return "{:02}:{:02}".format(self.hour(), self.minute())

    def block_range_str(self):
        """``HH:MM-HH:MM`` span covered by the current 5-minute block."""
        return "{:02}:{:02}-{:02}:{:02}".format(self.hour(), self.minute(), self.hour(), self.minute() + 4)

    def __str__(self):
        return self.str()

    def __eq__(self, other):
        return (self._hour == other._hour) and (self._block == other._block) and (self._day == other._day)

    def __lt__(self, other):
        # Order by day, then hour, then block.
        if not self._day == other._day:
            return self._day < other._day
        if not self._hour == other._hour:
            return self._hour < other._hour
        return self._block < other._block
| {
"repo_name": "tomwadley/sexting-xkeyscore",
"path": "sexting/lib/clock.py",
"copies": "1",
"size": "1374",
"license": "isc",
"hash": 7659673125851474000,
"line_mean": 25.9411764706,
"line_max": 107,
"alpha_frac": 0.5334788937,
"autogenerated": false,
"ratio": 3.7035040431266846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9615607629907581,
"avg_score": 0.024275061383820774,
"num_lines": 51
} |
from functools import total_ordering
@total_ordering
class Version():
    """Organization Class for comparable Version System

    Version integer uses decimal shift:
    2 digits major version, 2 digits minor version, 2 digits micro version
    170100 -> 17.1.0
    """

    def __init__(self, integer):
        if type(integer) == str:
            self.int = int(integer)
        elif type(integer) == int:
            self.int = integer
        else:
            raise TypeError("Version accepts int or str, not "+str(type(integer)))

    def get_version_tuple(self):
        """Return the (major, minor, micro) components of the version."""
        major, minor = divmod(self.int,10000)
        minor, micro = divmod(minor, 100)
        return major, minor, micro

    def get_name(self):
        """Return the dotted version name, e.g. ``"17.1.0"``."""
        major, minor, micro = tup = self.get_version_tuple()
        return ".".join(str(i) for i in tup)

    @staticmethod
    def from_name(name):
        """Build a Version from a dotted name like ``"17.1.0"``.

        Bug fix: the old body did ``Version(major*10000, minor*100, micro)``,
        which repeated the *string* components (split() yields strings) and
        passed three arguments to a one-argument constructor, raising
        TypeError.  The components must be converted to int and combined
        into a single decimal-shifted integer.
        """
        major, minor, micro = name.split(".")
        return Version(int(major) * 10000 + int(minor) * 100 + int(micro))

    def __repr__(self):
        return self.name

    def __str__(self):
        return str(self.int)

    def __eq__(self, other):
        # Compares against both Version instances and plain numbers.
        if isinstance(other, Version):
            return self.int == other.int
        return self.int == other

    def __lt__(self, other):
        if isinstance(other, Version):
            return self.int < other.int
        return self.int < other

    def __int__(self):
        return self.int

    name = property(get_name)
    as_tuple = property(get_version_tuple)
# Module-level singleton holding the current version (1.0.0 as decimal-shifted int).
current = Version(100)

if __name__ == "__main__":
    # Smoke tests for the comparison operators against ints and Versions.
    print (current)
    print (current > 200)
    print (current < 100)
    print (current > Version(50))
    assert(Version(100) > 99)
    assert(99 < Version(100))
    assert(100 == Version(100))
    assert(100 != Version(99))
    assert(Version(100) == Version(100))
    assert(Version(str(Version(100))) == Version(100))
| {
"repo_name": "vlajos/FactorioManager",
"path": "FactorioManager/version.py",
"copies": "2",
"size": "2041",
"license": "mit",
"hash": 2353183845774020600,
"line_mean": 29.9242424242,
"line_max": 86,
"alpha_frac": 0.5365017148,
"autogenerated": false,
"ratio": 4.296842105263158,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5833343820063157,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
__version__ = '1.5'
@total_ordering
class Infinity(object):
    """Signed infinity object that compares, hashes and does arithmetic
    like IEEE-754 infinity, including equality with float('inf') and
    float('-inf')."""

    def __init__(self, positive=True):
        self.positive = positive

    def __neg__(self):
        return Infinity(not self.positive)

    def __gt__(self, other):
        # Positive infinity is greater than everything except itself;
        # negative infinity is greater than nothing.
        if self == other:
            return False
        return self.positive

    def __eq__(self, other):
        return (
            isinstance(other, self.__class__) and
            other.positive == self.positive
        ) or (
            self.positive and other == float('inf')
        ) or (
            not self.positive and other == float('-inf')
        )

    def __ne__(self, other):
        return not (self == other)

    def __bool__(self):
        return True

    def __nonzero__(self):
        # Python 2 spelling of __bool__.
        return True

    def __str__(self):
        return '%sinf' % ('' if self.positive else '-')

    def __float__(self):
        return float(str(self))

    def __add__(self, other):
        # inf + (-inf) is undefined.
        if is_infinite(other) and other != self:
            return NotImplemented
        return self

    def __radd__(self, other):
        return self

    def __sub__(self, other):
        # inf - inf is undefined.
        if is_infinite(other) and other == self:
            return NotImplemented
        return self

    def __rsub__(self, other):
        return self

    def timetuple(self):
        # NOTE(review): presumably present so datetime objects can be
        # compared against infinity -- confirm against callers.
        return tuple()

    def __abs__(self):
        return self.__class__()

    def __pos__(self):
        return self

    def __div__(self, other):
        # inf / inf is undefined; the result's sign follows the divisor's.
        if is_infinite(other):
            return NotImplemented
        return Infinity(
            other > 0 and self.positive or other < 0 and not self.positive
        )

    def __rdiv__(self, other):
        # A finite number divided by infinity is zero.
        return 0

    def __repr__(self):
        return str(self)

    # True division and floor division behave the same as classic division.
    __truediv__ = __div__
    __rtruediv__ = __rdiv__
    __floordiv__ = __div__
    __rfloordiv__ = __rdiv__

    def __mul__(self, other):
        # inf * 0 is undefined.
        if other == 0:
            return NotImplemented
        return Infinity(
            other > 0 and self.positive or other < 0 and not self.positive
        )

    __rmul__ = __mul__

    def __pow__(self, other):
        if other == 0:
            # inf ** 0 is undefined.
            return NotImplemented
        elif other == -self:
            # Raising to the opposite infinity yields signed zero.
            return -0.0 if not self.positive else 0.0
        else:
            return Infinity()

    def __rpow__(self, other):
        if other in (1, -1):
            # 1 ** inf and (-1) ** inf are undefined here.
            return NotImplemented
        elif other == -self:
            return -0.0 if not self.positive else 0.0
        else:
            return Infinity()

    def __hash__(self):
        return (self.__class__, self.positive).__hash__()
# Module-level singleton for positive infinity.
inf = Infinity()


def is_infinite(value):
    """Return True when *value* equals positive or negative infinity."""
    return value in (inf, -inf)
| {
"repo_name": "kvesteri/infinity",
"path": "infinity.py",
"copies": "1",
"size": "2701",
"license": "bsd-3-clause",
"hash": -7468508443544702000,
"line_mean": 21.3223140496,
"line_max": 74,
"alpha_frac": 0.5135135135,
"autogenerated": false,
"ratio": 4.194099378881988,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5207612892381988,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
from heapq import nlargest
from math import fabs
from itertools import dropwhile
from operator import itemgetter, add, sub, mul, truediv, pow, xor
from random import getrandbits, randint, randrange, sample
from sys import maxsize
def split_list(xs: list, n: int):
    """Yield successive slices of *xs* of length *n* (the last may be shorter)."""
    remaining = xs
    while remaining:
        yield remaining[:n]
        remaining = remaining[n:]
def get_rand_chromosome(bits: int):
    """Return a random chromosome as a binary string of exactly *bits* characters.

    Bug fix: the previous body, ``int(getrandbits(bits) + (1 << 32), 2)[2:]``,
    raised TypeError on every call -- ``int`` does not accept a base for a
    non-string argument, and an int is not subscriptable.  Formatting with a
    zero-padded binary conversion produces the intended fixed-width bit
    string.
    """
    return format(getrandbits(bits), "0{}b".format(bits))
class Genotype:
    """Configuration bundle for a genetic-algorithm population.

    Bug fix: the constructor previously ignored every argument and assigned
    hard-coded literals (32, 0.001, 0.7, 0.3, 2000); it now honors the
    parameters it is given.
    """

    def __init__(self, chromosome_length, mutation_rate, crossover_rate, kill_rate, max_population):
        self.chromosome_length = chromosome_length
        self.mutation_rate = mutation_rate
        self.crossover_rate = crossover_rate
        self.kill_rate = kill_rate
        self.max_population = max_population
@total_ordering
class Organism:
    """A GA candidate: a bit string decoded into digits and operators.

    NOTE(review): this class has several latent defects, flagged inline
    rather than fixed.
    """

    from graycode import graycode
    # 4-bit gray codes mapped to digits 0-9 and arithmetic operators.
    gene_pool = dict(zip(graycode(4), [
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, add,
        sub, mul, truediv, pow, xor]))

    def __init__(self, genotype: Genotype):
        # NOTE(review): self.genotype is read here before it is assigned on
        # the next line, so construction raises AttributeError as written.
        self.bits = get_rand_chromosome(self.genotype.chromosome_length)
        self.genotype = genotype

    def __float__(self):
        return float(self._eval())

    def __len__(self):
        return len(self.bits)

    def __getitem__(self, item):
        return self.bits[item]

    def __iter__(self):
        return iter(self.bits)

    def __setslice__(self, i, j, sequence):
        # NOTE(review): __setslice__ is a Python 2-only protocol and, as
        # written, reads a slice instead of assigning one.
        return self.bits[i:j:sequence]

    def __str__(self):
        return self.bits

    def __repr__(self):
        return self.bits

    def __hash__(self):
        return hash(self.bits)

    def __le__(self, other):
        return float(self) <= float(other)

    def pprint(self):
        return

    @staticmethod
    def crossover(first, second):
        """Single-point crossover of two organisms, producing two children."""
        cut_pos = randint(0, len(min(first, second))-1)
        ls = [first[:cut_pos], second[:cut_pos]]
        rs = [second[cut_pos:], first[cut_pos:]]
        org_one, org_two = Organism(first.genotype), Organism(first.genotype)
        org_one.bits = ls[0] + rs[0]
        org_two.bits = ls[1] + rs[1]
        return [org_one, org_two]

    def _decode(self):
        """Translate the bit string, 4 bits at a time, into digits/operators."""
        ops = []
        for gene in split_list(self.bits, 4):
            try:
                ops.append(self.gene_pool[gene])
            except KeyError:
                pass
        return ops

    def _eval(self):
        """Evaluate the decoded gene sequence left to right."""
        genes = self._decode()
        last_op = None
        # Drop any leading operators so evaluation starts at a digit.
        genes = list(dropwhile(lambda g: not isinstance(g, int), genes))
        if len(genes) == 0:
            return 1000.0
        val = genes.pop(0)
        for gene in genes:
            is_dec = isinstance(gene, int)
            if last_op is None and is_dec:
                pass
            elif last_op is None and not is_dec:
                last_op = gene
            elif last_op is not None and is_dec:
                try:
                    val = last_op(val, gene)
                except ZeroDivisionError:
                    pass
                last_op = None
            elif last_op is not None and not is_dec:
                pass
        return val

    def fitness(self, base: float):
        """Inverse distance of this organism's value from *base*; maxsize on an exact hit."""
        try:
            v = 1.0/(fabs(base - float(self)))
            return v
        except ZeroDivisionError:
            return maxsize

    def mutate(self):
        # NOTE(review): str() of a generator yields its repr, not the joined
        # bit string, and ~bool(bit) produces -1/-2 rather than a flipped
        # bit -- this does not mutate the chromosome as presumably intended.
        idx = randint(0, len(self.bits)-1)
        self.bits = str(bit if x != idx else str(~bool(bit)) for bit, x in zip(self.bits, iter(int, 1)))

    def matewith(self, other):
        """Produce 0-2 children from this organism and *other*."""
        chldrn = []
        pr = 1 / randint(1, 10)
        if pr <= 0.4:
            num_children = 2
        elif pr <= 0.6:
            num_children = 1
        else:
            num_children = 0
        for _ in range(num_children):
            pr = 1 / randint(1, 10)
            if pr <= Organism.crossover_rate:
                # NOTE(review): Organism defines no crossover_rate class
                # attribute (it lives on Genotype), and 'children' is an
                # undefined name -- 'chldrn' was presumably intended.
                children.extend(Organism.crossover(self, other))
            elif pr <= Organism.crossover_rate + 0.2:
                # NOTE(review): Organism() requires a genotype argument.
                org = Organism()
                org.bits = self.bits
                chldrn.append(org)
        return chldrn
def half_pairs(l):
    """Pair each element of the first half of *l* with its counterpart in the second half."""
    mid = len(l) // 2
    return zip(l[:mid], l[mid:])
def randlist(start, end, num):
    """Lazily yield *num* random integers drawn from ``range(start, end)``."""
    for _ in range(num):
        yield randrange(start, end)
if __name__ == "__main__":
    # Ad-hoc tournament-selection GA driver targeting the value 19.57.
    # NOTE(review): this script cannot run as written -- Organism() requires
    # a genotype argument, and Organism.tourney_size / num_tourneys /
    # num_victors / kill_rate are never defined on Organism in this file.
    set_point = 19.57
    organisms = [Organism() for _ in range(1000)]
    winners = set()
    for tick in range(20):
        total_num_participants = Organism.tourney_size * Organism.num_tourneys
        participant_pool = sample(organisms, total_num_participants)
        organisms = organisms[total_num_participants:]
        print('Max number of participants', total_num_participants)
        print('Pool Size', len(participant_pool))
        print('Organism count', len(organisms))
        fitnesses = dict()
        for participant in participant_pool:
            fitnesses[participant] = participant.fitness(set_point)
        victors = list()
        losers = list()
        for tourney_num in range(Organism.num_tourneys):
            size = Organism.tourney_size
            participants = itemgetter(*randlist(0, size-1, size))(participant_pool)
            # NOTE(review): 'winners' is overwritten each round instead of
            # being accumulated, and 'victors' is never populated.
            winners = nlargest(Organism.num_victors, participants)
            total_fitness = sum(participants)
            print('Fitness', total_fitness)
        children = []
        print('Victor Count', len(victors))
        for (vic_one, vic_two) in half_pairs(victors):
            alpha, beta = (vic_one, vic_two) if vic_one >= vic_two else (vic_two, vic_one)
            children.extend(alpha.matewith(beta))
        print('Children count', len(children))
        mut_count = 0
        for organism in organisms:
            roll = randint(1, 1000)
            if roll <= 4:
                mut_count += 1
                organism.mutate()
            elif roll <= 8:
                # NOTE(review): deleting from 'organisms' while iterating it
                # skips elements.
                del organisms[organisms.index(organism)]
        print('Mut count', mut_count)
        print('Organism count (pre-kill)', len(organisms))
        organisms = [x for x in organisms if x.fitness(set_point) >= x.kill_rate]
        print('Organism count (pre-birth)', len(organisms))
        organisms.extend(children)
        print('Organism count (post-kill)', len(organisms), "\n")
    print('')
    print([(x, float(x), x.fitness(set_point)) for x in reversed(sorted(organisms, key=lambda x: x.fitness(set_point)))])
| {
"repo_name": "tannerb/genetic_math",
"path": "genetic.py",
"copies": "1",
"size": "6472",
"license": "mit",
"hash": -2632555434296313000,
"line_mean": 28.672985782,
"line_max": 121,
"alpha_frac": 0.5398640297,
"autogenerated": false,
"ratio": 3.648252536640361,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46881165663403607,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering
import time
@total_ordering
class graph_node(object):
    """A named graph node carrying the edges incident to it.

    Equality requires both the name and the edge set to match; ordering is
    by *descending* edge count (a node with more edges sorts as "less").
    """

    def __init__(self, name):
        self.name = name
        self.edge_list = []
        self.visited = False

    def add_node_edge(self, *edges):
        """Attach each given edge, skipping ones already present."""
        for item in edges:
            if item not in self.edge_list:
                self.edge_list.append(item)

    def __eq__(self, other):
        if self.name == other.name:
            return set(self.edge_list) == set(other.edge_list)
        return False

    def __ne__(self, other):
        # Bug fix: the old body called the bare name ``__eq__(self, other)``,
        # which is undefined in this scope and raised NameError whenever
        # ``!=`` was evaluated.
        return not self.__eq__(other)

    def __lt__(self, other):
        # Inverted on purpose: more edges sorts first.
        return len(self.edge_list) > len(other.edge_list)
class graph_edge(object):
    """Undirected edge whose identity is the unordered pair of endpoint names."""

    def __init__(self, node1, node2):
        self.end_points = frozenset((node1.name, node2.name))
class graph(object):
    """Undirected graph keyed by node name, with DFS/BFS traversals.

    NOTE(review): traversal results accumulate in self.path across calls;
    callers must reset it (or use a fresh graph) between traversals.  The
    BFS method uses Python 2-only 'print' statements.
    """

    def __init__(self, name):
        self.name = name
        self.node_dict = {}
        self.path = []

    def nodes(self):
        """Return the list of node names."""
        return list(self.node_dict.keys())

    def edges(self):
        """Return the set of edges as frozensets of endpoint names."""
        edge_list = []
        for keys in self.node_dict:
            for edge in self.node_dict[keys].edge_list:
                edge_list.append(edge.end_points)
        return set(edge_list)

    def add_node(self, n):
        """Add an isolated node; raises NameError if the name already exists."""
        if self.has_node(n):
            raise NameError
        self.node_dict[n]=graph_node(n)

    def add_edge(self,n1,n2):
        """Add an edge, creating whichever endpoint nodes are missing."""
        if self.has_node(n1) and not self.has_node(n2):
            self.node_dict[n2] = graph_node(n2)
        elif self.has_node(n2) and not self.has_node(n1):
            self.node_dict[n1] = graph_node(n1)
        elif not self.has_node(n1):
            self.node_dict[n2] = graph_node(n2)
            self.node_dict[n1] = graph_node(n1)
        self._attach_edge(n1,n2)

    def del_node(self,n):
        """Delete node n and every edge touching it; raises ValueError if absent."""
        if not self.has_node(n):
            raise ValueError
        for keys in self.node_dict:
            if self.adjacent(self.node_dict[keys].name,n):
                self.del_edge(self.node_dict[keys].name,n)
        del self.node_dict[n]

    def del_edge(self, n1, n2):
        """Remove the n1-n2 edge from both endpoints; ValueError if not adjacent."""
        check_edge = graph_edge(self.node_dict[n1],self.node_dict[n2])
        if self.adjacent(n1,n2):
            for index, item in enumerate(self.node_dict[n1].edge_list):
                if check_edge.end_points == item.end_points:
                    break
            del self.node_dict[n1].edge_list[index]
            #print self.node_dict[n1].edge_list[i]
            for index, item in enumerate(self.node_dict[n2].edge_list):
                if check_edge.end_points == item.end_points:
                    break
            del self.node_dict[n2].edge_list[index]
            #print self.node_dict[n1].edge_list[i]
        else:
            raise ValueError

    def has_node(self, n):
        return n in self.node_dict

    def neighbors(self, n):
        """Return the names adjacent to n (None when n has no edges)."""
        neighbor_list = []
        if not self.has_node(n):
            raise ValueError
        if self.node_dict[n].edge_list == []:
            # NOTE(review): returns None here, which callers iterating the
            # result would trip over.
            return
        for item in self.node_dict[n].edge_list:
            neighbor_list = neighbor_list + list(item.end_points)
        neighbor_list = list(set(neighbor_list))
        i = neighbor_list.index(self.node_dict[n].name)
        del neighbor_list[i]
        return neighbor_list

    def adjacent(self, n1,n2):
        """True when an edge joins n1 and n2 (checked from both endpoints)."""
        if n1 == n2:
            return False
        if not self.has_node(n1) or not self.has_node(n2):
            raise ValueError
        check_edge = graph_edge(self.node_dict[n1],self.node_dict[n2])
        for edge in self.node_dict[n1].edge_list:
            if check_edge.end_points == edge.end_points:
                return True
        for edge in self.node_dict[n2].edge_list:
            if check_edge.end_points == edge.end_points:
                return True
        return False

    def _attach_edge(self, n1,n2):
        # Register the same edge object on both endpoint nodes.
        new_edge = graph_edge(self.node_dict[n1],self.node_dict[n2])
        self.node_dict[n1].add_node_edge(new_edge)
        self.node_dict[n2].add_node_edge(new_edge)

    def depth_first_traversal(self, start):
        """Recursive DFS appending visited node names to self.path."""
        if not self.has_node(start):
            raise ValueError
        if self.node_dict[start].visited == False:
            self.path.append(start)
            self.node_dict[start].visited = True
        for neighbor in self.neighbors(start):
            if neighbor not in self.path:
                self.depth_first_traversal(neighbor)
        return

    def breadth_first_traversal(self, start):
        """Iterative BFS appending visited node names to self.path.

        NOTE(review): the two bare 'print' statements below are Python
        2-only syntax; this method cannot run under Python 3 as written.
        """
        if not self.has_node(start):
            raise ValueError
        layer = [start]
        self.path.append(start)
        while True:
            #time.sleep(2)
            holder = layer.pop(0)
            for neighbor in self.neighbors(holder):
                if neighbor not in self.path:
                    layer.append(neighbor)
                    self.path.append(neighbor)
            print 'layer: '+''.join(layer)
            print 'path: '+''.join(self.path)
            if len(layer)==0:
                break
        return
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 print syntax): build a small graph and
    # run a breadth-first traversal starting from node4.
    g = graph('graph')
    g.add_node('node1')
    g.add_edge('node1','node2')
    g.add_edge('node2','node3')
    g.add_edge('node2','node4')
    g.add_edge('node1','node5')
    g.add_edge('node4','node6')
    print g.nodes()
    print g.edges()
    #g.depth_first_traversal('node4')
    g.breadth_first_traversal('node4')
    print g.path
| {
"repo_name": "caderache2014/data_structures",
"path": "graph.py",
"copies": "1",
"size": "5590",
"license": "mit",
"hash": 5662068869718931000,
"line_mean": 30.8941176471,
"line_max": 71,
"alpha_frac": 0.5288014311,
"autogenerated": false,
"ratio": 3.553719008264463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4582520439364463,
"avg_score": null,
"num_lines": null
} |
from functools import total_ordering, wraps
class Promise:
    """Marker base class for the proxy objects built inside lazy().

    Code elsewhere checks ``isinstance(obj, Promise)`` to recognise lazily
    evaluated values without forcing their evaluation.
    """
    pass
def lazy(func, *resultclasses):
    """
    Turn any callable into a lazy evaluated callable. result classes or types
    is required -- at least one is needed so that the automatic forcing of
    the lazy evaluation code is triggered. Results are not memoized; the
    function is evaluated on every access.
    """
    @total_ordering
    class __proxy__(Promise):
        """
        Encapsulate a function call and act as a proxy for methods that are
        called on the result of that function. The function is not evaluated
        until one of the methods on the result is called.
        """
        __prepared = False

        def __init__(self, args, kw):
            self.__args = args
            self.__kw = kw
            if not self.__prepared:
                self.__prepare_class__()
            # NOTE(review): this sets an *instance* attribute, so the
            # class-level __prepared flag stays False and
            # __prepare_class__() runs again for every new proxy instance.
            self.__prepared = True

        def __reduce__(self):
            # Pickle as (function, call args) so the proxy can be rebuilt
            # lazily on unpickling via _lazy_proxy_unpickle.
            return (
                _lazy_proxy_unpickle,
                (func, self.__args, self.__kw) + resultclasses
            )

        def __repr__(self):
            return repr(self.__cast())

        @classmethod
        def __prepare_class__(cls):
            # Copy a delegating wrapper for every method found on the MRO of
            # each declared result class, so the proxy quacks like the result.
            for resultclass in resultclasses:
                for type_ in resultclass.mro():
                    for method_name in type_.__dict__.keys():
                        # All __promise__ return the same wrapper method, they
                        # look up the correct implementation when called.
                        if hasattr(cls, method_name):
                            continue
                        meth = cls.__promise__(method_name)
                        setattr(cls, method_name, meth)
            cls._delegate_bytes = bytes in resultclasses
            cls._delegate_text = str in resultclasses
            assert not (cls._delegate_bytes and cls._delegate_text), (
                "Cannot call lazy() with both bytes and text return types.")
            if cls._delegate_text:
                cls.__str__ = cls.__text_cast
            elif cls._delegate_bytes:
                cls.__bytes__ = cls.__bytes_cast

        @classmethod
        def __promise__(cls, method_name):
            # Builds a wrapper around some magic method
            def __wrapper__(self, *args, **kw):
                # Automatically triggers the evaluation of a lazy value and
                # applies the given magic method of the result type.
                res = func(*self.__args, **self.__kw)
                return getattr(res, method_name)(*args, **kw)
            return __wrapper__

        def __text_cast(self):
            return func(*self.__args, **self.__kw)

        def __bytes_cast(self):
            return bytes(func(*self.__args, **self.__kw))

        def __bytes_cast_encoded(self):
            return func(*self.__args, **self.__kw).encode()

        def __cast(self):
            # Force evaluation, coercing to bytes/str when the result class
            # list declared such a delegation.
            if self._delegate_bytes:
                return self.__bytes_cast()
            elif self._delegate_text:
                return self.__text_cast()
            else:
                return func(*self.__args, **self.__kw)

        def __str__(self):
            # object defines __str__(), so __prepare_class__() won't overload
            # a __str__() method from the proxied class.
            return str(self.__cast())

        def __eq__(self, other):
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() == other

        def __lt__(self, other):
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() < other

        def __hash__(self):
            return hash(self.__cast())

        def __mod__(self, rhs):
            # %-formatting forces text delegation so lazy strings interpolate.
            if self._delegate_text:
                return str(self) % rhs
            return self.__cast() % rhs

        def __deepcopy__(self, memo):
            # Instances of this class are effectively immutable. It's just a
            # collection of functions. So we don't need to do anything
            # complicated for copying.
            memo[id(self)] = self
            return self

    @wraps(func)
    def __wrapper__(*args, **kw):
        # Creates the proxy object, instead of the actual value.
        return __proxy__(args, kw)

    return __wrapper__
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
    """Pickle support: rebuild a lazy proxy from the data produced by
    ``__proxy__.__reduce__()``."""
    lazy_callable = lazy(func, *resultclasses)
    return lazy_callable(*args, **kwargs)
| {
"repo_name": "pyschool/story",
"path": "story/utils.py",
"copies": "1",
"size": "4594",
"license": "mit",
"hash": 8995405101140741000,
"line_mean": 34.0687022901,
"line_max": 79,
"alpha_frac": 0.5265563779,
"autogenerated": false,
"ratio": 4.678207739307536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5704764117207536,
"avg_score": null,
"num_lines": null
} |
from functools import update_wrapper, lru_cache
import numpy as np
from . import _pocketfft
from ._pocketfft import helper as _helper
def next_fast_len(target, real=False):
    """Find the next fast size of input data to ``fft``, for zero-padding, etc.

    SciPy's FFT algorithms gain their speed by a recursive divide and conquer
    strategy. This relies on efficient functions for small prime factors of the
    input length. Thus, the transforms are fastest when using composites of the
    prime factors handled by the fft implementation. If there are efficient
    functions for all radices <= `n` then the result will be a number `x`
    >= ``target`` with only prime factors < `n`. (Also known as `n`-smooth
    numbers)

    Parameters
    ----------
    target : int
        Length to start searching from. Must be a positive integer.
    real : bool, optional
        True if the FFT involves real input or output (e.g. `rfft` or `hfft` but
        not `fft`). Defaults to False.

    Returns
    -------
    out : int
        The smallest fast length greater than or equal to ``target``.

    Notes
    -----
    The result of this function may change in future as performance
    considerations change, for example if new prime factors are added.

    Calling `fft` or `ifft` with real input data performs an ``'R2C'``
    transform internally.

    Examples
    --------
    On a particular machine, an FFT of prime length takes 17 ms:

    >>> from scipy import fft
    >>> min_len = 93059  # prime length is worst case for speed
    >>> a = np.random.randn(min_len)
    >>> b = fft.fft(a)

    Zero-padding to the next regular length reduces computation time to
    1.3 ms, a speedup of 13 times:

    >>> fft.next_fast_len(min_len)
    93312
    >>> b = fft.fft(a, 93312)

    Rounding up to the next power of 2 is not optimal, taking 1.9 ms to
    compute; 1.3 times longer than the size given by ``next_fast_len``:

    >>> b = fft.fft(a, 131072)
    """
    # Body intentionally empty: the module-level update_wrapper call below
    # rebinds ``next_fast_len`` to the cached compiled ``good_size`` while
    # copying this function's name and docstring onto it.
    pass
# Directly wrap the c-function good_size but take the docstring etc. from the
# next_fast_len function above
# (update_wrapper copies __doc__/__name__ from the stub, lru_cache memoizes
# repeated size queries, and __wrapped__ re-exposes the raw C helper.)
next_fast_len = update_wrapper(lru_cache()(_helper.good_size), next_fast_len)
next_fast_len.__wrapped__ = _helper.good_size
def _init_nd_shape_and_axes(x, shape, axes):
    """Normalize the ``shape`` and ``axes`` arguments of an n-dimensional
    transform.

    Resolves ``None`` and negative entries against ``x`` and validates the
    combination; ``-1`` entries in ``shape`` keep the corresponding
    dimension of ``x``.  The heavy lifting is delegated to the pocketfft
    helper implementation.

    Parameters
    ----------
    x : array_like
        The input array.
    shape : int or array_like of ints or None
        Desired result shape; ``None`` derives it from ``x`` (restricted to
        ``axes`` when those are given).
    axes : int or array_like of ints or None
        Axes over which the calculation is computed; defaults to all axes,
        negatives allowed.

    Returns
    -------
    shape, axes : 1-D integer arrays
        The normalized shape and axes.
    """
    normalized = _helper._init_nd_shape_and_axes(x, shape, axes)
    return normalized
| {
"repo_name": "jor-/scipy",
"path": "scipy/fft/_helper.py",
"copies": "2",
"size": "3382",
"license": "bsd-3-clause",
"hash": -4513594013762344400,
"line_mean": 32.4851485149,
"line_max": 80,
"alpha_frac": 0.6540508575,
"autogenerated": false,
"ratio": 3.9279907084785135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5582041565978514,
"avg_score": null,
"num_lines": null
} |
from functools import update_wrapper, lru_cache
from ._pocketfft import helper as _helper
def next_fast_len(target, real=False):
    """Find the next fast size of input data to ``fft``, for zero-padding, etc.

    SciPy's FFT algorithms gain their speed by a recursive divide and conquer
    strategy. This relies on efficient functions for small prime factors of the
    input length. Thus, the transforms are fastest when using composites of the
    prime factors handled by the fft implementation. If there are efficient
    functions for all radices <= `n`, then the result will be a number `x`
    >= ``target`` with only prime factors < `n`. (Also known as `n`-smooth
    numbers)

    Parameters
    ----------
    target : int
        Length to start searching from. Must be a positive integer.
    real : bool, optional
        True if the FFT involves real input or output (e.g., `rfft` or `hfft`
        but not `fft`). Defaults to False.

    Returns
    -------
    out : int
        The smallest fast length greater than or equal to ``target``.

    Notes
    -----
    The result of this function may change in future as performance
    considerations change, for example, if new prime factors are added.

    Calling `fft` or `ifft` with real input data performs an ``'R2C'``
    transform internally.

    Examples
    --------
    On a particular machine, an FFT of prime length takes 11.4 ms:

    >>> from scipy import fft
    >>> rng = np.random.default_rng()
    >>> min_len = 93059  # prime length is worst case for speed
    >>> a = rng.standard_normal(min_len)
    >>> b = fft.fft(a)

    Zero-padding to the next regular length reduces computation time to
    1.6 ms, a speedup of 7.3 times:

    >>> fft.next_fast_len(min_len, real=True)
    93312
    >>> b = fft.fft(a, 93312)

    Rounding up to the next power of 2 is not optimal, taking 3.0 ms to
    compute; 1.9 times longer than the size given by ``next_fast_len``:

    >>> b = fft.fft(a, 131072)
    """
    # Body intentionally empty: the module-level update_wrapper call below
    # rebinds ``next_fast_len`` to the cached compiled ``good_size`` while
    # copying this function's name and docstring onto it.
    pass
# Directly wrap the c-function good_size but take the docstring etc., from the
# next_fast_len function above
# (lru_cache memoizes repeated size queries; __wrapped__ points back at the
# raw compiled helper for introspection.)
next_fast_len = update_wrapper(lru_cache()(_helper.good_size), next_fast_len)
next_fast_len.__wrapped__ = _helper.good_size
def _init_nd_shape_and_axes(x, shape, axes):
    """Handle the ``shape`` and ``axes`` arguments of an N-D transform.

    Brings both arguments into a standard form: ``None`` and negative
    entries are resolved against ``x``, ``-1`` entries in ``shape`` reuse
    the matching dimension of ``x``, and inconsistent combinations raise.
    Delegates to the compiled pocketfft helper.

    Parameters
    ----------
    x : array_like
        The input array.
    shape : int or array_like of ints or None
        The requested result shape, or ``None`` to derive it from ``x``
        (and ``axes`` when given).
    axes : int or array_like of ints or None
        Axes along which the calculation is computed; defaults to all
        axes.  Negative indices are converted to their positive
        counterparts.

    Returns
    -------
    shape, axes : 1-D integer arrays
        The normalized shape and axes.
    """
    return _helper._init_nd_shape_and_axes(x, shape, axes)
| {
"repo_name": "mdhaber/scipy",
"path": "scipy/fft/_helper.py",
"copies": "12",
"size": "3389",
"license": "bsd-3-clause",
"hash": -7103633953354199000,
"line_mean": 32.89,
"line_max": 79,
"alpha_frac": 0.6500442608,
"autogenerated": false,
"ratio": 3.899884925201381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 100
} |
from functools import update_wrapper
from collections import Mapping
# Common prefix of every Victron D-Bus service name.
VictronServicePrefix = 'com.victronenergy'
def safeadd(*values):
    """Add all arguments, ignoring any that are None.

    Returns None when every argument is None (or when no arguments are
    given at all).
    """
    total = None
    for value in values:
        if value is None:
            continue
        total = value if total is None else total + value
    return total
def safemax(v0, v1):
    """max() that propagates None: return None if either argument is None."""
    if v0 is not None and v1 is not None:
        return max(v0, v1)
    return None
def service_base_name(service_name):
    '''Returns the part of a Victron D-Bus service name that defines it type.

    Example: com.victronenergy.vebus.ttyO1 yields com.victronenergy.vebus.
    Raises Exception for any name that does not start with the Victron
    prefix followed by a dot.  (The previous version indexed one past the
    prefix without a bounds check, so the bare prefix itself raised
    IndexError instead of the intended Exception.)
    '''
    if not service_name.startswith(VictronServicePrefix + '.'):
        raise Exception('Not a victron service')
    # Keep everything up to the dot that follows the service type segment.
    i = service_name.find('.', len(VictronServicePrefix) + 1)
    if i == -1:
        return service_name
    return service_name[:i]
def service_instance_name(service_name, instance):
    '''Combine the service base name and device instance into an identifier
    that is unique per D-Bus service, independent of communication port
    names etc.

    Example: com.victronenergy.grid.cgwacs_ttyUSB0_di30_mb1 with instance
    30 yields com.victronenergy.grid/30.'''
    base = service_base_name(service_name)
    return '%s/%s' % (base, instance)
def gpio_paths(etc_path):
    """Return the whitespace-separated tokens stored in *etc_path*.

    Returns an empty list when the file cannot be read.
    """
    try:
        with open(etc_path, 'rt') as handle:
            contents = handle.read()
    except IOError:
        return []
    return contents.strip().split()
def copy_dbus_value(monitor, src_service, src_path, dest_service, dest_path, copy_invalid=False, offset=None):
    """Copy one D-Bus value from a source service/path to a destination.

    Invalid (None) values are skipped unless copy_invalid is True; an
    optional numeric offset is added before writing.
    """
    value = monitor.get_value(src_service, src_path)
    if value is None and not copy_invalid:
        return
    if offset is not None:
        value += offset
    monitor.set_value(dest_service, dest_path, value)
class SmartDict(dict):
    """dict subclass whose keys are also readable/writable as attributes.

    Missing attribute access raises AttributeError.  (The previous version
    caught IndexError, but a failed dict lookup raises KeyError, so missing
    attributes leaked KeyError and broke hasattr()/getattr() defaults.)
    """
    def __getattr__(self, n):
        try:
            return self[n]
        except KeyError:
            raise AttributeError(n)
    def __setattr__(self, k, v):
        self[k] = v
class reify(object):
    """Non-data descriptor for lazily computed, cached attributes.

    The decorated method runs once on first access; its result is then
    stored on the instance under the method's own name, shadowing the
    descriptor for every later lookup.  Class-level access returns the
    descriptor itself.
    """
    def __init__(self, wrapped):
        self.wrapped = wrapped
        update_wrapper(self, wrapped)

    def __get__(self, inst, objtype=None):
        if inst is None:
            return self
        computed = self.wrapped(inst)
        # Cache: subsequent attribute lookups hit the instance dict directly.
        setattr(inst, self.wrapped.__name__, computed)
        return computed
class smart_dict(dict):
    """Dictionary whose keys can be read and written as attributes.

    Nested mappings are re-wrapped on access so attribute chains like
    ``d.a.b`` work; a missing key raises AttributeError.
    """
    def __getattr__(self, key):
        try:
            value = self[key]
        except KeyError:
            raise AttributeError(key)
        if isinstance(value, Mapping):
            return self.__class__(value)
        return value
    def __setattr__(self, key, value):
        self[key] = value
| {
"repo_name": "victronenergy/dbus-systemcalc-py",
"path": "sc_utils.py",
"copies": "1",
"size": "2630",
"license": "mit",
"hash": -8958475258141407000,
"line_mean": 28.2222222222,
"line_max": 110,
"alpha_frac": 0.7136882129,
"autogenerated": false,
"ratio": 3.1610576923076925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43747459052076926,
"avg_score": null,
"num_lines": null
} |
from functools import update_wrapper
from datetime import timedelta
from flask import Flask
from flask import render_template
# Create the Flask application exactly once, keeping the custom static path,
# and attach CORS to that same instance.  (Previously a second
# ``Flask(__name__)`` call here silently replaced the first app, discarding
# its static_url_path configuration.)
app = Flask(__name__, static_url_path='/static')
import similarity
sim = similarity.Similarity()
from flask import jsonify
from flask.ext.cors import CORS, cross_origin
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator adding Cross-Origin Resource Sharing (CORS) headers to a
    Flask view's responses.

    origin -- allowed origin(s): a string or an iterable of strings.
    methods -- allowed HTTP methods; defaults to the view's own methods.
    headers -- allowed request headers; a string or an iterable of strings.
    max_age -- preflight cache lifetime in seconds (or a timedelta).
    attach_to_all -- when False, only OPTIONS responses get the headers.
    automatic_options -- answer OPTIONS preflights with Flask's default
        response instead of invoking the view.

    NOTE(review): ``basestring`` is Python 2 only.  ``current_app``,
    ``request`` and ``make_response`` are not imported at the top of this
    file -- verify the flask imports before use.
    """
    # Normalise the header-ish arguments to comma-separated strings.
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, basestring):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, basestring):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()
    def get_methods():
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']
    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp
            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            h['Access-Control-Allow-Credentials'] = 'true'
            h['Access-Control-Allow-Headers'] = \
                "Origin, X-Requested-With, Content-Type, Accept, Authorization"
            # An explicit headers argument overrides the default list above.
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
@app.route('/', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def index():
    """Render the landing page template.

    NOTE(review): only POST/OPTIONS are routed here, so plain GET requests
    to '/' will 405 -- confirm that is intended.
    """
    return render_template('index.html', name='pieter')
@app.route('/submit', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def distance():
    """Return a string-distance result as JSON.

    NOTE(review): the early ``return jsonify(distance=123)`` makes all of
    the form handling below unreachable; the unreachable code also uses
    names (logging, request, json) that are not imported in this chunk --
    this view is currently a hard-coded stub.
    """
    logging.getLogger('flask_cors').level = logging.DEBUG
    # response.headers.add('Access-Control-Allow-Origin', '*')
    # response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
    # response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
    return jsonify(distance=123)
    # Unreachable: stubbed out by the early return above.
    str1 = request.form['username']
    str2 = request.form['password']
    distance = get_distance(str1, str2)
    return json.dumps({'status':'OK','distance':distance})
def get_distance(str1, str2):
    """Compute a distance between two strings via the similarity helper."""
    # todo: convert str1,2 to binarized vectors so we can calculate distance
    # NOTE(review): str1/str2 are currently ignored -- sim.euclidian() is
    # called with no arguments; confirm intended.
    return sim.euclidian()
# @app.after_request
# def after_request(response):
# response.headers.add('Access-Control-Allow-Origin', '*')
# response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
# response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
# return response
if __name__ == '__main__':
    # Run Flask's built-in development server.
    app.run()
| {
"repo_name": "pieteradejong/joie-de-code",
"path": "strdistanceapp/app.py",
"copies": "1",
"size": "3302",
"license": "mit",
"hash": -7951466588446409000,
"line_mean": 30.75,
"line_max": 86,
"alpha_frac": 0.643549364,
"autogenerated": false,
"ratio": 3.817341040462428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4960890404462428,
"avg_score": null,
"num_lines": null
} |
from functools import update_wrapper
from datetime import timedelta
from flask import Response, request, make_response, current_app
import json
def to_json():
    """Decorator factory: JSON-encode the wrapped view's return value and
    serve it as a 200 'application/json' response."""
    def decorator(f):
        def wrapped_function(*args, **kwargs):
            payload = json.dumps(f(*args, **kwargs))
            return Response(payload, status=200, mimetype='application/json')
        return update_wrapper(wrapped_function, f)
    return decorator
def to_xml():
    """Decorator factory: serve the wrapped view's return value verbatim as
    a 200 'text/xml' response."""
    def decorator(f):
        def wrapped_function(*args, **kwargs):
            return Response(f(*args, **kwargs), status=200, mimetype='text/xml')
        return update_wrapper(wrapped_function, f)
    return decorator
# http://flask.pocoo.org/mailinglist/archive/2011/8/8/add-no-cache-to-response/#952cc027cf22800312168250e59bade4
def never_cache(f):
    """Decorator: wrap the view's result in a Response carrying a
    'Cache-Control: no-cache' header."""
    def wrapped_function(*args, **kwargs):
        response = Response(f(*args, **kwargs))
        response.headers['Cache-Control'] = 'no-cache'
        return response
    return update_wrapper(wrapped_function, f)
# http://flask.pocoo.org/snippets/56/
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator adding Cross-Origin Resource Sharing (CORS) headers to a
    Flask view's responses.

    origin -- allowed origin(s): a string or an iterable of strings.
    methods -- allowed HTTP methods; defaults to the view's own methods.
    headers -- allowed request headers; a string or an iterable of strings.
    max_age -- preflight cache lifetime in seconds (or a timedelta).
    attach_to_all -- when False, only OPTIONS responses get the headers.
    automatic_options -- answer OPTIONS preflights with Flask's default
        response instead of invoking the view.

    NOTE(review): ``basestring`` is Python 2 only; on Python 3 this raises
    NameError.
    """
    # Normalise the header-ish arguments to comma-separated strings.
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, basestring):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, basestring):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()
    def get_methods():
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']
    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp
            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            # http://stackoverflow.com/a/7556908/707580
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp
        f.provide_automatic_options = False
        # http://flask.pocoo.org/snippets/56/#Comments[0]
        f.required_methods = ['OPTIONS']
        return update_wrapper(wrapped_function, f)
    return decorator
| {
"repo_name": "hbrls/weixin-api-mockup",
"path": "appl/utils/decorators.py",
"copies": "1",
"size": "2775",
"license": "mit",
"hash": -1448543921657800400,
"line_mean": 30.8965517241,
"line_max": 112,
"alpha_frac": 0.6104504505,
"autogenerated": false,
"ratio": 3.9642857142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5074736164785715,
"avg_score": null,
"num_lines": null
} |
from functools import update_wrapper
from decimal import Decimal, ROUND_UP
from django.contrib import admin, messages
from django.utils.encoding import smart_str
from django import forms
from django.conf.urls import url, patterns
from django.contrib.admin.util import (unquote, flatten_fieldsets, get_deleted_objects,
model_format_dict, NestedObjects, lookup_needs_distinct)
from django.http import HttpResponse, HttpResponseRedirect
from uppsell import models
from uppsell.workflow import BadTransition
import csv
#====================================================================================
# Field formatters
#====================================================================================
def format_decimal_field(fieldname, short_description=None, quantize='0.01'):
    """Build an admin changelist formatter that rounds a Decimal field up.

    fieldname -- attribute read off the model instance.
    short_description -- column header; defaults to the capitalized field
        name.  (Fixed: the original assigned the bound ``capitalize``
        method itself, missing the call parentheses.)
    quantize -- decimal exponent string to round to.  (Fixed: previously
        ignored in favour of a hard-coded '.01'; the default keeps the old
        behaviour.)
    """
    def formatter(cls, obj):
        return getattr(obj, fieldname).quantize(Decimal(quantize), rounding=ROUND_UP)
    if not short_description:
        short_description = fieldname.capitalize()
    formatter.short_description = short_description
    formatter.allow_tags = False
    return formatter
def format_price_field(fieldname, short_description=None):
    """Build an admin changelist formatter rendering "net (gross)" prices.

    Gross is net plus ``net * obj.tax_rate.rate``.  short_description
    defaults to the capitalized field name.  (Fixed: the original assigned
    the bound ``capitalize`` method without calling it, so the column
    header showed a method object.)
    """
    def formatter(cls, obj):
        net = getattr(obj, fieldname)
        gross = net * obj.tax_rate.rate
        return "%.2f (%.2f)" % (net, gross + net)
    if not short_description:
        short_description = fieldname.capitalize()
    formatter.short_description = short_description
    formatter.allow_tags = False
    return formatter
#====================================================================================
# Widgets
#====================================================================================
class SeparatedValuesWidget(forms.Textarea):
    """Textarea widget that joins/splits multiple submitted values using a
    separator token (newline by default).

    NOTE(review): ``unicode`` in render() is Python 2 only.
    """
    def __init__(self, *args, **kwargs):
        # Separator used both when parsing the POST data and when rendering.
        self.token = kwargs.get("token", "\n")
        super(SeparatedValuesWidget, self).__init__(*args, **kwargs)
    def value_from_datadict(self, data, files, name):
        # Return a string of comma separated integers since the database, and
        # field expect a string (not a list).
        return self.token.join(data.getlist(name))
    def render(self, name, value, attrs=None):
        # Convert comma separated integer string to a list, since the checkbox
        # rendering code expects a list (not a string)
        if value:
            value = self.token.join([unicode(val) for val in value])
        return super(SeparatedValuesWidget, self).render(
            name, value, attrs=attrs
        )
#====================================================================================
# Event handlers
#====================================================================================
def order_event_handler(type, event, event_name=None):
    """Build an admin action that fires the given workflow *event* of
    *type* ('order' or 'payment') on every selected order.

    event_name, when given, only customises the action's label.
    """
    label = event if event_name is None else event_name
    def handler(modeladmin, request, queryset):
        for order in queryset:
            order.event(type, event)
    handler.short_description = "%s: %s" % (type, label)
    return handler
# Build one admin action per order- and payment-workflow transition so they
# can all be offered in the admin actions dropdown.
order_actions = []
for event, event_name in models.ORDER_TRANSITIONS:
    order_actions.append(order_event_handler("order", event, event_name))
for event, event_name in models.PAYMENT_TRANSITIONS:
    order_actions.append(order_event_handler("payment", event, event_name))
# ====================================================================================
# IN-LINES
# ====================================================================================
class OrderEventInline(admin.TabularInline):
    """Read-only inline listing an order's workflow events."""
    model = models.OrderEvent
    extra = 0
    can_delete = False
    fields = ('action_type', 'event', 'state_before', 'state_after', 'comment', 'created_at')
    readonly_fields = fields
class OrderItemInline(admin.TabularInline):
    """Read-only inline listing an order's line items."""
    model = models.OrderItem
    extra = 0
    can_delete = False
    fields = ('sku','product','quantity',)
    readonly_fields = fields
class CustomerOrderInline(admin.TabularInline):
    """Read-only inline listing a customer's orders."""
    model = models.Order
    extra = 0
    can_delete = False
    fields = ('store', 'order_state', 'payment_state', 'created_at')
    readonly_fields = fields
class CustomerAddressInline(admin.TabularInline):
    """Read-only inline listing a customer's addresses."""
    model = models.Address
    extra = 0
    can_delete = False
    fields = ('line1', 'city', 'zip', 'country_code')
    readonly_fields = fields
class OrderCustomerInline(admin.StackedInline):
    """Read-only stacked inline showing an order's customer details."""
    model = models.Customer
    extra = 0
    can_delete = False
    fields = ('username', 'full_name', 'phone', 'email')
    readonly_fields = fields
class ProfileInline(admin.StackedInline):
    """Read-only stacked inline showing a customer's profile."""
    model = models.Profile
    extra = 0
    can_delete = False
    fields = ('full_name', 'document', 'created_at')
    readonly_fields = fields
# ====================================================================================
# FORMS
# ====================================================================================
class ListingModelForm(forms.ModelForm):
    """Listing form that narrows the tax-rate dropdown to the rates defined
    for the listing's own store."""
    features = forms.CharField(widget=forms.Textarea, required=False)
    class Meta:
        model = models.Listing
    def __init__(self, *args, **kwargs):
        super(ListingModelForm, self).__init__(*args, **kwargs)
        # Only an already-saved instance knows its store.
        if self.instance.id:
            tax_rates = models.SalesTaxRate.objects.filter(store=self.instance.store)
            tax_rate_field = self.fields['tax_rate'].widget
            tax_rate_choices = []
            # Leading blank entry so "no rate" remains selectable.
            tax_rate_choices.append(('', '------'))
            for tax_rate in tax_rates:
                tax_rate_choices.append((tax_rate.id, tax_rate))
            tax_rate_field.choices = tax_rate_choices
class ProductModelForm(forms.ModelForm):
    """Product form using textareas for the long free-text fields and a
    separated-values widget for newline-delimited provisioning codes."""
    features = forms.CharField(widget=forms.Textarea, required=False)
    description = forms.CharField(widget=forms.Textarea, required=False)
    provisioning_codes = forms.CharField(widget=SeparatedValuesWidget, required=False)
    # Widget geometry tweaked inline rather than via custom CSS.
    features.widget.attrs["rows"] = 5
    description.widget.attrs["rows"] = 5
    provisioning_codes.widget.attrs["rows"] = 5
    provisioning_codes.widget.attrs["cols"] = 100
    provisioning_codes.widget.attrs["style"] = "margin: 0px; width: 400px; height: 60px;"
    class Meta:
        model = models.Product
# ====================================================================================
# ADMINS
# ====================================================================================
class CustomerAdmin(admin.ModelAdmin):
    """Customer admin with inline addresses, orders and profiles."""
    list_display = ('username', 'full_name', 'email', 'created_at')
    search_fields = ['username', 'full_name', 'email']
    inlines = (CustomerAddressInline,CustomerOrderInline,ProfileInline)
class ProfileAdmin(admin.ModelAdmin):
    """Flat changelist admin for customer profiles."""
    list_display = ('customer', 'full_name', 'document', 'created_at')
class OrderAdmin(admin.ModelAdmin):
    """Order admin with workflow-event firing.

    Adds a custom ``<pk>/event/`` admin URL that triggers order/payment
    workflow transitions; the ``show_*`` helpers render related objects as
    HTML columns in the changelist.
    """
    list_display = ('id', 'show_store', 'show_customer', 'order_state', 'show_items', 'created_at')
    list_filter = ('store', 'order_state', 'payment_state')
    search_fields = ('id', 'customer__username', 'customer__full_name', 'customer__email')
    #actions = order_actions
    fields = ('store', 'customer', "show_customer", "transaction_id", "shipping_address",
            "billing_address", "currency", 'order_state', 'payment_state',
            'coupon', 'reference', 'payment_made_ts', 'wholesale',)
    readonly_fields = ("show_customer",'order_state', 'payment_state','payment_made_ts')
    # readonly_fields = ("transaction_id", 'order_state', 'payment_state', 'show_customer', 'store',
    #         "shipping_address", "billing_address", "currency", 'payment_made_ts',
    #         'created_at', 'updated_at',)
    inlines = (OrderItemInline,OrderEventInline,)
    def get_urls(self):
        """Prepend the custom event-firing URL to the default admin URLs."""
        from django.conf.urls import patterns, url
        def wrap(view):
            # Route the view through admin_view() for permission checks.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)
        info = self.model._meta.app_label, self.model._meta.model_name
        myurls = patterns('',
            url(r'^(.+)/event/$', wrap(self.event_view), name='%s_%s_event' % info),
        )
        urls = super(OrderAdmin, self).get_urls()
        return myurls + urls
    def event_view(self, request, object_id, extra_context=None):
        """Fire one workflow event (?type=...&event=...) on an order, show a
        success/warning message, and redirect back to the changelist."""
        id = unquote(object_id)
        type, event = request.GET["type"], request.GET["event"]
        order = models.Order.objects.get(pk=id)
        try:
            order.event(type, event)
            self.message_user(request,
                    "Event '%s:%s' was sent to order #%s"%(type, event, id),
                    messages.SUCCESS)
        except BadTransition:
            self.message_user(request,
                    "Event '%s:%s' is not a valid transition for order #%s"%(type, event, id),
                    messages.WARNING)
        return HttpResponseRedirect("/store/order/")
    def action_pulldown(self, order):
        """Render one link per currently-available workflow event."""
        html = []
        for event in order.order_workflow.available:
            html.append('<a href="/store/order/%d/event/?type=order&event=%s">Order: %s</a>'%(order.id, event, event))
        for event in order.payment_workflow.available:
            html.append('<a href="/store/order/%d/event/payment/%s">Payment: %s</a>'%(order.id, event, event))
        #html.append("<option value='payment.%s'>Payment: %s</option>"%(event,event))
        #html.append("</select> <input type='submit' value='Go'/></form>")
        return "[" + "][".join(html) + "]"
    action_pulldown.allow_tags = True
    action_pulldown.short_description = "Actions"
    def show_store(self, obj):
        """Changelist link to the order's store, or '' when unset."""
        if obj.store:
            return '<a href="/uppsell/store/%s">%s</a>' % (obj.store.id, obj.store)
        return ""
    show_store.allow_tags = True
    show_store.short_description = "Store"
    def show_customer(self, obj):
        """Changelist link to the customer, preferring the full name and
        appending the email address when present."""
        if not obj.customer:
            return "No customer"
        base = '/admin/uppsell/customer'
        if obj.customer.full_name not in (None, ""):
            link = '<a href="%s/%s">%s</a>' % (base, obj.customer.id, obj.customer.full_name)
        else:
            link = '<a href="%s/%s">%s</a>' % (base, obj.customer.id, obj.customer)
        if obj.customer.email in (None, ""):
            return link
        return "%s (%s)" % (link, obj.customer.email)
    show_customer.allow_tags = True
    show_customer.short_description = "Customer ctd."
    def show_email(self, obj):
        """Customer email column ('' when the order has no customer)."""
        if not obj.customer:
            return ""
        return obj.customer.email
    show_email.allow_tags = True
    show_email.short_description = "Email"
    def show_items(self, obj):
        """One HTML line per order item."""
        items = models.OrderItem.objects.filter(order_id=obj.id)
        return "<br/>".join([str(item) for item in items])
    show_items.allow_tags = True
    show_items.short_description = "Items"
class SalesTaxRateAdmin(admin.ModelAdmin):
    """Flat changelist admin for per-store sales-tax rates."""
    list_display = ('name', 'store', 'abbreviation', 'rate')
class ProductAdmin(admin.ModelAdmin):
    """Product admin grouping the core, stock/shipping and validity fields
    into fieldsets."""
    form = ProductModelForm
    list_display = ('sku', 'group', 'name', 'show_pvcs')
    list_filter = ('group',)
    fieldsets = (
        (None, {
            'fields': ('sku', 'group', 'name', 'title', 'subtitle', ('description', 'features'))
        }),
        ('Stock and shipping', {
            'fields': ('shipping', 'has_stock', 'stock_units', 'provisioning_codes',)
        }),
        ('Validity', {
            'fields': ('validity_unit', 'validity',)
        }),
    )
    def show_pvcs(self, obj):
        """Render the provisioning codes one per line in the changelist."""
        if obj.provisioning_codes:
            return "<br>".join([str(pc) for pc in obj.provisioning_codes])
        return str(obj.provisioning_codes)
    show_pvcs.allow_tags=True
    show_pvcs.short_description = "Provisioning Codes"
class ListingAdmin(admin.ModelAdmin):
    """Listing admin; the price and shipping columns come from the
    module-level formatter factories."""
    form = ListingModelForm
    list_display = ('product', 'store', 'state', 'show_price', 'show_shipping')
    list_filter = ('store', 'state',)
    show_price = format_price_field('price', None)
    show_shipping = format_decimal_field('shipping', None)
class AddressAdmin(admin.ModelAdmin):
    """Flat changelist admin for customer addresses."""
    list_display = ('customer', 'line1', 'city', 'country')
class CouponAdmin(admin.ModelAdmin):
    """Coupon admin separating core data, optional target relations and
    usage limits into fieldsets; remaining uses are read-only."""
    list_display = ('code', 'name', 'max_uses', 'remaining', 'valid_until')
    list_filter = ('store','product', 'product_group')
    search_fields = ('code', 'name')
    fieldsets = (
        (None, {
            'fields': ('code', 'name', 'type', 'discount_amount', 'discount_shipping')
        }),
        ('Optional relations', {
            'fields': ('store', 'customer', 'product', 'product_group')
        }),
        ('Usage', {
            'fields': ('max_uses', 'remaining', 'valid_from', 'valid_until')
        }),
    )
    readonly_fields = ('remaining',)
# START EXPORT ACTIONS
def export_csv(modeladmin, request, queryset):
    """Admin action: export the selected rows as a downloadable CSV file.

    Writes one header row containing the model's field names followed by
    one data row per selected object, and returns the HTTP response.
    """
    opts = queryset.model._meta
    # ``mimetype`` was removed in Django 1.7; ``content_type`` is the
    # backward-compatible keyword argument.
    response = HttpResponse(content_type='text/csv')
    # force download
    response['Content-Disposition'] = 'attachment;filename=invoices.csv'
    # the csv writer
    writer = csv.writer(response)
    field_names = [field.name for field in opts.fields]
    # Write a first row with header information
    writer.writerow(field_names)
    # Write data rows
    for obj in queryset:
        writer.writerow([getattr(obj, field) for field in field_names])
    return response
export_csv.short_description = u"Export to CSV"
def export_xls(modeladmin, request, queryset):
    """Admin action stub: XLS export is not implemented yet."""
    pass
def export_xlsx(modeladmin, request, queryset):
    """Admin action stub: XLSX export is not implemented yet."""
    pass
# END EXPORT ACTIONS
class InvoiceAdmin(admin.ModelAdmin):
    """Read-only admin view of invoices.

    Invoices are immutable snapshots of an order, so every field is
    listed in ``readonly_fields``; the only available action is CSV export.
    """
    fields = ('id', 'order_id', 'customer_id', 'store_id', 'username', 'user_fullname', 'user_document_type',
        'user_document', 'user_mobile_msisdn', 'user_email', 'user_dob', 'shipping_address_line1', 'shipping_address_line2',
        'shipping_address_line3', 'shipping_address_city', 'shipping_address_zipcode', 'shipping_address_province',
        'shipping_address_country', 'billing_address_line1', 'billing_address_line2', 'billing_address_line3',
        'billing_address_city', 'billing_address_zipcode', 'billing_address_province', 'billing_address_country',
        'payment_made_ts', 'order_state', 'payment_state', 'coupon', 'skus', 'products', 'currency', 'order_sub_total',
        'order_shipping_total', 'order_tax_total', 'order_gross_total', 'order_discount_total', 'order_total')
    list_display = ('id', 'user_fullname', 'user_document', 'payment_made_ts', 'order_sub_total',
        'order_shipping_total', 'order_tax_total', 'order_discount_total', 'order_total')
    # Every field is immutable in the admin.
    readonly_fields = fields
    actions = [export_csv]
# Register every uppsell model with the default admin site.
admin.site.register(models.Customer, CustomerAdmin)
admin.site.register(models.Profile, ProfileAdmin)
admin.site.register(models.Address, AddressAdmin)
admin.site.register(models.Store)
admin.site.register(models.SalesTaxRate, SalesTaxRateAdmin)
admin.site.register(models.ProductGroup)
admin.site.register(models.Product, ProductAdmin)
admin.site.register(models.Listing, ListingAdmin)
admin.site.register(models.Order, OrderAdmin)
admin.site.register(models.Coupon, CouponAdmin)
admin.site.register(models.Invoice, InvoiceAdmin)
| {
"repo_name": "upptalk/uppsell",
"path": "uppsell/admin.py",
"copies": "1",
"size": "15108",
"license": "mit",
"hash": -4994485170511872000,
"line_mean": 40.8504155125,
"line_max": 130,
"alpha_frac": 0.5981599153,
"autogenerated": false,
"ratio": 3.950836820083682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5048996735383682,
"avg_score": null,
"num_lines": null
} |
from functools import update_wrapper
from django.conf import settings
from django.db import connection, DEFAULT_DB_ALIAS
from django.test import TestCase, skipUnlessDBFeature
from models import Reporter, Article
#
# The introspection module is optional, so methods tested here might raise
# NotImplementedError. This is perfectly acceptable behavior for the backend
# in question, but the tests need to handle this without failing. Ideally we'd
# skip these tests, but until #4788 is done we'll just ignore them.
#
# The easiest way to accomplish this is to decorate every test case with a
# wrapper that ignores the exception.
#
# The metaclass is just for fun.
#
def ignore_not_implemented(func):
    """Wrap *func* so a NotImplementedError is swallowed and None is returned."""
    def wrapper(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except NotImplementedError:
            result = None
        return result
    # update_wrapper returns the wrapper it just decorated.
    return update_wrapper(wrapper, func)
class IgnoreNotimplementedError(type):
    """Metaclass that wraps every test* method with ignore_not_implemented."""
    def __new__(cls, name, bases, attrs):
        wrapped = dict(
            (key, ignore_not_implemented(value) if key.startswith('test') else value)
            for key, value in attrs.items()
        )
        return type.__new__(cls, name, bases, wrapped)
class IntrospectionTests(TestCase):
    """Exercise the database backend's introspection API against test models."""
    # Metaclass silently ignores NotImplementedError from optional backends.
    __metaclass__ = IgnoreNotimplementedError
    def test_table_names(self):
        """Both test-model tables appear in table_names()."""
        tl = connection.introspection.table_names()
        self.assertTrue(Reporter._meta.db_table in tl,
                     "'%s' isn't in table_list()." % Reporter._meta.db_table)
        self.assertTrue(Article._meta.db_table in tl,
                     "'%s' isn't in table_list()." % Article._meta.db_table)
    def test_django_table_names(self):
        """Tables created outside Django are excluded from django_table_names()."""
        cursor = connection.cursor()
        cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);');
        tl = connection.introspection.django_table_names()
        cursor.execute("DROP TABLE django_ixn_test_table;")
        self.assertTrue('django_ixn_testcase_table' not in tl,
                     "django_table_names() returned a non-Django table")
    def test_installed_models(self):
        """installed_models() maps table names back to model classes."""
        tables = [Article._meta.db_table, Reporter._meta.db_table]
        models = connection.introspection.installed_models(tables)
        self.assertEqual(models, set([Article, Reporter]))
    def test_sequence_list(self):
        """The Reporter pk sequence is reported by sequence_list()."""
        sequences = connection.introspection.sequence_list()
        expected = {'table': Reporter._meta.db_table, 'column': 'id'}
        self.assertTrue(expected in sequences,
                     'Reporter sequence not found in sequence_list()')
    def test_get_table_description_names(self):
        """Column names from get_table_description() match the model fields."""
        cursor = connection.cursor()
        desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        self.assertEqual([r[0] for r in desc],
            [f.column for f in Reporter._meta.fields])
    def test_get_table_description_types(self):
        """Column types reverse-map to the expected Django field classes."""
        cursor = connection.cursor()
        desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        self.assertEqual(
            [datatype(r[1], r) for r in desc],
            ['IntegerField', 'CharField', 'CharField', 'CharField', 'BigIntegerField']
        )
    # Regression test for #9991 - 'real' types in postgres
    @skipUnlessDBFeature('has_real_datatype')
    def test_postgresql_real_type(self):
        """PostgreSQL REAL columns introspect as FloatField."""
        cursor = connection.cursor()
        cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);")
        desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table')
        cursor.execute('DROP TABLE django_ixn_real_test_table;')
        self.assertEqual(datatype(desc[0][1], desc[0]), 'FloatField')
    def test_get_relations(self):
        """get_relations() reports the Article->Reporter foreign key."""
        cursor = connection.cursor()
        relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
        # Older versions of MySQL don't have the chops to report on this stuff,
        # so just skip it if no relations come back. If they do, though, we
        # should test that the response is correct.
        if relations:
            # That's {field_index: (field_index_other_table, other_table)}
            self.assertEqual(relations, {3: (0, Reporter._meta.db_table)})
    def test_get_indexes(self):
        """The FK column has a plain (non-unique, non-pk) index."""
        cursor = connection.cursor()
        indexes = connection.introspection.get_indexes(cursor, Article._meta.db_table)
        self.assertEqual(indexes['reporter_id'], {'unique': False, 'primary_key': False})
def datatype(dbtype, description):
    """Helper to convert a data type into a string.

    ``get_field_type`` may return either a field-class name or a
    ``(name, params)`` tuple; in the latter case only the name is returned.
    """
    dt = connection.introspection.get_field_type(dbtype, description)
    # isinstance is the idiomatic type check (the old ``type(dt) is tuple``
    # form would also reject tuple subclasses such as namedtuples).
    if isinstance(dt, tuple):
        return dt[0]
    return dt
| {
"repo_name": "jamespacileo/django-france",
"path": "tests/regressiontests/introspection/tests.py",
"copies": "2",
"size": "4689",
"license": "bsd-3-clause",
"hash": 2835781599905321000,
"line_mean": 41.2432432432,
"line_max": 99,
"alpha_frac": 0.6596289187,
"autogenerated": false,
"ratio": 4.056228373702422,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5715857292402422,
"avg_score": null,
"num_lines": null
} |
from functools import update_wrapper
from django.conf.urls import patterns, url
from django.contrib import admin
from windberg_results import models
from windberg_results.views import import_entries_from_csv
class ResultEntryInlineAdmin(admin.TabularInline):
    """Tabular inline editor for a result table's entries (no extra blank rows)."""
    model = models.ResultEntry
    extra = 0
class ResultTableAdmin(admin.ModelAdmin):
    """Admin options for ResultTable with inline entries and a CSV-import URL."""
    model = models.ResultTable
    inlines = [ResultEntryInlineAdmin]
    list_display = ("_version_name", "start_time", "name", "use_age_group", "use_gender")
    prepopulated_fields = {"slug": ("name",)}
    def get_urls(self):
        """Prepend a per-table CSV import route to the default admin URLs."""
        def wrap(view):
            # Run the custom view through the admin's permission checks.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)
        urls = super(ResultTableAdmin, self).get_urls()
        my_urls = patterns('',
                           url(r'^(?P<year>\d{4})/(?P<slug>[-\w]+)/import/$',
                               wrap(import_entries_from_csv), name="import_result_csv"),
        )
        return my_urls + urls
    def _version_name(self, obj):
        """List-column label combining the edition number and its year."""
        return u"%d. (%d)" % (obj.version.number, obj.version.date.year)
    _version_name.short_description = u"Auflage"
# Register the result table with the default admin site.
admin.site.register(models.ResultTable, ResultTableAdmin)
| {
"repo_name": "janLo/Windberg-web",
"path": "windberg_results/admin.py",
"copies": "1",
"size": "1282",
"license": "bsd-3-clause",
"hash": -307629717093687600,
"line_mean": 32.7368421053,
"line_max": 89,
"alpha_frac": 0.635725429,
"autogenerated": false,
"ratio": 3.7705882352941176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.989021536611421,
"avg_score": 0.0032196596359813394,
"num_lines": 38
} |
from functools import update_wrapper
from django.conf.urls import url
from django.http import HttpResponse
from tastypie import http
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.resources import ModelResource, convert_post_to_put
from nested_resource import utils
from tastypie.utils import trailing_slash
class NestedMixin(object):
    """
    A tastypie resource mixin that allows you to make any resource nested within
    another resource.
    """
    def get_nested_resource(self, resource_cls, parent_attr):
        """Build the list-dispatch URL for *resource_cls* nested under this resource.

        ``parent_attr`` names the URL kwarg that captures the parent
        object's identifier.
        """
        resource = utils.import_class(resource_cls)()
        url_pattern = r"^(?P<parent_resource_name>%s)/(?P<%s>.*?)/" \
                      r"(?P<resource_name>%s)%s$" % \
                      (self._meta.resource_name, parent_attr,
                       resource._meta.resource_name, trailing_slash())
        return url(url_pattern, resource.wrap_view('nested_dispatch_list'),
                   name="api_%s_dispatch_list" % resource._meta.resource_name)
    def nested_dispatch_list(self, request, **kwargs):
        """
        Handles the common operations (allowed HTTP method, authentication,
        throttling, method lookup) surrounding most CRUD interactions.
        """
        allowed_methods = self._meta.nested_allowed_methods
        # Allow clients behind restrictive proxies to tunnel the real verb.
        if 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META:
            request.method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE']
        request_method = self.method_check(request, allowed=allowed_methods)
        method = getattr(self, "%s_list" % request_method, None)
        if method is None:
            raise ImmediateHttpResponse(response=http.HttpNotImplemented())
        self.is_authenticated(request)
        self.throttle_check(request)
        # All clear. Process the request.
        request = convert_post_to_put(request)
        response = method(request, **kwargs)
        # Add the throttled request.
        self.log_throttled_access(request)
        # If what comes back isn't a ``HttpResponse``, assume that the
        # request was accepted and that some action occurred. This also
        # prevents Django from freaking out.
        if not isinstance(response, HttpResponse):
            return http.HttpNoContent()
        return response
    def add_nested_custom_api(self, resource_cls, parent_attr,
                              urlpattern, view, api_name=None):
        """Attach a custom view (by method name) under the nested resource's URLs."""
        resource = utils.import_class(resource_cls)()
        api_name = api_name or view
        urlpattern = r"^(?P<parent_resource_name>%s)/(?P<%s>.*?)/" \
                     r"(?P<resource_name>%s)/%s%s$" %\
                     (self._meta.resource_name, parent_attr,
                      resource._meta.resource_name,
                      urlpattern, trailing_slash())
        # BUG FIX: ``api_name`` was previously passed as the third positional
        # argument of ``url()``, which is the ``kwargs`` dict, not the route
        # name; a plain string there breaks URL resolution. Pass it as ``name``.
        return url(urlpattern, resource.wrap_view(view), name=api_name)
| {
"repo_name": "kgritesh/tastypie-nested-resource",
"path": "nested_resource/resources.py",
"copies": "1",
"size": "2851",
"license": "mit",
"hash": -4684894958498608000,
"line_mean": 36.5131578947,
"line_max": 80,
"alpha_frac": 0.629954402,
"autogenerated": false,
"ratio": 4.174231332357247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5304185734357247,
"avg_score": null,
"num_lines": null
} |
from functools import update_wrapper
from django.contrib.admin import ModelAdmin
from django.contrib.admin.util import unquote
from django.forms.models import ModelForm
from django_ace import AceWidget
from django.contrib import admin
from django.shortcuts import render
from tildeslash.blog.models import Post
class PostAdminForm(ModelForm):
    """Post form that edits the content field in an Ace markdown editor."""
    class Meta:
        model = Post
        widgets = {
            'content': AceWidget(mode='markdown', theme='twilight', width='800px', height='600px'),
        }
class PostAdmin(ModelAdmin):
    """Admin for blog posts with a custom change form and an HTML preview view."""
    form = PostAdminForm
    change_form_template = 'admin/custom_post_change_form.html'
    def get_urls(self):
        """Prepend a ``.../<id>/preview/`` route to the default admin URLs."""
        from django.conf.urls import patterns, url
        def wrap(view):
            # Run the custom view through the admin's permission checks.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)
        urls = super(PostAdmin, self).get_urls()
        my_urls = patterns(
            '',
            url(r'^(?P<post_id>\d+)/preview/$',
                wrap(self.preview_post),
                name='post.preview'),
        )
        return my_urls + urls
    def preview_post(self, request, post_id, extra_context=None):
        """Render the post with the public blog template, as visitors would see it."""
        # Removed a leftover debug ``print post_id`` statement that also broke
        # Python 3 compatibility.
        post = self.get_object(request, unquote(post_id))
        data = {
            'post': post,
        }
        return render(request, 'blog/post.html', data)
admin.site.register(Post, PostAdmin) | {
"repo_name": "yshlin/tildeslash",
"path": "tildeslash/blog/admin.py",
"copies": "1",
"size": "1451",
"license": "bsd-3-clause",
"hash": -750387957575822100,
"line_mean": 28.6326530612,
"line_max": 99,
"alpha_frac": 0.6230186079,
"autogenerated": false,
"ratio": 4.00828729281768,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013378684807256235,
"num_lines": 49
} |
from functools import update_wrapper
from django.contrib import admin
from django.shortcuts import redirect, render
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from .forms import MessageForm
from .models import get_device_model
# Resolve the (possibly swapped) device model once at import time.
Device = get_device_model()
class DeviceAdmin(admin.ModelAdmin):
    """Admin options for GCM devices with a bulk "send message" action.

    The action stashes the selected device ids in the session, then a
    custom admin view presents a message form and pushes the message to
    every selected device.
    """
    list_display = ['dev_id', 'name', 'modified_date', 'is_active']
    search_fields = ('dev_id', 'name')
    list_filter = ['is_active']
    date_hierarchy = 'modified_date'
    readonly_fields = ('dev_id', 'reg_id')
    actions = ['send_message_action']
    def get_urls(self):
        """Prepend the send-message route to the default admin URLs."""
        from django.conf.urls import url
        def wrap(view):
            # Run the custom view through the admin's permission checks.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)
        urlpatterns = [
            url(r'^send-message/$', wrap(self.send_message_view),
                name=self.build_admin_url('send_message'))]
        return urlpatterns + super(DeviceAdmin, self).get_urls()
    def build_admin_url(self, url_name):
        """Return the namespaced admin URL name ``app_model_<url_name>``."""
        return '%s_%s_%s' % (self.model._meta.app_label,
                             self.model._meta.model_name,
                             url_name)
    def send_message_view(self, request):
        """Show the message form and push the message to the stashed devices."""
        base_view = 'admin:%s' % self.build_admin_url('changelist')
        session_key = 'device_ids'
        device_ids = request.session.get(session_key)
        # Nothing stashed (e.g. direct URL hit) — go back to the changelist.
        if not device_ids:
            return redirect(base_view)
        form = MessageForm(data=request.POST or None)
        if form.is_valid():
            devices = Device.objects.filter(pk__in=device_ids)
            for device in devices:
                device.send_message(form.cleaned_data['message'])
            self.message_user(request, _('Message was sent.'))
            # Clear the stash so a refresh does not resend.
            del request.session[session_key]
            return redirect(base_view)
        context = {'form': form, 'opts': self.model._meta, 'add': False}
        return render(request, 'gcm/admin/send_message.html', context)
    def send_message_action(self, request, queryset):
        """Changelist action: stash selected ids and redirect to the form view."""
        ids = queryset.values_list('id', flat=True)
        request.session['device_ids'] = list(ids)
        url = 'admin:%s' % self.build_admin_url('send_message')
        return redirect(url)
    send_message_action.short_description = _("Send message")
# Expose the configured device model in the admin.
admin.site.register(Device, DeviceAdmin)
| {
"repo_name": "bogdal/django-gcm",
"path": "gcm/admin.py",
"copies": "1",
"size": "2436",
"license": "bsd-2-clause",
"hash": 7423432675928990000,
"line_mean": 35.9090909091,
"line_max": 72,
"alpha_frac": 0.6194581281,
"autogenerated": false,
"ratio": 3.8544303797468356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9973888507846835,
"avg_score": 0,
"num_lines": 66
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.