text stringlengths 0 1.05M | meta dict |
|---|---|
from functools import partial
# version code ef5291f09f60+
coursera = 1
# Please fill out this stencil and submit using the provided submission script.
def calc_minutes_in_week():
    """Return the number of minutes in one week."""
    # 60 minutes/hour * 24 hours/day * 7 days/week
    return 60 * 24 * 7
## 1: (Task 1) Minutes in a Week
minutes_in_week = calc_minutes_in_week()

def modulus(numerator, denominator):
    """Remainder of numerator/denominator, built from floor division only."""
    return numerator - denominator * (numerator // denominator)

## 2: (Task 2) Remainder
# For this task, your expression must use //
remainder_without_mod = modulus
def divides(denominator, numerator):
    """Return True when denominator evenly divides numerator."""
    return modulus(numerator, denominator) == 0

## 3: (Task 3) Divisibility
divisible_by_3 = partial(divides, 3)

## 4: (Task 4) Conditional Expression
x = -9
y = 1 / 2
if x + 10 < 0:
    expression_val = 2 ** (y + 1 / 2)
else:
    expression_val = 2 ** (y - 1 / 2)
## 5: (Task 5) Squares Set Comprehension
first_five_squares = {n * n for n in range(1, 6)}

## 6: (Task 6) Powers-of-2 Set Comprehension
first_five_pows_two = {2 ** exponent for exponent in range(5)}

## 7: (Task 7) Double comprehension evaluating to nine-element set
# Assign three-element sets to X1 and Y1 so that
# {x*y for x in X1 for y in Y1} evaluates to a nine-element set.
X1 = {1, 3, 5}
Y1 = {7, 11, 13}
nine_element_double_set_comprehension = {a * b for a in X1 for b in Y1}
## 8: (Task 8) Double comprehension evaluating to five-element set
# Assign disjoint three-element sets to X2 and Y2 so that
# {x*y for x in X2 for y in Y2} evaluates to a five-element set.
# Bug fix: the previous X2 = {0, 2, 3} and Y2 = {0, 2, 1} shared the
# elements 0 and 2, violating the "disjoint" requirement.  Powers of two
# keep the sets disjoint while the products still collapse to 5 values.
X2 = {1, 2, 4}
Y2 = {8, 16, 32}
five_element_double_set_comprehension = {x * y for x in X2 for y in Y2}
## 9: (Task 9) Set intersection as a comprehension
S = {1, 2, 3, 4}
T = {3, 4, 5, 6}
# One-line set comprehension evaluating to the intersection of S and T.
S_intersect_T = {member for member in S if member in T}

## 10: (Task 10) Average
list_of_numbers = [20, 10, 15, 75]
# One-line expression for the average; refers to list_of_numbers and works
# for a list of any length greater than zero.
list_average = sum(list_of_numbers) / len(list_of_numbers)
## 11: (Task 11) Cartesian-product comprehension
# Double list comprehension over ['A','B','C'] and [1,2,3].
L0 = ['A', 'B', 'C']
L1 = [1, 2, 3]
cartesian_product = [(letter, number) for letter in L0 for number in L1]

## 12: (Task 12) Sum of numbers in list of list of numbers
LofL = [[.25, .75, .1], [-1, 0], [4, 4, 4, 4]]
# sum of sums: adds every number in every inner list.
LofL_sum = sum([sum(inner) for inner in LofL])
## 13: (Task 13) Three-element tuples summing to zero
S = {-4, -2, 1, 2, 5, 0}
# Every (x, y, z) drawn from S (repeats allowed) whose sum is zero.
zero_sum_list = [(a, b, c) for a in S for b in S for c in S if a + b + c == 0]

## 14: (Task 14) Nontrivial three-element tuples summing to zero
S = {-4, -2, 1, 2, 5, 0}
# As above, but keep only tuples whose three entries are pairwise distinct.
exclude_zero_list = [(a, b, c) for a in S for b in S for c in S
                     if a + b + c == 0 and len({a, b, c}) == 3]
## 15: (Task 15) One nontrivial three-element tuple summing to zero
S = {-4, -2, 1, 2, 5, 0}
# Bug fix: the old expression used zip(S, S, S), which only pairs each
# element with itself, so it could never yield a nontrivial tuple — it
# found only (0, 0, 0).  Take the first tuple of pairwise-distinct
# elements of S that sums to zero instead.
first_of_tuples_list = [(a, b, c) for a in S for b in S for c in S
                        if a + b + c == 0 and len({a, b, c}) == 3][0]

## 16: (Task 16) List and set differ
# example_L contains a duplicate, so converting to a set shrinks it.
example_L = [1, 1, 2, 3, 4]
## 17: (Task 17) Odd numbers
# Set comprehension over range(100) keeping only the odd values.
odd_num_list_range = {value for value in range(100) if value % 2 == 1}

## 18: (Task 18) Using range and zip
# zip pairs each even number 0,2,...,98 with the following odd number
# 1,3,...,99; zip returns an iterator of tuples, so materialize with list().
range_and_zip = list(zip(range(0, 100, 2), range(1, 100, 2)))
## 19: (Task 19) Using zip to find elementwise sums
A = [10, 25, 40]
B = [1, 15, 20]
# Elementwise sums: the ith element is A[i] + B[i].
list_sum_zip = [first + second for first, second in zip(A, B)]
## 20: (Task 20) Extracting the value corresponding to key k from each dictionary in a list
dlist = [{'James':'Sean', 'director':'Terence'}, {'James':'Roger', 'director':'Lewis'}, {'James':'Pierce', 'director':'Roger'}]
k = 'James'
# Pull the value stored under k out of every dictionary, in order.
value_list = [record[k] for record in dlist]
## 21: (Task 21) Extracting the value corresponding to k when it exists
dlist = [{'Bilbo':'Ian','Frodo':'Elijah'},{'Bilbo':'Martin','Thorin':'Richard'}]
k = 'Bilbo'
# Idiom fix: test key membership with `in` instead of comparing
# dict.get(k, None) against None, which also mis-handles a stored None.
value_list_modified_1 = [record[k] for record in dlist if k in record]
k = 'Frodo'
value_list_modified_2 = [record[k] for record in dlist if k in record]
## 22: (Task 22) A dictionary mapping integers to their squares
# One-line dictionary comprehension: n -> n squared for n in 0..99.
square_dict = {n: n * n for n in range(100)}

## 23: (Task 23) Making the identity function
D = {'red','white','blue'}
# One-line dictionary comprehension mapping each element of D to itself.
identity_dict = {color: color for color in D}
## 24: (Task 24) Mapping integers to their representation over a given base
base = 10
digits = set(range(base))
# Completed task: map every two-digit number over `base` to its digit pair
# (most significant digit first).  Only `base` and `digits` are used, so the
# comprehension still works if these are reassigned (e.g. base = 2,
# digits = {0, 1}).
representation_dict = {base * high + low: (high, low)
                       for high in digits for low in digits}

## 25: (Task 25) A dictionary mapping names to salaries
id2salary = {0:1000.0, 1:1200.50, 2:990}
names = ['Larry', 'Curly', 'Moe']
# Use each id as an index into names to pair a name with its salary.
listdict2dict = {names[ident]: id2salary[ident] for ident in id2salary}
## 26: (Task 26) Procedure nextInts
# Complete the procedure definition by replacing [ ... ] with a one-line list comprehension
def nextInts(L):
    """Return a list with every element of L incremented by one."""
    return [n + 1 for n in L]
## 27: (Task 27) Procedure cubes
# Complete the procedure definition by replacing [ ... ] with a one-line list comprehension
def cubes(L):
    """Return the cube of every element of L, in order."""
    return [n ** 3 for n in L]
## 28: (Task 28) Procedure dict2list
# Input: a dictionary dct and a list keylist consisting of the keys of dct
# Output: the list L such that L[i] is the value associated in dct with keylist[i]
# Example: dict2list({'a':'A', 'b':'B', 'c':'C'},['b','c','a']) should equal ['B','C','A']
def dict2list(dct, keylist):
    """Look up each key of keylist in dct, preserving keylist's order."""
    return [dct[k] for k in keylist]
## 29: (Task 29) Procedure list2dict
# Input: a list L and a list keylist of the same length
# Output: the dictionary that maps keylist[i] to L[i] for i=0,1,...len(L)-1
# Example: list2dict(['A','B','C'],['a','b','c']) should equal {'a':'A', 'b':'B', 'c':'C'}
def list2dict(L, keylist):
    """Build the dictionary keylist[i] -> L[i] by zipping the two lists."""
    return dict(zip(keylist, L))
| {
"repo_name": "josiah14/linear-algebra",
"path": "programming-the-matrix/0-week/python-lab/Python/matrix/python_lab.py",
"copies": "1",
"size": "7462",
"license": "mit",
"hash": -5676672538974620000,
"line_mean": 41.3977272727,
"line_max": 127,
"alpha_frac": 0.6699276333,
"autogenerated": false,
"ratio": 2.9540775930324625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8996188913691772,
"avg_score": 0.025563262528137957,
"num_lines": 176
} |
from functools import partial
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from soundcloud.resource import wrapped_resource
from soundcloud.request import make_request
class Client(object):
    """A client for interacting with Soundcloud resources.

    Depending on the keyword arguments given to the constructor, one of
    three OAuth2 flows is prepared: authorization-code, resource-owner
    credentials, or token refresh.  Passing ``access_token`` directly
    skips authentication altogether.
    """
    # Class-level defaults; both may be overridden via constructor kwargs.
    use_ssl = True
    host = 'api.soundcloud.com'
    def __init__(self, **kwargs):
        """Create a client instance with the provided options. Options should
        be passed in as kwargs.
        """
        self.use_ssl = kwargs.get('use_ssl', self.use_ssl)
        self.host = kwargs.get('host', self.host)
        self.scheme = self.use_ssl and 'https://' or 'http://'
        self.options = kwargs
        self._authorize_url = None
        self.client_id = kwargs.get('client_id')
        # An explicit access token short-circuits every OAuth2 flow below.
        if 'access_token' in kwargs:
            self.access_token = kwargs.get('access_token')
            return
        if 'client_id' not in kwargs:
            raise TypeError("At least a client_id must be provided.")
        if 'scope' in kwargs:
            self.scope = kwargs.get('scope')
        # decide which protocol flow to follow based on the arguments
        # provided by the caller.
        if self._options_for_authorization_code_flow_present():
            self._authorization_code_flow()
        elif self._options_for_credentials_flow_present():
            self._credentials_flow()
        elif self._options_for_token_refresh_present():
            self._refresh_token_flow()
    def exchange_token(self, code):
        """Given the value of the code parameter, request an access token.

        Stores the wrapped token resource on ``self.token``, mirrors its
        access token on ``self.access_token``, and returns the resource.
        """
        url = '%s%s/oauth2/token' % (self.scheme, self.host)
        options = {
            'grant_type': 'authorization_code',
            'redirect_uri': self._redirect_uri(),
            'client_id': self.options.get('client_id'),
            'client_secret': self.options.get('client_secret'),
            'code': code,
        }
        # Transport options are forwarded to the request layer.
        options.update({
            'verify_ssl': self.options.get('verify_ssl', True),
            'proxies': self.options.get('proxies', None)
        })
        self.token = wrapped_resource(
            make_request('post', url, options))
        self.access_token = self.token.access_token
        return self.token
    def authorize_url(self):
        """Return the authorization URL for OAuth2 authorization code flow."""
        return self._authorize_url
    def _authorization_code_flow(self):
        """Build the auth URL so the user can authorize the app."""
        options = {
            'scope': getattr(self, 'scope', 'non-expiring'),
            'client_id': self.options.get('client_id'),
            'response_type': 'code',
            'redirect_uri': self._redirect_uri()
        }
        url = '%s%s/connect' % (self.scheme, self.host)
        self._authorize_url = '%s?%s' % (url, urlencode(options))
    def _refresh_token_flow(self):
        """Given a refresh token, obtain a new access token."""
        url = '%s%s/oauth2/token' % (self.scheme, self.host)
        options = {
            'grant_type': 'refresh_token',
            'client_id': self.options.get('client_id'),
            'client_secret': self.options.get('client_secret'),
            'refresh_token': self.options.get('refresh_token')
        }
        options.update({
            'verify_ssl': self.options.get('verify_ssl', True),
            'proxies': self.options.get('proxies', None)
        })
        self.token = wrapped_resource(
            make_request('post', url, options))
        self.access_token = self.token.access_token
    def _credentials_flow(self):
        """Given a username and password, obtain an access token."""
        url = '%s%s/oauth2/token' % (self.scheme, self.host)
        options = {
            'client_id': self.options.get('client_id'),
            'client_secret': self.options.get('client_secret'),
            'username': self.options.get('username'),
            'password': self.options.get('password'),
            'scope': getattr(self, 'scope', ''),
            'grant_type': 'password'
        }
        options.update({
            'verify_ssl': self.options.get('verify_ssl', True),
            'proxies': self.options.get('proxies', None)
        })
        self.token = wrapped_resource(
            make_request('post', url, options))
        self.access_token = self.token.access_token
    def _request(self, method, resource, **kwargs):
        """Given an HTTP method, a resource name and kwargs, construct a
        request and return the response.
        """
        url = self._resolve_resource_name(resource)
        # Authentication: prefer the OAuth token, else fall back to client_id.
        if hasattr(self, 'access_token'):
            kwargs.update(dict(oauth_token=self.access_token))
        if hasattr(self, 'client_id'):
            kwargs.update(dict(client_id=self.client_id))
        kwargs.update({
            'verify_ssl': self.options.get('verify_ssl', True),
            'proxies': self.options.get('proxies', None)
        })
        return wrapped_resource(make_request(method, url, kwargs))
    def __getattr__(self, name, **kwargs):
        """Translate an HTTP verb into a request method.

        NOTE(review): ``**kwargs`` here is always empty — Python attribute
        access never passes keyword arguments; confirm before relying on it.
        """
        if name not in ('get', 'post', 'put', 'head', 'delete'):
            raise AttributeError
        return partial(self._request, name, **kwargs)
    def _resolve_resource_name(self, name):
        """Convert a resource name (e.g. tracks) into a URI."""
        if name[:4] == 'http':  # already a url
            return name
        name = name.rstrip('/').lstrip('/')
        return '%s%s/%s' % (self.scheme, self.host, name)
    def _redirect_uri(self):
        """
        Return the redirect uri. Checks for ``redirect_uri`` or common typo,
        ``redirect_url``
        """
        return self.options.get(
            'redirect_uri',
            self.options.get('redirect_url', None))
    # Helper functions for testing arguments provided to the constructor.
    def _options_present(self, options, kwargs):
        # True when every name in ``options`` appears in ``kwargs``.
        return all(map(lambda k: k in kwargs, options))
    def _options_for_credentials_flow_present(self):
        required = ('client_id', 'client_secret', 'username', 'password')
        return self._options_present(required, self.options)
    def _options_for_authorization_code_flow_present(self):
        # Accept the common 'redirect_url' typo as well as 'redirect_uri'.
        required = ('client_id', 'redirect_uri')
        or_required = ('client_id', 'redirect_url')
        return (self._options_present(required, self.options) or
                self._options_present(or_required, self.options))
    def _options_for_token_refresh_present(self):
        required = ('client_id', 'client_secret', 'refresh_token')
        return self._options_present(required, self.options)
| {
"repo_name": "soundcloud/soundcloud-python",
"path": "soundcloud/client.py",
"copies": "3",
"size": "6716",
"license": "bsd-2-clause",
"hash": -4092157070235273700,
"line_mean": 37.8208092486,
"line_max": 78,
"alpha_frac": 0.5860631328,
"autogenerated": false,
"ratio": 4.038484666265784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 173
} |
from functools import partial, reduce
from inspect import isfunction
from typing import Callable, Iterator, Dict, List, Tuple, Any, Optional
__all__ = ["MiddlewareManager"]
GraphQLFieldResolver = Callable[..., Any]
class MiddlewareManager:
    """Manager for the middleware chain.

    Wraps field resolvers with the supplied middleware functions and/or
    objects.  A middleware function receives the next middleware (or the
    resolver itself) as its first argument; an object middleware must
    expose a ``resolve`` method that is used the same way.

    Since resolvers may return "AwaitableOrValue"s, every middleware must
    check whether a value is awaitable before awaiting it.
    """

    # allow custom attributes (not used internally)
    __slots__ = "__dict__", "middlewares", "_middleware_resolvers", "_cached_resolvers"

    _cached_resolvers: Dict[GraphQLFieldResolver, GraphQLFieldResolver]
    _middleware_resolvers: Optional[List[Callable]]

    def __init__(self, *middlewares: Any):
        self.middlewares = middlewares
        if middlewares:
            self._middleware_resolvers = list(get_middleware_resolvers(middlewares))
        else:
            self._middleware_resolvers = None
        self._cached_resolvers = {}

    def get_field_resolver(
        self, field_resolver: GraphQLFieldResolver
    ) -> GraphQLFieldResolver:
        """Wrap the provided resolver with the middleware.

        The chained function is built once per resolver and memoized.
        """
        resolvers = self._middleware_resolvers
        if resolvers is None:
            return field_resolver
        cache = self._cached_resolvers
        if field_resolver not in cache:
            # Fold the middleware over the resolver: each partial becomes
            # the "next" callable of the following middleware.
            chained = field_resolver
            for middleware in resolvers:
                chained = partial(middleware, chained)
            cache[field_resolver] = chained
        return cache[field_resolver]
def get_middleware_resolvers(middlewares: Tuple[Any, ...]) -> Iterator[Callable]:
    """Yield the resolver callable for each middleware entry.

    Plain functions are yielded as-is; object middleware contributes its
    ``resolve`` method (entries without one are silently skipped).
    """
    for entry in middlewares:
        if isfunction(entry):
            yield entry
            continue
        # middleware provided as an object with a 'resolve' method
        resolve = getattr(entry, "resolve", None)
        if resolve is not None:
            yield resolve
| {
"repo_name": "graphql-python/graphql-core",
"path": "src/graphql/execution/middleware.py",
"copies": "1",
"size": "2467",
"license": "mit",
"hash": -1713970478231479000,
"line_mean": 38.1587301587,
"line_max": 88,
"alpha_frac": 0.6769355493,
"autogenerated": false,
"ratio": 4.645951035781544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016804200908948387,
"num_lines": 63
} |
from functools import partial, reduce
from mr_streams.exceptions import IllegalStreamOperationException
class EOL:
    """Sentinel type marking the end of a stream (compared by identity)."""
class Streamer:
    """Lazily evaluated, chainable stream over any iterable.

    Each transforming method swaps ``self.structure`` for a new iterator
    and returns ``self``, so calls can be chained fluently; nothing is
    computed until the stream is iterated or drained.
    """
    def __init__(self, _iter):
        # The underlying iterator; consumed as the stream is evaluated.
        self.structure = iter(_iter)
        # Per-instance sentinel used to detect iterator exhaustion.
        self.eol = EOL()
    def _build(self, expression):
        # Replace the underlying iterator and return self for chaining.
        self.structure = expression
        self.structure = iter(self.structure)
        return self
    def __iter__(self):
        return self
    def __next__(self):
        # Use the sentinel instead of catching StopIteration directly.
        _obj = next(self.structure, self.eol)
        if _obj is not self.eol:
            return _obj
        else:
            raise StopIteration
    def _flatten(self, _generator,_function):
        # Yield every element of _function(x) for each x in _generator.
        yield from (y for x in _generator for y in _function(x))
    def map(self, _function, *args, **kwargs):
        """Lazily map _function over the stream (extra *args are bound
        before the element, per functools.partial)."""
        _curried_function = partial(_function, *args, **kwargs)
        return self._build(map(_curried_function, self.structure))
    def flatmap(self, _function, *args, **kwargs):
        """Map the curried _function over the stream and flatten one level."""
        _curried_function = partial(_function, *args, **kwargs)
        return self._build(self._flatten(self.structure, _curried_function))
    def reduce(self, _function, initializer = None, *args, **kwargs):
        """Eagerly reduce the stream with _function.

        NOTE(review): unlike functools.reduce, the first combination is
        computed as _function(first_element, initializer) — the initializer
        is passed as the *second* argument — and the extra *args/**kwargs
        are not applied to that first combination.  Confirm this ordering
        is intended before relying on it with non-commutative functions.
        """
        struct = iter(self.structure)
        a = next(struct, self.eol)
        if a is self.eol:
            # NOTE(review): message typo ("Tying ... reducing") kept as-is.
            raise IllegalStreamOperationException("Tying to reduce reducing a stream with no values")
        b = next(struct, self.eol) if initializer is None else initializer
        if b is self.eol and initializer is None:
            # Single-element stream with no initializer: return the element.
            return a
        _initial = _function(a,b)
        _curried_function = partial(_function, *args, **kwargs)
        return reduce(_curried_function, struct, _initial)
    def filter(self, _function, *args, **kwargs):
        """Lazily keep only elements for which the curried predicate is truthy."""
        _curried_function = partial(_function, *args, **kwargs)
        return self._build(filter(_curried_function, self.structure))
    def tap(self, _function, *args, **kwargs):
        """Invoke the curried _function on each element as a side effect,
        passing elements through unchanged."""
        def _tap(function, iterable):
            for x in iterable:
                function(x)
                yield x
        _curried_function = partial(_function, *args, **kwargs)
        return self._build(_tap(_curried_function, self.structure))
    def _take(self, n, iterable):
        # Yield at most the first n elements of iterable.
        for i, val in enumerate(iterable):
            if i == n:
                break
            else:
                yield val
    def take(self, n):
        """Truncate the stream to its first n elements."""
        return self._build(self._take(n, self.structure))
    def _drop(self, n, iterable):
        # Skip the first n elements of iterable, yield the rest.
        for i, val in enumerate(iterable):
            if i >= n :
                yield val
    def drop(self, n):
        """Discard the first n elements of the stream."""
        return self._build(self._drop(n, self.structure))
    def drain(self):
        """Consume the stream completely, discarding every element."""
        for _ in self.structure:
            continue
# No demo code: this module is intended to be imported, not executed.
if __name__ == "__main__":
    pass
| {
"repo_name": "caffeine-potent/Streamer-Datastructure",
"path": "mr_streams/streamer.py",
"copies": "1",
"size": "2716",
"license": "mit",
"hash": -1501024574657173500,
"line_mean": 29.5168539326,
"line_max": 101,
"alpha_frac": 0.5850515464,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003538799176524442,
"num_lines": 89
} |
from functools import partial, reduce
from multiprocess import Pool
from operator import add
import numpy as np
import pandas
import pandas
import h5py
from scipy.sparse import linalg
from cooler.tools import split, partition
import cooler
def bnewt(matvec, mask, tol=1e-6, x0=None, delta=0.1, Delta=3, fl=0):
    """
    A balancing algorithm for symmetric matrices
    X = BNEWT(A) attempts to find a vector X such that
    diag(X)*A*diag(X) is close to doubly stochastic. A must
    be symmetric and nonnegative.
    Parameters
    ----------
    matvec : callable
        Linear operator that returns the matrix-vector product with x;
        called as ``matvec(x, mask)``.
    mask : 1D array of bool
        Mask of good bins
    tol : float
        Error tolerance
    x0 : 1D array
        Initial guess
    delta : float
        How close balancing vectors can get to the edge of the positive cone
    Delta : float
        How far balancing vectors can get from the edge of the positive cone
        We use a relative measure on the size of elements.
    fl : int
        Verbosity flag: 1 prints per-iteration diagnostics.
    Returns
    -------
    x : 1D array
        balancing weights, expanded back to the full (unmasked) length
    res : 1D array
        history of residual norms, one entry per outer iteration
    """
    # Initialize: work only on the n "good" bins selected by mask.
    n = mask.sum()
    e = np.ones(n)
    if x0 is None:
        x0 = e.copy()
    res = []
    # Inner stopping criterion parameters.
    g = 0.9
    etamax = 0.1
    eta = etamax
    stop_tol = tol * 0.5
    x = x0
    rt = tol ** 2
    # Residual of the balancing equation: v = x .* (A x), target is all-ones.
    v = x * matvec(x, mask)
    rk = 1 - v
    rho_km1 = np.dot(rk, rk)
    rout = rho_km1
    rold = rout
    MVP = 0  # We’ll count matrix vector products.
    i = 0  # Outer iteration count.
    if fl == 1:
        print("it in. it res", flush=True)
    # Outer iteration
    while rout > rt:
        i += 1
        k = 0
        y = e.copy()
        innertol = max((eta ** 2) * rout, rt)
        # Inner iteration by Conjugate Gradient
        while rho_km1 > innertol:
            k += 1
            if k == 1:
                Z = rk / v
                p = Z.copy()
                rho_km1 = np.dot(rk, Z)
            else:
                beta = rho_km1 / rho_km2
                p = Z + beta * p
            # Update search direction efficiently.
            w = x * matvec(x * p, mask) + v * p
            alpha = rho_km1 / np.dot(p, w)
            ap = alpha * p
            # Test distance to boundary of cone.
            ynew = y + ap
            if min(ynew) <= delta:
                if delta == 0:
                    break
                # Step only as far as the delta boundary allows.
                idx = ap < 0
                gamma = np.min((delta - y[idx]) / ap[idx])
                y = y + gamma * ap
                break
            if max(ynew) >= Delta:
                # Clip the step at the upper (Delta) boundary.
                idx = ynew > Delta
                gamma = np.min((Delta - y[idx]) / ap[idx])
                y = y + gamma * ap
                break
            y = ynew.copy()
            rk = rk - alpha * w
            rho_km2 = rho_km1
            Z = rk / v
            rho_km1 = np.dot(rk, Z)
        # Apply the inner correction and recompute the outer residual.
        x = x * y
        v = x * matvec(x, mask)
        rk = 1 - v
        rho_km1 = np.dot(rk, rk)
        rout = rho_km1
        MVP += k + 1
        # Update inner iteration stopping criterion.
        rat = rout / rold
        rold = rout
        res_norm = np.sqrt(rout)
        eta_o = eta
        eta = g * rat
        if g * (eta_o ** 2) > 0.1:
            eta = max(eta, g * (eta_o ** 2))
        eta = max(min(eta, etamax), stop_tol / res_norm)
        if fl == 1:
            print("%3d\t%6d\t%.3e" % (i, k, res_norm), flush=True)
        res.append(res_norm)
    print("Matrix-vector products = %6d" % (MVP,), flush=True)
    # Scatter the masked solution back into a full-length vector
    # (bad bins keep weight 0).
    x_full = np.zeros(len(mask))
    x_full[mask] = x
    return x_full, np.array(res)
| {
"repo_name": "open2c/cooltools",
"path": "cooltools/balance.py",
"copies": "1",
"size": "3695",
"license": "mit",
"hash": -4687656974056799000,
"line_mean": 23.7852348993,
"line_max": 76,
"alpha_frac": 0.4955320877,
"autogenerated": false,
"ratio": 3.5272206303724927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4522752718072493,
"avg_score": null,
"num_lines": null
} |
from functools import partial, reduce
from typing import Dict
class Node(object):
    """Base class for dependency-graph nodes with a lazily memoized value."""
    def __init__(self):
        # Zero-argument callables producing this node's inputs.
        self.dependencies = []
        # Memoized result; filled in lazily by subclasses.
        self._value = None
    def value(self):
        """Return the memoized value (None until a subclass computes it)."""
        return self._value
class Bot(Node):
    """Node receiving two chips; its value is the ordered pair (low, high)."""
    def value(self):
        if self._value is None:
            first, second = (dep() for dep in self.dependencies)
            # Store the pair with the lower chip first.
            self._value = tuple(sorted((first, second)))
        return super(Bot, self).value()
class Output(Node):
    """Node memoizing the single chip delivered to an output bin."""
    def value(self):
        if self._value is None:
            deliver = self.dependencies[0]
            self._value = deliver()
        return super(Output, self).value()
def update_node(nodes, node_type, node_id, func):
    """Register func as a dependency of nodes[node_type][node_id],
    creating the Bot/Output node on demand."""
    factory = {'bot': Bot, 'output': Output}
    registry = nodes[node_type]
    if node_id not in registry:
        registry[node_id] = factory[node_type]()
    registry[node_id].dependencies.append(func)
def part_one(instructions):
    """Build the bot/output dependency graph and report the bot that
    compares chips 17 and 61 (prints the part-one answer).

    Returns the ``nodes`` mapping for reuse by part two.
    """
    nodes: Dict[str, Dict[int, Node]] = {'bot': {}, 'output': {}}
    for instruction in instructions:
        words = instruction.split()
        if words[0] == 'value':
            value, node_type, node_id = int(words[1]), words[4], int(words[5])
            # Default argument binds the current value eagerly (avoids the
            # late-binding-closure pitfall inside this loop).
            def value_func(v=value):
                return v
            update_node(nodes, node_type, node_id, value_func)
        else:
            giver_type, giver_id = words[0], int(words[1])
            low_type, low_id = words[5], int(words[6])
            high_type, high_id = words[10], int(words[11])
            # j selects low (0) or high (1) chip of the giver's sorted pair;
            # t/i defaults again bind the loop variables eagerly.
            def giver_func(j, t=giver_type, i=giver_id):
                return nodes[t][i].value()[j]
            update_node(nodes, low_type, low_id, partial(giver_func, 0))
            update_node(nodes, high_type, high_id, partial(giver_func, 1))
    # for node_type in sorted(nodes):
    #     for node_id in sorted(nodes[node_type]):
    #         value = nodes[node_type][node_id].value()
    #         print('The value of %s %s is %s.' % (node_type, node_id, value))
    for bot_id, node in nodes['bot'].items():
        if node.value() == (17, 61):
            print('Bot %d is responsible for comparing 17 with 61.' % bot_id)
    return nodes
def part_two(nodes):
    """Print the product of the chips in outputs 0-2 (part-two answer)."""
    product = 1
    for output_id in range(3):
        product *= nodes['output'][output_id].value()
    print('The product of the first 3 outputs is: %d' % product)
def main():
    """Read puzzle input from input.txt and run both parts."""
    with open('input.txt', 'r') as f:
        instructions = f.read().split('\n')
    nodes = part_one(instructions)
    part_two(nodes)
# Script entry point: only run when executed directly.
if __name__ == '__main__':
    main()
| {
"repo_name": "adriano-arce/Interview-Problems",
"path": "Advent-Of-Code-2016/10-Balance-Bots/10-Balance-Bots.py",
"copies": "1",
"size": "2573",
"license": "mit",
"hash": -8275759572521178000,
"line_mean": 28.2386363636,
"line_max": 80,
"alpha_frac": 0.5565487757,
"autogenerated": false,
"ratio": 3.4352469959946594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.449179577169466,
"avg_score": null,
"num_lines": null
} |
from functools import partial, reduce
import collections
import sys
import botocore
import click
import time
class Action(collections.namedtuple('Action', [ "resource", "method", "arguments", "saveas" ])):
    # Lightweight record describing one API call: which resource in the
    # context to use, the method name, its kwargs, and (optionally) the
    # context key under which to store the result.
    def __new__(cls, resource, method, arguments, saveas=""):
        # Default saveas to "" so 3-element action tuples are accepted.
        return super(Action, cls).__new__(cls, resource, method, arguments, saveas)
def pluck(source, selector):
    """Follow a dotted selector through nested dicts; missing keys yield {}."""
    value = source
    for key in selector.split('.'):
        value = value.get(key, {})
    return value
def unroll(pair):
    """Resolve a (getter, 'item.attr') pair: call getter('item'), then read
    the remaining dotted part as an attribute name."""
    getter, selector = pair
    item, _, attr_path = selector.partition('.')
    return getattr(getter(item), attr_path)
def walk(adict):
    """Recursively resolve deferred values in a nested dict, in place.

    Any (partial, 'item.attr') tuple value is replaced by the result of
    unroll(...); nested dicts — including dicts inside sequences — are
    walked recursively.  Returns the same (mutated) dict.

    NOTE(review): dict.iteritems() exists only on Python 2, so this module
    appears to target Python 2 — confirm before running under Python 3.
    """
    for key, value in adict.iteritems():
        if isinstance(value, dict):
            walk(value)
        elif isinstance(value, tuple) and isinstance(value[0], partial):
            adict[key] = unroll(value)
        elif isinstance(value, collections.Sequence):
            # Strings are Sequences too, but their items are never dicts,
            # so the inner isinstance check leaves them untouched.
            for item in value:
                if isinstance(item, dict):
                    walk(item)
    return adict
def execute2(context, actions):
    """Run a sequence of Action tuples against the resources in ``context``.

    Each action's result may be stored back into ``context`` under its
    ``saveas`` key.  Known "already exists"/limit AWS error codes are
    echoed and skipped; any other ClientError aborts the process.
    Returns the (possibly updated) context.
    """
    for a in map(lambda action: Action(*action), actions):
        try:
            if a.method == 'create_launch_configuration':
                click.echo('waiting some more..')
                time.sleep(10) # AWS API bug, remove in future
            resource = context[a.resource]
            # Resolve any deferred (partial, selector) values just in time.
            arguments = walk(a.arguments)
            result = getattr(resource, a.method)(**arguments)
            click.echo("{}... OK".format(a.method))
            if a.saveas:
                context[a.saveas] = result
        except botocore.exceptions.ClientError as e:
            # Error codes that are safe to ignore (mostly idempotency).
            Errors = ['InvalidKeyPair.Duplicate','InvalidGroup.Duplicate','InvalidPermission.Duplicate','EntityAlreadyExists','AlreadyExists', \
                      'InvalidGroup.NotFound','NoSuchEntity','ValidationError','LimitExceeded','DependencyViolation', 'DryRunOperation']
            if e.response['Error']['Code'] in Errors:
                click.echo(e.response['Error']['Message'])
            else:
                click.echo("Unexpected error: {}".format(e))
                sys.exit("Aborting..")
    return context
def DhcpConfigurations(region):
    """Build EC2 DHCP option entries: AWS DNS plus the region's domain name."""
    if region == 'us-east-1':
        # us-east-1 is the historical exception to the standard pattern.
        domain_name = 'ec2.internal'
    else:
        domain_name = '{}.compute.internal'.format(region)
    return [
        {'Key': 'domain-name-servers', 'Values': ['AmazonProvidedDNS']},
        {'Key': 'domain-name', 'Values': ['{} k8s'.format(domain_name)]},
    ]
| {
"repo_name": "cncf/demo",
"path": "cncfdemo-cli/cncfdemo/bootstrap/aws/utils.py",
"copies": "1",
"size": "2248",
"license": "apache-2.0",
"hash": -3596472936941628400,
"line_mean": 29.3783783784,
"line_max": 140,
"alpha_frac": 0.6601423488,
"autogenerated": false,
"ratio": 3.8101694915254236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4970311840325424,
"avg_score": null,
"num_lines": null
} |
from functools import partial, reduce
import operator
from django.contrib import messages
from django.contrib.admin import (register, TabularInline, StackedInline,
ModelAdmin, HORIZONTAL)
from django.contrib.admin.options import BaseModelAdmin
from django.contrib.admin.views.main import IS_POPUP_VAR
from django.contrib.admin import SimpleListFilter
from django.contrib.gis.admin import OSMGeoAdmin
from django.db.models import Q, TextField
from django.forms.models import modelformset_factory
from django.shortcuts import redirect
from django.utils.html import format_html_join
from django.utils.translation import ugettext_lazy as _
from grappelli.forms import GrappelliSortableHiddenMixin
from reversion.admin import VersionAdmin
from super_inlines.admin import SuperInlineModelAdmin, SuperModelAdmin
from tinymce.widgets import TinyMCE
from common.utils.cache import is_user_locked, lock_user
from common.utils.file import FileAnalyzer
from .models import *
from .forms import (
OeuvreForm, SourceForm, IndividuForm, ElementDeProgrammeForm,
ElementDeDistributionForm, EnsembleForm, SaisonForm, PartieForm,
LieuAdminForm,
)
from .jobs import (
events_to_pdf as events_to_pdf_job, split_pdf as split_pdf_job,
)
from common.utils.export import launch_export
from typography.utils import replace
__all__ = ()
#
# Common
#
class CustomBaseModel(BaseModelAdmin):
    """Admin mixin restricting visibility and edition to the user's
    ownership subtree (superusers bypass the restriction)."""
    # FIXME: Use a custom AuthenticationBackend instead.
    def check_user_ownership(self, request, obj, has_class_permission):
        # Non-superusers may only touch objects whose owner lies within
        # their own descendant tree (themselves included).
        if not has_class_permission:
            return False
        user = request.user
        if obj is not None and not user.is_superuser \
                and obj.owner not in user.get_descendants(include_self=True):
            return False
        return True
    def has_change_permission(self, request, obj=None):
        has_class_permission = super(CustomBaseModel,
                                     self).has_change_permission(request, obj)
        return self.check_user_ownership(request, obj, has_class_permission)
    def has_delete_permission(self, request, obj=None):
        # FIXME: Because of a bug in
        # django.contrib.admin.actions.delete_selected, that action lets a
        # restricted user delete objects they have no permission for.
        has_class_permission = super(CustomBaseModel,
                                     self).has_delete_permission(request, obj)
        return self.check_user_ownership(request, obj, has_class_permission)
    def get_queryset(self, request):
        # Non-superusers see only their subtree's objects, except in popup
        # (raw-id lookup) views where the full queryset is required.
        user = request.user
        qs = super(CustomBaseModel, self).get_queryset(request)
        if not user.is_superuser and IS_POPUP_VAR not in request.GET:
            qs = qs.filter(
                owner__in=user.get_descendants(include_self=True))
        return qs
# Common fieldsets
# Reusable admin fieldset pairing the debut/fin dates with their
# precision companion fields.
PERIODE_D_ACTIVITE_FIELDSET = (_('Période d’activité'), {
    'fields': (('debut', 'debut_precision'), ('fin', 'fin_precision'))
})
#
# Filters
#
class HasRelatedObjectsListFilter(SimpleListFilter):
    """Yes/no sidebar filter on whether an object has related objects."""
    title = _('possède des objets liés')
    parameter_name = 'has_related_objects'

    def lookups(self, request, model_admin):
        # (URL value, human-readable label) pairs shown in the sidebar.
        return (('1', _('Oui')), ('0', _('Non')))

    def queryset(self, request, queryset):
        choice = self.value()
        if choice == '1':
            return queryset.with_related_objects()
        if choice == '0':
            return queryset.without_related_objects()
def build_boolean_list_filter(class_title, class_parameter_name, filter=None,
                              exclude=None):
    """Build a yes/no SimpleListFilter class from a Q object.

    Exactly one of ``filter``/``exclude`` is expected.  "Oui" applies the
    given condition; "Non" applies its logical complement by swapping the
    filter()/exclude() queryset method.
    """
    class HasEventsListFilter(SimpleListFilter):
        title = class_title
        parameter_name = class_parameter_name
        def lookups(self, request, model_admin):
            return (
                ('1', _('Oui')),
                ('0', _('Non')),
            )
        def queryset(self, request, queryset):
            if self.value() == '1':
                # "Oui": filter(filter) when given, otherwise exclude(exclude).
                query = getattr(queryset, 'filter' if filter is not None
                                else 'exclude')
                return query(filter if filter is not None
                             else exclude).distinct()
            if self.value() == '0':
                # "Non": the complement — filter(exclude) or exclude(filter).
                query = getattr(queryset, 'filter' if exclude is not None
                                else 'exclude')
                return query(exclude if exclude is not None
                             else filter).distinct()
    return HasEventsListFilter
# Prebuilt yes/no admin filters for events and sources.
EventHasSourceListFilter = build_boolean_list_filter(_('source'), 'has_source',
                                                     exclude=Q(sources=None))
EventHasProgramListFilter = build_boolean_list_filter(
    _('programme'), 'has_program',
    Q(programme__isnull=False) | Q(relache=True))
SourceHasParentListFilter = build_boolean_list_filter(
    _('a un parent'), 'has_parent', filter=Q(parent__isnull=False),
)
SourceHasEventsListFilter = build_boolean_list_filter(
    _('événements'), 'has_events', exclude=Q(evenements=None))
SourceHasProgramListFilter = build_boolean_list_filter(
    _('programme'), 'has_program',
    Q(evenements__programme__isnull=False) | Q(evenements__relache=True))
#
# Inlines
#
class CustomTabularInline(TabularInline, CustomBaseModel):
    # Tabular inline with CustomBaseModel's ownership restrictions;
    # 'owner' is hidden because it is assigned automatically elsewhere.
    extra = 0
    exclude = ('owner',)
class CustomStackedInline(StackedInline, CustomBaseModel):
    # Stacked inline with CustomBaseModel's ownership restrictions;
    # 'owner' is hidden because it is assigned automatically elsewhere.
    extra = 0
    exclude = ('owner',)
class OeuvreMereInline(CustomTabularInline):
    # Edit parent-work relations from the child ("fille") side.
    model = ParenteDOeuvres
    verbose_name = model._meta.get_field('mere').verbose_name
    verbose_name_plural = _('œuvres mères')
    fk_name = 'fille'
    raw_id_fields = ('mere',)
    autocomplete_lookup_fields = {
        'fk': ('mere',),
    }
    fields = ('type', 'mere')
    classes = ('grp-collapse grp-closed',)


class PupitreInline(CustomTabularInline):
    # Instrumentation ("effectif") of a work: parts with min/max counts.
    model = Pupitre
    verbose_name = model._meta.verbose_name
    verbose_name_plural = _('effectif')
    raw_id_fields = ('partie',)
    autocomplete_lookup_fields = {
        'fk': ['partie'],
    }
    fields = ('partie', 'soliste', 'quantite_min', 'quantite_max',
              'facultatif')
    classes = ('grp-collapse grp-closed',)


class IndividuParentInline(CustomTabularInline):
    # Edit parent-individual relations from the child ("enfant") side.
    model = ParenteDIndividus
    verbose_name = model._meta.get_field('parent').verbose_name
    verbose_name_plural = _('individus parents')
    fk_name = 'enfant'
    raw_id_fields = ('parent',)
    autocomplete_lookup_fields = {
        'fk': ('parent',),
    }
    fields = ('type', 'parent')
    classes = ('grp-collapse grp-closed',)


class OeuvreLieesInline(StackedInline):
    # Bare inline of related works; intentionally not a CustomStackedInline
    # (no owner exclusion) — TODO confirm this asymmetry is deliberate.
    model = Oeuvre
    classes = ('grp-collapse grp-closed',)
class AuteurInline(CustomTabularInline):
    # Authorship rows: an individual or ensemble plus their profession.
    model = Auteur
    raw_id_fields = ('individu', 'ensemble', 'profession')
    autocomplete_lookup_fields = {
        'fk': ['individu', 'ensemble', 'profession'],
    }
    fields = ('individu', 'ensemble', 'profession')

    def get_formset(self, request, obj=None, **kwargs):
        """Pre-fill the authors from the parent work when adding an extract
        (GET parameter ``extrait_de``)."""
        formset = super(AuteurInline,
                        self).get_formset(request, obj=obj, **kwargs)
        if request.method == 'POST' or 'extrait_de' not in request.GET:
            return formset
        # When entering an extract, the authors must already be filled in;
        # the user only has to adjust them in the cases where they do not
        # match the parent work (for instance an opera overture whose
        # librettist is not an author).
        extrait_de = Oeuvre.objects.get(pk=request.GET['extrait_de'])
        initial = list(
            extrait_de.auteurs.values('individu', 'ensemble', 'profession'))

        class TmpFormset(formset):
            # One extra blank form per pre-filled author row.
            extra = len(initial)

            def __init__(self, *args, **kwargs):
                kwargs['initial'] = initial
                super(TmpFormset, self).__init__(*args, **kwargs)
        return TmpFormset
class MembreInline(CustomStackedInline):
    # Membership of an individual in an ensemble, with activity period.
    model = Membre
    raw_id_fields = ('individu', 'instrument', 'profession')
    autocomplete_lookup_fields = {
        'fk': ['individu', 'instrument', 'profession'],
    }
    fieldsets = (
        (None, {'fields': (
            'individu', 'instrument', 'profession', 'classement',
        )}),
        PERIODE_D_ACTIVITE_FIELDSET,
    )
class ElementDeDistributionInline(SuperInlineModelAdmin, CustomTabularInline):
    # Cast/distribution rows; nested under ElementDeProgrammeInline via
    # django-super-inlines (SuperInlineModelAdmin).
    model = ElementDeDistribution
    form = ElementDeDistributionForm
    verbose_name_plural = _('distribution')
    raw_id_fields = ('individu', 'ensemble', 'partie', 'profession')
    autocomplete_lookup_fields = {
        'fk': ['individu', 'ensemble', 'partie', 'profession'],
    }
    fields = ('individu', 'ensemble', 'partie', 'profession')
    classes = ('grp-collapse grp-open',)

    def get_queryset(self, request):
        # Avoid N+1 lookups when rendering the inline rows.
        qs = super(ElementDeDistributionInline, self).get_queryset(request)
        return qs.select_related('individu', 'ensemble', 'partie', 'profession')
class ElementDeProgrammeInline(SuperInlineModelAdmin,
                               GrappelliSortableHiddenMixin,
                               CustomStackedInline):
    # Programme items of an event, drag-sortable via the hidden
    # ``position`` field (GrappelliSortableHiddenMixin).
    model = ElementDeProgramme
    form = ElementDeProgrammeForm
    verbose_name_plural = _('programme')
    fieldsets = (
        (None, {
            'fields': (('oeuvre', 'autre',), 'caracteristiques',
                       ('numerotation', 'part_d_auteur'),
                       'position'),
        }),
    )
    raw_id_fields = ('oeuvre', 'caracteristiques',)
    autocomplete_lookup_fields = {
        'fk': ('oeuvre',),
        'm2m': ('caracteristiques',),
    }
    classes = ('grp-collapse grp-open',)
    # Nested inline (django-super-inlines).
    inlines = (ElementDeDistributionInline,)

    def get_queryset(self, request):
        # Prefetch everything the nested distribution rows will touch.
        qs = super(ElementDeProgrammeInline, self).get_queryset(request)
        return qs.select_related('oeuvre').prefetch_related(
            'caracteristiques', 'distribution',
            'distribution__individu', 'distribution__ensemble',
            'distribution__partie', 'distribution__profession')
# The six inlines below are parallel: each links a Source to one related
# model through an explicit through-model (SourceEvenement, SourceOeuvre,
# ...).  SourceEvenementInline uses ``related_lookup_fields`` where the
# others use ``autocomplete_lookup_fields`` — TODO confirm this is
# intentional rather than an oversight.
class SourceEvenementInline(TabularInline):
    model = SourceEvenement
    verbose_name = _('événement lié')
    verbose_name_plural = _('événements liés')
    classes = ('grp-collapse grp-closed',)
    extra = 0
    raw_id_fields = ('evenement',)
    related_lookup_fields = {
        'fk': ('evenement',),
    }


class SourceOeuvreInline(TabularInline):
    model = SourceOeuvre
    verbose_name = _('œuvre liée')
    verbose_name_plural = _('œuvres liées')
    classes = ('grp-collapse grp-closed',)
    extra = 0
    raw_id_fields = ('oeuvre',)
    autocomplete_lookup_fields = {
        'fk': ('oeuvre',),
    }


class SourceIndividuInline(TabularInline):
    model = SourceIndividu
    verbose_name = _('individu lié')
    verbose_name_plural = _('individus liés')
    classes = ('grp-collapse grp-closed',)
    extra = 0
    raw_id_fields = ('individu',)
    autocomplete_lookup_fields = {
        'fk': ('individu',),
    }


class SourceEnsembleInline(TabularInline):
    model = SourceEnsemble
    verbose_name = _('ensemble lié')
    verbose_name_plural = _('ensembles liés')
    classes = ('grp-collapse grp-closed',)
    extra = 0
    raw_id_fields = ('ensemble',)
    autocomplete_lookup_fields = {
        'fk': ('ensemble',),
    }


class SourceLieuInline(TabularInline):
    model = SourceLieu
    verbose_name = _('lieu lié')
    verbose_name_plural = _('lieux liés')
    classes = ('grp-collapse grp-closed',)
    extra = 0
    raw_id_fields = ('lieu',)
    autocomplete_lookup_fields = {
        'fk': ('lieu',),
    }


class SourcePartieInline(TabularInline):
    model = SourcePartie
    verbose_name = _('rôle ou instrument lié')
    verbose_name_plural = _('rôles ou instruments liés')
    classes = ('grp-collapse grp-closed',)
    extra = 0
    raw_id_fields = ('partie',)
    autocomplete_lookup_fields = {
        'fk': ('partie',),
    }
#
# ModelAdmins
#
# FIXME: Workaround for https://code.djangoproject.com/ticket/26184
# Remove when fixed.
def lookup_needs_distinct(opts, lookup_path):
    """
    Returns True if 'distinct()' should be used to query the given lookup path.

    Copy of Django's helper, kept verbatim per the FIXME above so it can
    be dropped once ticket 26184 is fixed upstream.
    """
    field = None
    # Go through the fields (following all relations) and look for an m2m
    for lookup_part in lookup_path.split('__'):
        if field is not None:
            # Checks whether the current lookup part is not a field.
            try:
                if field.get_transform(lookup_part) is not None \
                        or field.get_lookup(lookup_part) is not None:
                    continue
            except (NotImplementedError, TypeError):
                continue
        field = opts.get_field(lookup_part)
        if hasattr(field, 'get_path_info'):
            # This field is a relation, update opts to follow the relation
            path_info = field.get_path_info()
            opts = path_info[-1].to_opts
            if any(path.m2m for path in path_info):
                # This field is a m2m relation so we know we need to call distinct
                return True
    return False
class CommonAdmin(CustomBaseModel, ModelAdmin):
    """Base ModelAdmin for the whole app: ownership bookkeeping, the
    ``additional_*`` extension mechanism, and admin workarounds."""
    list_per_page = 20
    save_as = True
    # Extra entries merged into subclasses' standard options in __init__
    # and get_fieldsets.
    additional_fields = ('owner',)
    additional_readonly_fields = ('owner',)
    # Fields hidden from non-superusers (see _get_added_fields).
    admin_fields = ()
    additional_list_display = ('owner',)
    additional_list_editable = ()
    additional_list_filters = ('owner', HasRelatedObjectsListFilter,)
    fieldsets_and_inlines_order = ()

    def __init__(self, *args, **kwargs):
        # Merge the "additional_*" declarations into the standard options.
        self.readonly_fields += self.additional_readonly_fields
        self.list_display += self.additional_list_display
        self.list_filter += self.additional_list_filters
        self.added_fieldsets = ()
        super(CommonAdmin, self).__init__(*args, **kwargs)

    def get_fieldsets(self, request, obj=None):
        fieldsets = super(CommonAdmin, self).get_fieldsets(request, obj=obj)
        # If fields or fieldsets are defined, the form is built
        # automatically and would therefore already include the fields we
        # want to add below.
        if self.fields or self.fieldsets:
            added_fields = self._get_added_fields(
                request, 'additional_fields', excluded=self.exclude or (),
            )
            if added_fields:
                self.added_fieldsets = (
                    (_('Notes'), {
                        'classes': ('grp-collapse grp-closed',),
                        'fields': added_fields,
                    }),
                )
        return tuple(fieldsets) + self.added_fieldsets

    def _get_added_fields(self, request, additional_fields_attname,
                          excluded=()):
        """Return the ``additional_*`` fields visible to this user."""
        # Non-superusers never see the admin-only fields.
        if not request.user.is_superuser:
            excluded += self.admin_fields
        added_fields = []
        for added_field in getattr(self, additional_fields_attname, ()):
            if added_field not in excluded:
                added_fields.append(added_field)
        return tuple(added_fields)

    def save_model(self, request, obj, form, change):
        # First save wins: record the creating user as the owner.
        if hasattr(obj, 'owner') and obj.owner is None:
            obj.owner = request.user
        super(CommonAdmin, self).save_model(request, obj, form, change)

    def save_formset(self, request, form, formset, change):
        # Same ownership rule for inline instances.
        instances = formset.save(commit=False)
        for instance in instances:
            if hasattr(instance, 'owner') and instance.owner is None:
                instance.owner = request.user
            instance.save()
        formset.save()

    def get_list_editable(self, request, **kwargs):
        added_editable_fields = self._get_added_fields(
            request, 'additional_list_editable')
        return tuple(self.list_editable) + added_editable_fields

    def get_changelist_formset(self, request, **kwargs):
        """
        Modified version of the overriden method.

        Uses get_list_editable() instead of the static list_editable so
        the per-request additions are honoured.
        """
        defaults = {
            'formfield_callback': partial(
                self.formfield_for_dbfield, request=request),
        }
        defaults.update(kwargs)
        list_editable = self.get_list_editable(request, **kwargs)
        return modelformset_factory(
            self.model, self.get_changelist_form(request), extra=0,
            fields=list_editable, **defaults)

    def get_changelist(self, request, **kwargs):
        ChangeList = super(CommonAdmin, self).get_changelist(request, **kwargs)
        list_editable = self.get_list_editable(request, **kwargs)

        class NewChangeList(ChangeList):
            # Inject the per-request list_editable (skipped in popups).
            def __init__(self, *args, **kwargs):
                super(NewChangeList, self).__init__(*args, **kwargs)
                if not self.is_popup:
                    self.list_editable = list_editable
        return NewChangeList

    def get_search_results(self, request, queryset, search_term):
        # Normalise the search term (project-level ``replace`` helper).
        search_term = replace(search_term)
        # FIXME: What follows is a copy of the original get_search_results.
        # It is a workaround to https://code.djangoproject.com/ticket/26184
        # Remove when fixed.
        def construct_search(field_name):
            if field_name.startswith('^'):
                return "%s__istartswith" % field_name[1:]
            elif field_name.startswith('='):
                return "%s__iexact" % field_name[1:]
            elif field_name.startswith('@'):
                return "%s__search" % field_name[1:]
            else:
                return "%s__icontains" % field_name
        use_distinct = False
        search_fields = self.get_search_fields(request)
        if search_fields and search_term:
            orm_lookups = [construct_search(str(search_field))
                           for search_field in search_fields]
            for bit in search_term.split():
                or_queries = [Q(**{orm_lookup: bit})
                              for orm_lookup in orm_lookups]
                queryset = queryset.filter(reduce(operator.or_, or_queries))
            if not use_distinct:
                for search_spec in orm_lookups:
                    if lookup_needs_distinct(self.opts, search_spec):
                        use_distinct = True
                        break
        return queryset, use_distinct
class PublishedAdmin(CommonAdmin):
    # Adds the publication state ('etat') to the CommonAdmin machinery;
    # 'etat' is superuser-only (admin_fields).
    additional_fields = ('etat', 'owner')
    admin_fields = ('etat',)
    additional_list_display = ('etat', 'owner')
    additional_list_editable = ('etat',)
    additional_list_filters = ('etat', 'owner', HasRelatedObjectsListFilter,)


class AutoriteAdmin(PublishedAdmin):
    # Authority records additionally carry public/private notes.
    additional_fields = ('etat', 'notes_publiques', 'notes_privees', 'owner')
class TypeDeParenteCommonAdmin(CommonAdmin):
    # Shared configuration for both kinds of "type de parenté" admins.
    list_display = ('__str__', 'nom', 'nom_pluriel', 'nom_relatif',
                    'nom_relatif_pluriel', 'classement',)
    list_editable = ('nom', 'nom_pluriel', 'nom_relatif',
                     'nom_relatif_pluriel', 'classement',)
    search_fields = ('nom__unaccent', 'nom_relatif__unaccent',
                     'nom_pluriel__unaccent', 'nom_relatif_pluriel__unaccent')
    fieldsets = (
        (None, {'fields': (
            ('nom', 'nom_pluriel'), ('nom_relatif', 'nom_relatif_pluriel'),
            'classement',
        )
        }),
    )


@register(TypeDeParenteDOeuvres)
class TypeDeParenteDOeuvresAdmin(VersionAdmin, TypeDeParenteCommonAdmin):
    pass


@register(TypeDeParenteDIndividus)
class TypeDeParenteDIndividusAdmin(VersionAdmin, TypeDeParenteCommonAdmin):
    pass
@register(Etat)
class EtatAdmin(VersionAdmin, CommonAdmin):
    list_display = ('__str__', 'nom', 'nom_pluriel', 'public',
                    'has_related_objects')
    list_editable = ('nom', 'nom_pluriel', 'public')


@register(NatureDeLieu)
class NatureDeLieuAdmin(VersionAdmin, CommonAdmin):
    list_display = ('__str__', 'nom', 'nom_pluriel', 'referent',)
    list_editable = ('nom', 'nom_pluriel', 'referent',)
    list_filter = ('referent',)
    search_fields = ('nom__unaccent', 'nom_pluriel__unaccent')
@register(Lieu)
class LieuAdmin(OSMGeoAdmin, AutoriteAdmin):
    # Geographic admin for places; the map widget comes from OSMGeoAdmin.
    form = LieuAdminForm
    list_display = ('__str__', 'nom', 'parent', 'nature', 'link',)
    list_editable = ('nom', 'parent', 'nature',)
    search_fields = ('nom__unaccent', 'parent__nom__unaccent',)
    list_filter = ('nature',)
    raw_id_fields = ('parent',)
    autocomplete_lookup_fields = {
        'fk': ['parent'],
    }
    readonly_fields = ('__str__', 'html', 'link',)
    fieldsets = (
        (None, {
            'fields': (('nom', 'parent'), ('nature', 'is_institution'),
                       'historique', 'geometry', ('latitude', 'longitude')),
        }),
    )
    # Default map view; coordinates look like web-mercator values centred
    # on western Europe — TODO confirm the intended centre.
    layerswitcher = False
    default_lon = 300000
    default_lat = 5900000
    default_zoom = 5
    point_zoom = default_zoom
@register(Saison)
class SaisonAdmin(VersionAdmin, CommonAdmin):
    form = SaisonForm
    list_display = ('__str__', 'lieu', 'ensemble', 'debut', 'fin',
                    'evenements_count')
    date_hierarchy = 'debut'
    raw_id_fields = ('lieu', 'ensemble')
    autocomplete_lookup_fields = {
        'fk': ['lieu', 'ensemble'],
    }
@register(Profession)
class ProfessionAdmin(VersionAdmin, AutoriteAdmin):
    # Professions form a tree via ``parent`` and carry feminine variants.
    list_display = ('__str__', 'nom', 'nom_pluriel', 'nom_feminin',
                    'nom_feminin_pluriel', 'parent', 'classement')
    list_editable = ('nom', 'nom_pluriel', 'nom_feminin',
                     'nom_feminin_pluriel', 'parent', 'classement')
    search_fields = (
        'nom__unaccent', 'nom_pluriel__unaccent',
        'nom_feminin__unaccent', 'nom_feminin_pluriel__unaccent')
    raw_id_fields = ('parent',)
    autocomplete_lookup_fields = {
        'fk': ('parent',),
    }
    fieldsets = (
        (None, {
            'fields': ('nom', 'nom_pluriel',
                       'nom_feminin', 'nom_feminin_pluriel',
                       'parent', 'classement'),
        }),
    )
@register(Individu)
class IndividuAdmin(VersionAdmin, AutoriteAdmin):
    list_per_page = 20
    list_display = ('__str__', 'nom', 'prenoms',
                    'pseudonyme', 'titre', 'naissance',
                    'deces', 'calc_professions', 'link',)
    list_editable = ('nom', 'titre',)
    search_fields = (
        'nom__unaccent', 'pseudonyme__unaccent', 'nom_naissance__unaccent',
        'prenoms__unaccent',)
    list_filter = ('titre',)
    form = IndividuForm
    raw_id_fields = ('naissance_lieu', 'deces_lieu', 'professions')
    # NOTE(review): 'parentes' appears in the m2m autocomplete but not in
    # raw_id_fields — confirm it is handled by IndividuForm.
    autocomplete_lookup_fields = {
        'fk': ('naissance_lieu', 'deces_lieu'),
        'm2m': ('professions', 'parentes'),
    }
    readonly_fields = ('__str__', 'html', 'link',)
    inlines = (IndividuParentInline,)
    fieldsets = (
        (None, {
            'fields': (('titre', 'prenoms'), ('particule_nom', 'nom'),
                       'professions',),
        }),
        (_('Naissance'), {
            'fields': (
                ('naissance_date', 'naissance_date_approx'),
                ('naissance_lieu', 'naissance_lieu_approx'))
        }),
        (_('Décès'), {
            'fields': (
                ('deces_date', 'deces_date_approx'),
                ('deces_lieu', 'deces_lieu_approx'))
        }),
        (_('Informations complémentaires'), {
            'classes': ('grp-collapse grp-closed',),
            'fields': ('pseudonyme',
                       'prenoms_complets',
                       ('particule_nom_naissance', 'nom_naissance'),
                       'designation', 'biographie', ('isni', 'sans_isni')),
        }),
    )
    # Interleaving of fieldsets ('f') and inlines ('i') on the change form.
    fieldsets_and_inlines_order = ('f', 'f', 'f', 'f', 'i', 'i')

    def get_queryset(self, request):
        # Avoid N+1 queries in the changelist columns.
        qs = super(IndividuAdmin, self).get_queryset(request)
        return qs.select_related(
            'naissance_lieu', 'deces_lieu', 'etat', 'owner'
        ).prefetch_related('professions')
@register(TypeDEnsemble)
class TypeDEnsembleAdmin(VersionAdmin, CommonAdmin):
    list_display = ('__str__', 'nom', 'nom_pluriel', 'parent')
    list_editable = ('nom', 'nom_pluriel', 'parent')
    search_fields = ('nom__unaccent', 'nom_pluriel__unaccent',)
    raw_id_fields = ('parent',)
    autocomplete_lookup_fields = {
        'fk': ('parent',),
    }
@register(Ensemble)
class EnsembleAdmin(VersionAdmin, AutoriteAdmin):
    form = EnsembleForm
    list_display = ('__str__', 'type', 'membres_count')
    search_fields = ('nom__unaccent', 'membres__individu__nom__unaccent')
    inlines = (MembreInline,)
    raw_id_fields = ('siege', 'type')
    autocomplete_lookup_fields = {
        'fk': ('siege', 'type'),
    }
    fieldsets = (
        (None, {
            'fields': (('particule_nom', 'nom'), 'type', 'siege',
                       ('isni', 'sans_isni')),
        }),
        PERIODE_D_ACTIVITE_FIELDSET,
    )
    # Interleaving of fieldsets ('f') and inlines ('i') on the change form.
    fieldsets_and_inlines_order = ('f', 'f', 'i')
@register(GenreDOeuvre)
class GenreDOeuvreAdmin(VersionAdmin, CommonAdmin):
    list_display = ('__str__', 'nom', 'nom_pluriel', 'has_related_objects')
    list_editable = ('nom', 'nom_pluriel',)
    search_fields = ('nom__unaccent', 'nom_pluriel__unaccent',)
    raw_id_fields = ('parents',)
    autocomplete_lookup_fields = {
        'm2m': ('parents',),
    }


@register(TypeDeCaracteristiqueDeProgramme)
class TypeDeCaracteristiqueDeProgrammeAdmin(VersionAdmin, CommonAdmin):
    list_display = ('__str__', 'nom', 'nom_pluriel', 'classement',)
    list_editable = ('nom', 'nom_pluriel', 'classement',)
    search_fields = ('nom__unaccent', 'nom_pluriel__unaccent')


@register(CaracteristiqueDeProgramme)
class CaracteristiqueDeProgrammeAdmin(VersionAdmin, CommonAdmin):
    list_display = ('__str__', 'type', 'valeur', 'classement',)
    list_editable = ('valeur', 'classement',)
    search_fields = ('type__nom__unaccent', 'valeur__unaccent')
@register(Partie)
class PartieAdmin(VersionAdmin, AutoriteAdmin):
    # Roles and instruments ("parties"), optionally tied to a work.
    form = PartieForm
    list_display = (
        '__str__', 'nom', 'parent', 'oeuvre', 'classement',
        'premier_interprete',
    )
    list_editable = (
        'nom', 'parent', 'oeuvre', 'classement', 'premier_interprete',
    )
    list_filter = ('type',)
    list_select_related = ('parent', 'etat', 'owner')
    search_fields = ('nom__unaccent',)
    radio_fields = {'type': HORIZONTAL}
    raw_id_fields = ('oeuvre', 'professions', 'parent', 'premier_interprete')
    autocomplete_lookup_fields = {
        'm2m': ('professions',),
        'fk': ('oeuvre', 'parent', 'premier_interprete'),
    }
    fieldsets = (
        (None, {
            'fields': (
                'type', ('nom', 'nom_pluriel'),
                'oeuvre', 'professions', 'parent', 'classement',
                'premier_interprete',
            ),
        }),
    )
@register(Oeuvre)
class OeuvreAdmin(VersionAdmin, AutoriteAdmin):
    form = OeuvreForm
    list_display = ('__str__', 'titre', 'titre_secondaire', 'genre',
                    'caracteristiques_html', 'auteurs_html',
                    'creation', 'link',)
    search_fields = Oeuvre.autocomplete_search_fields(add_icontains=False)
    list_filter = ('genre', 'tonalite', 'arrangement', 'type_extrait')
    list_select_related = ('genre', 'etat', 'owner')
    date_hierarchy = 'creation_date'
    raw_id_fields = ('genre', 'extrait_de', 'creation_lieu')
    autocomplete_lookup_fields = {
        'fk': ('genre', 'extrait_de', 'creation_lieu'),
    }
    readonly_fields = ('__str__', 'html', 'link',)
    inlines = (AuteurInline, PupitreInline, OeuvreMereInline)
    fieldsets = (
        (_('Titre significatif'), {
            'fields': (('prefixe_titre', 'titre',), 'coordination',
                       ('prefixe_titre_secondaire', 'titre_secondaire',),),
        }),
        (None, {
            'fields': (('genre', 'numero'), ('coupe', 'indeterminee')),
        }),
        (_('Données musicales'), {
            'fields': ('incipit', ('tempo', 'tonalite'),
                       ('sujet', 'arrangement')),
        }),
        (None, {
            'fields': (('surnom', 'nom_courant'),),
        }),
        (None, {
            'fields': (('opus', 'ict'),),
        }),
        (None, {
            'fields': ('extrait_de', ('type_extrait', 'numero_extrait')),
        }),
        (_('Création'), {
            'fields': (
                'creation_type',
                ('creation_date', 'creation_date_approx'),
                ('creation_heure', 'creation_heure_approx'),
                ('creation_lieu', 'creation_lieu_approx'))
        }),
    )
    # Interleaving of fieldsets ('f') and inlines ('i') on the change form.
    fieldsets_and_inlines_order = ('i', 'f', 'f', 'i', 'f', 'f', 'f', 'f', 'f')

    def get_queryset(self, request):
        # Avoid N+1 queries for the computed changelist columns.
        qs = super(OeuvreAdmin, self).get_queryset(request)
        return qs.select_related(
            'genre', 'extrait_de', 'creation_lieu',
            'etat', 'owner'
        ).prefetch_related(
            'auteurs__individu', 'auteurs__ensemble', 'auteurs__profession',
            'pupitres__partie'
        )
# Hard cap on a single PDF export.
MAX_EXPORTED_EVENTS = 200


def events_to_pdf(modeladmin, request, queryset):
    """Admin action: export the selected events to PDF via a background job.

    Silently truncates (with a warning message) to MAX_EXPORTED_EVENTS.
    NOTE(review): the warning string is not wrapped in _() unlike the rest
    of the module — confirm whether it should be translatable.
    """
    # Ensures the user is not trying to see something he should not.
    queryset = queryset.published(request)
    n = queryset.count()
    if n > MAX_EXPORTED_EVENTS:
        modeladmin.message_user(
            request,
            'Trop d’événements sélectionnés pour l’export ; '
            'seuls les %s premiers seront exportés' % MAX_EXPORTED_EVENTS,
            messages.WARNING)
        queryset = queryset[:MAX_EXPORTED_EVENTS]
        n = MAX_EXPORTED_EVENTS
    launch_export(
        events_to_pdf_job, request,
        list(queryset.values_list('pk', flat=True)), 'PDF', 'de %s événements' % n)
events_to_pdf.short_description = _('Exporter en PDF')
@register(Evenement)
class EvenementAdmin(SuperModelAdmin, VersionAdmin, AutoriteAdmin):
    list_display = ('__str__', 'relache', 'circonstance',
                    'has_source', 'has_program', 'link',)
    list_editable = ('relache', 'circonstance',)
    search_fields = ('circonstance__unaccent', 'debut_lieu__nom__unaccent')
    list_filter = ('relache', EventHasSourceListFilter,
                   EventHasProgramListFilter)
    list_select_related = ('debut_lieu', 'debut_lieu__nature',
                           'fin_lieu', 'fin_lieu__nature',
                           'etat', 'owner')
    date_hierarchy = 'debut_date'
    raw_id_fields = ('debut_lieu', 'fin_lieu', 'caracteristiques')
    autocomplete_lookup_fields = {
        'fk': ('debut_lieu', 'fin_lieu'),
        'm2m': ('caracteristiques',),
    }
    readonly_fields = ('__str__', 'html', 'link')
    inlines = (ElementDeDistributionInline, ElementDeProgrammeInline)
    actions = [events_to_pdf]
    fieldsets = (
        (_('Début'), {
            'fields': (
                ('debut_date', 'debut_date_approx'),
                ('debut_heure', 'debut_heure_approx'),
                ('debut_lieu', 'debut_lieu_approx'))
        }),
        (_('Fin'), {
            'classes': ('grp-collapse grp-closed',),
            'fields': (
                ('fin_date', 'fin_date_approx'),
                ('fin_heure', 'fin_heure_approx'),
                ('fin_lieu', 'fin_lieu_approx'))
        }),
        (None, {
            'fields': (('circonstance', 'programme_incomplet', 'relache',),
                       'caracteristiques',),
        }),
        (_('Données économiques'), {
            'classes': ('grp-collapse grp-closed',),
            'fields': (('recette_generale', 'recette_par_billets'),),
        }),
    )
    # Interleaving of fieldsets ('f') and inlines ('i') on the change form.
    fieldsets_and_inlines_order = ('f', 'f', 'f', 'i', 'i')

    def get_queryset(self, request):
        qs = super(EvenementAdmin, self).get_queryset(request)
        # Annotate with raw-SQL EXISTS subqueries so the has_program /
        # has_source columns avoid one query per row.
        qs = qs.extra(select={
            '_has_program':
                'EXISTS (SELECT 1 FROM %s WHERE evenement_id = %s.id)'
                % (ElementDeProgramme._meta.db_table, Evenement._meta.db_table),
            '_has_source':
                'EXISTS (SELECT 1 FROM %s WHERE evenement_id = %s.id)'
                % (Source.evenements.field.m2m_db_table(),
                   Evenement._meta.db_table)})
        return qs.select_related(
            'debut_lieu', 'debut_lieu__nature',
            'debut_lieu__parent', 'debut_lieu__parent__nature',
            'etat', 'owner')
@register(TypeDeSource)
class TypeDeSourceAdmin(VersionAdmin, CommonAdmin):
    list_display = ('__str__', 'nom', 'nom_pluriel',)
    list_editable = ('nom', 'nom_pluriel',)
    search_fields = ('nom__unaccent', 'nom_pluriel__unaccent')
def split_pdf(modeladmin, request, queryset):
    """Admin action: asynchronously split each selected childless PDF
    source into per-page children (one Celery-style job per source).

    Only one split batch per user at a time, enforced via the
    lock_user/is_user_locked helpers.
    """
    # Ensures the user is not trying to see something he should not.
    queryset = queryset.published(request)
    # Only plain PDF files that have not already been split.
    queryset = queryset.filter(
        type_fichier=FileAnalyzer.OTHER, fichier__endswith='.pdf',
        children__isnull=True,
    )
    if not queryset:
        messages.warning(
            request,
            _('Aucune source sélectionnée n’est un PDF sans enfant.')
        )
        return
    if is_user_locked(request.user):
        messages.error(
            request,
            _('Une séparation de PDF de votre part est déjà en cours. '
              'Veuillez attendre la fin de celle-ci avant '
              'd’en lancer une autre.'))
        return
    lock_user(request.user)
    for source in queryset:
        split_pdf_job.delay(source.pk, request.user.pk)
    messages.info(
        request,
        _('La séparation de PDF est en cours. '
          'Revenez consulter les sources dans quelques minutes.'))
split_pdf.short_description = _('Séparer le PDF')
@register(Source)
class SourceAdmin(VersionAdmin, AutoriteAdmin):
    form = SourceForm
    list_display = (
        '__str__', 'parent', 'position', 'date', 'type', 'has_events',
        'has_program', 'link',
    )
    list_editable = ('parent', 'position', 'type', 'date')
    list_select_related = ('type', 'etat', 'owner')
    date_hierarchy = 'date'
    search_fields = (
        'type__nom__unaccent', 'titre__unaccent', 'date',
        'date_approx__unaccent', 'numero__unaccent',
        'lieu_conservation__unaccent', 'cote__unaccent')
    list_filter = (SourceHasParentListFilter, 'type', 'titre',
                   SourceHasEventsListFilter, SourceHasProgramListFilter)
    raw_id_fields = ('parent', 'evenements', 'editeurs_scientifiques')
    autocomplete_lookup_fields = {
        'fk': ('parent',),
        'm2m': ('editeurs_scientifiques',),
    }
    related_lookup_fields = {
        'm2m': ['evenements'],
    }
    readonly_fields = ('__str__', 'html', 'children_links')
    inlines = (
        AuteurInline, SourceIndividuInline, SourceOeuvreInline,
        SourcePartieInline, SourceLieuInline, SourceEvenementInline,
        SourceEnsembleInline,
    )
    actions = [split_pdf]
    fieldsets = (
        (None, {
            'fields': (
                ('parent', 'position', 'est_promu'),
            ),
        }),
        (None, {
            'fields': (
                'type', 'titre', 'legende',
            ),
        }),
        (None, {
            'fields': (
                ('date', 'date_approx'),
                ('numero', 'page', 'folio',),
                ('lieu_conservation', 'cote',),
                'url',
            )
        }),
        (_('Transcription'), {
            'classes': ('grp-collapse grp-closed',),
            'fields': ('transcription',),
        }),
        (None, {
            'fields': (('fichier', 'telechargement_autorise'),),
        }),
        (None, {
            'fields': ('children_links',),
        }),
        (_('Présentation'), {
            'classes': ('grp-collapse grp-closed',),
            'fields': (
                'editeurs_scientifiques', 'date_publication', 'publications',
                'developpements', 'presentation', 'contexte',
                'sources_et_protocole', 'bibliographie',
            ),
        })
    )
    # Interleaving of fieldsets ('f') and inlines ('i') on the change form.
    fieldsets_and_inlines_order = ('f', 'f', 'f', 'f', 'f', 'i', 'i',
                                   'i', 'i', 'i', 'i', 'i', 'f')
    # NOTE(review): 'est_promue' here vs 'est_promu' in the first fieldset
    # above — confirm which spelling matches the model field.
    admin_fields = AutoriteAdmin.admin_fields + ('est_promue',)
    formfield_overrides = {
        TextField: {'widget': TinyMCE},
    }

    def get_queryset(self, request):
        qs = super(SourceAdmin, self).get_queryset(request)
        # Raw-SQL EXISTS annotations backing the has_events / has_program
        # changelist columns without per-row queries.
        qs = qs.extra(
            select={
                '_has_events':
                    'EXISTS ('
                    '    SELECT 1 FROM %(evenement)s '
                    '    INNER JOIN %(m2m)s ON %(evenement)s.id '
                    '                          = %(m2m)s.evenement_id '
                    '    WHERE %(m2m)s.source_id = %(source)s.id)' % {
                        'evenement': Evenement._meta.db_table,
                        'm2m': Source.evenements.field.m2m_db_table(),
                        'source': Source._meta.db_table,
                    },
                '_has_program':
                    'EXISTS ('
                    '    SELECT 1 FROM %(evenement)s '
                    '    INNER JOIN %(m2m)s ON %(evenement)s.id '
                    '                          = %(m2m)s.evenement_id '
                    '    WHERE (%(m2m)s.source_id = %(source)s.id '
                    '           AND (%(evenement)s.relache = true '
                    '                OR EXISTS (SELECT 1 FROM %(programme)s '
                    '                           WHERE %(programme)s.evenement_id '
                    '                                 = %(evenement)s.id))))' % {
                        'evenement': Evenement._meta.db_table,
                        'm2m': Source.evenements.field.m2m_db_table(),
                        'source': Source._meta.db_table,
                        'programme': ElementDeProgramme._meta.db_table,
                    }
            }
        )
        return qs

    def change_view(self, request, object_id, form_url='', extra_context=None):
        # Audio/video sources have their own admin; redirect there so the
        # specialised fieldsets are used.
        source = self.get_object(request, object_id)
        if source is not None and isinstance(source.specific, (Video, Audio)):
            change_url = source.get_change_url()
            if change_url != request.path:
                return redirect(change_url)
        return super().change_view(
            request, object_id, form_url=form_url, extra_context=extra_context,
        )

    def children_links(self, instance):
        """Comma-separated links to the child sources, ordered by position."""
        return format_html_join(
            ', ',
            '<a href="{}">{}</a>',
            [(child.get_change_url(), child.position)
             for child in instance.children.order_by('position')]
        )
    children_links.short_description = _('Enfants')
@register(Audio)
class AudioAdmin(SourceAdmin):
    # Audio sources: reuse the first four Source fieldsets and replace the
    # file fieldset with the transcoded/extract file fields (read-only,
    # presumably filled by background processing — TODO confirm).
    readonly_fields = SourceAdmin.readonly_fields + (
        'fichier_ogg', 'fichier_mpeg', 'extrait_ogg', 'extrait_mpeg',
        'duree', 'duree_extrait',
    )
    fieldsets = (
        SourceAdmin.fieldsets[0],
        SourceAdmin.fieldsets[1],
        SourceAdmin.fieldsets[2],
        SourceAdmin.fieldsets[3],
        (_('Fichiers'), {
            'fields': (
                ('fichier', 'duree'),
                ('fichier_ogg', 'fichier_mpeg'),
                ('extrait', 'duree_extrait'),
                ('extrait_ogg', 'extrait_mpeg'),
            ),
        }),
    )
@register(Video)
class VideoAdmin(AudioAdmin):
    # Video sources: like AudioAdmin plus the frame-dimension fields.
    readonly_fields = AudioAdmin.readonly_fields + (
        'largeur', 'hauteur', 'largeur_extrait', 'hauteur_extrait',
    )
    fieldsets = (
        SourceAdmin.fieldsets[0],
        SourceAdmin.fieldsets[1],
        SourceAdmin.fieldsets[2],
        SourceAdmin.fieldsets[3],
        (_('Fichiers'), {
            'fields': (
                ('fichier', 'duree',
                 'largeur', 'hauteur'),
                ('fichier_ogg', 'fichier_mpeg'),
                ('extrait', 'duree_extrait',
                 'largeur_extrait', 'hauteur_extrait'),
                ('extrait_ogg', 'extrait_mpeg'),
            ),
        }),
    )
| {
"repo_name": "dezede/dezede",
"path": "libretto/admin.py",
"copies": "1",
"size": "39854",
"license": "bsd-3-clause",
"hash": -2499095942059710000,
"line_mean": 33.7900262467,
"line_max": 83,
"alpha_frac": 0.5680623664,
"autogenerated": false,
"ratio": 3.6535281146637266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47215904810637266,
"avg_score": null,
"num_lines": null
} |
from functools import partial, reduce
from flask import Blueprint, jsonify, render_template
from cachetools import cached, hashkey
from viewserver.funcs import pretty_bytes
from viewserver.ext.caching import TTL_CACHE
from viewserver.ext.periodic import relay_state
# Flask blueprint holding this module's views.
blu = Blueprint('core', __name__)
@cached(TTL_CACHE, key=partial(hashkey, 'status'))
def get_frame_data(frames):
    """Return, per relay, the last ``frames`` collected state entries.

    ``frames`` is clamped to at least 1.  For a single frame the raw
    latest entry is returned; otherwise a list of the most recent ones.
    Memoized in TTL_CACHE under a key derived from ('status', frames).
    """
    window = max(1, frames)
    if window == 1:
        # Only the most recent status from the collected states.
        return {name: states[-1] for name, states in relay_state.items()}
    return {name: list(states)[-window:]
            for name, states in relay_state.items()}
@blu.route('/')
@cached(TTL_CACHE, key=partial(hashkey, 'index'))
def index():
    """Render the landing page: newest/oldest relay and total traffic."""
    data = get_frame_data(1)
    # (uptime, host, owner) triples; tuple ordering makes min() the
    # smallest uptime (newest relay) and max() the oldest.
    triples = [(state['meta']['uptime'], state['host'],
                state['meta']['provided_by'])
               for state in data.values()]
    newest, oldest = {}, {}
    if triples:
        for target, triple in ((newest, min(triples)), (oldest, max(triples))):
            target['uptime'], target['ip'], target['owner'] = triple
    raw_total = sum(state['meta']['bytes_proxied']['raw']
                    for state in data.values()) or 0
    total_bytes = pretty_bytes(raw_total).upper()
    return render_template('index.html',
                           top_data=data,
                           newest=newest,
                           oldest=oldest,
                           total_bytes=total_bytes)
@blu.route('/status/')
@blu.route('/status/<int:frames>')
def status(frames=1):
    """JSON dump of the last ``frames`` states for every relay."""
    return jsonify(get_frame_data(frames))
| {
"repo_name": "blakev/syncthing-relaysrv-status",
"path": "viewserver/views.py",
"copies": "1",
"size": "1609",
"license": "mit",
"hash": 3869246328695985000,
"line_mean": 28.7962962963,
"line_max": 107,
"alpha_frac": 0.6016159105,
"autogenerated": false,
"ratio": 3.715935334872979,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4817551245372979,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from gooey.gui.util.filedrop import FileDrop
__author__ = 'Chris'
from abc import ABCMeta, abstractmethod
import wx
from gooey.gui.widgets.calender_dialog import CalendarDlg
class WidgetPack(object):
    """Abstract contract to which every widget pack adheres."""
    # Python-2 style metaclass declaration; inert under Python 3 —
    # kept verbatim to avoid changing instantiation behaviour.
    __metaclass__ = ABCMeta

    @abstractmethod
    def build(self, parent, data):
        pass

    @abstractmethod
    def getValue(self):
        pass

    def onResize(self, evt):
        # Widgets ignore resize events unless they override this.
        pass

    @staticmethod
    def get_command(data):
        """Return the first configured command string, or '' if none."""
        commands = data['commands']
        return commands[0] if commands else ''
class BaseChooser(WidgetPack):
    """Text box plus "Browse" button combo shared by chooser widgets."""

    def __init__(self, button_text='Browse'):
        self.button_text = button_text
        self.option_string = None
        self.parent = None
        self.text_box = None
        self.button = None

    def build(self, parent, data=None):
        """Create the text box (with file drag-and-drop) and the button."""
        self.parent = parent
        self.option_string = data['commands'][0] if data['commands'] else ''
        self.text_box = wx.TextCtrl(self.parent)
        self.text_box.SetMinSize((0, -1))
        self.text_box.SetDropTarget(FileDrop(self.text_box))
        self.button = wx.Button(self.parent, label=self.button_text,
                                size=(73, 23))

        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(self.text_box, 1, wx.EXPAND)
        sizer.AddSpacer(10)
        sizer.Add(self.button, 0)
        parent.Bind(wx.EVT_BUTTON, self.onButton, self.button)
        return sizer

    def getValue(self):
        """Return the quoted value, prefixed by the option string if set;
        None when the text box is empty."""
        value = self.text_box.GetValue()
        if not value:
            return None
        if self.option_string:
            return '{0} "{1}"'.format(self.option_string, value)
        return '"{}"'.format(value)

    def onButton(self, evt):
        # Subclasses decide what the browse button does.
        raise NotImplementedError
class BaseFileChooser(BaseChooser):
    """Chooser whose browse button opens the wx dialog built by *dialog*."""

    def __init__(self, dialog):
        BaseChooser.__init__(self)
        self.dialog = dialog

    def onButton(self, evt):
        dlg = self.dialog(self.parent)
        if dlg.ShowModal() != wx.ID_OK:
            return
        path = dlg.GetPath()
        if path:
            # self.text_box references a field on the class this is passed
            # into; kinda hacky, but avoided a buncha boilerplate.
            self.text_box.SetValue(path)
def build_dialog(style, exist_constraint=True, **kwargs):
    """Return a factory mapping a panel to a configured wx.FileDialog.

    With *exist_constraint*, wx.FD_FILE_MUST_EXIST is OR-ed into *style*.
    """
    if not exist_constraint:
        return lambda panel: wx.FileDialog(panel, style=style, **kwargs)
    return lambda panel: wx.FileDialog(
        panel, style=style | wx.FD_FILE_MUST_EXIST, **kwargs)
# Ready-made chooser payloads: each pairs BaseFileChooser with a dialog
# factory.  The saver variants pass exist_constraint=False positionally
# (files being saved need not exist yet).
FileChooserPayload = partial(BaseFileChooser, dialog=build_dialog(wx.FD_OPEN))
FileSaverPayload = partial(BaseFileChooser, dialog=build_dialog(wx.FD_SAVE, False, defaultFile="Enter Filename"))
MultiFileSaverPayload = partial(BaseFileChooser, dialog=build_dialog(wx.FD_MULTIPLE, False))
DirChooserPayload = partial(BaseFileChooser, dialog=lambda parent: wx.DirDialog(parent, 'Select Directory', style=wx.DD_DEFAULT_STYLE))
DateChooserPayload = partial(BaseFileChooser, dialog=CalendarDlg)
class TextInputPayload(WidgetPack):
    """Plain text-entry widget; emits '<option> <text>' or the raw text."""

    def __init__(self):
        self.widget = None
        self.option_string = None

    def build(self, parent, data):
        self.option_string = self.get_command(data)
        self.widget = wx.TextCtrl(parent)
        # Accept files dragged onto the control.
        self.widget.SetDropTarget(FileDrop(self.widget))
        self.widget.SetMinSize((0, -1))
        self.widget.SetDoubleBuffered(True)
        return self.widget

    def getValue(self):
        text = self.widget.GetValue()
        if text and self.option_string:
            return '{} {}'.format(self.option_string, text)
        return text

    def _SetValue(self, text):
        # used for testing
        self.widget.SetLabelText(text)
class DropdownPayload(WidgetPack):
    """Dropdown (ComboBox) widget.

    getValue() returns '' while the placeholder is still selected,
    '<option> <choice>' when an option string is configured, or the bare
    choice otherwise.
    """
    default_value = 'Select Option'

    def __init__(self):
        self.option_string = None
        self.widget = None

    def build(self, parent, data):
        self.option_string = self.get_command(data)
        self.widget = wx.ComboBox(
            parent=parent,
            id=-1,
            value=self.default_value,
            choices=data['choices'],
            style=wx.CB_DROPDOWN
        )
        return self.widget

    def getValue(self):
        value = self.widget.GetValue()
        if value == self.default_value:
            return ''
        elif value and self.option_string:
            return '{} {}'.format(self.option_string, value)
        else:
            # BUG FIX: the original fell through this branch without a
            # `return`, so a selection made with no option string yielded
            # None instead of the chosen value.
            return value

    def _SetValue(self, text):
        # used for testing
        self.widget.SetLabelText(text)
class CounterPayload(WidgetPack):
    """Counter widget: a dropdown of 1..6 that repeats a short flag."""

    def __init__(self):
        self.option_string = None
        self.widget = None

    def build(self, parent, data):
        self.option_string = self.get_command(data)
        self.widget = wx.ComboBox(
            parent=parent,
            id=-1,
            value='',
            choices=[str(x) for x in range(1, 7)],
            style=wx.CB_DROPDOWN
        )
        return self.widget

    def getValue(self):
        '''
        Returns
          str(option_string * DropDown Value)
        e.g.
          -vvvvv
        '''
        selection = self.widget.GetValue()
        if not str(selection).isdigit():
            return ''
        flag = str(self.option_string).replace('-', '')
        return '-' + flag * int(selection)
| {
"repo_name": "jonathanlurie/timelapseComposer",
"path": "lib/python/gooey/gui/widgets/widget_pack.py",
"copies": "2",
"size": "5287",
"license": "mit",
"hash": 2080243156459443500,
"line_mean": 25.8263157895,
"line_max": 139,
"alpha_frac": 0.6451673917,
"autogenerated": false,
"ratio": 3.641184573002755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5286351964702755,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from math import cos, sin, sqrt, pi, ceil
import numpy as np
from numpy.linalg import norm
import scipy.optimize as so
# Unit vector along z; used as the rotation axis for the ascending-node turn.
Z_AXIS = np.array([0.0, 0.0, 1.0])

# Earth's heliocentric orbital elements: semi-major axis (AU), eccentricity,
# inclination, argument of perihelion and longitude of the ascending node
# (both converted to radians). Values look like the JPL approximate
# Keplerian elements — TODO confirm the intended epoch (presumably J2000).
EARTH_A = 1.00000261
EARTH_E = 0.01671123
EARTH_I = 0.0
EARTH_W = np.radians(114.20783)
EARTH_OM = np.radians(348.73936)
def get_orbpoint_earth(t, method='direct'):
    """Heliocentric position of Earth at true anomaly `t` (radians)."""
    return get_orbpoint(t, EARTH_A, EARTH_E, EARTH_W, EARTH_I, EARTH_OM,
                        method=method)
def get_orbpoint(t, a, e, w, i, om, method='direct'):
    """Dispatch to the 'direct' or 'rotation' orbit-point implementation."""
    if method == 'rotation':
        return get_orbpoint_rotation(t, a, e, w, i, om)
    if method == 'direct':
        return get_orbpoint_direct(t, a, e, w, i, om)
    raise AttributeError('method "%s" is not specified.' % method)
def get_orbpoints(a, e, w, i, om, numpoints=100, method='direct'):
    """Sample numpoints+1 heliocentric points along the whole orbit.

    'direct' evaluates the closed-form position for every angle;
    'rotation' builds the flat in-plane ellipse once, then applies the two
    frame rotations (inclination about the node axis, then the turn about
    z) to every point.
    NOTE(review): an unrecognized `method` leaves points_hc unbound and
    raises NameError at the return.
    """
    theta = _get_nonuniform_angles(n=numpoints)
    if method == 'direct':
        points_hc = np.array([get_orbpoint_direct(t, a, e, w, i, om) for t in theta])
    elif method == 'rotation':
        points_flat = np.array([_orbpoint_flat(t, a, e) for t in theta])
        # Incline the orbital plane about the line of nodes (axis at -w).
        axis_w = np.array([cos(-w), sin(-w), 0.0])
        rotw = _rotmatrix(axis_w, i)
        points_inc = np.array([np.dot(point, rotw) for point in points_flat])
        # Then rotate by node longitude + argument of perihelion about z.
        wb = om + w
        rotz = _rotmatrix(Z_AXIS, wb)
        points_hc = np.array([np.dot(point, rotz) for point in points_inc])
    return points_hc
def get_orbpoint_direct(t, a, e, w, i, om):
    """Closed-form heliocentric position at true anomaly `t` for orbital
    elements (a, e, w, i, om); returns np.array([x, y, z])."""
    radius = _get_r(t, a, e)
    u = t + w  # argument of latitude
    x = radius * (cos(om) * cos(u) - sin(om) * sin(u) * cos(i))
    y = radius * (sin(om) * cos(u) + cos(om) * sin(u) * cos(i))
    z = radius * sin(u) * sin(i)
    return np.array([x, y, z])
def get_orbpoint_rotation(t, a, e, w, i, om):
    """Heliocentric position via explicit rotations: take the flat in-plane
    point, incline it about the node axis, then turn about z."""
    flat = _orbpoint_flat(t, a, e)
    node_axis = np.array([cos(-w), sin(-w), 0.0])
    inclined = _rotate(flat, node_axis, i)
    return _rotate(inclined, Z_AXIS, om + w)
def _orbpoint_flat(t, a, e):
    """In-plane point [x, y, 0] of the ellipse at true anomaly `t`."""
    radius = _get_r(t, a, e)
    return [radius * cos(t), radius * sin(t), 0]
def _get_r(t, a, e):
r = a*(1 - e**2)/(1 + e*cos(t))
return r
def _rotate(point, ax, angle):
    """Rotate `point` by `angle` radians about axis `ax` (row-vector form)."""
    return np.dot(point, _rotmatrix(ax, angle))
def _rotmatrix(ax, angle):
cosa = cos(angle)
sina = sin(angle)
x, y, z = ax
rot = np.array([[cosa + (x**2)*(1 - cosa), x*y*(1 - cosa) - z*sina, x*z*(1 - cosa) + y*sina],
[y*x*(1 - cosa) + z*sina, cosa + (y**2)*(1 - cosa), y*z*(1 - cosa) - x*sina],
[z*x*(1 - cosa) - y*sina, z*y*(1 - cosa) + x*sina, cosa + (z**2)*(1 - cosa)]])
return rot
def _find_dist(t, a, e, w, i, om):
    """Distance between the body at true anomaly t[0] and Earth at t[1]."""
    body_point = get_orbpoint(t[0], a, e, w, i, om)
    return norm(body_point - get_orbpoint_earth(t[1]))
def get_moid(a, e, w, i, om):
    "Returns Minimal Earth Orbit Intersection Distance"
    # Four pairs of starting guesses, each offset +/- 90 degrees from the
    # perihelion arguments of the two orbits, so the local minimizer is
    # seeded near each candidate crossing of the ellipses.
    ta0 = [(w - pi*0.5), (w - pi*0.5), (w + pi*0.5), (w + pi*0.5)]
    te0 = [(om - pi*0.5), (om + pi*0.5), (om + pi*0.5), (om - pi*0.5)]
    moid_min = 5.45492 # Jupiter aphelion
    for ta, te in zip(ta0, te0):
        # Nelder-Mead minimization of the inter-orbit distance over the
        # two true anomalies; keep the smallest local minimum found.
        ta_te_min = so.fmin(partial(_find_dist, a=a, e=e, w=w, i=i, om=om), [ta, te], disp=False)
        moid = _find_dist(ta_te_min, a, e, w, i, om)
        moid_min = min(moid_min, moid)
    return moid_min
def _get_nonuniform_angles(n=100):
# theta = np.linspace(0, 2*pi, numpoints)
theta = []
delta = pi/n
t = 0
base = 0
# add, base = 0
for p in range(n+1):
angle = abs(base - pi*sin(t)) + base
t += delta
theta.append(angle)
if p == ceil(n/2):
base = pi
theta = np.asarray(theta)
# angles = [pi*p/float(numpoints) for p in range(numpoints)]
# theta1 = [2*pi*sin(pi*an/float(numpoints)) for an in range(numpoints)]
# logspace_pi = np.logspace(0.01, 1, int(numpoints*0.5))*0.1
# lsp1 = (1 - logspace_pi)
# sp1 = np.sort(pi*(lsp1 - lsp1[0] + 1))
# lsp2 = (logspace_pi-logspace_pi[0])
# sp2 = lsp2*pi/lsp2[-1] + pi
# theta = np.concatenate((sp1,sp2[1:]))
# print "theta:", theta
# print "theta1:", theta1
# print
return theta
| {
"repo_name": "nomad-vagabond/asterion",
"path": "calculate_orbits.py",
"copies": "1",
"size": "4401",
"license": "mit",
"hash": -5227596828964264000,
"line_mean": 32.3828125,
"line_max": 98,
"alpha_frac": 0.5339695524,
"autogenerated": false,
"ratio": 2.5148571428571427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8511483117931669,
"avg_score": 0.007468715465094738,
"num_lines": 128
} |
from functools import partial
from random import random, randint, choice
import pygame
import init as _
from baseclass import BaseClass
from options import Options
try:
from cython_ import collide
except ImportError:
from python_ import collide
from miscellaneous import further_than, scale
from tile import Tile
class PickUp(BaseClass):
    """Creates a PickUp

    Params:
        x: x coordinate of the PickUp
        y: y coordinate of the PickUp
        spawn_tile: The index of the tile on which the PickUp is
        type_: A float between 0 and 1. If it is under 2/3 the PickUp is
            ammo, else health.

    Example:
        >>> tile = Tile.instances[PickUp.spawn_tiles[0]]
        >>> a = PickUp(*tile.pos, PickUp.spawn_tiles[0], type_=0.9)
        >>> a.type
        'health'

    TODO: Add more pick ups"""
    # Indices of map tiles marked 'P' — the legal pick-up spawn points.
    with open(Options.mappath) as file:
        spawn_tiles = [
            i for i, x in enumerate(file.read().replace("\n", "")) if x == "P"
        ]
    # Pick-ups allowed per round / remaining in the current round.
    init_round, left_round = 4, 4
    # NOTE(review): presumably assigned by the game setup each round —
    # confirm; update() only guards ZeroDivisionError, so a None here
    # would raise an uncaught TypeError.
    zombie_init_round = None
    images = {"ammo": scale(pygame.image.load("assets/Images/PickUps/ammo.png")),
              "health": scale(pygame.image.load("assets/Images/PickUps/health.png"))}
    sounds = {"ammo": pygame.mixer.Sound("assets/Audio/PickUp/ammo_short.ogg"),
              "health": pygame.mixer.Sound("assets/Audio/PickUp/health.ogg")}
    sounds["ammo"].set_volume(Options.volume)
    sounds["health"].set_volume(Options.volume)
    # All live pick-ups currently on the map.
    instances = set()

    def __init__(self, x, y, spawn_tile, type_):
        super().__init__(x, y)
        PickUp.instances.add(self)
        # Amount of ammo/health granted when collected.
        self.incr = randint(20, 35)
        self.spawn_tile = spawn_tile
        self.type = "ammo" if type_ < 2 / 3 else "health"
        # Reserve the tile so nothing else spawns on it while this exists.
        PickUp.spawn_tiles.remove(spawn_tile)

    @classmethod
    def spawn(cls, survivor):
        # Spawn one pick-up, preferring tiles >= 150px from the survivor.
        _further_than = partial(further_than, survivor=survivor, min_dist=150)
        pos_spawn_tiles = list(filter(_further_than, cls.spawn_tiles))
        if not pos_spawn_tiles:  # If no pick-up spawn is far enough away
            if not cls.spawn_tiles:  # If all pick-up spawns are occupied, don"t spawn
                return
            pos_spawn_tiles.extend(cls.spawn_tiles)
        cls.left_round -= 1
        type_ = random()
        spawn_tile = choice(pos_spawn_tiles)
        spawn_node = Tile.instances[spawn_tile]
        cls(*spawn_node.pos, spawn_tile, type_)

    @classmethod
    def update(cls, screen, survivor, total_frames):
        # Spawn pick-ups on schedule, draw all live ones, and apply any
        # that collide with the survivor.
        if cls.left_round:
            try:
                if total_frames % ((Options.fps * cls.zombie_init_round * 2) //
                                   cls.init_round) == 0:
                    cls.spawn(survivor)
            except ZeroDivisionError:
                # NOTE(review): `%` and `*` share precedence, so this is
                # (total_frames % Options.fps) * 10 — the `* 10` has no
                # effect on the == 0 test; `% (Options.fps * 10)` may have
                # been intended. Confirm before changing.
                if total_frames % Options.fps * 10 == 0:
                    cls.spawn(survivor)
        del_pick_up = set()
        for pick_up in cls.instances:
            screen.blit(cls.images[pick_up.type], pick_up.pos.as_ints())
            if collide(*pick_up.pos, *pick_up._size, *survivor.pos, *survivor._size):
                # Grant the bonus: survivor.ammo or survivor.health += incr.
                setattr(survivor, pick_up.type,
                        getattr(survivor, pick_up.type) + pick_up.incr)
                cls.sounds[pick_up.type].play()
                # Free the tile for future spawns.
                cls.spawn_tiles.append(pick_up.spawn_tile)
                del_pick_up.add(pick_up)
                del pick_up
        cls.instances -= del_pick_up
if __name__ == "__main__":
Tile.create()
import doctest
doctest.testmod()
| {
"repo_name": "thdb-theo/Zombie-Survival",
"path": "src/pickup.py",
"copies": "1",
"size": "3543",
"license": "mit",
"hash": 2143183487084644600,
"line_mean": 34.5257731959,
"line_max": 86,
"alpha_frac": 0.578041208,
"autogenerated": false,
"ratio": 3.5608040201005027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9635854081612947,
"avg_score": 0.0005982292975112508,
"num_lines": 97
} |
from functools import partial
from virtman.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CHAIN_TRIES = 3
class Chain(object):
    """
    Build an execution chain of (do, undo) steps and run it in order,
    rolling the whole chain back (in reverse) if any step keeps failing.
    """

    def __init__(self):
        # List of (do, undo) callable pairs, executed in insertion order.
        self._chain = []

    def add_step(self, do, undo):
        """Append a step; both `do` and `undo` must be callables."""
        assert callable(do) and callable(undo), "%s and %s must be callable" % \
                                                (do, undo)
        self._chain.append((do, undo))

    def do(self):
        """Run every step in order.

        Each step is retried up to CHAIN_TRIES times (its `undo` runs after
        every failed attempt). If a step still fails, the `undo` of every
        step up to and including it is invoked in reverse order and
        RuntimeError is raised.
        """
        LOG.debug("Virtman: Chain is: %s" %
                  [(self._get_func(x), self._get_func(y)) for (x, y) in
                   self._chain])
        for i in range(len(self._chain)):
            tries = 0
            suc = False
            while tries < CHAIN_TRIES and suc is False:
                try:
                    self._chain[i][0]()
                    suc = True
                except Exception:
                    self._chain[i][1]()
                    tries += 1
                finally:
                    if tries < CHAIN_TRIES:
                        LOG.debug("Virtman: Chain try:%s for %s suc:%s" % (
                            tries, self._get_func(self._chain[i][0]), suc))
            if tries == CHAIN_TRIES and suc is False:
                failed = self._get_func(self._chain[i][0])
                # Roll back everything done so far, in reverse order.
                # NOTE: this also re-invokes the failed step's own undo.
                while i >= 0:
                    self._chain[i][1]()
                    i -= 1
                # BUG FIX: the original used a bare `raise` with no active
                # exception, which itself blows up with "No active exception
                # to re-raise"; raise an explicit error instead.
                raise RuntimeError(
                    "Virtman: Chain step %s failed after %s tries" % (
                        failed, CHAIN_TRIES))

    @staticmethod
    def _get_func(callable_fun):
        # Unwrap functools.partial objects for readable log output.
        if hasattr(callable_fun, 'func'):
            return callable_fun.func
        return callable_fun
if __name__ == "__main__":
" Test and demostrate Chain itself "
import sys
from oslo.config import cfg
CONF = cfg.CONF
CONF(sys.argv[1:], project=__name__, default_config_files=[
'/etc/virtman/virtman.conf'])
logging.setup(__name__)
c = Chain()
def asdf(n):
print '%s' % n
def qwer(n):
print 'except %s' % n
raise OverflowError()
print sys.argv
c.add_step(partial(asdf, 1), partial(asdf, -1))
c.add_step(partial(asdf, 2), partial(asdf, -2))
c.add_step(partial(asdf, 3), partial(asdf, -3))
c.do()
# c.add_step(lambda: asdf(1), lambda: asdf(-1))
# c.add_step(lambda: asdf(2), lambda: asdf(-2))
# c.add_step(lambda: asdf(3), lambda: asdf(-3))
# c.do() | {
"repo_name": "vmthunder/packages",
"path": "virtman/virtman/utils/chain.py",
"copies": "2",
"size": "2480",
"license": "apache-2.0",
"hash": 3146332019025019000,
"line_mean": 27.9036144578,
"line_max": 80,
"alpha_frac": 0.4681451613,
"autogenerated": false,
"ratio": 3.8689547581903274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014343086632243257,
"num_lines": 83
} |
from functools import partial
import climenu
def print_var(variable):
    """Print `variable`, coerced to str."""
    print(str(variable))
def build_items(count):
    """Generate `count` (title, callback) menu entries at runtime.

    Menu items here stand in for things discovered at runtime (e.g. files
    in a directory). The eventual callback takes one argument, so
    ``partial`` is used to bind that argument now and call it later.
    """
    return [
        ('Run item %i' % (index + 1),
         partial(print_var, 'Item %i' % (index + 1)))
        for index in range(count)
    ]
@climenu.menu(title='Do the first thing')
def first_thing():
    # A simple static menu item: just prints a confirmation when chosen.
    print('Did the first thing!')
@climenu.group(items_getter=build_items, items_getter_kwargs={'count': 7})
def build_group():
    '''A dynamic menu'''
    # This is just a placeholder for a MenuGroup. The items in the menu
    # will be dynamically generated when this module loads by calling
    # `build_items` with the kwargs above (count=7). The docstring is
    # kept as-is: climenu uses it as the group's title.
    pass
if __name__ == '__main__':
    # Hand control to climenu's interactive menu loop.
    climenu.run()
| {
"repo_name": "mtik00/pyclimenu",
"path": "examples/dynamic-group.py",
"copies": "1",
"size": "1259",
"license": "mit",
"hash": 9176717925029500000,
"line_mean": 25.3695652174,
"line_max": 74,
"alpha_frac": 0.597299444,
"autogenerated": false,
"ratio": 3.9841772151898733,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5081476659189873,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import itertools
import os
from _pydev_bundle._pydev_imports_tipper import TYPE_IMPORT, TYPE_CLASS, TYPE_FUNCTION, TYPE_ATTR, \
TYPE_BUILTIN, TYPE_PARAM
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_bundle.pydev_override import overrides
from _pydevd_bundle._debug_adapter import pydevd_schema
from _pydevd_bundle._debug_adapter.pydevd_schema import ModuleEvent, ModuleEventBody, Module, \
OutputEventBody, OutputEvent, ContinuedEventBody
from _pydevd_bundle.pydevd_comm_constants import CMD_THREAD_CREATE, CMD_RETURN, CMD_MODULE_EVENT, \
CMD_WRITE_TO_CONSOLE, CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE, CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE, \
CMD_STEP_RETURN, CMD_STEP_CAUGHT_EXCEPTION, CMD_ADD_EXCEPTION_BREAK, CMD_SET_BREAK, \
CMD_SET_NEXT_STATEMENT, CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, \
CMD_THREAD_RESUME_SINGLE_NOTIFICATION, CMD_THREAD_KILL, CMD_STOP_ON_START, CMD_INPUT_REQUESTED, \
CMD_EXIT, CMD_STEP_INTO_COROUTINE, CMD_STEP_RETURN_MY_CODE, CMD_SMART_STEP_INTO
from _pydevd_bundle.pydevd_constants import get_thread_id, dict_values, ForkSafeLock
from _pydevd_bundle.pydevd_net_command import NetCommand, NULL_NET_COMMAND
from _pydevd_bundle.pydevd_net_command_factory_xml import NetCommandFactory
from _pydevd_bundle.pydevd_utils import get_non_pydevd_threads
import pydevd_file_utils
from _pydevd_bundle.pydevd_comm import build_exception_info_response
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle import pydevd_frame_utils, pydevd_constants, pydevd_utils
import linecache
from _pydevd_bundle.pydevd_thread_lifecycle import pydevd_find_thread_by_id
# Python 2/3 compatibility: StringIO moved to the `io` module in Python 3.
try:
    from StringIO import StringIO
except:
    from io import StringIO
class ModulesManager(object):
    """Tracks which modules the debugger has seen and builds the DAP
    'module' events announcing newly discovered ones."""

    def __init__(self):
        self._lock = ForkSafeLock()
        # filename (utf-8) -> Module dict already reported to the client.
        self._modules = {}
        # Monotonically increasing module id generator.
        self._next_id = partial(next, itertools.count(0))

    def track_module(self, filename_in_utf8, module_name, frame):
        '''
        :return list(NetCommand):
            Returns a list with the module events to be sent.
        '''
        if filename_in_utf8 in self._modules:
            return []

        module_events = []
        with self._lock:
            # Must check again after getting the lock.
            if filename_in_utf8 in self._modules:
                # BUG FIX: this was a bare `return` (i.e. None) while every
                # caller does module_events.extend(track_module(...)), which
                # would raise TypeError when two threads raced here.
                return []

            try:
                version = str(frame.f_globals.get('__version__', ''))
            except:  # noqa -- pydevd convention: never let introspection break the debugger.
                version = '<unknown>'

            try:
                package_name = str(frame.f_globals.get('__package__', ''))
            except:  # noqa
                package_name = '<unknown>'

            module_id = self._next_id()

            module = Module(module_id, module_name, filename_in_utf8)
            if version:
                module.version = version

            if package_name:
                # Note: package doesn't appear in the docs but seems to be expected?
                module.kwargs['package'] = package_name

            module_event = ModuleEvent(ModuleEventBody('new', module))

            module_events.append(NetCommand(CMD_MODULE_EVENT, 0, module_event, is_json=True))

            self._modules[filename_in_utf8] = module.to_dict()
        return module_events

    def get_modules_info(self):
        '''
        :return list(Module)
        '''
        with self._lock:
            return dict_values(self._modules)
class NetCommandFactoryJson(NetCommandFactory):
    '''
    Factory for commands which will provide messages as json (they should be
    similar to the debug adapter where possible, although some differences
    are currently Ok).

    Note that it currently overrides the xml version so that messages
    can be done one at a time (any message not overridden will currently
    use the xml version) -- after having all messages handled, it should
    no longer use NetCommandFactory as the base class.
    '''

    def __init__(self):
        NetCommandFactory.__init__(self)
        # Shared tracker that emits 'module' events for newly seen files.
        self.modules_manager = ModulesManager()

    @overrides(NetCommandFactory.make_version_message)
    def make_version_message(self, seq):
        return NULL_NET_COMMAND  # Not a part of the debug adapter protocol

    @overrides(NetCommandFactory.make_protocol_set_message)
    def make_protocol_set_message(self, seq):
        return NULL_NET_COMMAND  # Not a part of the debug adapter protocol

    @overrides(NetCommandFactory.make_thread_created_message)
    def make_thread_created_message(self, thread):
        # Note: the thread id for the debug adapter must be an int
        # (make the actual id from get_thread_id respect that later on).
        msg = pydevd_schema.ThreadEvent(
            pydevd_schema.ThreadEventBody('started', get_thread_id(thread)),
        )
        return NetCommand(CMD_THREAD_CREATE, 0, msg, is_json=True)

    @overrides(NetCommandFactory.make_thread_killed_message)
    def make_thread_killed_message(self, tid):
        # DAP 'thread' event with reason 'exited'.
        msg = pydevd_schema.ThreadEvent(
            pydevd_schema.ThreadEventBody('exited', tid),
        )
        return NetCommand(CMD_THREAD_KILL, 0, msg, is_json=True)

    @overrides(NetCommandFactory.make_list_threads_message)
    def make_list_threads_message(self, py_db, seq):
        # DAP 'threads' response: only non-pydevd, still-alive threads.
        threads = []
        for thread in get_non_pydevd_threads():
            if is_thread_alive(thread):
                thread_id = get_thread_id(thread)

                # Notify that it's created (no-op if we already notified before).
                py_db.notify_thread_created(thread_id, thread)

                thread_schema = pydevd_schema.Thread(id=thread_id, name=thread.getName())
                threads.append(thread_schema.to_dict())

        body = pydevd_schema.ThreadsResponseBody(threads)
        response = pydevd_schema.ThreadsResponse(
            request_seq=seq, success=True, command='threads', body=body)
        return NetCommand(CMD_RETURN, 0, response, is_json=True)

    @overrides(NetCommandFactory.make_get_completions_message)
    def make_get_completions_message(self, seq, completions, qualifier, start):
        # Map pydev completion types onto DAP CompletionItemType values.
        COMPLETION_TYPE_LOOK_UP = {
            TYPE_IMPORT: pydevd_schema.CompletionItemType.MODULE,
            TYPE_CLASS: pydevd_schema.CompletionItemType.CLASS,
            TYPE_FUNCTION: pydevd_schema.CompletionItemType.FUNCTION,
            TYPE_ATTR: pydevd_schema.CompletionItemType.FIELD,
            TYPE_BUILTIN: pydevd_schema.CompletionItemType.KEYWORD,
            TYPE_PARAM: pydevd_schema.CompletionItemType.VARIABLE,
        }

        qualifier = qualifier.lower()
        qualifier_len = len(qualifier)
        targets = []
        # Keep only completions whose label matches the typed qualifier
        # (case-insensitive prefix match).
        for completion in completions:
            label = completion[0]
            if label.lower().startswith(qualifier):
                completion = pydevd_schema.CompletionItem(
                    label=label, type=COMPLETION_TYPE_LOOK_UP[completion[3]], start=start, length=qualifier_len)
                targets.append(completion.to_dict())

        body = pydevd_schema.CompletionsResponseBody(targets)
        response = pydevd_schema.CompletionsResponse(
            request_seq=seq, success=True, command='completions', body=body)
        return NetCommand(CMD_RETURN, 0, response, is_json=True)

    def _format_frame_name(self, fmt, initial_name, module_name, line, path):
        # Apply the client-requested StackFrameFormat flags ('module',
        # 'line') to the plain frame name.
        if fmt is None:
            return initial_name

        frame_name = initial_name
        if fmt.get('module', False):
            if module_name:
                if initial_name == '<module>':
                    frame_name = module_name
                else:
                    frame_name = '%s.%s' % (module_name, initial_name)
            else:
                # No module name available: fall back to the file basename.
                basename = os.path.basename(path)
                basename = basename[0:-3] if basename.lower().endswith('.py') else basename
                if initial_name == '<module>':
                    frame_name = '%s in %s' % (initial_name, basename)
                else:
                    frame_name = '%s.%s' % (basename, initial_name)

        if fmt.get('line', False):
            frame_name = '%s : %d' % (frame_name, line)

        return frame_name

    @overrides(NetCommandFactory.make_get_thread_stack_message)
    def make_get_thread_stack_message(self, py_db, seq, thread_id, topmost_frame, fmt, must_be_suspended=False, start_frame=0, levels=0):
        # Build the DAP 'stackTrace' response, tracking modules on the way.
        frames = []
        module_events = []

        try:
            # : :type suspended_frames_manager: SuspendedFramesManager
            suspended_frames_manager = py_db.suspended_frames_manager
            frames_list = suspended_frames_manager.get_frames_list(thread_id)
            if frames_list is None:
                # Could not find stack of suspended frame...
                if must_be_suspended:
                    return None
                else:
                    frames_list = pydevd_frame_utils.create_frames_list_from_frame(topmost_frame)

            for frame_id, frame, method_name, original_filename, filename_in_utf8, lineno, applied_mapping, show_as_current_frame in self._iter_visible_frames_info(
                    py_db, frames_list
                ):

                try:
                    module_name = str(frame.f_globals.get('__name__', ''))
                except:
                    module_name = '<unknown>'

                module_events.extend(self.modules_manager.track_module(filename_in_utf8, module_name, frame))

                presentation_hint = None
                if not getattr(frame, 'IS_PLUGIN_FRAME', False):  # Never filter out plugin frames!
                    if py_db.is_files_filter_enabled and py_db.apply_files_filter(frame, original_filename, False):
                        continue

                    if not py_db.in_project_scope(frame):
                        # De-emphasize frames outside the user's project.
                        presentation_hint = 'subtle'

                formatted_name = self._format_frame_name(fmt, method_name, module_name, lineno, filename_in_utf8)
                if show_as_current_frame:
                    formatted_name += ' (Current frame)'
                source_reference = pydevd_file_utils.get_client_filename_source_reference(filename_in_utf8)

                if not source_reference and not applied_mapping and not os.path.exists(original_filename):
                    if getattr(frame.f_code, 'co_lnotab', None):
                        # Create a source-reference to be used where we provide the source by decompiling the code.
                        # Note: When the time comes to retrieve the source reference in this case, we'll
                        # check the linecache first (see: get_decompiled_source_from_frame_id).
                        source_reference = pydevd_file_utils.create_source_reference_for_frame_id(frame_id, original_filename)
                    else:
                        # Check if someone added a source reference to the linecache (Python attrs does this).
                        if linecache.getline(original_filename, 1):
                            source_reference = pydevd_file_utils.create_source_reference_for_linecache(
                                original_filename)

                frames.append(pydevd_schema.StackFrame(
                    frame_id, formatted_name, lineno, column=1, source={
                        'path': filename_in_utf8,
                        'sourceReference': source_reference,
                    },
                    presentationHint=presentation_hint).to_dict())
        finally:
            # Drop the frame reference promptly to avoid reference cycles.
            topmost_frame = None

        for module_event in module_events:
            py_db.writer.add_command(module_event)

        total_frames = len(frames)
        stack_frames = frames
        if bool(levels):
            # Client asked for a paged slice of the stack.
            start = start_frame
            end = min(start + levels, total_frames)
            stack_frames = frames[start:end]

        response = pydevd_schema.StackTraceResponse(
            request_seq=seq,
            success=True,
            command='stackTrace',
            body=pydevd_schema.StackTraceResponseBody(stackFrames=stack_frames, totalFrames=total_frames))
        return NetCommand(CMD_RETURN, 0, response, is_json=True)

    @overrides(NetCommandFactory.make_warning_message)
    def make_warning_message(self, msg):
        # Warnings are routed to the client's debug console.
        category = 'console'
        body = OutputEventBody(msg, category)
        event = OutputEvent(body)
        return NetCommand(CMD_WRITE_TO_CONSOLE, 0, event, is_json=True)

    @overrides(NetCommandFactory.make_io_message)
    def make_io_message(self, msg, ctx):
        # ctx == 1 -> stdout; anything else -> stderr.
        category = 'stdout' if int(ctx) == 1 else 'stderr'
        body = OutputEventBody(msg, category)
        event = OutputEvent(body)
        return NetCommand(CMD_WRITE_TO_CONSOLE, 0, event, is_json=True)

    # Internal step commands that map onto the DAP 'step'/'entry' reasons.
    # NOTE(review): CMD_STEP_INTO_MY_CODE appears twice (harmless in a set).
    _STEP_REASONS = set([
        CMD_STEP_INTO,
        CMD_STEP_INTO_MY_CODE,
        CMD_STEP_OVER,
        CMD_STEP_OVER_MY_CODE,
        CMD_STEP_RETURN,
        CMD_STEP_RETURN_MY_CODE,
        CMD_STEP_INTO_MY_CODE,
        CMD_STOP_ON_START,
        CMD_STEP_INTO_COROUTINE,
        CMD_SMART_STEP_INTO,
    ])
    # Internal commands that map onto the DAP 'exception' reason.
    _EXCEPTION_REASONS = set([
        CMD_STEP_CAUGHT_EXCEPTION,
        CMD_ADD_EXCEPTION_BREAK,
    ])

    @overrides(NetCommandFactory.make_thread_suspend_single_notification)
    def make_thread_suspend_single_notification(self, py_db, thread_id, stop_reason):
        exc_desc = None
        exc_name = None
        thread = pydevd_find_thread_by_id(thread_id)
        info = set_additional_thread_info(thread)

        # Translate the internal stop command into a DAP 'stopped' reason.
        if stop_reason in self._STEP_REASONS:
            if info.pydev_original_step_cmd == CMD_STOP_ON_START:

                # Just to make sure that's not set as the original reason anymore.
                info.pydev_original_step_cmd = -1
                stop_reason = 'entry'
            else:
                stop_reason = 'step'
        elif stop_reason in self._EXCEPTION_REASONS:
            stop_reason = 'exception'
        elif stop_reason == CMD_SET_BREAK:
            stop_reason = 'breakpoint'
        elif stop_reason == CMD_SET_NEXT_STATEMENT:
            stop_reason = 'goto'
        else:
            stop_reason = 'pause'

        if stop_reason == 'exception':
            exception_info_response = build_exception_info_response(
                py_db, thread_id, -1, set_additional_thread_info, self._iter_visible_frames_info, max_frames=-1)
            # NOTE(review): the bare expression below has no effect —
            # likely a leftover from debugging.
            exception_info_response

            exc_name = exception_info_response.body.exceptionId
            exc_desc = exception_info_response.body.description

        body = pydevd_schema.StoppedEventBody(
            reason=stop_reason,
            description=exc_desc,
            threadId=thread_id,
            text=exc_name,
            allThreadsStopped=True,
            # Keep focus where it is unless this is a user-visible stop.
            preserveFocusHint=stop_reason not in ['step', 'exception', 'breakpoint', 'entry', 'goto'],
        )
        event = pydevd_schema.StoppedEvent(body)
        return NetCommand(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, 0, event, is_json=True)

    @overrides(NetCommandFactory.make_thread_resume_single_notification)
    def make_thread_resume_single_notification(self, thread_id):
        body = ContinuedEventBody(threadId=thread_id, allThreadsContinued=True)
        event = pydevd_schema.ContinuedEvent(body)
        return NetCommand(CMD_THREAD_RESUME_SINGLE_NOTIFICATION, 0, event, is_json=True)

    @overrides(NetCommandFactory.make_set_next_stmnt_status_message)
    def make_set_next_stmnt_status_message(self, seq, is_success, exception_msg):
        # DAP 'goto' response; carries the failure message when unsuccessful.
        response = pydevd_schema.GotoResponse(
            request_seq=int(seq),
            success=is_success,
            command='goto',
            body={},
            message=(None if is_success else exception_msg))
        return NetCommand(CMD_RETURN, 0, response, is_json=True)

    @overrides(NetCommandFactory.make_send_curr_exception_trace_message)
    def make_send_curr_exception_trace_message(self, *args, **kwargs):
        return NULL_NET_COMMAND  # Not a part of the debug adapter protocol

    @overrides(NetCommandFactory.make_send_curr_exception_trace_proceeded_message)
    def make_send_curr_exception_trace_proceeded_message(self, *args, **kwargs):
        return NULL_NET_COMMAND  # Not a part of the debug adapter protocol

    @overrides(NetCommandFactory.make_send_breakpoint_exception_message)
    def make_send_breakpoint_exception_message(self, *args, **kwargs):
        return NULL_NET_COMMAND  # Not a part of the debug adapter protocol

    @overrides(NetCommandFactory.make_process_created_message)
    def make_process_created_message(self, *args, **kwargs):
        return NULL_NET_COMMAND  # Not a part of the debug adapter protocol

    @overrides(NetCommandFactory.make_thread_suspend_message)
    def make_thread_suspend_message(self, *args, **kwargs):
        return NULL_NET_COMMAND  # Not a part of the debug adapter protocol

    @overrides(NetCommandFactory.make_thread_run_message)
    def make_thread_run_message(self, *args, **kwargs):
        return NULL_NET_COMMAND  # Not a part of the debug adapter protocol

    @overrides(NetCommandFactory.make_reloaded_code_message)
    def make_reloaded_code_message(self, *args, **kwargs):
        return NULL_NET_COMMAND  # Not a part of the debug adapter protocol

    @overrides(NetCommandFactory.make_input_requested_message)
    def make_input_requested_message(self, started):
        event = pydevd_schema.PydevdInputRequestedEvent(body={})
        return NetCommand(CMD_INPUT_REQUESTED, 0, event, is_json=True)

    @overrides(NetCommandFactory.make_skipped_step_in_because_of_filters)
    def make_skipped_step_in_because_of_filters(self, py_db, frame):
        msg = 'Frame skipped from debugging during step-in.'
        if py_db.get_use_libraries_filter():
            msg += ('\nNote: may have been skipped because of "justMyCode" option (default == true). '
                    'Try setting \"justMyCode\": false in the debug configuration (e.g., launch.json).\n')
        return self.make_warning_message(msg)

    @overrides(NetCommandFactory.make_evaluation_timeout_msg)
    def make_evaluation_timeout_msg(self, py_db, expression, curr_thread):
        msg = '''Evaluating: %s did not finish after %.2f seconds.
This may mean a number of things:
- This evaluation is really slow and this is expected.
    In this case it's possible to silence this error by raising the timeout, setting the
    PYDEVD_WARN_EVALUATION_TIMEOUT environment variable to a bigger value.

- The evaluation may need other threads running while it's running:
    In this case, it's possible to set the PYDEVD_UNBLOCK_THREADS_TIMEOUT
    environment variable so that if after a given timeout an evaluation doesn't finish,
    other threads are unblocked or you can manually resume all threads.

    Alternatively, it's also possible to skip breaking on a particular thread by setting a
    `pydev_do_not_trace = True` attribute in the related threading.Thread instance
    (if some thread should always be running and no breakpoints are expected to be hit in it).

- The evaluation is deadlocked:
    In this case you may set the PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT
    environment variable to true so that a thread dump is shown along with this message and
    optionally, set the PYDEVD_INTERRUPT_THREAD_TIMEOUT to some value so that the debugger
    tries to interrupt the evaluation (if possible) when this happens.
''' % (expression, pydevd_constants.PYDEVD_WARN_EVALUATION_TIMEOUT)

        if pydevd_constants.PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT:
            # Optionally append a dump of user threads to help diagnosis.
            stream = StringIO()
            pydevd_utils.dump_threads(stream, show_pydevd_threads=False)
            msg += '\n\n%s\n' % stream.getvalue()
        return self.make_warning_message(msg)

    @overrides(NetCommandFactory.make_exit_command)
    def make_exit_command(self, py_db):
        event = pydevd_schema.TerminatedEvent(pydevd_schema.TerminatedEventBody())
        return NetCommand(CMD_EXIT, 0, event, is_json=True)
| {
"repo_name": "glenngillen/dotfiles",
"path": ".vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_json.py",
"copies": "1",
"size": "20446",
"license": "mit",
"hash": 8683358603630664000,
"line_mean": 44.0495495495,
"line_max": 164,
"alpha_frac": 0.6284358799,
"autogenerated": false,
"ratio": 4.013741656851198,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016799927661328223,
"num_lines": 444
} |
from functools import partial
import re
from gooey.gui.lang import i18n
from gooey.gui.util.filedrop import FileDrop
from gooey.gui.util.quoting import maybe_quote
__author__ = 'Chris'
from abc import ABCMeta, abstractmethod
import os
import wx
import wx.lib.agw.multidirdialog as MDD
from gooey.gui.widgets.calender_dialog import CalendarDlg
class WidgetPack(object):
    """
    Interface specifying the contract to which
    all `WidgetPack`s will adhere
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def build(self, parent, data):
        pass

    @abstractmethod
    def getValue(self):
        pass

    def onResize(self, evt):
        # Default: widget packs ignore resize events.
        pass

    @staticmethod
    def get_command(data):
        # First supplied command string, or '' when there are none.
        commands = data['commands']
        return commands[0] if commands else ''
class BaseChooser(WidgetPack):
    """Base for 'text box + browse button' widgets.

    build() creates the controls (text_box, button) and wires the button
    to onButton(); getValue() renders the shell-ready argument string.
    """

    def __init__(self, button_text=''):
        # BUG FIX: `button_text` used to be accepted and then silently
        # discarded (always replaced with the localized 'browse'); honor
        # it when supplied, falling back to the translation otherwise.
        self.button_text = button_text or i18n._('browse')
        self.option_string = None
        self.parent = None
        self.text_box = None
        self.button = None

    def build(self, parent, data=None):
        """Create the text box (with file drag & drop) and browse button;
        return the horizontal sizer containing both."""
        self.parent = parent
        self.option_string = data['commands'][0] if data['commands'] else ''
        self.text_box = wx.TextCtrl(self.parent)
        self.text_box.AppendText(safe_default(data, ''))
        self.text_box.SetMinSize((0, -1))
        dt = FileDrop(self.text_box)
        self.text_box.SetDropTarget(dt)
        self.button = wx.Button(self.parent, label=self.button_text, size=(73, 23))

        widget_sizer = wx.BoxSizer(wx.HORIZONTAL)
        widget_sizer.Add(self.text_box, 1, wx.EXPAND)
        widget_sizer.AddSpacer(10)
        widget_sizer.Add(self.button, 0)
        parent.Bind(wx.EVT_BUTTON, self.onButton, self.button)
        return widget_sizer

    def getValue(self):
        """Return '<option> <maybe-quoted value>', the quoted value alone,
        or '' when the text box is empty."""
        value = self.text_box.GetValue()
        if self.option_string and value:
            return '{0} {1}'.format(self.option_string, maybe_quote(value))
        else:
            return maybe_quote(value) if value else ''

    def onButton(self, evt):
        # Subclasses implement the browse-button behavior.
        raise NotImplementedError

    def __repr__(self):
        return self.__class__.__name__
class BaseFileChooser(BaseChooser):
    """Chooser whose browse button opens the wx dialog supplied at build time."""
    def __init__(self, dialog):
        BaseChooser.__init__(self)
        # `dialog` is a factory: called with the parent panel, returns a wx dialog.
        self.dialog = dialog
    def onButton(self, evt):
        # Open the dialog; only update the text box when the user confirms.
        dlg = self.dialog(self.parent)
        result = (self.get_path(dlg)
                  if dlg.ShowModal() == wx.ID_OK
                  else None)
        if result:
            self.text_box.SetValue(result)
    def get_path(self, dlg):
        """Extract the chosen path(s) from the dialog, quoting where needed."""
        if isinstance(dlg, wx.DirDialog):
            return maybe_quote(dlg.GetPath())
        else:
            paths = dlg.GetPaths()
            # Multi-select dialogs yield a space-separated, quoted list.
            return maybe_quote(paths[0]) if len(paths) < 2 else ' '.join(map(maybe_quote, paths))
class MyMultiDirChooser(MDD.MultiDirDialog):
    """MultiDirDialog variant exposing the selected directory paths directly."""
    def __init__(self, *args, **kwargs):
        # FIX: this was misspelled `__init` (name-mangled, never invoked), so
        # the override silently did nothing and only worked because the base
        # __init__ happened to be compatible.
        super(MyMultiDirChooser, self).__init__(*args, **kwargs)
    def GetPaths(self):
        """Return the list of directories selected in the dialog."""
        return self.dirCtrl.GetPaths()
def build_dialog(style, exist_constraint=True, **kwargs):
    """Return a factory that builds a wx.FileDialog for a given panel.

    When *exist_constraint* is truthy, wx.FD_FILE_MUST_EXIST is OR'd into the
    style so only existing files can be chosen. Extra keyword arguments are
    forwarded to wx.FileDialog unchanged.
    """
    if not exist_constraint:
        return lambda panel: wx.FileDialog(panel, style=style, **kwargs)
    return lambda panel: wx.FileDialog(panel, style=style | wx.FD_FILE_MUST_EXIST, **kwargs)
# Pre-baked chooser packs: each binds BaseFileChooser to a specific dialog factory.
FileChooserPayload = partial(BaseFileChooser, dialog=build_dialog(wx.FD_OPEN))
# Save dialog pre-filled with a placeholder filename; existence not required.
FileSaverPayload = partial(BaseFileChooser, dialog=build_dialog(wx.FD_SAVE, False, defaultFile="Enter Filename"))
MultiFileSaverPayload = partial(BaseFileChooser, dialog=build_dialog(wx.FD_MULTIPLE, False))
DirChooserPayload = partial(BaseFileChooser, dialog=lambda parent: wx.DirDialog(parent, 'Select Directory', style=wx.DD_DEFAULT_STYLE))
# Calendar picker reuses the chooser plumbing with a custom dialog class.
DateChooserPayload = partial(BaseFileChooser, dialog=CalendarDlg)
MultiDirChooserPayload = partial(BaseFileChooser, dialog=lambda parent: MyMultiDirChooser(parent, title="Select Directories", defaultPath=os.getcwd(), agwStyle=MDD.DD_MULTIPLE|MDD.DD_DIR_MUST_EXIST))
class TextInputPayload(WidgetPack):
    """Plain single-line text input pack."""
    def __init__(self):
        self.widget = None
        self.option_string = None
    def build(self, parent, data):
        self.option_string = self.get_command(data)
        self.widget = wx.TextCtrl(parent)
        # Accept files dragged from the OS into the text field.
        dt = FileDrop(self.widget)
        self.widget.SetDropTarget(dt)
        self.widget.SetMinSize((0, -1))
        self.widget.SetDoubleBuffered(True)
        self.widget.AppendText(safe_default(data, ''))
        return self.widget
    def getValue(self):
        """Return '<option> <value>' or the value wrapped in double quotes."""
        value = self.widget.GetValue()
        if value and self.option_string:
            return '{} {}'.format(self.option_string, value)
        else:
            # NOTE(review): positional values are always quoted here, while
            # BaseChooser uses maybe_quote -- confirm the asymmetry is wanted.
            return '"{}"'.format(value) if value else ''
    def _SetValue(self, text):
        # used for testing
        self.widget.SetLabelText(text)
class DropdownPayload(WidgetPack):
    """Combo-box pack; the placeholder entry maps to an empty argument."""
    # Placeholder shown before the user picks anything.
    default_value = 'Select Option'
    def __init__(self):
        self.option_string = None
        self.widget = None
    def build(self, parent, data):
        self.option_string = self.get_command(data)
        self.widget = wx.ComboBox(
            parent=parent,
            id=-1,
            value=safe_default(data, self.default_value),
            choices=data['choices'],
            style=wx.CB_DROPDOWN
        )
        return self.widget
    def getValue(self):
        """Return '' for the placeholder, else '<option> <value>' or value."""
        if self.widget.GetValue() == self.default_value:
            return ''
        elif self.widget.GetValue() and self.option_string:
            return '{} {}'.format(self.option_string, self.widget.GetValue())
        else:
            return self.widget.GetValue()
    def _SetValue(self, text):
        # used for testing
        self.widget.SetLabelText(text)
class CounterPayload(WidgetPack):
    """Dropdown of 1-10 that expands into a repeated short flag (e.g. -vvv)."""
    def __init__(self):
        self.option_string = None
        self.widget = None
    def build(self, parent, data):
        self.option_string = self.get_command(data)
        self.widget = wx.ComboBox(
            parent=parent,
            id=-1,
            value=safe_default(data, ''),
            # NOTE(review): bare map() is fine on Python 2 but yields an
            # iterator on Python 3, which wx may reject -- confirm target
            # interpreter or wrap in list().
            choices=map(str, range(1, 11)),
            style=wx.CB_DROPDOWN
        )
        return self.widget
    def getValue(self):
        '''
        Returns
          str(option_string * DropDown Value)
        e.g.
          -vvvvv
        '''
        dropdown_value = self.widget.GetValue()
        # Nothing selected / non-numeric input contributes no argument.
        if not str(dropdown_value).isdigit():
            return ''
        # Strip dashes so '-v' * 3 becomes 'vvv', then re-prefix once.
        arg = str(self.option_string).replace('-', '')
        repeated_args = arg * int(dropdown_value)
        return '-' + repeated_args
def safe_default(data, default):
    """Return data['default'] as a string, or *default* when it is falsy.

    The explicit truthiness check matters because str(None) is the string
    'None', which must never leak into a text field.
    """
    # FIX: the fallback was hard-coded to '' and the *default* argument was
    # ignored, so e.g. DropdownPayload never showed its 'Select Option'
    # placeholder.
    return str(data['default']) if data['default'] else default
| {
"repo_name": "intfrr/Gooey",
"path": "gooey/gui/widgets/widget_pack.py",
"copies": "5",
"size": "6317",
"license": "mit",
"hash": 3012179099680043000,
"line_mean": 27.3813953488,
"line_max": 199,
"alpha_frac": 0.6509419028,
"autogenerated": false,
"ratio": 3.542905215928211,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6693847118728211,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import xml.etree.ElementTree as ET
# from copy import deepcopy
# BUG: Nested <anchor> tags are disallowed, which can break headings
# e.g. footnotes in heading.
# Possible fix: wrap anchor heading in §-span element instead of heading.
def _class_iter(it, class_):
for child in filter(lambda el: class_ in el.get("class", []), it):
yield child
def _prepend(el, child):
child.tail = el.text
el.text = None
el.insert(0, child)
def _branch(el, branch):
branch.text = el.text
branch.extend(list(el))
#branch.tail = el.tail
#el.text = el.tail = None
el.text = None
for child in list(el): el.remove(child)
el.append(branch)
def _next(it):
return next(it, None)
def table_of_contents(tree):
    """Replace a <toc> element with a nested, numbered table of contents.

    h1-h6 headings inside the "content" section are collected into a tree by
    nesting level; each heading receives an anchor id, a self-link and a
    section number, and the <toc> element is rewritten into a
    <details><summary>...</summary><nav><ol>...</ol></nav></details> block.
    Returns the (mutated) tree.
    """
    # BUG bool(Element) == len(Element) -- hence the explicit `is None` checks.
    if (toc := _next(tree.iter("toc"))) is None:
        return tree
    if (content := _next(_class_iter(tree.iter("section"), "content"))) is None:
        return tree
    HEADINGS = {"h1", "h2", "h3", "h4", "h5", "h6"}
    toc_tree = {}
    path = []
    # Element.iter() walks depth-first, so headings arrive in document order.
    for h in filter(lambda e: e.tag in HEADINGS, content.iter()):
        # Tags are expected lowercase; "h2" <= "h3" compares level lexically.
        # Headings marked no-toc are skipped entirely.
        if "no-toc" in h.get("class", []): continue
        while path and h.tag <= path[-1].tag:
            path.pop()
        current = toc_tree
        for key in path:
            current = current[key]
        current[h] = {}
        path.append(h)
    def listify(d, level):
        # Render one nesting level of the collected heading tree as an <ol>.
        ol = ET.Element("ol")
        for i, (el, children) in enumerate(d.items(), 1):
            current_level = f"{level}.{i}" if level else str(i)
            # Make <list><a>
            li = ET.SubElement(ol, "li")
            anchor = ET.SubElement(li, "a")
            # Add level to id and hyperlink reference to toc
            # (attributes are kept as lists by this pipeline's convention).
            el.attrib.setdefault("id", []).append(f"toc-{current_level}")
            anchor.set("href", f"#toc-{current_level}")
            # Copy header text (and inline children, e.g. footnotes) to anchor
            anchor.text = el.text
            if len(el): anchor.extend(list(el))
            # Add link to self (wrap in <a>)
            # FIX: was `h.get(...)`, i.e. the *last* heading of the document
            # leaked into every entry's no-ref/no-num test; must inspect the
            # current heading `el` (cf. the same logic in treenumerate).
            if "no-ref" not in el.get("class", []):
                ref = ET.Element("a")
                ref.set("href", f"#toc-{current_level}")
                _branch(el, ref)
            # Add numbering to heading
            if "no-num" not in el.get("class", []):
                num = ET.Element("span", attrib={"class": "section-num"})
                num.text = current_level + " "
                _prepend(el, num)
            # Recurse on children
            if children: li.append(listify(children, current_level))
        return ol
    toc.tag = "details"
    summary = ET.SubElement(toc, "summary")
    summary.text = "Table of contents"
    toc_nav = ET.Element("nav")
    toc_nav.append(listify(toc_tree, ""))
    toc.append(toc_nav)
    return tree
def order_headings(tree):
    """Ensure headers after <h1> are <h2> and so on. Not implemented yet."""
    # FIX: a bare `NotImplemented` expression is a silent no-op; raising makes
    # accidental use fail loudly (this stub is not in `modifiers`).
    raise NotImplementedError
def BEM_classing(tree):
    """Expand BEM shorthand classes. Not implemented yet.

    .block__element--modifier1--modifier2 ->
    .block__element.block__element--modifier1.block__element--modifier2
    """
    # FIX: a bare `NotImplemented` expression is a silent no-op; raising makes
    # accidental use fail loudly (this stub is not in `modifiers`).
    raise NotImplementedError
def treenumerate(tree, figure_tag):
    """Number figures of one kind and build a <toc-{figure_tag}> listing.

    Figures carrying class "a-{figure_tag}" are numbered in document order;
    each figure's first child (its caption/heading) gets an anchor id, a
    self-link and a "{figure_tag} N: " prefix. <ref {figure_tag}="label">
    elements are rewritten into links to the numbered figure.
    Returns the (mutated) tree.
    """
    # BUG bool(Element) == len(Element) -- hence the explicit `is None` checks.
    if (toc := _next(tree.iter(f"toc-{figure_tag}"))) is None:
        return tree
    if (content := _next(_class_iter(tree.iter("section"), "content"))) is None:
        return tree
    figures = {}
    toc_nav = ET.Element("nav")
    toc_list = ET.SubElement(toc_nav, "ol")
    figure_class = f"a-{figure_tag}"
    for i, fig in enumerate(_class_iter(content.iter("figure"), figure_class), 1):
        # id attribute is a list in this pipeline; first entry is the label.
        label = fig.get("id")[0]
        heading = fig[0]
        figures[label] = i
        # Make <list><a>
        li = ET.SubElement(toc_list, "li")
        anchor = ET.SubElement(li, "a")
        # Add level to id and hyperlink reference to toc
        heading.attrib.setdefault("id", []).append(f"toc-{figure_tag}-{i}")
        anchor.set("href", f"#toc-{figure_tag}-{i}")
        # Copy header text (and inline children) to anchor
        anchor.text = heading.text
        if len(heading): anchor.extend(list(heading))
        # Add link to self (wrap in <a>)
        if "no-ref" not in heading.get("class", []):
            ref = ET.Element("a")
            ref.set("href", f"#toc-{figure_tag}-{i}")
            _branch(heading, ref)
        # Add numbering to heading
        if "no-num" not in heading.get("class", []):
            num = ET.Element("span", attrib={"class": "figure-num"})
            num.text = f"{figure_tag} {i}: "
            _prepend(heading, num)
    toc.tag = "details"
    summary = ET.SubElement(toc, "summary")
    summary.text = f"Table of {figure_tag}s"
    toc.append(toc_nav)
    # References
    for ref in content.iter("ref"):
        figtype, label = next(iter(ref.items()))
        if figtype.lower() == figure_tag and label in figures:
            # FIX: previously used the leftover loop variable `i`, so every
            # reference pointed at the *last* figure; use the number recorded
            # for this label (cf. eqnumerate, which does this correctly).
            num = figures[label]
            ref.attrib.pop(figtype)
            ref.tag = "a"
            ref.set("href", f"#toc-{figure_tag}-{num}")
            ref.text = f"{figtype} {num}"
    return tree
def eqnumerate(tree):
    """Resolve <ref equation="label"> references into links to equations."""
    # BUG bool(Element) == len(Element) -- hence the explicit `is None` check.
    if (content := _next(_class_iter(tree.iter("section"), "content"))) is None:
        return tree
    equations = {}
    # NOTE(review): scans tree.iter, not content.iter, unlike the other
    # modifiers -- confirm equations outside the content section are intended.
    for equation in _class_iter(tree.iter("figure"), "a-equation"):
        # id attribute is a list in this pipeline; first entry is the label.
        label = equation.get("id")[0]
        # First/last equation-number text nodes; assumes a fixed 6-deep
        # wrapper structure -- TODO confirm against the equation markup.
        first = equation[0][0][0][0][0][0].text
        last = equation[0][0][-1][0][0][0].text
        # Single equation -> "(n)"; a multi-line block -> range "(n-m)".
        if first == last: equations[label] = first
        else: equations[label] = f"{first[:-1]}-{last[1:]}"
    # References: rewrite <ref> into anchors pointing at the labelled figure.
    for ref in content.iter("ref"):
        figtype, label = next(iter(ref.items()))
        if figtype.lower() == "equation" and label in equations:
            ref.attrib.pop(figtype)
            ref.tag = "a"
            ref.set("href", f"#{label}")
            ref.text = f"{figtype} {equations[label]}"
    return tree
def _citation(entry):
    """Render one bibliography entry into a <span> of citation fields.

    *entry* is {"type": "article"|"book"|"misc", "data": {field: value}}.
    Raises KeyError for unknown entry types or missing fields.
    """
    span = ET.Element("span")
    entrytype = entry["type"]
    entrydata = entry["data"]
    # Field order per entry type.
    # NOTE(review): "book" listing "journal" looks copy-pasted from
    # "article" -- confirm whether books should use a publisher field.
    entryfields = {
        "article" : ["author", "title", "journal", "year"],
        "book" : ["author", "title", "journal", "year"],
        "misc" : ["author", "title", "journal", "year", "url", "note"],
    }[entrytype]
    # HTML element used to render each field.
    element = {
        "author" : "span",
        "title" : "cite",
        "journal" : "span",
        "year" : "time",
        "url" : "a",
        "note" : "span",
    }
    for key in entryfields:
        tag = element[key]
        el = ET.SubElement(span, tag)
        el.text = str(entrydata[key])
        el.tail = ", "
        # Years render as "(YYYY)" with a machine-readable datetime attribute.
        if tag == "time": el.set("datetime", el.text); el.text = f"({el.text})"
        if tag == "a":
            el.set("href", el.text)
            # class attribute kept as a list, per this pipeline's convention.
            el.set("class",
                ["link", "link__text", "link__action", "link__action--external"]
            )
            el.tail = ""
    #text = ", ".join(map(str, entry.values()))
    return span
def references(tree):
    """Expand <references src=...> into a bibliography and link citations.

    Each <ref citation="label"> in the content becomes a numbered <cite>
    linking into the reference list; the list groups back-pointers per label.
    """
    from ast import literal_eval
    # BUG bool(Element) == len(Element) -- hence the explicit `is None` checks.
    if (refblock := _next(tree.iter("references"))) is None:
        return tree
    if (content := _next(_class_iter(tree.iter("section"), "content"))) is None:
        return tree
    refblock.tag = "nav"
    ref_file = refblock.attrib.pop("src")
    # The reference file is a Python-literal dict; literal_eval keeps this
    # safe against arbitrary code execution (unlike eval).
    with open(ref_file, mode="r", encoding="utf-8") as f:
        refs = literal_eval(f.read())
    reflist = ET.SubElement(refblock, "ul", attrib={"class" : "reference-list"})
    # label -> the <ul> of back-pointers for that bibliography entry.
    citations = {}
    for i, citation in enumerate(content.iter("ref"), 1):
        figtype, label = next(iter(citation.items()))
        # Only citation refs are handled here; other ref kinds are handled by
        # treenumerate/eqnumerate.
        if figtype.lower() != "citation": continue
        citation.attrib.pop(figtype)
        # Remaining attributes (e.g. page numbers) render after the number.
        post = ", ".join(map(" ".join, citation.items()))
        citation.tag = "cite"
        citation.attrib.setdefault("class", []).append("citation")
        citation.set("id", f"reference-{i}")
        citation.text = f"{i}" if not post else f"{i}, {post}"
        # Wrap the inline citation in a link to its back-pointer entry.
        ref = ET.Element("a")
        ref.set("href", f"#reference-pointer-{i}")
        _branch(citation, ref)
        if label not in citations:
            # First citation of this label: create the bibliography entry.
            li = ET.SubElement(reflist, "li", attrib={"class" : "reference"})
            alist = ET.SubElement(li, "ul", attrib={"class" : "ref-pointers"})
            citations[label] = alist
            citing = ET.SubElement(li, "span")
            citing.append(_citation(refs[label]))
        # Add a back-pointer from the bibliography to this citation site.
        alist = citations[label]
        li = ET.SubElement(alist, "li", attrib={"class" : "ref-pointer"})
        li.set("id", f"reference-pointer-{i}")
        pointer = ET.SubElement(li, "a")
        pointer.set("href", f"#reference-{i}")
        pointer.text = f"{i}^"
    return tree
def footnotes(tree):
    """Move <footnote> bodies into a list and leave numbered superscripts.

    Each footnote's content is appended to the <footnotes> block; the inline
    element becomes a <sup> link, and both ends link to each other.
    """
    # BUG bool(Element) == len(Element) -- hence the explicit `is None` checks.
    if (fnblock := _next(tree.iter("footnotes"))) is None:
        return tree
    if (content := _next(_class_iter(tree.iter("section"), "content"))) is None:
        return tree
    fnblock.tag = "nav"
    fnlist = ET.SubElement(fnblock, "ul", attrib={"class" : "footnote-list"})
    for i, footnote in enumerate(content.iter("footnote"), 1):
        # Tooltip keeps the full footnote text at the inline marker.
        footnote.set("title", "".join(footnote.itertext()))
        # move content
        li = ET.SubElement(fnlist, "li")
        pointer = ET.SubElement(li, "a")
        pointer.set("id", f"footnote-pointer-{i}")
        pointer.set("href", f"#footnote-{i}")
        pointer.text = f"{i}^ "
        note = ET.SubElement(li, "span")
        note.text = footnote.text
        note.extend(list(footnote))
        # clear footnote after moving content
        for child in list(footnote): footnote.remove(child)
        footnote.tag = "sup"
        footnote.attrib.setdefault("class", []).append("footnote")
        footnote.set("id", f"footnote-{i}")
        footnote.text = f"{i}"
        # make pointer: wrap the inline number in a link back to the list.
        ref = ET.Element("a")
        ref.set("href", f"#footnote-pointer-{i}")
        _branch(footnote, ref)
    return tree
def unused_elements(tree, tags):
    """Print a diagnostic for every leftover element whose tag is in *tags*.

    Ideally this would check every element against the set of valid HTML
    tags; for now only the explicitly listed tags are reported.
    """
    leftovers = (el for tag in tags for el in tree.iter(tag))
    for el in leftovers:
        print(f"Unused element {el.tag} with attrib {el.attrib}")
    return tree
# Pipeline of tree transformations, applied in order by `treemodifier`.
modifiers = [
    table_of_contents,
    partial(treenumerate, figure_tag="table"),
    partial(treenumerate, figure_tag="figure"),
    partial(treenumerate, figure_tag="listing"),
    eqnumerate,
    references,
    footnotes,
    # Last: report any <ref> elements no earlier pass consumed.
    partial(unused_elements, tags=["ref"])
]
def treemodifier(tree):
    """Apply every modifier to *tree* in sequence and return the result."""
    for modifier in modifiers:
        tree = modifier(tree)
    return tree
| {
"repo_name": "fourpoints/addup",
"path": "treemodifier.py",
"copies": "1",
"size": "11337",
"license": "mit",
"hash": -5218612293273455000,
"line_mean": 25.3816425121,
"line_max": 82,
"alpha_frac": 0.532639379,
"autogenerated": false,
"ratio": 3.632169176545979,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4664808555545979,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from bunch import Bunch
from flask import request
from flask_login import current_user
from flask_menu import MenuEntryMixin
from flask_menu import current_menu
from flask_menu import register_menu
def visible_when(roles):
    """Return whether the current user may see a menu item restricted to *roles*.

    An empty or None *roles* list means the item is visible to everyone.

    :param list[str] roles: role names allowed to see the menu item
    :return bool: True when unrestricted, otherwise whether the current
        user's roles intersect *roles*
    """
    if not roles:
        return True
    required = set(roles)
    user_roles = set()
    # Support both single-role (`role`) and multi-role (`roles`) user models.
    if hasattr(current_user, 'role'):
        user_roles.add(current_user.role.name)
    if hasattr(current_user, 'roles'):
        for role in current_user.roles:
            user_roles.add(role.name)
    # FIX: previously returned the raw intersection set, contradicting the
    # documented boolean contract; bool() preserves truthiness for callers.
    return bool(required & user_roles)
def active_when(menu_item):
    """Return True when the current request targets *menu_item*'s endpoint.

    :param MenuEntryMixin menu_item:
    :return bool:
    """
    # Collapsed the `if ...: return True / return False` pattern into the
    # boolean expression itself.
    return request.endpoint == menu_item._endpoint
def config_menu(app, items):
    """
    Register menu items with flask-menu on first request.

    items contains menu item, like below
    {'name': 'profile', 'text': 'Home', 'roles': ['admin'], 'order': 1}
    :param flask.Flask app:
    :param list[Bunch] items:
    """
    if not items:
        return
    # Deferred so that flask-menu's current_menu proxy has an app context.
    @app.before_first_request
    def before_first_request():
        for item in items:
            # `name` is the submenu path; pop it so it is not forwarded to
            # register() below. NOTE: this mutates the caller's Bunch.
            name = item.pop('name')
            menu_item = current_menu.submenu(name)  # type: MenuEntryMixin
            item.endpoint = None
            # Bind the role list now; flask-menu calls visible_when() later
            # with no arguments.
            item.visible_when = partial(visible_when, item.get('roles'))
            # kwargs['active_when'] = active_when
            menu_item.register(**item)
def register_menu_ex(app, path, text, **kwargs):
    """
    Wrapper around flask-menu's register_menu deriving `visible_when`
    from an optional `roles` kwarg.

    :param app:
    :param path:
    :param text:
    :param kwargs: forwarded to register_menu; may include `roles`
    """
    new_visible_when = partial(visible_when, kwargs.get('roles'))
    kwargs['visible_when'] = new_visible_when
    return register_menu(app, path, text, **kwargs)
# NOTE(review): this rebinds register_menu with the *unbound* visible_when
# function, which flask-menu would call with zero arguments (visible_when
# requires `roles`) -- confirm this default path is actually exercised;
# callers passing their own visible_when keyword override it.
register_menu = partial(register_menu, visible_when=visible_when)
| {
"repo_name": "by46/flask-kits",
"path": "flask_kits/ui/menu.py",
"copies": "1",
"size": "1938",
"license": "mit",
"hash": -7486205298721335000,
"line_mean": 22.5316455696,
"line_max": 74,
"alpha_frac": 0.5954592363,
"autogenerated": false,
"ratio": 3.860557768924303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9943845632293437,
"avg_score": 0.0024342745861733205,
"num_lines": 79
} |
from functools import partial
from django.contrib.gis.db.models import aggregates
class BaseSpatialFeatures(object):
    """Feature flags describing what a spatial database backend supports.

    Concrete GIS backends mix this into their DatabaseFeatures class and
    override individual flags/properties as needed.
    """
    gis_enabled = True
    # Does the database contain a SpatialRefSys model to store SRID information?
    has_spatialrefsys_table = True
    # Does the backend support the django.contrib.gis.utils.add_srs_entry() utility?
    supports_add_srs_entry = True
    # Does the backend introspect GeometryField to its subtypes?
    supports_geometry_field_introspection = True
    # Reference implementation of 3D functions is:
    # http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions
    supports_3d_functions = False
    # Does the database support SRID transform operations?
    supports_transform = True
    # Do geometric relationship operations operate on real shapes (or only on bounding boxes)?
    supports_real_shape_operations = True
    # Can geometry fields be null?
    supports_null_geometries = True
    # Can the `distance` GeoQuerySet method be applied on geodetic coordinate systems?
    supports_distance_geodetic = True
    # Is the database able to count vertices on polygons (with `num_points`)?
    supports_num_points_poly = True
    # The following properties indicate if the database backend support
    # certain lookups (dwithin, left and right, relate, ...)
    supports_distances_lookups = True
    supports_left_right_lookups = False
    # Lookup support is derived from the backend's registered gis_operators.
    @property
    def supports_bbcontains_lookup(self):
        return 'bbcontains' in self.connection.ops.gis_operators
    @property
    def supports_contained_lookup(self):
        return 'contained' in self.connection.ops.gis_operators
    @property
    def supports_crosses_lookup(self):
        return 'crosses' in self.connection.ops.gis_operators
    @property
    def supports_dwithin_lookup(self):
        return 'dwithin' in self.connection.ops.gis_operators
    @property
    def supports_relate_lookup(self):
        return 'relate' in self.connection.ops.gis_operators
    # For each of those methods, the class will have a property named
    # `has_<name>_method` (defined in __init__) which accesses connection.ops
    # to determine GIS method availability.
    geoqueryset_methods = (
        'area', 'centroid', 'difference', 'distance', 'distance_spheroid',
        'envelope', 'force_rhr', 'geohash', 'gml', 'intersection', 'kml',
        'length', 'num_geom', 'perimeter', 'point_on_surface', 'reverse',
        'scale', 'snap_to_grid', 'svg', 'sym_difference', 'transform',
        'translate', 'union', 'unionagg',
    )
    # Specifies whether the Collect and Extent aggregates are supported by the database
    @property
    def supports_collect_aggr(self):
        return aggregates.Collect not in self.connection.ops.disallowed_aggregates
    @property
    def supports_extent_aggr(self):
        return aggregates.Extent not in self.connection.ops.disallowed_aggregates
    @property
    def supports_make_line_aggr(self):
        return aggregates.MakeLine not in self.connection.ops.disallowed_aggregates
    def __init__(self, *args):
        super(BaseSpatialFeatures, self).__init__(*args)
        for method in self.geoqueryset_methods:
            # Add dynamically properties for each GQS method, e.g. has_force_rhr_method, etc.
            # NOTE: set on the class (shared), not the instance, since
            # properties only work as class attributes.
            setattr(self.__class__, 'has_%s_method' % method,
                    property(partial(BaseSpatialFeatures.has_ops_method, method=method)))
    def has_ops_method(self, method):
        # Truthy when connection.ops defines the named GeoQuerySet method.
        return getattr(self.connection.ops, method, False)
| {
"repo_name": "diego-d5000/MisValesMd",
"path": "env/lib/python2.7/site-packages/django/contrib/gis/db/backends/base/features.py",
"copies": "1",
"size": "3630",
"license": "mit",
"hash": -1075135016076699000,
"line_mean": 39.25,
"line_max": 94,
"alpha_frac": 0.6818181818,
"autogenerated": false,
"ratio": 4.250585480093677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012962758448992085,
"num_lines": 88
} |
from functools import partial
from stdnet.utils import encoders
from stdnet import QuerySetError, ManyToManyError
from .globals import Event
from .session import Manager, LazyProxy
__all__ = ['LazyForeignKey', 'ModelFieldPickler']
# Sentinel string used in place of a model name for self-referential relations.
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
# (app_label, model_name) -> [callbacks] waiting for that model to be prepared.
pending_lookups = {}
# Event fired once a model class has been fully constructed.
class_prepared = Event()
class ModelFieldPickler(encoders.Encoder):
    '''An encoder for :class:`StdModel` instances.'''
    def __init__(self, model):
        self.model = model
    def dumps(self, obj):
        # Instances serialize as their primary-key value only.
        return obj.pkvalue()
    def require_session(self):
        # Decoding needs a session to query the instances back.
        return True
    def load_iterable(self, iterable, session):
        """Resolve an iterable of primary keys back into model instances.

        Results are re-sorted to match the input id order; missing ids
        yield None entries.
        """
        ids = []
        backend = session.model(self.model).read_backend
        tpy = self.model.pk().to_python
        ids = [tpy(id, backend) for id in iterable]
        result = session.query(self.model).filter(id=ids).all()
        # The query may run asynchronously; sorting happens in the callback.
        return backend.execute(result, partial(self._sort, ids))
    def _sort(self, ids, results):
        # Map pk -> instance, then emit instances in the original id order.
        results = dict(((r.pkvalue(), r) for r in results))
        return [results.get(id) for id in ids]
def load_relmodel(field, callback):
    """Resolve *field*'s related model and invoke *callback* with it.

    The relation may be:
    * the string 'self' (RECURSIVE_RELATIONSHIP_CONSTANT) -> the field's own
      model;
    * an 'app_label.model_name' string (or a bare model name, assumed to be
      in the field's own app) -> the callback is queued in ``pending_lookups``
      until that model class is prepared;
    * an actual model class -> used directly.
    """
    relmodel = None
    relation = field.relmodel
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        relmodel = field.model
    else:
        try:
            app_label, model_name = relation.lower().split(".")
        except ValueError:
            # If we can't split, assume a model in current app
            app_label = field.model._meta.app_label
            model_name = relation.lower()
        except AttributeError:
            # Not a string at all: already a model class.
            relmodel = relation
    if relmodel:
        callback(relmodel)
    else:
        # Defer until the target model class is prepared.
        # (setdefault replaces the previous membership-test-then-assign.)
        pending_lookups.setdefault((app_label, model_name), []).append(callback)
def do_pending_lookups(event, sender, **kwargs):
    """Handle any pending relations to the sending model.
    Sent from class_prepared."""
    meta = sender._meta
    for callback in pending_lookups.pop((meta.app_label, meta.name), []):
        callback(sender)
class_prepared.bind(do_pending_lookups)
def Many2ManyThroughModel(field):
    '''Create a Many2Many through model with two foreign key fields and a
    CompositeFieldId depending on the two foreign keys.'''
    from stdnet.odm import ModelType, StdModel, ForeignKey, CompositeIdField
    name_model = field.model._meta.name
    name_relmodel = field.relmodel._meta.name
    # The two models are the same (self-referential relation): disambiguate
    # the second field name with a '2' suffix.
    if name_model == name_relmodel:
        name_relmodel += '2'
    through = field.through
    # Create the through model dynamically unless the user supplied one.
    if through is None:
        name = '{0}_{1}'.format(name_model, name_relmodel)
        class Meta:
            app_label = field.model._meta.app_label
        through = ModelType(name, (StdModel,), {'Meta': Meta})
        field.through = through
    # The first field: FK back to the owning model; its related manager
    # traverses towards the related model.
    field1 = ForeignKey(field.model,
                        related_name=field.name,
                        related_manager_class=makeMany2ManyRelatedManager(
                            field.relmodel,
                            name_model,
                            name_relmodel)
                        )
    field1.register_with_model(name_model, through)
    # The second field: FK to the related model, traversing back.
    field2 = ForeignKey(field.relmodel,
                        related_name=field.related_name,
                        related_manager_class=makeMany2ManyRelatedManager(
                            field.model,
                            name_relmodel,
                            name_model)
                        )
    field2.register_with_model(name_relmodel, through)
    # Primary key of the through model is the (fk1, fk2) pair.
    pk = CompositeIdField(name_model, name_relmodel)
    pk.register_with_model('id', through)
class LazyForeignKey(LazyProxy):
    '''Descriptor for a :class:`ForeignKey` field.'''
    def load(self, instance, session=None, backend=None):
        # Defer to the instance, which knows how to fetch/caches the target.
        return instance._load_related_model(self.field)
    def __set__(self, instance, value):
        if instance is None:
            # NOTE(review): uses `self._field` here but `self.field` below --
            # confirm LazyProxy exposes both, else this error path raises
            # AttributeError instead of the intended message.
            raise AttributeError("%s must be accessed via instance" %
                                 self._field.name)
        field = self.field
        if value is not None and not isinstance(value, field.relmodel):
            raise ValueError(
                'Cannot assign "%r": "%s" must be a "%s" instance.' %
                (value, field, field.relmodel._meta.name))
        cache_name = self.field.get_cache_name()
        # If we're setting the value of a OneToOneField to None,
        # we need to clear
        # out the cache on any old related object. Otherwise, deleting the
        # previously-related object will also cause this object to be deleted,
        # which is wrong.
        if value is None:
            # Look up the previously-related object, which may still
            # be available since we've not yet cleared out the related field.
            related = getattr(instance, cache_name, None)
            if related:
                try:
                    delattr(instance, cache_name)
                except AttributeError:
                    pass
            setattr(instance, self.field.attname, None)
        else:
            # Store the pk on the attname column and cache the instance.
            setattr(instance, self.field.attname, value.pkvalue())
            setattr(instance, cache_name, value)
class RelatedManager(Manager):
    '''Base class for managers handling relationships between models.
    While standard :class:`Manager` are class properties of a model,
    related managers are accessed by instances to easily retrieve instances
    of a related model.

    .. attribute:: relmodel

        The :class:`StdModel` this related manager relates to.

    .. attribute:: related_instance

        An instance of the :attr:`relmodel`.
    '''
    def __init__(self, field, model=None, instance=None):
        self.field = field
        model = model or field.model
        super(RelatedManager, self).__init__(model)
        self.related_instance = instance
    def __get__(self, instance, instance_type=None):
        # Descriptor protocol: each attribute access yields a manager bound
        # to that particular instance.
        return self.__class__(self.field, self.model, instance)
    def session(self, session=None):
        '''Override :meth:`Manager.session` so that this
        :class:`RelatedManager` can retrieve the session from the
        :attr:`related_instance` if available.
        '''
        # NOTE: the related instance's session always wins over the argument.
        if self.related_instance:
            session = self.related_instance.session
        # we have a session, we either create a new one return the same session
        if session is None:
            raise QuerySetError('Related manager can be accessed only from\
 a loaded instance of its related model.')
        return session
class One2ManyRelatedManager(RelatedManager):
    '''A specialised :class:`RelatedManager` for handling one-to-many
    relationships under the hood.
    If a model has a :class:`ForeignKey` field, instances of
    that model will have access to the related (foreign) objects
    via a simple attribute of the model.'''
    @property
    def relmodel(self):
        return self.field.relmodel
    def query(self, session=None):
        # Override query method to account for related instance if available
        query = super(One2ManyRelatedManager, self).query(session)
        if self.related_instance is not None:
            # Restrict to rows whose FK points at the bound instance.
            kwargs = {self.field.name: self.related_instance}
            return query.filter(**kwargs)
        else:
            return query
    def query_from_query(self, query, params=None):
        # Build a query filtered by the FK field from another query's results.
        if params is None:
            params = query
        return query.session.query(self.model, fargs={self.field.name: params})
class Many2ManyRelatedManager(One2ManyRelatedManager):
    '''A specialized :class:`Manager` for handling
    many-to-many relationships under the hood.
    When a model has a :class:`ManyToManyField`, instances
    of that model will have access to the related objects via a simple
    attribute of the model.'''
    def session_instance(self, name, value, session, **kwargs):
        """Validate *value* and build a through-model instance linking it to
        the bound instance; returns (session, through_instance).

        Raises ManyToManyError when the manager is unbound, either side is
        not persistent, or *value* has the wrong type.
        """
        if self.related_instance is None:
            raise ManyToManyError('Cannot use "%s" method from class' % name)
        elif not self.related_instance.pkvalue():
            raise ManyToManyError('Cannot use "%s" method on a non persistent '
                                  'instance.' % name)
        elif not isinstance(value, self.formodel):
            raise ManyToManyError(
                '%s is not an instance of %s' % (value, self.formodel._meta))
        elif not value.pkvalue():
            raise ManyToManyError('Cannot use "%s" a non persistent instance.'
                                  % name)
        kwargs.update({self.name_formodel: value,
                       self.name_relmodel: self.related_instance})
        return self.session(session), self.model(**kwargs)
    def add(self, value, session=None, **kwargs):
        '''Add ``value``, an instance of :attr:`formodel` to the
        :attr:`through` model. This method can only be accessed by an instance of the
        model for which this related manager is an attribute.'''
        s, instance = self.session_instance('add', value, session, **kwargs)
        return s.add(instance)
    def remove(self, value, session=None):
        '''Remove *value*, an instance of ``self.model`` from the set of
        elements contained by the field.'''
        s, instance = self.session_instance('remove', value, session)
        # update state so that the instance does look persistent
        instance.get_state(iid=instance.pkvalue(), action='update')
        return s.delete(instance)
    def throughquery(self, session=None):
        '''Return a :class:`Query` on the ``throughmodel``, the model
        used to hold the :ref:`many-to-many relationship <many-to-many>`.'''
        return super(Many2ManyRelatedManager, self).query(session)
    def query(self, session=None):
        # Return a query for the related model: collect the FK ids from the
        # through rows, then filter the target model by primary key.
        ids = self.throughquery(session).get_field(self.name_formodel)
        pkey = self.formodel.pk().name
        fargs = {pkey: ids}
        return self.session(session).query(self.formodel).filter(**fargs)
def makeMany2ManyRelatedManager(formodel, name_relmodel, name_formodel):
    '''Build a Many2ManyRelatedManager subclass bound to *formodel* and the
    two field names of the through model.'''
    attrs = {'formodel': formodel,
             'name_relmodel': name_relmodel,
             'name_formodel': name_formodel}
    return type('_Many2ManyRelatedManager', (Many2ManyRelatedManager,), attrs)
| {
"repo_name": "lsbardel/python-stdnet",
"path": "stdnet/odm/related.py",
"copies": "1",
"size": "10661",
"license": "bsd-3-clause",
"hash": -4821908459840020000,
"line_mean": 36.9087591241,
"line_max": 79,
"alpha_frac": 0.6184222868,
"autogenerated": false,
"ratio": 4.290140845070423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5408563131870423,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import wx
from gooey.gui.util import wx_util
from gooey.gui.widgets import widget_pack
class BaseGuiComponent(object):
    """Title + help text + inner WidgetPack; base for most GUI components.

    NOTE: this module uses Python 2 syntax (print statements, `unicode`).
    """
    # Subclasses set this to the WidgetPack class rendered inside the panel.
    widget_class = None
    def __init__(self, parent, title, msg, choices=None):
        '''
        :param data: field info (title, help, etc..)
        :param widget_pack: internal wxWidgets to render
        '''
        # parent
        self.parent = parent
        # Widgets
        self.title = None
        self.help_msg = None
        self.choices = choices
        # Internal WidgetPack set in subclasses
        self.do_layout(parent, title, msg)
    def do_layout(self, parent, title, msg):
        """Build the panel: bold title, grey help text, then the widget pack."""
        self.panel = wx.Panel(parent)
        self.widget_pack = self.widget_class()
        self.title = self.format_title(self.panel, title)
        self.help_msg = self.format_help_msg(self.panel, msg)
        self.help_msg.SetMinSize((0, -1))
        core_widget_set = self.widget_pack.build(self.panel, {}, self.choices)
        vertical_container = wx.BoxSizer(wx.VERTICAL)
        vertical_container.Add(self.title)
        vertical_container.AddSpacer(2)
        # Help text row only exists when there is a message to show.
        if self.help_msg.GetLabelText():
            vertical_container.Add(self.help_msg, 1, wx.EXPAND)
            vertical_container.AddSpacer(2)
        else:
            vertical_container.AddStretchSpacer(1)
        vertical_container.Add(core_widget_set, 0, wx.EXPAND)
        self.panel.SetSizer(vertical_container)
        return self.panel
    def bind(self, *args, **kwargs):
        # Python 2 print statement; forwards event binding to the inner widget.
        print self.widget_pack.widget.Bind(*args, **kwargs)
    def get_title(self):
        return self.title.GetLabel()
    def set_title(self, text):
        self.title.SetLabel(text)
    def get_help_msg(self):
        return self.help_msg.GetLabelText()
    def set_label_text(self, text):
        self.help_msg.SetLabel(text)
    def format_help_msg(self, parent, msg):
        # Help text renders in dark grey to de-emphasize it.
        base_text = wx.StaticText(parent, label=msg or '')
        wx_util.dark_grey(base_text)
        return base_text
    def format_title(self, parent, title):
        text = wx.StaticText(parent, label=title)
        wx_util.make_bold(text)
        return text
    def onResize(self, evt):
        # handle internal widgets
        # self.panel.Freeze()
        self._onResize(evt)
        # propagate event to child widgets
        self.widget_pack.onResize(evt)
        evt.Skip()
        # self.panel.Thaw()
    def _onResize(self, evt):
        # Re-wrap the help text to the new panel width.
        if not self.help_msg:
            return
        self.panel.Size = evt.GetSize()
        container_width, _ = self.panel.Size
        text_width, _ = self.help_msg.Size
        if text_width != container_width:
            # Undo previous wrapping before re-wrapping at the new width.
            self.help_msg.SetLabel(self.help_msg.GetLabelText().replace('\n', ' '))
            self.help_msg.Wrap(container_width)
        evt.Skip()
    def get_value(self):
        return self.widget_pack.get_value()
    def set_value(self, val):
        if val:
            # `unicode` builtin: Python 2 only.
            self.widget_pack.widget.SetValue(unicode(val))
    def __repr__(self):
        return self.__class__.__name__
class CheckBox(BaseGuiComponent):
    """Boolean component: checkbox next to its help text (no WidgetPack)."""
    def __init__(self, parent, title, msg, choices=None):
        # `choices` is accepted for signature parity but not used.
        BaseGuiComponent.__init__(self, parent, title, msg)
    def do_layout(self, parent, title, msg):
        self.panel = wx.Panel(parent)
        self.widget = wx.CheckBox(self.panel)
        # self.widget.SetValue(self.default_value)
        self.title = self.format_title(self.panel, title)
        self.help_msg = self.format_help_msg(self.panel, msg)
        self.help_msg.SetMinSize((0, -1))
        # self.help_msg.Bind(wx.EVT_LEFT_UP, lambda event: self.widget.SetValue(not self.widget.GetValue()))
        vertical_container = wx.BoxSizer(wx.VERTICAL)
        vertical_container.Add(self.title)
        # Checkbox sits to the left of its descriptive text.
        horizontal_sizer = wx.BoxSizer(wx.HORIZONTAL)
        horizontal_sizer.Add(self.widget, 0, wx.EXPAND | wx.RIGHT, 10)
        horizontal_sizer.Add(self.help_msg, 1, wx.EXPAND)
        vertical_container.Add(horizontal_sizer, 0, wx.EXPAND)
        self.panel.SetSizer(vertical_container)
        self.panel.Bind(wx.EVT_SIZE, self.onResize)
        return self.panel
    def onResize(self, evt):
        # Re-wrap the help text to the new panel width.
        msg = self.help_msg
        container_width, _ = self.panel.Size
        text_width, _ = msg.Size
        if text_width != container_width:
            msg.SetLabel(msg.GetLabelText().replace('\n', ' '))
            msg.Wrap(container_width)
        evt.Skip()
    def get_value(self):
        # Returns the raw bool, unlike text components' string values.
        return self.widget.GetValue()
    def set_value(self, val):
        self.widget.SetValue(val)
class RadioGroup(object):
    """A boxed group of radio buttons, each with a name and a help message.

    Unlike the other components, ``title`` and ``msg`` here are *sequences*
    (one entry per radio button).
    """

    def __init__(self, parent, title, msg, choices=None):
        self.panel = None
        self.radio_buttons = []
        self.option_strings = []
        self.help_msgs = []
        self.btn_names = []
        self.do_layout(parent, title, msg)

    def do_layout(self, parent, titles, msgs):
        """Build one (button, name, help) row per option inside a static box."""
        self.panel = wx.Panel(parent)
        self.radio_buttons = [wx.RadioButton(self.panel, -1) for _ in titles]
        self.btn_names = [wx.StaticText(self.panel, label=title.title()) for title in titles]
        self.help_msgs = [wx.StaticText(self.panel, label=msg.title()) for msg in msgs]
        box = wx.StaticBox(self.panel, -1, label='')
        vertical_container = wx.StaticBoxSizer(box, wx.VERTICAL)
        # Renamed the loop variable from ``help`` to avoid shadowing the builtin.
        for button, name, help_msg in zip(self.radio_buttons, self.btn_names,
                                          self.help_msgs):
            hbox = wx.BoxSizer(wx.HORIZONTAL)
            hbox.Add(button, 0, wx.ALIGN_TOP | wx.ALIGN_LEFT)
            hbox.Add(name, 0, wx.LEFT, 10)
            vertical_container.Add(hbox, 0, wx.EXPAND)
            vertical_container.Add(help_msg, 1, wx.EXPAND | wx.LEFT, 25)
            vertical_container.AddSpacer(5)
        self.panel.SetSizer(vertical_container)
        self.panel.Bind(wx.EVT_SIZE, self.onResize)
        self.panel.Bind(wx.EVT_RADIOBUTTON, self.showz)
        return self.panel

    def showz(self, evt):
        # Debug handler: dump the event and every button's state.
        # print() form is valid under both Python 2 and Python 3
        # (previously used Python 2-only print statements).
        print(evt)
        for button in self.radio_buttons:
            print(button.GetValue())

    def onResize(self, evt):
        """Re-wrap the first help message to the panel's new width."""
        msg = self.help_msgs[0]
        container_width, _ = self.panel.Size
        text_width, _ = msg.Size
        if text_width != container_width:
            msg.SetLabel(msg.GetLabelText().replace('\n', ' '))
            msg.Wrap(container_width)
        evt.Skip()

    def get_value(self):
        """Return the checked-state of every radio button, in order."""
        return [button.GetValue() for button in self.radio_buttons]

    def set_value(self, val):
        # Programmatic selection is not supported for radio groups.
        pass
def build_subclass(name, widget_class):
    """Create a BaseGuiComponent subclass bound to ``widget_class``.

    Shorthand for writing out many near-identical ``class`` statements.
    """
    attrs = {'widget_class': widget_class}
    return type(name, (BaseGuiComponent,), attrs)
# Concrete component classes, one per widget payload.
FileChooser = build_subclass('FileChooser', widget_pack.FileChooserPayload)
# NOTE(review): MultiFileChooser is backed by MultiFileSaverPayload -- looks
# deliberate upstream, but confirm it should not use a chooser payload.
MultiFileChooser = build_subclass('MultiFileChooser', widget_pack.MultiFileSaverPayload)
DirChooser = build_subclass('DirChooser', widget_pack.DirChooserPayload)
FileSaver = build_subclass('FileSaver', widget_pack.FileSaverPayload)
DateChooser = build_subclass('DateChooser', widget_pack.DateChooserPayload)
TextField = build_subclass('TextField', widget_pack.TextInputPayload)
# Fixed: the generated class was previously misnamed 'TextField'.
Textarea = build_subclass('Textarea', widget_pack.TextAreaPayload)
# CommandField receives an already-instantiated payload (no_quoting=True),
# unlike the others which receive the payload class itself.
CommandField = build_subclass('CommandField', widget_pack.TextInputPayload(no_quoting=True))
Dropdown = build_subclass('Dropdown', widget_pack.DropdownPayload)
Counter = build_subclass('Counter', widget_pack.CounterPayload)
MultiDirChooser = build_subclass('MultiDirChooser', widget_pack.MultiDirChooserPayload)
| {
"repo_name": "jschultz/Gooey",
"path": "gooey/gui/widgets/components.py",
"copies": "1",
"size": "7280",
"license": "mit",
"hash": 8316712383884380000,
"line_mean": 28.7172995781,
"line_max": 104,
"alpha_frac": 0.6557692308,
"autogenerated": false,
"ratio": 3.440453686200378,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4596222917000378,
"avg_score": null,
"num_lines": null
} |
from functools import partial, singledispatch
import warnings
import numbers
from typing import Union, Tuple
from .transferfunction import TransferFunction, _tf_to_symbol
from .statespace import StateSpace
from .plot_utility import _plot_response_curve
from .discretization import c2d
from .model_conversion import tf2ss
import numpy as np
import sympy as sym
__all__ = ['impulse', 'step', 'ramp', 'any_input']
def __get_cs(sys_, input_signal):
    """
    Return the Laplace-domain output C(s) = G(s) * R(s).

    :param sys_: the system
    :type sys_: TransferFunction
    :param input_signal: 'step', 'impulse', 'ramp', '0', or a sympy-parsable
        time-domain expression for an arbitrary input
    :type input_signal: str
    :return: the symbolic output C(s)
    :rtype: sym.Add
    """
    from sympy.parsing.sympy_parser import parse_expr
    s = sym.Symbol('s')
    t = sym.Symbol('t')
    # Laplace transforms of the named test signals.
    signal_table = {'step': 1/s, 'impulse': 1, '0': 0, 'ramp': 1/s**2}
    if input_signal in signal_table:
        rs = signal_table[input_signal]
    else:
        # Arbitrary input: parse and transform it on demand only.  The
        # transform was previously computed even for named signals above,
        # doing expensive (and potentially failing) symbolic work for nothing.
        input_expr = sym.simplify(parse_expr(input_signal))
        rs = sym.laplace_transform(input_expr, t, s)[0]
    gs, *_ = _tf_to_symbol(sys_.num, sys_.den)
    return gs * rs
def __ilaplace(expr):
    """
    Conduct the inverse Laplace transform of ``expr``.

    :param expr: the s-domain expression
    :type expr: sympy.Add
    :return: the time-domain expression c(t)
    :rtype: sympy.Expr
    """
    from sympy.integrals.transforms import inverse_laplace_transform
    s = sym.Symbol('s')
    t = sym.Symbol('t')
    # Partial-fraction expansion in s so each term can be inverted separately.
    cs = expr.apart(s)
    tmp = sum(cs.args)
    if expr.equals(tmp):
        # Expansion round-trips: invert term by term, rationalising float
        # coefficients first so sympy can match its transform tables.
        polys = [sym.nsimplify(i, tolerance=0.001, rational=True) for i in tmp.args]
        ilaplace_p = partial(inverse_laplace_transform, s=s, t=t)
        ct = 0
        for i in polys:
            i = ilaplace_p(i)
            ct += i
    else:
        # Expansion did not round-trip; invert the whole expression at once.
        cs = sym.nsimplify(expr, tolerance=0.001, rational=True)
        ct = inverse_laplace_transform(cs, s, t)
    # Responses are evaluated for t >= 0 only, so Heaviside factors drop to 1.
    # NOTE(review): ``sym.numbers`` relies on sympy re-exporting its
    # ``core.numbers`` module -- confirm against the pinned sympy version.
    if ct.has(sym.Heaviside):
        ct = ct.replace(sym.Heaviside, sym.numbers.One)
    # Any transform sympy could not evaluate is treated as zero.
    if ct.has(sym.InverseLaplaceTransform):
        ct = ct.replace(sym.InverseLaplaceTransform, sym.numbers.Zero)
    return ct
# convert system to state space then get result
def _any_input(sys_, t, input_signal=0, init_cond=None):
    """
    Accept any input signal, then calculate the response of the system.

    :param sys_: the system
    :type sys_: TransferFunction | StateSpace
    :param t: time; assumed uniformly spaced (dt is taken from t[1] - t[0])
    :type t: np.ndarray
    :param input_signal: input signal accepted by the system
    :type input_signal: numbers.Real | np.ndarray
    :param init_cond: initial condition of the system
    :type init_cond: None | numbers.Real | np.ndarray
    :return: system output and time array
    :rtype: Tuple[np.ndarray, np.ndarray]
    :raises TypeError: when give wrong types of arguments
    :raises ValueError: raised when t, input_signal or init_cond has a wrong shape
    :raises NotImplementedError: when system is a MIMO system
    .. note:: This is internal api.
    """
    # convert transfer function or continuous system to discrete system
    dt = t[1] - t[0]
    if dt > 0.02 and sys_.is_ctime:
        warnings.warn("Large sample time will lead to low accuracy.",
                      stacklevel=3)
    if sys_.is_ctime:
        d_sys_ = c2d(sys_, dt)
    else:
        # Discrete systems must already be sampled at the requested dt.
        if _is_dt_validated(sys_, dt):
            d_sys_ = sys_
        else:
            raise ValueError('The step of time vector didn\'t match the sample time of '
                             'the system.')
    # check the input_signal validity
    if d_sys_.is_siso:
        u = _setup_control_signal(input_signal, t)
    else:
        raise NotImplementedError("not support MIMO system right now")  # TODO: finish it
    if init_cond is None:
        # Default: zero initial state, shaped to match A.
        init_cond = np.mat(np.zeros((d_sys_.A.shape[0], 1)))
    else:
        # check the shape of init_cond
        if init_cond.shape[0] != d_sys_.A.shape[0] or init_cond.shape[1] != 1:
            raise ValueError("wrong dimension of init condition")
    # Propagate the state recursion, then compute the outputs.
    x = _cal_x(d_sys_.A, d_sys_.B, len(t[1:]), init_cond, u)
    y = _cal_y(d_sys_.C, d_sys_.D, len(x), x, u)
    # Flatten matrix results into 1-D arrays for plotting/return.
    if sys_.is_siso:
        y = np.asarray(y).reshape(-1)
    else:
        y = [np.asarray(_).reshape(-1) for _ in y]
    return np.array(y), t
def _is_dt_validated(sys_: Union[TransferFunction, StateSpace],
                     dt: Union[int, float]) -> bool:
    """Return True when ``dt`` matches the system's sample time.

    A small absolute tolerance absorbs floating-point error coming from
    the time-vector spacing (t[1] - t[0]).
    """
    return abs(dt - sys_.dt) <= 1e-7
@singledispatch
def _setup_control_signal(input_signal, t):
raise TypeError("Wrong type is given.")
@_setup_control_signal.register(np.ndarray)
def f(input_signal, t):
if input_signal.shape == t.shape:
u = input_signal
else:
raise ValueError("The input signal should have the same shape with t.")
return u
@_setup_control_signal.register(list)
def f(input_signal, t):
u = np.array(input_signal)
if u.shape != t.shape:
raise ValueError("The input signal should have the same shape with t")
return u
def _cal_x(G, H, n, x_0, u):
"""
calculate x step by step
"""
x = [x_0]
for i in range(n):
x_k = G*x[i] + H*u[i]
x.append(x_k)
return x
def _cal_y(C, D, n, x, u):
"""
calculate system output
"""
y = []
for i in range(n):
y_k = C*x[i] + D*u[i]
y.append(y_k)
return y
def step(sys_, t=None, *, plot=True):
    """Step response of the system.

    .. seealso:: any_input
    """
    if isinstance(sys_, TransferFunction):
        sys_ = tf2ss(sys_)
    if t is None:
        t = _setup_time_vector(sys_)
    # Unit step input: ones on every sample of the time grid.
    u = np.ones_like(t, dtype=int)
    output, times = _any_input(sys_, t, u)
    if plot:
        _plot_response_curve(output, times, "step response", sys_.is_ctime)
    return output, times
def impulse(sys_, t=None, *, plot=True, **kwargs):
    """
    Impulse response of the system.

    Optional keyword arguments:
      ``x0`` -- extra initial state added to the impulse-equivalent state;
      ``K``  -- impulse strength (default 1).
    .. seealso:: any_input
    """
    if isinstance(sys_, TransferFunction):
        sys_ = tf2ss(sys_)
    if t is None:
        t = _setup_time_vector(sys_)
    u = np.zeros(t.shape)
    x0 = kwargs.get('x0')
    K = kwargs.get('K', 1)
    if not sys_.is_ctime:
        # Discrete impulse: a single unit sample at k = 0.
        u[0] = 1
    else:
        # Continuous impulse: simulated as zero input with the initial
        # state shifted by B*K (the state jump an impulse would cause).
        x0 = sys_.B * K if x0 is None else x0 + sys_.B * K
    y, t = _any_input(sys_, t, u, x0)
    if plot:
        _plot_response_curve(y, t, "impulse response", sys_.is_ctime)
    return y, t
def ramp(sys_, t=None, *, plot=True):
    """
    Ramp response of the system.

    .. seealso:: any_input
    """
    if isinstance(sys_, TransferFunction):
        sys_ = tf2ss(sys_)
    if t is None:
        t = _setup_time_vector(sys_)
    # Unit ramp input: u(t) = t.
    u = t
    y, t = _any_input(sys_, t, u)
    if plot:
        # Fixed: the curve was previously mislabeled "impulse response".
        _plot_response_curve(y, t, "ramp response", sys_.is_ctime)
    return y, t
def any_input(sys_, t, input_signal=0, init_cond=None, *, plot=True):
    """
    Accept any input signal, then calculate the response of the system.

    :param sys_: the system
    :type sys_: TransferFunction | StateSpace
    :param t: time
    :type t: array_like
    :param input_signal: input signal accepted by the system
    :type input_signal: numbers.Real | np.ndarray
    :param init_cond: initial condition of the system
    :type init_cond: None | numbers.Real | np.ndarray
    :param plot: If plot is True, it will show the response curve.
    :type plot: bool
    :return: system output and time array
    :rtype: tuple[np.ndarray, np.ndarray]
    """
    # State-space form is required by the internal solver.
    sys_ = tf2ss(sys_) if isinstance(sys_, TransferFunction) else sys_
    y, t = _any_input(sys_, t, input_signal, init_cond)
    if plot:
        _plot_response_curve(y, t, "response", sys_.is_ctime)
    return y, t
def _setup_time_vector(sys_: StateSpace, n: int=1000):
    """Build a default simulation time vector for ``sys_``.

    Continuous systems get ``n`` points spanning ten times the slowest
    (doubled) time constant; discrete systems get one sample per dt.
    """
    eigvals = np.linalg.eigvals(sys_.A)
    # Doubled time constant of the slowest mode; a zero eigenvalue yields inf.
    tc = 1 / np.min(np.abs(eigvals)) * 2
    if tc == np.inf:
        tc = 1
    if sys_.is_ctime:
        return np.linspace(0, 10 * tc, n)
    # Discrete branch: n and tc are intentionally unused here.
    return np.arange(0, 10 * sys_.dt + 1, sys_.dt)
| {
"repo_name": "DaivdZhang/tinyControl",
"path": "tcontrol/time_response.py",
"copies": "1",
"size": "7780",
"license": "bsd-3-clause",
"hash": -590452373392842200,
"line_mean": 26.0138888889,
"line_max": 89,
"alpha_frac": 0.5951156812,
"autogenerated": false,
"ratio": 3.267534649307014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4362650330507014,
"avg_score": null,
"num_lines": null
} |
from functools import partial, singledispatch, wraps
import stl
import funcy as fn
import numpy as np
from optlang import Variable, Constraint
from magnum.constraint_kinds import Kind as K, Kind
M = 1000 # TODO
def counter(func):
    """Decorator that injects a 1-based call counter into ``func`` as ``i``."""
    calls = 0

    @wraps(func)
    def _func(*args, **kwargs):
        nonlocal calls
        calls += 1
        return func(*args, i=calls, **kwargs)
    return _func
@counter
def z(x: "SL", g, i):
    """Create the optlang Variable(s) standing in for ``x``.

    ``x`` is either a ``(name, time)`` pair -- a sampled model signal -- or
    an STL subformula.  ``g`` carries the model; ``i`` is the call counter
    injected by @counter and keeps generated variable names unique.
    """
    # TODO: come up with better function name
    if isinstance(x[0], str) and isinstance(x[1], int):
        # (name, time) pair: one continuous variable for that sample.
        if x[0] in set(g.model.vars.input) | set(g.model.vars.env):
            # Input/environment signals are box-bounded to [-1, 1].
            lo, hi = -1, 1
        else:
            lo = hi = None
        kwargs = {'name': f"{x[0]}_{x[1]}", 'lb': lo, 'ub': hi}
        return (Variable(**kwargs), )
    # Otherwise x is a subformula: allocate its robustness variable.
    r_var = Variable(name=f"r{i}")
    if not isinstance(x, (stl.And, stl.Or)):
        return (r_var, )
    # And/Or additionally get one binary selector variable per argument.
    bool_vars = {
        arg: Variable(type='binary', name=f"p{i}_{j}")
        for j, arg in enumerate(x.args)
    }
    return (r_var, tuple(bool_vars.items()))
@singledispatch
def encode(psi, s, t):
    """Yield (Constraint, kind) pairs encoding STL formula ``psi`` at time ``t``.

    Concrete formula types are handled by registered overloads; reaching
    this base implementation means an unsupported node type.
    """
    raise NotImplementedError(psi)
@encode.register(stl.Neg)
def encode_neg(phi, s, t):
    """Negation: force r(phi) = -r(phi.arg), then encode the argument."""
    neg_constraint = Constraint(s[phi][0] + s[phi.arg][0], lb=0, ub=0)
    yield neg_constraint, K.NEG
    yield from encode(phi.arg, s, t)
@encode.register(stl.Next)
def encode_next(phi, s, t):
    """Next: same encoding as the argument, shifted one step in time."""
    for constraint in encode(phi.arg, s, t + 1):
        yield constraint
@encode.register(stl.ast._Top)
@encode.register(stl.ast._Bot)
def encode_top_bot(phi, s, t):
    """Boolean constants impose no constraints; yield nothing."""
    yield from ()
@encode.register(stl.LinEq)
def encode_lineq(psi, s, t):
    """Linear predicate: tie its robustness variable to the signed margin."""
    lhs = sum(float(term.coeff) * s[(term.id, t)][0] for term in psi.terms)
    r = s[psi >> t][0]
    if psi.op in ("<", "<="):
        expr = lhs + r
    elif psi.op in (">", ">="):
        expr = lhs - r
    else:
        raise NotImplementedError
    yield Constraint(expr, ub=psi.const, lb=psi.const), psi
def encode_op(phi: "SL", s, t, *, k: Kind, isor: bool):
    """Encode an n-ary And/Or with the big-M MILP construction.

    ``s`` maps formulas to their solver variables (see ``z``), ``k`` is the
    (per-element, total) constraint-kind pair, and ``isor`` selects whether
    the robustness variable is bounded below by each argument (Or / max)
    or above (And / min).
    """
    for psi in phi.args:
        yield from encode(psi, s, t)
    r_var, bool_vars = s[phi]
    bool_vars = dict(bool_vars)
    # Exactly one of the bool vars is active (the chosen argument).
    constr = Constraint(sum(bool_vars.values()), ub=1, lb=1)
    yield constr, k[1]
    # For each argument, bound r and relate it to the argument's r.
    elems = [s[psi][0] for psi in phi.args]
    for psi, e in zip(phi.args, elems):
        if isor:
            yield Constraint(e - r_var, ub=0), k[0]
        else:
            yield Constraint(r_var - e, ub=0), k[0]
        # Big-M pair: when psi's selector is 1, these force r == e.
        yield Constraint(e - (1 - bool_vars[psi]) * M - r_var, ub=0), phi
        yield Constraint(e + M * (1 - bool_vars[psi]) - r_var, lb=0), phi
# Register the shared And/Or encoder: isor=True makes r at least each
# argument's robustness (max/Or); isor=False makes it at most (min/And).
encode.register(stl.Or)(partial(encode_op, k=(K.OR, K.OR_TOTAL), isor=True))
encode.register(stl.And)(partial(
    encode_op, k=(K.AND, K.AND_TOTAL), isor=False))
def encode_dynamics(g, store):
    """Yield constraints tying each x[t+1] to the discretised dynamics."""
    A, B, C = g.model.dyn
    dt = g.model.dt
    # Adjust for discrete time: x[t+1] = (I + dt*A) x[t] + dt*B ... + dt*C ...
    A = np.eye(len(g.model.vars.state)) + dt * A
    B = dt * B
    C = dt * C
    for t in g.times[:-1]:
        yield from _encode_dynamics(A, B, C, g.model.vars, store, t)
def _encode_dynamics(A, B, C, var_lists, store, t):
    """Yield one equality constraint per state row at time ``t``."""
    # One right-hand side per state: the (A, B, C) row dotted with the
    # corresponding variable lists.
    rhses = [row_to_smt(zip(rows, var_lists), store, t)
             for rows in zip(A, B, C)]
    state_vars = var_lists[0]
    for v, rhs in zip(state_vars, rhses):
        lhs = store[v, t + 1][0]
        yield Constraint(lhs - rhs, lb=0, ub=0), (lhs, rhs)
def row_to_smt(rows_and_var_lists, store, t):
    """Dot each coefficient row with its variables at time ``t`` and sum all."""
    return sum(
        float(coeff) * store[var, t][0]
        for row, variables in rows_and_var_lists
        for coeff, var in zip(row, variables)
    )
| {
"repo_name": "mvcisback/py-blustl",
"path": "magnum/solvers/milp/robustness_encoding.py",
"copies": "2",
"size": "3770",
"license": "bsd-3-clause",
"hash": -4245749179118528500,
"line_mean": 25,
"line_max": 78,
"alpha_frac": 0.5713527851,
"autogenerated": false,
"ratio": 2.8071481757259864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9378500960825986,
"avg_score": 0,
"num_lines": 145
} |
from functools import partial, total_ordering
from django.db import connections, models, router
from django.db.models.deletion import Collector
from django.utils.encoding import python_2_unicode_compatible
import bleach
import six
import olympia.core.logger
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ManagerBase, ModelBase
from olympia.amo.urlresolvers import linkify_bounce_url_callback
from . import utils
log = olympia.core.logger.getLogger('z.translations')
class TranslationManager(ManagerBase):
    """Manager with helpers for working with translated fields."""

    def remove_for(self, obj, locale):
        """Remove a locale for the given object."""
        field_ids = [getattr(obj, field.attname)
                     for field in obj._meta.translated_fields]
        matching = Translation.objects.filter(
            id__in=[pk for pk in field_ids if pk], locale=locale)
        matching.update(localized_string=None, localized_string_clean=None)
@total_ordering
@python_2_unicode_compatible
class Translation(ModelBase):
    """
    Translation model.

    Use :class:`translations.fields.TranslatedField` instead of a plain foreign
    key to this model.
    """
    autoid = PositiveAutoField(primary_key=True)
    # ``id`` identifies a *string*, not a row: every locale of one string
    # shares the same id (hence the (id, locale) unique_together below).
    id = models.PositiveIntegerField()
    locale = models.CharField(max_length=10)
    localized_string = models.TextField(null=True)
    localized_string_clean = models.TextField(null=True)
    objects = TranslationManager()

    class Meta:
        db_table = 'translations'
        unique_together = ('id', 'locale')

    def __str__(self):
        return (
            six.text_type(self.localized_string) if self.localized_string
            else '')

    def __bool__(self):
        # __bool__ is called to evaluate an object in a boolean context.
        # We want Translations to be falsy if their string is empty.
        return (bool(self.localized_string) and
                bool(self.localized_string.strip()))
    __nonzero__ = __bool__  # Python 2 compatibility.

    def __lt__(self, other):
        if hasattr(other, 'localized_string'):
            return self.localized_string < other.localized_string
        else:
            return self.localized_string < other

    def __eq__(self, other):
        # Django implements an __eq__ that only checks pks. We need to check
        # the strings if we're dealing with existing vs. unsaved Translations.
        if hasattr(other, 'localized_string'):
            return self.localized_string == other.localized_string
        else:
            return self.localized_string == other

    def __hash__(self):
        # Fixed: overriding __eq__ without __hash__ makes instances
        # unhashable on Python 3. Hash the same value __eq__ compares.
        return hash(self.localized_string)

    def clean(self):
        if self.localized_string:
            self.localized_string = self.localized_string.strip()

    def save(self, **kwargs):
        self.clean()
        return super(Translation, self).save(**kwargs)

    def delete(self, using=None):
        # FIXME: if the Translation is the one used as default/fallback,
        # then deleting it will mean the corresponding field on the related
        # model will stay empty even if there are translations in other
        # languages!
        cls = self.__class__
        using = using or router.db_for_write(cls, instance=self)
        # Look for all translations for the same string (id=self.id) except the
        # current one (autoid=self.autoid).
        qs = cls.objects.filter(id=self.id).exclude(autoid=self.autoid)
        if qs.using(using).exists():
            # If other Translations for the same id exist, we just need to
            # delete this one and *only* this one, without letting Django
            # collect dependencies (it'd remove the others, which we want to
            # keep).
            assert self._get_pk_val() is not None
            collector = Collector(using=using)
            collector.collect([self], collect_related=False)
            # In addition, because we have FK pointing to a non-unique column,
            # we need to force MySQL to ignore constraints because it's dumb
            # and would otherwise complain even if there are remaining rows
            # that matches the FK.
            with connections[using].constraint_checks_disabled():
                collector.delete()
        else:
            # If no other Translations with that id exist, then we should let
            # django behave normally. It should find the related model and set
            # the FKs to NULL.
            return super(Translation, self).delete(using=using)
    delete.alters_data = True

    @classmethod
    def new(cls, string, locale, id=None):
        """
        Jumps through all the right hoops to create a new translation.

        If ``id`` is not given a new id will be created using
        ``translations_seq``. Otherwise, the id will be used to add strings to
        an existing translation.

        To increment IDs we use a setting on MySQL. This is to support multiple
        database masters -- it's just crazy enough to work! See bug 756242.
        """
        if id is None:
            # Get a sequence key for the new translation.
            with connections['default'].cursor() as cursor:
                cursor.execute("""
                    UPDATE translations_seq
                    SET id=LAST_INSERT_ID(
                        id + @@global.auto_increment_increment
                    )
                """)
                # The sequence table should never be empty. But alas, if it is,
                # let's fix it.
                if not cursor.rowcount > 0:
                    cursor.execute("""
                        INSERT INTO translations_seq (id)
                        VALUES(LAST_INSERT_ID(
                            id + @@global.auto_increment_increment
                        ))
                    """)
                cursor.execute('SELECT LAST_INSERT_ID()')
                id = cursor.fetchone()[0]
        # Update if one exists, otherwise create a new one.
        q = {'id': id, 'locale': locale}
        try:
            trans = cls.objects.get(**q)
            trans.localized_string = string
        except cls.DoesNotExist:
            trans = cls(localized_string=string, **q)
        return trans
@python_2_unicode_compatible
class PurifiedTranslation(Translation):
    """Run the string through bleach to get a safe version."""
    # Tags that survive cleaning; everything else is escaped.
    allowed_tags = [
        'a',
        'abbr',
        'acronym',
        'b',
        'blockquote',
        'code',
        'em',
        'i',
        'li',
        'ol',
        'strong',
        'ul',
    ]
    allowed_attributes = {
        'a': ['href', 'title', 'rel'],
        'abbr': ['title'],
        'acronym': ['title'],
    }

    class Meta:
        proxy = True

    def __str__(self):
        # Clean lazily on first render.
        if not self.localized_string_clean:
            self.clean()
        return six.text_type(self.localized_string_clean)

    def __html__(self):
        # ``__html__`` protocol: template engines treat this as safe markup.
        return six.text_type(self)

    def __truncate__(self, length, killwords, end):
        return utils.truncate(six.text_type(self), length, killwords, end)

    def clean(self):
        from olympia.amo.utils import clean_nl
        super(PurifiedTranslation, self).clean()
        cleaned = self.clean_localized_string()
        self.localized_string_clean = clean_nl(cleaned).strip()

    def clean_localized_string(self):
        # All links (text and markup) are normalized.
        linkify_filter = partial(
            bleach.linkifier.LinkifyFilter,
            callbacks=[linkify_bounce_url_callback, bleach.callbacks.nofollow])
        # Keep only the allowed tags and attributes, escape the rest.
        cleaner = bleach.Cleaner(
            tags=self.allowed_tags, attributes=self.allowed_attributes,
            filters=[linkify_filter])
        return cleaner.clean(six.text_type(self.localized_string))
class LinkifiedTranslation(PurifiedTranslation):
    """Run the string through bleach to get a linkified version."""
    # Only anchors survive; the inherited cleaner escapes all other markup.
    allowed_tags = ['a']

    class Meta:
        proxy = True
class NoLinksNoMarkupTranslation(LinkifiedTranslation):
    """Run the string through bleach, escape markup and strip all the links."""

    class Meta:
        proxy = True

    def clean_localized_string(self):
        # First pass: bleach everything, but leave links untouched.
        # Fixed: super() previously named LinkifiedTranslation instead of this
        # class; the MRO resolved to the same PurifiedTranslation method, but
        # the wrong name would break silently if LinkifiedTranslation ever
        # overrode clean_localized_string.
        cleaned = super(NoLinksNoMarkupTranslation, self).clean_localized_string()
        # Second pass: call linkify to empty the inner text of all links.
        emptied_links = bleach.linkify(
            cleaned, callbacks=[lambda attrs, new: {'_text': ''}])
        # Third pass: now strip links (only links will be stripped, other
        # forbidden tags are already bleached/escaped).
        allowed_tags = self.allowed_tags[:]  # Make a copy.
        allowed_tags.remove('a')
        return bleach.clean(emptied_links, tags=allowed_tags, strip=True)
class TranslationSequence(models.Model):
    """
    The translations_seq table, so migrations will create it during testing.
    """
    # Holds the last allocated translation id, bumped by the raw SQL in
    # Translation.new().
    id = models.IntegerField(primary_key=True)

    class Meta:
        db_table = 'translations_seq'
def delete_translation(obj, fieldname):
    """Null out ``obj``'s translation FK, then delete the Translation rows."""
    field = obj._meta.get_field(fieldname)
    translation_id = getattr(obj, field.attname)
    obj.update(**{field.name: None})
    if translation_id:
        Translation.objects.filter(id=translation_id).delete()
| {
"repo_name": "wagnerand/olympia",
"path": "src/olympia/translations/models.py",
"copies": "1",
"size": "9233",
"license": "bsd-3-clause",
"hash": 967747549479309300,
"line_mean": 33.7105263158,
"line_max": 79,
"alpha_frac": 0.6122603704,
"autogenerated": false,
"ratio": 4.278498609823911,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 266
} |
from functools import partial, total_ordering
from django.db import connections, models, router
from django.db.models.deletion import Collector
import bleach
from bleach.linkifier import URL_RE # build_url_re() with good defaults.
import olympia.core.logger
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ManagerBase, ModelBase
from olympia.amo.urlresolvers import linkify_bounce_url_callback
from . import utils
log = olympia.core.logger.getLogger('z.translations')
class TranslationManager(ManagerBase):
    """Manager with helpers for working with translated fields."""

    def remove_for(self, obj, locale):
        """Blank out ``locale``'s strings for every translated field on ``obj``."""
        translation_ids = [getattr(obj, field.attname)
                           for field in obj._meta.translated_fields]
        (Translation.objects
            .filter(id__in=filter(None, translation_ids), locale=locale)
            .update(localized_string=None, localized_string_clean=None))
@total_ordering
class Translation(ModelBase):
    """
    Translation model.

    Use :class:`translations.fields.TranslatedField` instead of a plain foreign
    key to this model.
    """
    autoid = PositiveAutoField(primary_key=True)
    # ``id`` identifies a *string*, not a row: every locale of one string
    # shares the same id (hence the (id, locale) unique constraint below).
    id = models.PositiveIntegerField()
    locale = models.CharField(max_length=10)
    localized_string = models.TextField(null=True)
    localized_string_clean = models.TextField(null=True)
    objects = TranslationManager()

    class Meta:
        db_table = 'translations'
        constraints = [
            models.UniqueConstraint(fields=('id', 'locale'), name='id'),
        ]

    def __str__(self):
        return str(self.localized_string) if self.localized_string else ''

    def __bool__(self):
        # __bool__ is called to evaluate an object in a boolean context.
        # We want Translations to be falsy if their string is empty.
        return bool(self.localized_string) and bool(self.localized_string.strip())
    __nonzero__ = __bool__  # Python 2 compatibility.

    def __lt__(self, other):
        if hasattr(other, 'localized_string'):
            return self.localized_string < other.localized_string
        else:
            return self.localized_string < other

    def __eq__(self, other):
        # Django implements an __eq__ that only checks pks. We need to check
        # the strings if we're dealing with existing vs. unsaved Translations.
        if hasattr(other, 'localized_string'):
            return self.localized_string == other.localized_string
        else:
            return self.localized_string == other

    def __hash__(self):
        # Hash the same value __eq__ compares, so equal objects hash equal.
        return hash(self.localized_string)

    def clean(self):
        if self.localized_string:
            self.localized_string = self.localized_string.strip()

    def save(self, **kwargs):
        self.clean()
        return super(Translation, self).save(**kwargs)

    def delete(self, using=None):
        # FIXME: if the Translation is the one used as default/fallback,
        # then deleting it will mean the corresponding field on the related
        # model will stay empty even if there are translations in other
        # languages!
        cls = self.__class__
        using = using or router.db_for_write(cls, instance=self)
        # Look for all translations for the same string (id=self.id) except the
        # current one (autoid=self.autoid).
        qs = cls.objects.filter(id=self.id).exclude(autoid=self.autoid)
        if qs.using(using).exists():
            # If other Translations for the same id exist, we just need to
            # delete this one and *only* this one, without letting Django
            # collect dependencies (it'd remove the others, which we want to
            # keep).
            assert self._get_pk_val() is not None
            collector = Collector(using=using)
            collector.collect([self], collect_related=False)
            # In addition, because we have FK pointing to a non-unique column,
            # we need to force MySQL to ignore constraints because it's dumb
            # and would otherwise complain even if there are remaining rows
            # that matches the FK.
            with connections[using].constraint_checks_disabled():
                collector.delete()
        else:
            # If no other Translations with that id exist, then we should let
            # django behave normally. It should find the related model and set
            # the FKs to NULL.
            return super(Translation, self).delete(using=using)
    delete.alters_data = True

    @classmethod
    def new(cls, string, locale, id=None):
        """
        Jumps through all the right hoops to create a new translation.

        If ``id`` is not given a new id will be created using
        ``translations_seq``. Otherwise, the id will be used to add strings to
        an existing translation.

        To increment IDs we use a setting on MySQL. This is to support multiple
        database masters -- it's just crazy enough to work! See bug 756242.
        """
        if id is None:
            # Get a sequence key for the new translation.
            with connections['default'].cursor() as cursor:
                cursor.execute(
                    """
                    UPDATE `translations_seq`
                    SET `id`=LAST_INSERT_ID(
                        `id` + @@global.auto_increment_increment
                    )
                """
                )
                # The sequence table should never be empty. But alas, if it is,
                # let's fix it.
                if not cursor.rowcount > 0:
                    cursor.execute(
                        """
                        INSERT INTO `translations_seq` (`id`)
                        VALUES(LAST_INSERT_ID(
                            `id` + @@global.auto_increment_increment
                        ))
                    """
                    )
                cursor.execute('SELECT LAST_INSERT_ID()')
                id = cursor.fetchone()[0]
        # Update if one exists, otherwise create a new one.
        q = {'id': id, 'locale': locale}
        try:
            trans = cls.objects.get(**q)
            trans.localized_string = string
        except cls.DoesNotExist:
            trans = cls(localized_string=string, **q)
        return trans
class PurifiedTranslation(Translation):
    """Run the string through bleach to get a safe version."""
    # Tags that survive cleaning; everything else is escaped.
    allowed_tags = [
        'a',
        'abbr',
        'acronym',
        'b',
        'blockquote',
        'code',
        'em',
        'i',
        'li',
        'ol',
        'strong',
        'ul',
    ]
    allowed_attributes = {
        'a': ['href', 'title', 'rel'],
        'abbr': ['title'],
        'acronym': ['title'],
    }

    class Meta:
        proxy = True

    def __str__(self):
        # Clean lazily on first render.
        if not self.localized_string_clean:
            self.clean()
        return str(self.localized_string_clean)

    def __html__(self):
        # ``__html__`` protocol: template engines treat this as safe markup.
        return str(self)

    def __truncate__(self, length, killwords, end):
        return utils.truncate(str(self), length, killwords, end)

    def clean(self):
        from olympia.amo.utils import clean_nl
        super(PurifiedTranslation, self).clean()
        cleaned = self.clean_localized_string()
        self.localized_string_clean = clean_nl(cleaned).strip()

    def clean_localized_string(self):
        # All links (text and markup) are normalized.
        linkify_filter = partial(
            bleach.linkifier.LinkifyFilter,
            callbacks=[linkify_bounce_url_callback, bleach.callbacks.nofollow],
        )
        # Keep only the allowed tags and attributes, escape the rest.
        cleaner = bleach.Cleaner(
            tags=self.allowed_tags,
            attributes=self.allowed_attributes,
            filters=[linkify_filter],
        )
        return cleaner.clean(str(self.localized_string))
class LinkifiedTranslation(PurifiedTranslation):
    """Run the string through bleach to get a linkified version."""
    # Only anchors survive; the inherited cleaner escapes all other markup.
    allowed_tags = ['a']

    class Meta:
        proxy = True
class NoURLsTranslation(Translation):
    """Regular translation model, but with URLs stripped."""

    class Meta:
        proxy = True

    def __str__(self):
        # Clean string if that hasn't been done already, like
        # PurifiedTranslation does. Unlike PurifiedTranslation though, this
        # class doesn't implement __html__(), because it's designed to contain
        # only text. This means that it should be escaped by templates and API
        # clients, as it can contain raw HTML.
        if not self.localized_string_clean and self.localized_string:
            self.clean()
        # Fixed: when localized_string is empty, clean() is skipped and
        # localized_string_clean stays None -- str(None) used to render the
        # literal string "None".  Return '' instead, like Translation does.
        return str(self.localized_string_clean) if self.localized_string_clean else ''

    def clean(self):
        # URL_RE is the regexp used by bleach to detect URLs to linkify them,
        # in our case we use it to find them and replace them with nothing.
        # It's more effective/aggressive than something like r'http\S+', it can
        # also detect things like foo.com.
        self.localized_string_clean = URL_RE.sub('', self.localized_string).strip()
class TranslationSequence(models.Model):
    """
    The translations_seq table, so migrations will create it during testing.
    """
    # Holds the last allocated translation id, bumped by the raw SQL in
    # Translation.new().
    id = models.IntegerField(primary_key=True)

    class Meta:
        db_table = 'translations_seq'
def delete_translation(obj, fieldname):
    """Null out ``obj``'s translation FK, then delete the Translation rows."""
    field = obj._meta.get_field(fieldname)
    translation_id = getattr(obj, field.attname)
    obj.update(**{field.name: None})
    if translation_id:
        Translation.objects.filter(id=translation_id).delete()
| {
"repo_name": "mozilla/addons-server",
"path": "src/olympia/translations/models.py",
"copies": "1",
"size": "9497",
"license": "bsd-3-clause",
"hash": 7924239109088616000,
"line_mean": 33.285198556,
"line_max": 83,
"alpha_frac": 0.6057702432,
"autogenerated": false,
"ratio": 4.29923042100498,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.540500066420498,
"avg_score": null,
"num_lines": null
} |
from functools import partial, total_ordering
from django.db import connections, models, router
from django.db.models.deletion import Collector
import bleach
import olympia.core.logger
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ManagerBase, ModelBase
from olympia.amo.urlresolvers import linkify_bounce_url_callback
from . import utils
log = olympia.core.logger.getLogger('z.translations')
class TranslationManager(ManagerBase):
    """Manager with helpers for working with translated fields."""

    def remove_for(self, obj, locale):
        """Remove a locale for the given object."""
        pks = [getattr(obj, field.attname)
               for field in obj._meta.translated_fields]
        queryset = Translation.objects.filter(
            id__in=[pk for pk in pks if pk], locale=locale)
        queryset.update(localized_string=None, localized_string_clean=None)
@total_ordering
class Translation(ModelBase):
    """
    Translation model.

    One logical string is stored as several rows that share the same ``id``
    (one row per ``locale``); ``autoid`` is the actual primary key.

    Use :class:`translations.fields.TranslatedField` instead of a plain foreign
    key to this model.
    """
    autoid = PositiveAutoField(primary_key=True)
    # ``id`` is shared by every locale of the same logical string; it is not
    # unique on its own (see the (id, locale) constraint below).
    id = models.PositiveIntegerField()
    locale = models.CharField(max_length=10)
    localized_string = models.TextField(null=True)
    # Sanitized variant, filled in by the Purified/Linkified proxy subclasses.
    localized_string_clean = models.TextField(null=True)
    objects = TranslationManager()

    class Meta:
        db_table = 'translations'
        constraints = [
            models.UniqueConstraint(fields=('id', 'locale'), name='id'),
        ]

    def __str__(self):
        return str(self.localized_string) if self.localized_string else ''

    def __bool__(self):
        # __bool__ is called to evaluate an object in a boolean context.
        # We want Translations to be falsy if their string is empty.
        return bool(self.localized_string) and bool(self.localized_string.strip())
    __nonzero__ = __bool__  # Python 2 compatibility.

    def __lt__(self, other):
        # Order by the underlying strings; @total_ordering derives the rest
        # from this and __eq__.
        if hasattr(other, 'localized_string'):
            return self.localized_string < other.localized_string
        else:
            return self.localized_string < other

    def __eq__(self, other):
        # Django implements an __eq__ that only checks pks. We need to check
        # the strings if we're dealing with existing vs. unsaved Translations.
        if hasattr(other, 'localized_string'):
            return self.localized_string == other.localized_string
        else:
            return self.localized_string == other

    def __hash__(self):
        # Consistent with __eq__: hash on the string, not the pk.
        return hash(self.localized_string)

    def clean(self):
        if self.localized_string:
            self.localized_string = self.localized_string.strip()

    def save(self, **kwargs):
        self.clean()
        return super(Translation, self).save(**kwargs)

    def delete(self, using=None):
        # FIXME: if the Translation is the one used as default/fallback,
        # then deleting it will mean the corresponding field on the related
        # model will stay empty even if there are translations in other
        # languages!
        cls = self.__class__
        using = using or router.db_for_write(cls, instance=self)
        # Look for all translations for the same string (id=self.id) except the
        # current one (autoid=self.autoid).
        qs = cls.objects.filter(id=self.id).exclude(autoid=self.autoid)
        if qs.using(using).exists():
            # If other Translations for the same id exist, we just need to
            # delete this one and *only* this one, without letting Django
            # collect dependencies (it'd remove the others, which we want to
            # keep).
            assert self._get_pk_val() is not None
            collector = Collector(using=using)
            collector.collect([self], collect_related=False)
            # In addition, because we have FK pointing to a non-unique column,
            # we need to force MySQL to ignore constraints because it's dumb
            # and would otherwise complain even if there are remaining rows
            # that matches the FK.
            with connections[using].constraint_checks_disabled():
                collector.delete()
        else:
            # If no other Translations with that id exist, then we should let
            # django behave normally. It should find the related model and set
            # the FKs to NULL.
            return super(Translation, self).delete(using=using)
    delete.alters_data = True

    @classmethod
    def new(cls, string, locale, id=None):
        """
        Jumps through all the right hoops to create a new translation.

        If ``id`` is not given a new id will be created using
        ``translations_seq``. Otherwise, the id will be used to add strings to
        an existing translation.

        To increment IDs we use a setting on MySQL. This is to support multiple
        database masters -- it's just crazy enough to work! See bug 756242.

        Note: the returned instance is NOT saved.
        """
        if id is None:
            # Get a sequence key for the new translation.
            with connections['default'].cursor() as cursor:
                cursor.execute(
                    """
                    UPDATE `translations_seq`
                    SET `id`=LAST_INSERT_ID(
                        `id` + @@global.auto_increment_increment
                    )
                    """
                )
                # The sequence table should never be empty. But alas, if it is,
                # let's fix it.
                if not cursor.rowcount > 0:
                    cursor.execute(
                        """
                        INSERT INTO `translations_seq` (`id`)
                        VALUES(LAST_INSERT_ID(
                            `id` + @@global.auto_increment_increment
                        ))
                        """
                    )
                cursor.execute('SELECT LAST_INSERT_ID()')
                id = cursor.fetchone()[0]
        # Update if one exists, otherwise create a new one.
        q = {'id': id, 'locale': locale}
        try:
            trans = cls.objects.get(**q)
            trans.localized_string = string
        except cls.DoesNotExist:
            trans = cls(localized_string=string, **q)
        return trans
class PurifiedTranslation(Translation):
    """Translation whose clean value is bleach-sanitised, linkified HTML."""

    # Markup surviving sanitisation; everything else is escaped.
    allowed_tags = [
        'a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
        'em', 'i', 'li', 'ol', 'strong', 'ul',
    ]
    allowed_attributes = {
        'a': ['href', 'title', 'rel'],
        'abbr': ['title'],
        'acronym': ['title'],
    }

    class Meta:
        proxy = True

    def __str__(self):
        # Compute the sanitised string lazily, on first access.
        if not self.localized_string_clean:
            self.clean()
        return str(self.localized_string_clean)

    def __html__(self):
        return str(self)

    def __truncate__(self, length, killwords, end):
        return utils.truncate(str(self), length, killwords, end)

    def clean(self):
        from olympia.amo.utils import clean_nl
        super(PurifiedTranslation, self).clean()
        self.localized_string_clean = clean_nl(self.clean_localized_string()).strip()

    def clean_localized_string(self):
        """Sanitise ``localized_string``: keep only whitelisted markup,
        escape the rest, and normalise every link (bounce URL + nofollow)."""
        linkifier = partial(
            bleach.linkifier.LinkifyFilter,
            callbacks=[linkify_bounce_url_callback, bleach.callbacks.nofollow],
        )
        sanitizer = bleach.Cleaner(
            tags=self.allowed_tags,
            attributes=self.allowed_attributes,
            filters=[linkifier],
        )
        return sanitizer.clean(str(self.localized_string))
class LinkifiedTranslation(PurifiedTranslation):
    """Run the string through bleach to get a linkified version."""
    # Only anchors survive sanitisation; every other tag is escaped.
    allowed_tags = ['a']

    class Meta:
        proxy = True
class NoLinksNoMarkupTranslation(LinkifiedTranslation):
    """Run the string through bleach, escape markup and strip all the links."""

    class Meta:
        proxy = True

    def clean_localized_string(self):
        # First pass: bleach everything, but leave links untouched.
        # super(LinkifiedTranslation, self) resolves to PurifiedTranslation's
        # implementation; allowed_tags is still ['a'] (inherited from
        # LinkifiedTranslation), so anchors survive this pass.
        cleaned = super(LinkifiedTranslation, self).clean_localized_string()
        # Second pass: call linkify to empty the inner text of all links.
        emptied_links = bleach.linkify(
            cleaned, callbacks=[lambda attrs, new: {'_text': ''}]
        )
        # Third pass: now strip links (only links will be stripped, other
        # forbidden tags are already bleached/escaped.
        allowed_tags = self.allowed_tags[:]  # Make a copy.
        allowed_tags.remove('a')
        return bleach.clean(emptied_links, tags=allowed_tags, strip=True)
class TranslationSequence(models.Model):
    """
    The translations_seq table, so migrations will create it during testing.

    Holds the counter that :meth:`Translation.new` bumps to allocate shared
    translation ids.
    """
    id = models.IntegerField(primary_key=True)

    class Meta:
        db_table = 'translations_seq'
def delete_translation(obj, fieldname):
    """Clear the translated field ``fieldname`` on ``obj`` and delete the
    backing Translation rows (all locales sharing that id)."""
    field = obj._meta.get_field(fieldname)
    translation_id = getattr(obj, field.attname)
    # Null the FK first so the model no longer points at the rows.
    obj.update(**{field.name: None})
    if translation_id:
        Translation.objects.filter(id=translation_id).delete()
| {
"repo_name": "wagnerand/addons-server",
"path": "src/olympia/translations/models.py",
"copies": "2",
"size": "9239",
"license": "bsd-3-clause",
"hash": -3960415077766215000,
"line_mean": 32.7189781022,
"line_max": 82,
"alpha_frac": 0.6033120468,
"autogenerated": false,
"ratio": 4.297209302325581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0000890287291366059,
"num_lines": 274
} |
from functools import partial, total_ordering
import attr
import funcy as fn
from lazytree import LazyTree
from monotone_bipartition import hausdorff as mbph
from monotone_bipartition import rectangles # unit_rec
from monotone_bipartition import refine as mbpr # bounding_box
from monotone_bipartition import search as mdts # binsearch
@attr.s
@total_ordering
class BiPartition:
    """A monotone bipartition of the unit box, represented lazily.

    ``tree`` is a LazyTree of rectangles refining the boundary between the
    two regions induced by the threshold oracle ``func``.
    """
    tree: LazyTree = attr.ib()
    func = attr.ib()

    @property
    def dim(self):
        # Dimension = number of intervals in the root rectangle.
        return len(self.tree.view())

    def approx(self, tol=1e-4):
        """Boundary rectangles refined until every shortest edge is <= tol."""
        recs = self.tree \
            .prune(isleaf=lambda r: r.shortest_edge <= tol) \
            .leaves()
        return list(recs)

    def dist(self, other, tol=1e-4) -> rectangles.Interval:
        """First Hausdorff-distance interval estimate with radius < tol."""
        approxes = mbph.gen_dists(self, other)
        within_tol = (i for i in approxes if i.radius < tol)
        return fn.first(within_tol)

    def label(self, point) -> bool:
        """Return which side of the boundary ``point`` lies on."""
        # TODO: Should support either finite precision or max depth.
        domain = rectangles.unit_rec(self.dim)

        def above(rec):
            return point in domain.forward_cone(rec.bot) and \
                point not in domain.backward_cone(rec.top)

        def below(rec):
            return point not in domain.forward_cone(rec.bot) and \
                point in domain.backward_cone(rec.top)

        def not_inside(rec):
            return point not in domain.forward_cone(rec.bot) or \
                point not in domain.backward_cone(rec.top)

        # Only descend into rectangles that may still contain the point.
        recs = self.tree.prune(isleaf=not_inside).bfs()
        for rec in recs:
            if above(rec):
                return True
            if not not_inside(rec):
                # Point lies inside this rectangle.
                if all(x == 0 for x in rec.diag):  # point rec.
                    return True
            elif below(rec):
                return all(x == 0 for x in rec.bot)
        raise RuntimeError("Point outside domain?!?!?!")

    def __le__(self, other):
        # Required by @total_ordering; ordering is not implemented yet.
        raise NotImplementedError

    def project(self, point_or_ordering, *,
                lexicographic=False, tol=1e-4, percent=False):
        """
        If lexicographic is False, then returns an approximation
        to the *unique* point that intersect the threshold
        boundary AND the line intersecting the origin and the
        user defined point.

        If lexicographic is True, then returns an approximation to the
        minimum point on the threshold boundary that is minimum in the
        ordering provided. The ordering is given as a list of pairs:
        `(index, minimize)`. The index is minimized if `minimize` is
        true, and maximized otherwise. For example, `[(1, False), (0,
        True)]` encodes maximizing along axis 1 of the unit box and
        then minimizing along axis 0.
        """
        if lexicographic:
            assert not percent
            return mdts.lexicographic_opt(self.func, point_or_ordering, tol)
        else:
            return mdts.line_intersect(
                self.func, point_or_ordering, tol, percent=percent)
def from_threshold(func, dim: int, *, memoize_nodes=True,
                   find_intersect=mdts.binsearch) -> BiPartition:
    """Build a BiPartition of the ``dim``-dimensional unit box for the
    threshold oracle ``func``."""
    root = mbpr.bounding_box(
        oracle=func,
        domain=rectangles.unit_rec(dim),
        find_intersect=find_intersect,
    )
    # Children are produced by refining each rectangle along its diagonal.
    subdivide = partial(
        mbpr.refine, diagsearch=partial(find_intersect, oracle=func))
    if memoize_nodes:
        # Avoid recomputing children when the tree is traversed repeatedly.
        subdivide = fn.memoize()(subdivide)
    return BiPartition(LazyTree(root=root, child_map=subdivide), func)
| {
"repo_name": "mvcisback/multidim-threshold",
"path": "monotone_bipartition/bipartition.py",
"copies": "1",
"size": "3594",
"license": "mit",
"hash": -5136908712207879000,
"line_mean": 32.9056603774,
"line_max": 76,
"alpha_frac": 0.6199220924,
"autogenerated": false,
"ratio": 3.8980477223427332,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5017969814742733,
"avg_score": null,
"num_lines": null
} |
from functools import partial, update_wrapper
from getpass import getpass
import logging
import os
import sys
from types import MethodType
from proboscis import TestProgram
from gmusicapi.clients import Webclient, Musicmanager
from gmusicapi.protocol.musicmanager import credentials_from_refresh_token
from gmusicapi.test import local_tests, server_tests
from gmusicapi.test.utils import NoticeLogging
# Fake uploader identity used under Travis: its VMs lack a stable MAC
# address, which Musicmanager needs as an uploader id.
travis_id = 'E9:40:01:0E:51:7A'
travis_name = "Travis-CI (gmusicapi)"

# pretend to use test modules to appease flake8
# these need to be imported for implicit test discovery
_, _ = local_tests, server_tests
def freeze_login_details():
    """Searches the environment for credentials, and freezes them to
    client.login if found.

    If no auth is present in the env, the user is prompted. OAuth is read from
    the default path.

    If running on Travis, the prompt will never be fired; sys.exit is called
    if the envvars are not present.
    """
    # Attempt to get auth from environ.
    user, passwd, refresh_tok = [os.environ.get(name) for name in
                                 ('GM_USER',
                                  'GM_PASS',
                                  'GM_OAUTH')]
    on_travis = os.environ.get('TRAVIS')
    mm_kwargs = {}
    wc_kwargs = {}
    has_env_auth = user and passwd and refresh_tok
    if not has_env_auth and on_travis:
        print 'on Travis but could not read auth from environ; quitting.'
        sys.exit(1)
    if os.environ.get('TRAVIS'):
        # Travis runs on VMs with no "real" mac - we have to provide one.
        mm_kwargs.update({'uploader_id': travis_id,
                          'uploader_name': travis_name})
    if has_env_auth:
        wc_kwargs.update({'email': user, 'password': passwd})
        # mm expects a full OAuth2Credentials object
        credentials = credentials_from_refresh_token(refresh_tok)
        mm_kwargs.update({'oauth_credentials': credentials})
    else:
        # no travis, no credentials
        # we need to login here to verify their credentials.
        # the authenticated api is then thrown away.
        wclient = Webclient()
        valid_auth = False
        print ("These tests will never delete or modify your music."
               "\n\n"
               "If the tests fail, you *might* end up with a test"
               " song/playlist in your library, though."
               "You must have oauth credentials stored at the default"
               " path by Musicmanager.perform_oauth prior to running.")
        while not valid_auth:
            print
            email = raw_input("Email: ")
            passwd = getpass()
            valid_auth = wclient.login(email, passwd)
            wc_kwargs.update({'email': email, 'password': passwd})
    # globally freeze our params in place.
    # they can still be overridden manually; they're just the defaults now.
    # Three-arg MethodType(func, None, cls) makes the partial an unbound
    # method (Python 2), so future instances get the frozen kwargs.
    Musicmanager.login = MethodType(
        update_wrapper(partial(Musicmanager.login, **mm_kwargs), Musicmanager.login),
        None, Musicmanager
    )
    Webclient.login = MethodType(
        update_wrapper(partial(Webclient.login, **wc_kwargs), Webclient.login),
        None, Webclient
    )
def main():
    """Run the proboscis test suite, failing the build on logged warnings."""
    # Local-only runs need no server credentials.
    if '--group=local' not in sys.argv:
        freeze_login_details()
    root_logger = logging.getLogger('gmusicapi')
    # using DynamicClientLoggers eliminates the need for root handlers
    # configure_debug_log_handlers(root_logger)
    # warnings typically signal a change in protocol,
    # so fail the build if anything >= warning are sent,
    noticer = NoticeLogging()
    noticer.setLevel(logging.WARNING)
    root_logger.addHandler(noticer)
    # proboscis does not have an exit=False equivalent,
    # so SystemExit must be caught instead (we need
    # to check the log noticer)
    try:
        TestProgram().run_and_exit()
    except SystemExit as e:
        print
        if noticer.seen_message:
            print '(failing build due to log warnings)'
            sys.exit(1)
        if e.code is not None:
            sys.exit(e.code)


if __name__ == '__main__':
    main()
| {
"repo_name": "jimyx17/gmusic",
"path": "gmusicapi/test/run_tests.py",
"copies": "1",
"size": "4107",
"license": "bsd-3-clause",
"hash": -6006899996860778000,
"line_mean": 30.3511450382,
"line_max": 85,
"alpha_frac": 0.6359873387,
"autogenerated": false,
"ratio": 4.0146627565982405,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000782422545152531,
"num_lines": 131
} |
from functools import partial, update_wrapper
import inspect
import logging
from django.conf.urls import url, include
from django.core.urlresolvers import resolve
from django.http.response import HttpResponseNotAllowed, HttpResponse,\
HttpResponseBase
from django.template.response import TemplateResponse
from django.utils.decorators import classonlymethod
from .exceptions import ActionResponse
# Module-level logger for actionviews (used by BaseView.http_method_not_allowed).
logger = logging.getLogger('django.actionviews')
class ContextMixin(object):
    """A default context mixin that handles current action and its parent and
    passes the result as the template context.
    """

    def get_context_data(self, **kwargs):
        self.context = {}
        if 'parent_action' in kwargs:
            # Run the parent action first and seed the context with its data.
            names = kwargs.pop('parent_params')
            parent_kwargs = {name: kwargs.pop(name) for name in names}
            parent_action = kwargs.pop('parent_action')
            self.context.update(parent_action(self.request, **parent_kwargs))
        result = self.action(**kwargs)
        if isinstance(result, HttpResponseBase):
            # Actions may short-circuit by returning a ready response.
            raise ActionResponse(result)
        self.context.update(result)
        return self.context
class ActionViewMeta(type):
    """Metaclass that discovers action methods and derives URL patterns for
    them from each method's signature (via ``inspect``)."""

    def __new__(cls, name, bases, attrs):
        # construct class to access parent attributes
        type_new = type.__new__(cls, name, bases, attrs)
        # get action_method_prefix
        action_method_prefix = type_new.action_method_prefix
        # find action names and corresponding methods or use defined map
        if 'actions' in attrs:
            actions = {name: getattr(type_new, attr_name)
                       for name, attr_name in attrs['actions'].items()}
        else:
            actions = {}
            for attr_name in dir(type_new):
                if attr_name.startswith(action_method_prefix):
                    action_name = attr_name[len(action_method_prefix):]
                    # avoid empty action_name
                    if action_name:
                        actions[action_name] = getattr(type_new, attr_name)
        type_new.actions = actions
        # construct urls if there is no custom urls defined
        if 'urls' not in attrs:
            urls = []
            for action_name, action_method in type_new.actions.items():
                param_names = []
                regex_chunks = []
                default_values = {}
                parameters = inspect.signature(
                    action_method).parameters.values()
                for parameter in parameters:
                    if parameter.name == 'self':
                        # An annotation on ``self`` overrides the URL prefix
                        # for this action; an empty string means no prefix.
                        group_name = (parameter.annotation is inspect._empty
                                      and action_name
                                      or parameter.annotation)
                        sep = group_name and '/'
                        regex_chunks.append(r'{}{}'.format(group_name, sep))
                        continue
                    group_name = parameter.name
                    param_names.append(group_name)
                    # A parameter annotation supplies a custom group regex.
                    if parameter.annotation is inspect._empty:
                        group_regex = type_new.default_group_regex
                    else:
                        group_regex = parameter.annotation
                    # A parameter default makes its URL group optional.
                    if parameter.default is inspect._empty:
                        group_format = type_new.group_format
                    else:
                        default_values[parameter.name] = parameter.default
                        group_format = r'({})?'.format(type_new.group_format)
                    regex_chunks.append(group_format.format(
                        group_name=group_name, group_regex=group_regex))
                url_regex = r'^{}'.format(''.join(regex_chunks))
                action_method.name = action_name
                if hasattr(action_method, 'child_view'):
                    # Nested view: include its urls (no trailing '$') and
                    # pass the parent action/params down via url kwargs.
                    view = include(action_method.child_view.urls)
                    default_values.update({
                        'parent_action': type_new.as_parent_action(
                            action_method),
                        'parent_params': param_names,
                    })
                else:
                    url_regex += r'$'
                    view = type_new.as_view(action_method)
                urls.append(url(
                    regex=url_regex,
                    view=view,
                    kwargs=default_values,
                    name=action_name))
            type_new.urls = urls
        return type_new
class BaseView(metaclass=ActionViewMeta):
    """Base action view: ``do_*`` methods become URL-routed actions
    (see ActionViewMeta)."""
    action_method_prefix = 'do_'
    group_format = r'{group_name}/(?P<{group_name}>{group_regex})/'
    default_group_regex = r'[\w\d]+'
    http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head',
                         'options', 'trace']

    @classonlymethod
    def as_parent_action(cls, action_method):  # @NoSelf
        """Action method to parent action factory.
        """
        def action(request, *args, **kwargs):
            # get view class instance
            self = cls()
            # set instance attributes
            self.request = request
            self.args = args
            self.kwargs = kwargs
            # return parent data
            return action_method(self, **kwargs)
        # make action look like actual action_method
        update_wrapper(action, action_method)
        return action

    @classonlymethod
    def as_view(cls, action):  # @NoSelf
        """Action method to view factory.
        """
        def view(request, *args, **kwargs):
            # get view class instance
            self = cls()
            # set instance attributes
            self.request = request
            self.args = args
            self.kwargs = kwargs
            # make self.action look like actual action method
            self.action = partial(action, self)
            update_wrapper(self.action, action)
            # dispatch request
            return self.dispatch(request, *args, **kwargs)
        # make view look like action
        update_wrapper(view, action)
        return view

    def dispatch(self, request, *args, **kwargs):
        """Try to dispatch to the right action; defer to the error handler if
        the request method isn't on the approved list.
        """
        # Actions may restrict methods via an ``allowed_methods`` attribute.
        http_method_names = getattr(
            self.action, 'allowed_methods', self.http_method_names)
        if request.method.lower() in http_method_names:
            handler = getattr(
                self,
                request.method.lower(),
                self.http_method_not_allowed)
        else:
            handler = self.http_method_not_allowed
        try:
            return handler(request, *args, **kwargs)
        except ActionResponse as e:
            # Actions can short-circuit with a ready-made response.
            return e.response

    def http_method_not_allowed(self, request, *args, **kwargs):
        # Log and answer 405 with the methods this action does allow.
        logger.warning(
            'Method Not Allowed (%s): %s',
            request.method,
            request.path,
            extra={
                'status_code': 405,
                'request': self.request
            }
        )
        return HttpResponseNotAllowed(self._allowed_methods())

    def options(self, request, *args, **kwargs):
        """
        Handles responding to requests for the OPTIONS HTTP verb.
        """
        response = HttpResponse()
        response['Allow'] = ', '.join(self._allowed_methods())
        response['Content-Length'] = '0'
        return response

    def _allowed_methods(self):
        # Upper-cased methods permitted for this action AND implemented
        # as handlers on the view.
        return map(str.upper,
                   filter(partial(hasattr, self),
                          getattr(
                              self.action,
                              'allowed_methods',
                              self.http_method_names)))
class View(BaseView, ContextMixin):
    """Concrete action view: BaseView dispatch plus default context building."""
    pass
class TemplateResponseMixin(object):
    """
    A mixin that can be used to render a template.
    """
    # Placeholders are filled per-request in get_template_names().
    template_name = '{namespace}/{view_name}/{action_name}.html'
    response_class = TemplateResponse
    content_type = None

    def render_to_response(self, context, **response_kwargs):
        """
        Returns a response, using the `response_class` for this
        view, with a template rendered with the given context.

        If any keyword arguments are provided, they will be
        passed to the constructor of the response class.
        """
        response_kwargs.setdefault('content_type', self.content_type)
        return self.response_class(
            request=self.request,
            template=self.get_template_names(),
            context=context,
            **response_kwargs
        )

    def get_template_names(self):
        """
        Returns a list of template names parsed from template_name using action
        method name to be used for the request. Must return a list. May not be
        called if render_to_response is overridden.
        """
        # strip('/') drops empty leading components (e.g. no URL namespace).
        return [self.template_name.format_map({
            'namespace': resolve(self.request.path).namespace,
            'view_name': self.__class__.__name__,
            'action_name': self.action.name,
        }).strip('/')]
class TemplateView(TemplateResponseMixin, View):
    """
    A view that renders a template. This view will also pass into the context
    any keyword arguments passed by the url conf.
    """

    def get(self, request, *args, **kwargs):
        return self.render_to_response(self.get_context_data(**kwargs))

    # GET, POST and HEAD all share the same handler.
    post = head = get
class DummyView(View):
    """Test-only view: returns the raw context dict instead of a response."""

    def get(self, request, *args, **kwargs):
        return self.get_context_data(**kwargs)

    # GET, POST and HEAD all share the same handler.
    post = head = get
| {
"repo_name": "lig/django-actionviews",
"path": "actionviews/base.py",
"copies": "1",
"size": "9751",
"license": "bsd-3-clause",
"hash": 7132266799330091000,
"line_mean": 30.6590909091,
"line_max": 79,
"alpha_frac": 0.5578914983,
"autogenerated": false,
"ratio": 4.67673860911271,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.573463010741271,
"avg_score": null,
"num_lines": null
} |
from functools import partial, update_wrapper
class SelfWrapper:
    """Exposes a fixed whitelist of class-level attributes of a wrapped object.

    Plain values are returned as-is, callables come back bound to the wrapped
    instance, and properties are evaluated against it.
    """

    _ATTRIBUTES = "Attributes"

    def __init__(self, wrapped, attributelist):
        self._attributelist = attributelist
        self._wrapped = wrapped
        self._cls = type(wrapped)

    def _get_prop(self, attr):
        # Deliberately resolves only through the class' own __dict__.
        return self._cls.__dict__[attr]

    def __getattr__(self, attr):
        if attr not in self._attributelist:
            raise AttributeError(attr)
        prop = self._get_prop(attr)
        if isinstance(prop, property):
            # Evaluate properties against the wrapped instance.
            return prop.fget(self._wrapped)
        if callable(prop):
            # Bind methods to the wrapped instance, keeping their metadata.
            bound = partial(prop, self._wrapped)
            update_wrapper(wrapped=prop, wrapper=bound)
            return bound
        return prop

    def __dir__(self):
        return self._attributelist

    def __str__(self):
        return "{} of {}".format(self._ATTRIBUTES, self._wrapped)

    def __repr__(self):
        return str(self)
class ChildrenWrapper(SelfWrapper):
    # Same whitelisted access as SelfWrapper, but attributes resolve to
    # child paths of the wrapped object instead of class attributes.
    _ATTRIBUTES = "Children"

    def _get_prop(self, attr):
        # Delegate to the wrapped object's path lookup (project API).
        return self._wrapped._get_path((attr,))
| {
"repo_name": "sjdv1982/seamless",
"path": "seamless/highlevel/SelfWrapper.py",
"copies": "1",
"size": "1185",
"license": "mit",
"hash": 9142669370276401000,
"line_mean": 27.9268292683,
"line_max": 79,
"alpha_frac": 0.5991561181,
"autogenerated": false,
"ratio": 4.247311827956989,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014664455855084866,
"num_lines": 41
} |
from functools import partial, update_wrapper
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse
from django.views.generic import RedirectView
# Stub views exercised by the URL-reversing tests; most just return an empty
# response so URL patterns have something to point at.
def empty_view(request, *args, **kwargs):
    return HttpResponse('')


def absolute_kwargs_view(request, arg1=1, arg2=2):
    return HttpResponse('')


def defaults_view(request, arg1, arg2):
    pass


def nested_view(request):
    pass


def erroneous_view(request):
    # Deliberately broken: calling it raises ImportError.
    import non_existent  # NOQA


def pass_resolver_match_view(request, *args, **kwargs):
    # Echoes the resolver match back so tests can inspect it.
    response = HttpResponse('')
    response.resolver_match = request.resolver_match
    return response


uncallable = None  # neither a callable nor a string


class ViewClass(object):
    def __call__(self, request, *args, **kwargs):
        return HttpResponse('')


view_class_instance = ViewClass()


class LazyRedirectView(RedirectView):
    # reverse_lazy defers URL resolution until the URLconf is loaded.
    url = reverse_lazy('named-lazy-url-redirected-to')


@user_passes_test(lambda u: u.is_authenticated(), login_url=reverse_lazy('some-login-page'))
def login_required_view(request):
    return HttpResponse('Hello you')


def bad_view(request, *args, **kwargs):
    raise ValueError("I don't think I'm getting good value for this view")


# Partial/wrapped variants used to test view introspection while reversing.
empty_view_partial = partial(empty_view, template_name="template.html")
empty_view_wrapped = update_wrapper(
    partial(empty_view, template_name="template.html"), empty_view,
)
| {
"repo_name": "alilotfi/django",
"path": "tests/urlpatterns_reverse/views.py",
"copies": "67",
"size": "1466",
"license": "bsd-3-clause",
"hash": -5572186497445394000,
"line_mean": 22.6451612903,
"line_max": 92,
"alpha_frac": 0.727148704,
"autogenerated": false,
"ratio": 3.665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009521351194552784,
"num_lines": 62
} |
from functools import partial, update_wrapper
from django.contrib.auth.decorators import user_passes_test
from django.http import HttpResponse
from django.urls import reverse_lazy
from django.views.generic import RedirectView
# Stub views exercised by the URL-reversing tests; most just return an empty
# response so URL patterns have something to point at.
def empty_view(request, *args, **kwargs):
    return HttpResponse()


def absolute_kwargs_view(request, arg1=1, arg2=2):
    return HttpResponse()


def defaults_view(request, arg1, arg2):
    pass


def nested_view(request):
    pass


def erroneous_view(request):
    # Deliberately broken: calling it raises ImportError.
    import non_existent  # NOQA


def pass_resolver_match_view(request, *args, **kwargs):
    # Echoes the resolver match back so tests can inspect it.
    response = HttpResponse()
    response.resolver_match = request.resolver_match
    return response


uncallable = None  # neither a callable nor a string


class ViewClass:
    def __call__(self, request, *args, **kwargs):
        return HttpResponse()


view_class_instance = ViewClass()


class LazyRedirectView(RedirectView):
    # reverse_lazy defers URL resolution until the URLconf is loaded.
    url = reverse_lazy('named-lazy-url-redirected-to')


@user_passes_test(lambda u: u.is_authenticated, login_url=reverse_lazy('some-login-page'))
def login_required_view(request):
    return HttpResponse('Hello you')


def bad_view(request, *args, **kwargs):
    raise ValueError("I don't think I'm getting good value for this view")


# Partial/wrapped variants used to test view introspection while reversing.
empty_view_partial = partial(empty_view, template_name="template.html")
empty_view_nested_partial = partial(empty_view_partial, template_name="nested_partial.html")
empty_view_wrapped = update_wrapper(
    partial(empty_view, template_name="template.html"), empty_view,
)
| {
"repo_name": "kaedroho/django",
"path": "tests/urlpatterns_reverse/views.py",
"copies": "57",
"size": "1528",
"license": "bsd-3-clause",
"hash": 3223045688764645400,
"line_mean": 23.253968254,
"line_max": 92,
"alpha_frac": 0.7342931937,
"autogenerated": false,
"ratio": 3.6208530805687205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from functools import partial, update_wrapper
from django.http import HttpResponse
from django.views.generic import RedirectView
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import user_passes_test
# Stub views exercised by the URL-reversing tests; most just return an empty
# response so URL patterns have something to point at.
def empty_view(request, *args, **kwargs):
    return HttpResponse('')


def kwargs_view(request, arg1=1, arg2=2):
    return HttpResponse('')


def absolute_kwargs_view(request, arg1=1, arg2=2):
    return HttpResponse('')


def defaults_view(request, arg1, arg2):
    pass


def nested_view(request):
    pass


def erroneous_view(request):
    # Deliberately broken: calling it raises ImportError.
    import non_existent


def pass_resolver_match_view(request, *args, **kwargs):
    # Echoes the resolver match back so tests can inspect it.
    response = HttpResponse('')
    response.resolver_match = request.resolver_match
    return response


# A string, not a callable — presumably exercises the "view must be
# callable" error path; confirm against the tests using it.
uncallable = "Can I be a view? Pleeeease?"


class ViewClass(object):
    def __call__(self, request, *args, **kwargs):
        return HttpResponse('')


view_class_instance = ViewClass()


class LazyRedirectView(RedirectView):
    # reverse_lazy defers URL resolution until the URLconf is loaded.
    url = reverse_lazy('named-lazy-url-redirected-to')


@user_passes_test(lambda u: u.is_authenticated(), login_url=reverse_lazy('some-login-page'))
def login_required_view(request):
    return HttpResponse('Hello you')


def bad_view(request, *args, **kwargs):
    raise ValueError("I don't think I'm getting good value for this view")


# Partial/wrapped variants used to test view introspection while reversing.
empty_view_partial = partial(empty_view, template_name="template.html")
empty_view_wrapped = update_wrapper(
    partial(empty_view, template_name="template.html"), empty_view,
)
| {
"repo_name": "kalahbrown/HueBigSQL",
"path": "desktop/core/ext-py/Django-1.6.10/tests/urlpatterns_reverse/views.py",
"copies": "64",
"size": "1511",
"license": "apache-2.0",
"hash": 1384393102973570800,
"line_mean": 25.5087719298,
"line_max": 92,
"alpha_frac": 0.7279947055,
"autogenerated": false,
"ratio": 3.563679245283019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from functools import partial, update_wrapper
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from django.core.exceptions import ObjectDoesNotExist
from .models import (URLRedirect, URLVisible, SimpleAccessRestriction,
GroupAccessRestriction, UserAccessRestriction)
from .admin_filters import (RedirectFilter, AccessFilter, PublishedFilter,
GroupFilter, UserFilter)
from .middleware_filters import (UserRoleRequired, UserRequired, GroupRequired,
RedirectRequired, PublishedRequired)
class URLInline(admin.StackedInline):
    """Base inline for URL-related models; builds list_display callables."""

    extra = 0

    def get_urladmin_display_func(self, relation_name=None):
        # Bind the relation name, then copy name/docstring metadata from the
        # underlying display method (update_wrapper returns the wrapper).
        bound = partial(self.get_urladmin_display, relation_name=relation_name)
        return update_wrapper(bound, self.get_urladmin_display)
class RedirectInline(URLInline):
    model = URLRedirect
    max_num = 1

    def get_urladmin_display(self, obj, relation_name):
        """True when ``obj`` has a redirect with a non-empty target URL."""
        redirect = getattr(obj, relation_name, None)
        return redirect is not None and len(redirect.get_absolute_url()) > 0
    get_urladmin_display.short_description = _("Redirect")
    get_urladmin_display.boolean = True

    def get_urladmin_filter_cls(self, *args, **kwargs):
        return RedirectFilter

    def get_churlish_middlewares(self):
        return (RedirectRequired,)
class VisibleInline(URLInline):
    model = URLVisible
    max_num = 1

    def get_urladmin_display(self, obj, relation_name):
        """Inverted flag: "is published" is ticked when no URLVisible row
        exists at all, otherwise it reflects ``is_published``."""
        visibility = getattr(obj, relation_name, None)
        return visibility is None or visibility.is_published
    get_urladmin_display.short_description = _("Published")
    get_urladmin_display.boolean = True

    def get_urladmin_filter_cls(self, *args, **kwargs):
        return PublishedFilter

    def get_churlish_middlewares(self):
        return (PublishedRequired,)
class SimpleAccessInline(URLInline):
    model = SimpleAccessRestriction
    extra = 1

    def get_urladmin_display(self, obj, relation_name):
        """True when a login restriction is configured for ``obj``."""
        restriction = getattr(obj, relation_name, None)
        return restriction is not None and restriction.has_restriction()
    get_urladmin_display.short_description = _("Login Restricted")
    get_urladmin_display.boolean = True

    def get_urladmin_filter_cls(self, *args, **kwargs):
        return AccessFilter

    def get_churlish_middlewares(self):
        return (UserRoleRequired,)
class GroupAccessInline(URLInline):
    model = GroupAccessRestriction

    def get_urladmin_display(self, obj, relation_name):
        """True when at least one group restriction exists for ``obj``."""
        restrictions = getattr(obj, relation_name, None)
        return restrictions is not None and restrictions.exists()
    get_urladmin_display.short_description = _("Group Restricted")
    get_urladmin_display.boolean = True

    def get_urladmin_filter_cls(self, *args, **kwargs):
        return GroupFilter

    def get_churlish_middlewares(self):
        return (GroupRequired,)
class UserAccessInline(URLInline):
    model = UserAccessRestriction

    def get_urladmin_display(self, obj, relation_name):
        """True when at least one per-user restriction exists for ``obj``."""
        restrictions = getattr(obj, relation_name, None)
        return restrictions is not None and restrictions.exists()
    get_urladmin_display.short_description = _("User Restricted")
    get_urladmin_display.boolean = True

    def get_urladmin_filter_cls(self, *args, **kwargs):
        return UserFilter

    def get_churlish_middlewares(self):
        return (UserRequired,)
| {
"repo_name": "kezabelle/django-churlish",
"path": "churlish/admin_inlines.py",
"copies": "1",
"size": "3958",
"license": "bsd-2-clause",
"hash": 1893558926794643700,
"line_mean": 31.8290598291,
"line_max": 79,
"alpha_frac": 0.66195048,
"autogenerated": false,
"ratio": 4.224119530416222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5386070010416222,
"avg_score": null,
"num_lines": null
} |
from functools import partial, update_wrapper
from django.contrib.auth.decorators import user_passes_test
from django.http import HttpResponse
from django.urls import reverse_lazy
from django.views.generic import RedirectView
def empty_view(request, *args, **kwargs):
return HttpResponse('')
def absolute_kwargs_view(request, arg1=1, arg2=2):
return HttpResponse('')
def defaults_view(request, arg1, arg2):
pass
def nested_view(request):
pass
def erroneous_view(request):
import non_existent # NOQA
def pass_resolver_match_view(request, *args, **kwargs):
response = HttpResponse('')
response.resolver_match = request.resolver_match
return response
uncallable = None  # neither a callable nor a string

class ViewClass(object):
    """Class-based callable used to test dispatching to an instance."""
    def __call__(self, request, *args, **kwargs):
        return HttpResponse('')

view_class_instance = ViewClass()
class LazyRedirectView(RedirectView):
    """RedirectView whose target URL is resolved lazily by route name."""
    url = reverse_lazy('named-lazy-url-redirected-to')
@user_passes_test(lambda u: u.is_authenticated(), login_url=reverse_lazy('some-login-page'))
def login_required_view(request):
    """View guarded by user_passes_test with a lazily reversed login_url.

    NOTE(review): ``u.is_authenticated()`` is invoked as a method; on
    Django >= 1.10 ``is_authenticated`` is a property -- confirm the
    Django version this test suite targets.
    """
    return HttpResponse('Hello you')
def bad_view(request, *args, **kwargs):
    """Test fixture that always fails with a ValueError."""
    complaint = "I don't think I'm getting good value for this view"
    raise ValueError(complaint)
# Partials of empty_view used to test resolution of partial-wrapped views.
empty_view_partial = partial(empty_view, template_name="template.html")
# A partial of a partial, to test unwrapping of nested partials.
empty_view_nested_partial = partial(empty_view_partial, template_name="nested_partial.html")
# A partial carrying wraps-style metadata copied from empty_view.
empty_view_wrapped = update_wrapper(
    partial(empty_view, template_name="template.html"), empty_view,
)
| {
"repo_name": "yephper/django",
"path": "tests/urlpatterns_reverse/views.py",
"copies": "1",
"size": "1605",
"license": "bsd-3-clause",
"hash": -2409359147976676000,
"line_mean": 24.3114754098,
"line_max": 92,
"alpha_frac": 0.7028037383,
"autogenerated": false,
"ratio": 3.7238979118329465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9921097835146502,
"avg_score": 0.0011207629972889827,
"num_lines": 61
} |
from functools import partial, update_wrapper, wraps
import os
import signal
import subprocess
import pygmi
__all__ = 'call', 'message', 'program_list', 'curry', 'find_script', '_', 'prop'

def _():
    """Placeholder sentinel: marks a blank argument slot for curry()."""
    pass
def call(*args, **kwargs):
    """Run *args* as an external command.

    Keyword options (popped here; everything else is forwarded to
    subprocess.Popen):
      background: if true, detach the child (stdin from /dev/null,
        stdout/stderr inherited) and return None immediately.
      input: text fed to the child's stdin (foreground only).

    Foreground calls return the child's stdout with the trailing
    newline stripped.
    """
    background = kwargs.pop('background', False)
    input = kwargs.pop('input', None)
    stdin = subprocess.PIPE if not background else open('/dev/null', 'r')
    pipe = subprocess.PIPE if not background else None
    p = subprocess.Popen(args, stdin=stdin, stdout=pipe, stderr=pipe,
                         # restore default SIGPIPE handling in the child
                         preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL),
                         cwd=os.environ['HOME'], close_fds=True, **kwargs)
    if background:
        # The child holds its own copy of the descriptor; close ours so
        # repeated background calls do not leak file descriptors.
        stdin.close()
        return None
    return p.communicate(input)[0].rstrip('\n')
def message(message):
    """Pop up *message* in an xmessage dialog, matching the wmii font."""
    cmd = ['xmessage', '-file', '-']
    font = pygmi.wmii['font']
    if not font.startswith('xft:'):
        # xmessage only understands core X fonts; pass the first name.
        cmd.extend(['-fn', font.split(',')[0]])
    call(*cmd, input=message)
def program_list(path):
    """Return the sorted names of executable files found in *path* dirs.

    Unreadable or missing directories are silently skipped; each name is
    reported at most once (first directory wins).
    """
    found = set()
    for directory in path:
        try:
            for entry in os.listdir(directory):
                full = '%s/%s' % (directory, entry)
                if (entry not in found and os.path.isfile(full)
                        and os.access(full, os.X_OK)):
                    found.add(entry)
        except Exception:
            # best effort: ignore unreadable path entries
            pass
    return sorted(found)
def curry(func, *args, **kwargs):
    """Partially apply *func*.

    Two modes:
      * If the placeholder ``_`` appears in *args*, return a wrapper that
        fills the blank slots (in order) with the next call's positional
        arguments; any extra positional arguments are appended.
      * Otherwise return a plain functools.partial.

    Either way the result's ``__name__`` is tagged '__curried__'.
    """
    if _ in args:
        # indices of the blank (placeholder) positions
        blank = [i for i in range(0, len(args)) if args[i] is _]
        @wraps(func)
        def curried(*newargs, **newkwargs):
            ary = list(args)
            for k, v in zip(blank, newargs):
                ary[k] = v
            # positional args beyond the blanks are appended
            ary = tuple(ary) + newargs[len(blank):]
            return func(*ary, **dict(kwargs, **newkwargs))
    else:
        curried = update_wrapper(partial(func, *args, **kwargs), func)
    curried.__name__ += '__curried__'
    return curried
def find_script(name):
    """Return the first executable named *name* on pygmi.confpath, or None."""
    for directory in pygmi.confpath:
        candidate = '%s/%s' % (directory, name)
        if os.access(candidate, os.X_OK):
            return candidate
def prop(**kwargs):
    """Decorator factory for read-only properties.

    Keyword arguments (doc, fset, fdel, ...) are forwarded to property();
    the decorated function becomes the getter.
    """
    def decorate(wrapped):
        options = dict(kwargs, fget=wrapped)
        return property(**options)
    return decorate
# vim:se sts=4 sw=4 et:
| {
"repo_name": "darkfeline/wmii",
"path": "alternative_wmiircs/python/pygmi/util.py",
"copies": "9",
"size": "2163",
"license": "mit",
"hash": 3066492827874414600,
"line_mean": 29.9,
"line_max": 90,
"alpha_frac": 0.5506241331,
"autogenerated": false,
"ratio": 3.444267515923567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007108732742334009,
"num_lines": 70
} |
from functools import partial, WRAPPER_ASSIGNMENTS, WRAPPER_UPDATES
from toolz.compatibility import *
from toolz.compatibility import __all__ as compat_all
# Re-export everything from toolz.compatibility plus our two backports.
__all__ = compat_all + ('update_wrapper', 'wraps')

# pending removal based on acceptance of toolz pull request 230
# see: https://github.com/pytoolz/toolz/pull/230
def update_wrapper(wrapper, wrapped,
                   assigned=WRAPPER_ASSIGNMENTS,
                   updated=WRAPPER_UPDATES):
    """Makes a wrapping object appear and act like the underlying wrapped
    object.

    This is a backport of Python 3.4's update_wrapper which has much smarter
    behavior than the update_wrapper that exists in prior versions as it
    uses a try/except/else block rather than charitably assuming all items
    exist on the wrapped object (functools.partial has no `__name__` attribute
    for example).

    It also backports the `__wrapped__` attribute for easy access to the
    original object.

    WARNING!!: This function modifies the wrapping object!

    This is more useful for class based wrappers, for function wrappers see
    ``wraps`` below.

    >>> class Decorator(object):
    ...     "Wraps a function in a callable object."
    ...     def __init__(self, f):
    ...         update_wrapper(self, f)
    ...         self.f = f
    ...     def __call__(self, *args, **kwargs):
    ...         return self.f(*args, **kwargs)

    >>> @Decorator
    ... def add(x, y):
    ...     "Adds two objects."
    ...     return x + y

    >>> print(add.__name__)
    add
    >>> print(add.__doc__)
    Adds two objects.
    """
    for attr in assigned:
        try:
            value = getattr(wrapped, attr)
        except AttributeError:
            # wrapped may lack the attribute (e.g. functools.partial has
            # no __name__); simply skip it
            pass
        else:
            setattr(wrapper, attr, value)
    for attr in updated:
        getattr(wrapper, attr, {}).update(getattr(wrapped, attr, {}))
    # store original callable last so it isn't accidentally copied over
    # in event of wrapping multiple times
    wrapper.__wrapped__ = wrapped
    return wrapper
def wraps(wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=WRAPPER_UPDATES):
    """Decorator form of ``update_wrapper``.

    This is very useful for writing closure based decorators
    rather than manually using ``update_wrapper`` on the closure.

    WARNING!!: This function modifies the function it is applied to!
    """
    def decorate(wrapper):
        return update_wrapper(wrapper, wrapped,
                              assigned=assigned, updated=updated)
    return decorate
| {
"repo_name": "justanr/toolshed",
"path": "toolshed/compatibility.py",
"copies": "1",
"size": "2497",
"license": "bsd-3-clause",
"hash": 4888874798913415000,
"line_mean": 33.2054794521,
"line_max": 78,
"alpha_frac": 0.6403684421,
"autogenerated": false,
"ratio": 4.290378006872852,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015306368535663603,
"num_lines": 73
} |
from functools import partial, wraps
from .base import DuplicateLabel
from .pyll.base import Apply, Literal
from .pyll import scope
from .pyll import as_apply
def validate_label(f):
    """Decorator: reject calls whose first argument is not a string label.

    A label may be a real string or a pyll Literal wrapping a string.
    """
    @wraps(f)
    def wrapper(label, *args, **kwargs):
        acceptable = isinstance(label, basestring) or (
            isinstance(label, Literal)
            and isinstance(label.obj, basestring))
        if not acceptable:
            raise TypeError('require string label')
        return f(label, *args, **kwargs)
    return wrapper
#
# Hyperparameter Types
#
@scope.define
def hyperopt_param(label, obj):
    """ A graph node primarily for annotating - VectorizeHelper looks out
    for these guys, and optimizes subgraphs of the form:

        hyperopt_param(<stochastic_expression>(...))

    """
    # Identity at evaluation time; the label only matters to graph walkers.
    return obj
@validate_label
def hp_pchoice(label, p_options):
    """Weighted choice.

    label: string
    p_options: list of (probability, option) pairs
    """
    p, options = zip(*p_options)
    n_options = len(options)
    # Draw an index from the weighted categorical, then switch on it.
    ch = scope.hyperopt_param(label,
                              scope.categorical(
                                  p,
                                  upper=n_options))
    return scope.switch(ch, *options)
@validate_label
def hp_choice(label, options):
    """Uniform choice among *options*, driven by a randint hyperparameter."""
    ch = scope.hyperopt_param(label,
                              scope.randint(len(options)))
    return scope.switch(ch, *options)
# Each hp_* helper below builds a labelled pyll expression: the label is
# attached via scope.hyperopt_param so VectorizeHelper can locate the
# stochastic node; continuous draws are wrapped in scope.float.
@validate_label
def hp_randint(label, *args, **kwargs):
    """Random integer hyperparameter (args forwarded to scope.randint)."""
    return scope.hyperopt_param(label,
                                scope.randint(*args, **kwargs))

@validate_label
def hp_uniform(label, *args, **kwargs):
    """Uniform continuous hyperparameter."""
    return scope.float(
        scope.hyperopt_param(label,
                             scope.uniform(*args, **kwargs)))

@validate_label
def hp_quniform(label, *args, **kwargs):
    """Quantized uniform hyperparameter."""
    return scope.float(
        scope.hyperopt_param(label,
                             scope.quniform(*args, **kwargs)))

@validate_label
def hp_loguniform(label, *args, **kwargs):
    """Log-uniform hyperparameter."""
    return scope.float(
        scope.hyperopt_param(label,
                             scope.loguniform(*args, **kwargs)))

@validate_label
def hp_qloguniform(label, *args, **kwargs):
    """Quantized log-uniform hyperparameter."""
    return scope.float(
        scope.hyperopt_param(label,
                             scope.qloguniform(*args, **kwargs)))

@validate_label
def hp_normal(label, *args, **kwargs):
    """Normally distributed hyperparameter."""
    return scope.float(
        scope.hyperopt_param(label,
                             scope.normal(*args, **kwargs)))

@validate_label
def hp_qnormal(label, *args, **kwargs):
    """Quantized normal hyperparameter."""
    return scope.float(
        scope.hyperopt_param(label,
                             scope.qnormal(*args, **kwargs)))

@validate_label
def hp_lognormal(label, *args, **kwargs):
    """Log-normal hyperparameter."""
    return scope.float(
        scope.hyperopt_param(label,
                             scope.lognormal(*args, **kwargs)))

@validate_label
def hp_qlognormal(label, *args, **kwargs):
    """Quantized log-normal hyperparameter."""
    return scope.float(
        scope.hyperopt_param(label,
                             scope.qlognormal(*args, **kwargs)))
#
# Tools for extracting a search space from a Pyll graph
#
class Cond(object):
    """A condition ``name <op> val`` under which a hyperparameter is active.

    Instances are value objects: equality and hashing are structural so
    they can be collected in sets/frozensets (see _expr_to_config).
    """
    def __init__(self, name, val, op):
        self.op = op
        self.name = name
        self.val = val

    def __str__(self):
        return 'Cond{%s %s %s}' % (self.name, self.op, self.val)

    def __eq__(self, other):
        # Defer to the other operand for foreign types instead of
        # raising AttributeError on a missing .op/.name/.val.
        if not isinstance(other, Cond):
            return NotImplemented
        return (self.op == other.op and self.name == other.name
                and self.val == other.val)

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; define it so
        # `!=` agrees with `==` rather than falling back to identity.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        return hash((self.op, self.name, self.val))

    def __repr__(self):
        return str(self)

# Shorthand for equality conditions, e.g. EQ('x', 3) == Cond('x', 3, '=').
EQ = partial(Cond, op='=')
def _expr_to_config(expr, conditions, hps):
    # Recursive worker for expr_to_config: walk the pyll graph and record
    # every hyperopt_param node in `hps`, together with the conditions
    # under which it participates in evaluation.
    if expr.name == 'switch':
        # First input is the switch index (itself a hyperparameter);
        # the remaining inputs are the branch expressions.
        idx = expr.inputs()[0]
        options = expr.inputs()[1:]
        assert idx.name == 'hyperopt_param'
        assert idx.arg['obj'].name in (
            'randint',  # -- in case of hp.choice
            'categorical',  # -- in case of hp.pchoice
        )
        _expr_to_config(idx, conditions, hps)
        for ii, opt in enumerate(options):
            # Each branch only matters when the switch selects it.
            _expr_to_config(opt,
                            conditions + (EQ(idx.arg['label'].obj, ii),),
                            hps)
    elif expr.name == 'hyperopt_param':
        label = expr.arg['label'].obj
        if label in hps:
            # A label must always refer to the same distribution node.
            if hps[label]['node'] != expr.arg['obj']:
                raise DuplicateLabel(label)
            hps[label]['conditions'].add(conditions)
        else:
            hps[label] = {'node': expr.arg['obj'],
                          'conditions': set((conditions,)),
                          'label': label,
                          }
    else:
        # Ordinary node: recurse into all inputs under the same conditions.
        for ii in expr.inputs():
            _expr_to_config(ii, conditions, hps)
def expr_to_config(expr, conditions, hps):
    """
    Populate dictionary `hps` with the hyperparameters in pyll graph `expr`
    and conditions for participation in the evaluation of `expr`.

    Arguments:
    expr - a pyll expression root.
    conditions - a tuple of conditions (`Cond`) that must be True for
        `expr` to be evaluated, or None for no preconditions.
    hps - dictionary to populate

    Creates `hps` dictionary:
        label -> { 'node': apply node of hyperparameter distribution,
                   'conditions': `conditions` + tuple,
                   'label': label,
                   }
    """
    expr = as_apply(expr)
    if conditions is None:
        conditions = ()
    assert isinstance(expr, Apply)
    _expr_to_config(expr, conditions, hps)
    # Prune condition sets that cover every possible value of a variable
    # (i.e. false dependencies).
    _remove_allpaths(hps, conditions)
def _remove_allpaths(hps, conditions):
    """Hacky way to recognize some kinds of false dependencies
    Better would be logic programming.
    """
    # For each integer-valued hyperparameter, the complete set of EQ
    # conditions it could ever generate.
    potential_conds = {}
    for k, v in hps.items():
        if v['node'].name in ('randint', 'categorical'):
            upper = v['node'].arg['upper'].obj
            potential_conds[k] = frozenset([EQ(k, ii) for ii in range(upper)])

    for k, v in hps.items():
        if len(v['conditions']) > 1:
            # Drop trivially-true entries from each condition tuple.
            all_conds = [[c for c in cond if c is not True]
                         for cond in v['conditions']]
            all_conds = [cond for cond in all_conds if len(cond) >= 1]
            if len(all_conds) == 0:
                v['conditions'] = set([conditions])
                continue

            # Does every condition constrain the same single variable?
            depvar = all_conds[0][0].name
            all_one_var = all(len(cond) == 1 and cond[0].name == depvar
                              for cond in all_conds)
            if all_one_var:
                conds = [cond[0] for cond in all_conds]
                # If together they cover all values of that variable, the
                # dependency is vacuous: reset to the caller's conditions.
                if frozenset(conds) == potential_conds[depvar]:
                    v['conditions'] = set([conditions])
                    continue
# -- eof
| {
"repo_name": "dudalev/hyperopt",
"path": "hyperopt/pyll_utils.py",
"copies": "1",
"size": "6637",
"license": "bsd-3-clause",
"hash": 637492806938818600,
"line_mean": 28.1096491228,
"line_max": 88,
"alpha_frac": 0.5592888353,
"autogenerated": false,
"ratio": 3.8408564814814814,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4900145316781481,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
from collections import namedtuple
from .predicate import match_instance
from .predicate import PredicateRegistry
from .arginfo import arginfo
from .error import RegistrationError
class dispatch:
    """Decorator to make a function dispatch based on its arguments.

    Takes the predicates to dispatch on as zero or more parameters.

    :param predicates: sequence of :class:`reg.Predicate` instances to
      do the dispatch on. Plain string arguments are shorthand for
      :func:`reg.match_instance` predicates.
    :param get_key_lookup: a function that receives a
      :class:`PredicateRegistry` instance and returns a key lookup
      (e.g. a caching wrapper such as :class:`reg.DictCachingKeyLookup`);
      defaults to the registry itself.
    :returns: a function usable as if it were a :class:`reg.Dispatch`
      instance.
    """
    def __init__(self, *predicates, **kw):
        self.predicates = [self._make_predicate(p) for p in predicates]
        self.get_key_lookup = kw.pop("get_key_lookup", identity)

    def _make_predicate(self, predicate):
        # Strings are shorthand for match_instance predicates.
        if isinstance(predicate, str):
            return match_instance(predicate)
        return predicate

    def __call__(self, callable):
        dispatcher = Dispatch(self.predicates, callable, self.get_key_lookup)
        return dispatcher.call
def identity(registry):
    """Default get_key_lookup: use the registry itself (no caching)."""
    return registry
class LookupEntry(namedtuple("LookupEntry", "lookup key")):
    """The dispatch data associated to a key."""
    __slots__ = ()

    @property
    def component(self):
        """Exact implementation registered for the key, excluding fallbacks."""
        lookup, key = self
        return lookup.component(key)

    @property
    def fallback(self):
        """The appropriate fallback implementation."""
        lookup, key = self
        return lookup.fallback(key)

    @property
    def matches(self):
        """Iterator over every implementation compatible with the key."""
        lookup, key = self
        return lookup.all(key)

    @property
    def all_matches(self):
        """All compatible implementations, materialized as a list."""
        return list(self.matches)
class Dispatch:
    """Dispatch function.

    You can register implementations based on particular predicates. The
    dispatch function dispatches to these implementations based on its
    arguments.

    :param predicates: a list of predicates.
    :param callable: the Python function object to register dispatch
      implementations for. The signature of an implementation needs to
      match that of this function. This function is used as a fallback
      implementation that is called if no specific implementations match.
    :param get_key_lookup: a function that gets a
      :class:`PredicateRegistry` instance and returns a key lookup. A
      :class:`PredicateRegistry` instance is itself a key lookup, but
      you can return a caching key lookup (such as
      :class:`reg.DictCachingKeyLookup` or
      :class:`reg.LruCachingKeyLookup`) to make it more efficient.
    """
    def __init__(self, predicates, callable, get_key_lookup):
        self.wrapped_func = callable
        self.get_key_lookup = get_key_lookup
        self._original_predicates = predicates
        # Generate the `call` function first, then wire it to the registry.
        self._define_call()
        self._register_predicates(predicates)

    def _register_predicates(self, predicates):
        # (Re)create the registry and patch the generated functions'
        # globals so they resolve against the new registry/lookup.
        self.registry = PredicateRegistry(*predicates)
        self.predicates = predicates
        self.call.key_lookup = self.key_lookup = self.get_key_lookup(self.registry)
        self.call.__globals__.update(
            _registry_key=self.registry.key,
            _component_lookup=self.key_lookup.component,
            _fallback_lookup=self.key_lookup.fallback,
        )
        self._predicate_key.__globals__.update(
            _registry_key=self.registry.key,
            _return_type=partial(LookupEntry, self.key_lookup),
        )

    def _define_call(self):
        # We build the generic function on the fly. Its definition
        # requires the signature of the wrapped function and the
        # arguments needed by the registered predicates
        # (predicate_args):
        code_template = """\
def call({signature}):
    _key = _registry_key({predicate_args})
    return (_component_lookup(_key) or
            _fallback_lookup(_key) or
            _fallback)({signature})
"""
        args = arginfo(self.wrapped_func)
        signature = format_signature(args)
        predicate_args = ", ".join("{0}={0}".format(x) for x in args.args)
        code_source = code_template.format(
            signature=signature, predicate_args=predicate_args
        )
        # We now compile call to byte-code:
        self.call = call = wraps(self.wrapped_func)(
            execute(
                code_source,
                _registry_key=None,
                _component_lookup=None,
                _fallback_lookup=None,
                _fallback=self.wrapped_func,
            )["call"]
        )
        # We copy over the defaults from the wrapped function.
        call.__defaults__ = args.defaults
        # Make the public methods available as attributes of call
        for k in dir(type(self)):
            if not k.startswith("_"):
                setattr(call, k, getattr(self, k))
        call.wrapped_func = self.wrapped_func
        # We now build the implementation for the predicate_key method
        self._predicate_key = execute(
            "def predicate_key({signature}):\n"
            "    return _return_type(_registry_key({predicate_args}))".format(
                signature=format_signature(args), predicate_args=predicate_args
            ),
            _registry_key=None,
            _return_type=None,
        )["predicate_key"]

    def clean(self):
        """Clean up implementations and added predicates.

        This restores the dispatch function to its original state,
        removing registered implementations and predicates added
        using :meth:`reg.Dispatch.add_predicates`.
        """
        self._register_predicates(self._original_predicates)

    def add_predicates(self, predicates):
        """Add new predicates.

        Extend the predicates used by this dispatch function. This can be
        used to add predicates that are configured during startup time.

        Note that this clears up any registered implementations.

        :param predicates: a list of predicates to add.
        """
        self._register_predicates(self.predicates + predicates)

    def register(self, func=None, **key_dict):
        """Register an implementation.

        If ``func`` is not specified, this method can be used as a
        decorator and the decorated function will be used as the
        actual ``func`` argument.

        :param func: a function that implements behavior for this
          dispatch function. It needs to have the same signature as
          the original dispatch function. If this is a
          :class:`reg.DispatchMethod`, then this means it needs to
          take a first context argument.
        :param key_dict: keyword arguments describing the registration,
          with as keys predicate name and as values predicate values.
        :returns: ``func``.
        """
        if func is None:
            # decorator usage: @f.register(key=...)
            return partial(self.register, **key_dict)
        validate_signature(func, self.wrapped_func)
        predicate_key = self.registry.key_dict_to_predicate_key(key_dict)
        self.registry.register(predicate_key, func)
        return func

    def by_args(self, *args, **kw):
        """Lookup an implementation by invocation arguments.

        :param args: positional arguments used in invocation.
        :param kw: named arguments used in invocation.
        :returns: a :class:`reg.LookupEntry`.
        """
        return self._predicate_key(*args, **kw)

    def by_predicates(self, **predicate_values):
        """Lookup an implementation by predicate values.

        :param predicate_values: the values of the predicates to lookup.
        :returns: a :class:`reg.LookupEntry`.
        """
        return LookupEntry(
            self.key_lookup,
            self.registry.key_dict_to_predicate_key(predicate_values),
        )
def validate_signature(f, dispatch):
    """Raise RegistrationError unless *f* is callable with the same
    signature shape as *dispatch*."""
    f_arginfo = arginfo(f)
    if f_arginfo is None:
        raise RegistrationError(
            "Cannot register non-callable for dispatch " "%r: %r" % (dispatch, f)
        )
    if same_signature(arginfo(dispatch), f_arginfo):
        return
    raise RegistrationError(
        "Signature of callable dispatched to (%r) "
        "not that of dispatch (%r)" % (f, dispatch)
    )
def format_signature(args):
    """Render an arginfo object as a Python parameter-list string."""
    parts = list(args.args)
    if args.varargs:
        parts.append("*" + args.varargs)
    if args.varkw:
        parts.append("**" + args.varkw)
    return ", ".join(parts)
def same_signature(a, b):
    """Check whether a arginfo and b arginfo are the same signature.

    Actual names of arguments may differ. Default arguments may be
    different.
    """
    if a.varargs != b.varargs or a.varkw != b.varkw:
        return False
    return len(set(a.args)) == len(set(b.args))
def execute(code_source, **namespace):
    """Execute code in a namespace, returning the namespace."""
    # Embed the source in the pseudo-filename so tracebacks show it.
    filename = f"<generated code: {code_source}>"
    exec(compile(code_source, filename, "exec"), namespace)
    return namespace
| {
"repo_name": "morepath/reg",
"path": "reg/dispatch.py",
"copies": "1",
"size": "9645",
"license": "bsd-3-clause",
"hash": -7276508442917660000,
"line_mean": 35.1235955056,
"line_max": 87,
"alpha_frac": 0.6414722654,
"autogenerated": false,
"ratio": 4.296213808463252,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022050692642074337,
"num_lines": 267
} |
from functools import partial, wraps
from flask import Flask, request, send_file, make_response, Response
from flask.ext.restful import abort, Api, Resource
#from flask.ext.restful.utils import cors
import cors # until twilio/flask-restful/pull/276 is merged, see the package
from landmarkerio import Server, Endpoints, Mimetype
# Join path segments into a URL rooted at '/'.
url = lambda *x: '/' + '/'.join(x)
# Prefix a URL builder with the asset-id route placeholder.
asset = lambda f: partial(f, '<string:asset_id>')
def safe_send(x, fail_message):
    """Return *x*, aborting with a 404 (*fail_message*) on failure.

    Historically callers passed an already-evaluated value, so the
    try/except could never catch the adapter's exception (it was raised
    before this function was entered). To make the guard effective, *x*
    may now also be a zero-argument callable, which is invoked here so
    its failure is caught; plain values still pass straight through.
    """
    try:
        return x() if callable(x) else x
    except Exception as e:
        print(e)
        return abort(404, message=fail_message)
def safe_send_file(mimetype, path, fail_message, gzip=False):
    """Serve *path* with *mimetype*; abort 404 with *fail_message* on failure.

    gzip: if true, mark the payload as gzip-encoded via Content-Encoding.
    """
    try:
        response = make_response(send_file(path, mimetype=mimetype))
        if gzip:
            response.headers['Content-Encoding'] = 'gzip'
    except Exception as e:
        print(e)
        return abort(404, message=fail_message)
    return response
# Convenience senders specialized by mimetype (see safe_send_file).
image_file = partial(safe_send_file, Mimetype.jpeg)
json_file = partial(safe_send_file, Mimetype.json)
gzip_json_file = partial(safe_send_file, Mimetype.json, gzip=True)
binary_file = partial(safe_send_file, Mimetype.binary)
gzip_binary_file = partial(binary_file, gzip=True)
def basicauth(username, password):
    r"""Returns a decorator that will validate the request for Basic Auth with
    the provided username and password. Will return a 401 if the request
    cannot be fullfilled.
    """
    def credentials_ok(username_test, password_test):
        # check whether a username/password pair matches the configured one
        return username_test == username and password_test == password

    def challenge():
        # 401 response that triggers the browser's basic-auth prompt
        return Response(
            'Could not verify your access level for that URL.\n'
            'You have to login with proper credentials', 401,
            {'WWW-Authenticate': 'Basic realm="Login Required"'})

    def requires_auth(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            auth = request.authorization
            if auth and credentials_ok(auth.username, auth.password):
                return f(*args, **kwargs)
            return challenge()
        return decorated

    return requires_auth
def lmio_api(dev=False, username=None, password=None):
    r"""
    Generate a Flask App that will serve meshes landmarks and templates to
    landmarker.io

    Parameters
    ----------
    dev: `bool`, optional
        If True, listen to anyone for CORS (and disable basic auth).
    username : str, optional
        If provided basic auth will be applied for this username. Requires
        password to also be provided.
    password : str, optional
        If provided basic auth will be applied for this password. Requires
        username to also be provided.

    Returns
    -------
    api, app
    """
    app = Flask(__name__)  # create the flask app

    # 1. configure CORS decorator
    cors_dict = {
        'allowed_origins': Server.allowed_origins,
        'headers': ['Origin', 'X-Requested-With', 'Content-Type', 'Accept'],
        'methods': ['HEAD', 'GET', 'POST', 'PATCH', 'PUT', 'OPTIONS', 'DELETE'],
        'credentials': True
    }

    if dev:
        # in development mode we can't use basic auth
        cors_dict['credentials'] = False
        app.debug = True

    # create the cors decorator
    decorators = [cors.crossdomain(**cors_dict)]

    if username is not None and password is not None:
        print('enabling basic auth')
        # note that cors is the last decorator -> the first that is hit. This
        # is what we want as CORS will detect OPTIONS requests and allow them
        # immediately. All other requests will be sent through the basicauth
        # decorator.
        decorators.insert(0, basicauth(username, password))

    api = Api(app, decorators=decorators)
    return api, app
def add_mode_endpoint(api, mode):
    # Expose a trivial endpoint reporting whether this server serves
    # 'image' or 'mesh' assets.
    if mode not in ['image', 'mesh']:
        raise ValueError("Mode can only be 'image' or 'mesh', "
                         "not {}".format(mode))

    class Mode(Resource):
        def get(self):
            return mode

    api.add_resource(Mode, url(Endpoints.mode))
def add_lm_endpoints(api, lm_adapter, template_adapter):
    r"""
    Register landmark REST endpoints on *api*.

    Parameters
    ----------
    api : flask-restful ``Api``
        The API to attach the resources to.
    lm_adapter : landmark adapter
        Queried for loading/saving per-asset landmarks.
    template_adapter : template adapter
        Fallback source: if an asset has no landmarks saved yet, the raw
        template of that name is returned instead.
    """
    class Landmark(Resource):
        def get(self, asset_id, lm_id):
            err = "{} does not have {} landmarks".format(asset_id, lm_id)
            try:
                return lm_adapter.load_lm(asset_id, lm_id)
            except Exception as e:
                # no saved landmarks: fall back to the unannotated template
                try:
                    return template_adapter.load_template(lm_id)
                except Exception as e:
                    return abort(404, message=err)

        def put(self, asset_id, lm_id):
            try:
                return lm_adapter.save_lm(asset_id, lm_id, request.json)
            except Exception as e:
                print(e)
                return abort(409, message="{}:{} unable to "
                                          "save".format(asset_id, lm_id))

        # Need this here to enable CORS put see http://mzl.la/1rCDkWX
        def options(self, asset_id, lm_id):
            pass

    class LandmarkList(Resource):
        def get(self):
            return lm_adapter.asset_id_to_lm_id()

    class LandmarkListForId(Resource):
        def get(self, asset_id):
            return lm_adapter.lm_ids(asset_id)

    lm_url = partial(url, Endpoints.landmarks)
    api.add_resource(LandmarkList, lm_url())
    api.add_resource(LandmarkListForId, asset(lm_url)())
    api.add_resource(Landmark, asset(lm_url)('<string:lm_id>'))
def add_template_endpoints(api, adapter):
    # Register template listing/fetch endpoints on *api*.
    class Template(Resource):
        def get(self, lm_id):
            err = "{} template does not exist".format(lm_id)
            # NOTE(review): load_template is evaluated *before* safe_send
            # is entered, so its exceptions bypass safe_send's handler.
            return safe_send(adapter.load_template(lm_id), err)

    class TemplateList(Resource):
        def get(self):
            return adapter.template_ids()

    templates_url = partial(url, Endpoints.templates)
    api.add_resource(TemplateList, templates_url())
    api.add_resource(Template, templates_url('<string:lm_id>'))
def add_collection_endpoints(api, adapter):
    # Register collection listing/fetch endpoints on *api*.
    class Collection(Resource):
        def get(self, collection_id):
            # NOTE(review): user-facing message grammar ("not exist");
            # consider "does not exist".
            err = "{} collection not exist".format(collection_id)
            return safe_send(adapter.collection(collection_id), err)

    class CollectionList(Resource):
        def get(self):
            return adapter.collection_ids()

    collections_url = partial(url, Endpoints.collections)
    api.add_resource(CollectionList, collections_url())
    api.add_resource(Collection, collections_url('<string:collection_id>'))
def add_image_endpoints(api, adapter):
    r"""
    Register image endpoints (info, texture, thumbnail) on *api*.

    Parameters
    ----------
    adapter: :class:`ImageLandmarkerIOAdapter`
        Concrete implementation of the Image adapter. Will be queried for
        all data to pass to landmarker.io.
    """
    class Image(Resource):
        def get(self, asset_id):
            err = "{} does not have an image".format(asset_id)
            return json_file(adapter.image_info(asset_id), err)

    class ImageList(Resource):
        def get(self):
            return adapter.asset_ids()

    class Texture(Resource):
        def get(self, asset_id):
            err = "{} does not have a texture".format(asset_id)
            return image_file(adapter.texture_file(asset_id), err)

    class Thumbnail(Resource):
        def get(self, asset_id):
            err = "{} does not have a thumbnail".format(asset_id)
            return image_file(adapter.thumbnail_file(asset_id), err)

    image_url = partial(url, Endpoints.images)
    texture_url = partial(url, Endpoints.textures)
    thumbnail_url = partial(url, Endpoints.thumbnail)

    api.add_resource(ImageList, image_url())
    api.add_resource(Image, asset(image_url)())
    api.add_resource(Texture, asset(texture_url)())
    api.add_resource(Thumbnail, asset(thumbnail_url)())
def add_mesh_endpoints(api, adapter):
    # Register mesh endpoints on *api*; meshes are served as gzip-encoded
    # binary blobs.
    class Mesh(Resource):
        def get(self, asset_id):
            err = "{} is not an available mesh".format(asset_id)
            return gzip_binary_file(adapter.mesh(asset_id), err)

    class MeshList(Resource):
        def get(self):
            return adapter.asset_ids()

    mesh_url = partial(url, Endpoints.meshes)
    mesh_asset_url = asset(mesh_url)

    api.add_resource(MeshList, mesh_url())
    api.add_resource(Mesh, mesh_asset_url())
| {
"repo_name": "jabooth/landmarkerio-server",
"path": "landmarkerio/server.py",
"copies": "1",
"size": "9111",
"license": "bsd-3-clause",
"hash": -3115408170092723000,
"line_mean": 29.8847457627,
"line_max": 80,
"alpha_frac": 0.6252881133,
"autogenerated": false,
"ratio": 3.9492847854356308,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00035239796821717723,
"num_lines": 295
} |
from functools import partial, wraps
from inspect import getargspec
from .op import identity, flip
class F(object):
    """Composable function wrapper.

    Supports composition via << (right-to-left) and >> (left-to-right),
    and partial application via a (func, arg1, arg2, ...) tuple on either
    operator. Calling the instance applies the composed function.
    """
    __slots__ = "f",

    def __init__(self, f=identity, *args, **kwargs):
        if args or kwargs:
            self.f = partial(f, *args, **kwargs)
        else:
            self.f = f

    @classmethod
    def __compose(cls, f, g):
        """Build a new instance computing f(g(...)). Internal helper shared
        by __rshift__ and __lshift__ to avoid duplication."""
        def composed(*args, **kwargs):
            return f(g(*args, **kwargs))
        return cls(composed)

    def __ensure_callable(self, f):
        """Turn a (func, arg1, arg2, ...) tuple into a partially applied
        instance; pass anything else through unchanged."""
        if isinstance(f, tuple):
            return self.__class__(*f)
        return f

    def __rshift__(self, g):
        """self >> g: apply self first, then g."""
        return self.__class__.__compose(self.__ensure_callable(g), self.f)

    def __lshift__(self, g):
        """self << g: apply g first, then self."""
        return self.__class__.__compose(self.f, self.__ensure_callable(g))

    def __call__(self, *args, **kwargs):
        """Apply the composed function to the given arguments."""
        return self.f(*args, **kwargs)
def curried(func):
    """A decorator that makes the function curried

    Usage example:

    >>> @curried
    ... def sum5(a, b, c, d, e):
    ...     return a + b + c + d + e
    ...
    >>> sum5(1)(2)(3)(4)(5)
    15
    >>> sum5(1, 2, 3)(4, 5)
    15
    """
    @wraps(func)
    def _curried(*args, **kwargs):
        f = func
        count = 0
        # Count positional args already bound by earlier curry steps
        # (func may be a chain of nested partials).
        while isinstance(f, partial):
            if f.args:
                count += len(f.args)
            f = f.func

        spec = getargspec(f)

        # All positional parameters accounted for: call through.
        if count == len(spec.args) - len(args):
            return func(*args, **kwargs)

        # Otherwise bind what we have and keep currying.
        return curried(partial(func, *args, **kwargs))
    return _curried
| {
"repo_name": "FunctionalX/FunctionalX.py",
"path": "FunctionalX/src/fn/func.py",
"copies": "2",
"size": "2272",
"license": "mit",
"hash": 3806584769022456300,
"line_mean": 26.0476190476,
"line_max": 74,
"alpha_frac": 0.5404929577,
"autogenerated": false,
"ratio": 3.7491749174917492,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0035247198838087323,
"num_lines": 84
} |
from functools import partial, wraps
from itertools import chain
import os
import threading
import sublime
from sublime_plugin import WindowCommand, TextCommand
from ..git_mixins.status import FileStatus
from ..commands import GsNavigate
from ...common import ui
from ..git_command import GitCommand
from ...common import util
# Alias: flatten one level of nesting in an iterable of iterables.
flatten = chain.from_iterable

MYPY = False
if MYPY:
    # Imports needed only by the type checker; guarded so runtime is
    # unaffected.
    from typing import Iterable, Iterator, List, Optional, Tuple
# Expected
# - common/commands/view_manipulation.py
# common/ui.py
# core/commands/commit_compare.py -> core/commands/commit_compare_foo.py
# But do not match our stashes or anything from our help
# (1) log git start/stop
# [t] create stash
# Matches a dashboard body line and captures the filename in group 1.
# NOTE(review): whitespace inside these raw strings is significant to the
# regex; verify the literal spaces against the upstream file, as the inline
# column-marker comments suggest wider runs than shown here.
EXTRACT_FILENAME_RE = (
    r"^(?: .+ -> | [ -] (?!\(\d+\) ))"
    # ^ leading 4 spaces
    #        ^ a filename
    #    ^ marker indicating a rename/move
    #  ^ OR
    # ^ leading 4 spaces or two spaces and our deleted marker
    #      ^^^^^^^^^^^ but be aware to *not* match stashes
    r"(?!Your working directory is clean\.)"
    # ^ be aware to *not* match this message which otherwise fulfills our
    #   filename matcher
    r"(\S.*)$"
    # ^^^^^^ the actual filename matcher
    # Note: A filename cannot start with a space (which is luckily true anyway)
    # otherwise our naive `.*` could consume only whitespace.
)
def distinct_until_state_changed(just_render_fn):
    """Custom `lru_cache`-look-alike to minimize redraws: skip calling the
    wrapped render function while ``self.state`` is unchanged."""
    last_rendered_state = {}

    @wraps(just_render_fn)
    def guarded(self, *args, **kwargs):
        nonlocal last_rendered_state
        state = self.state
        if state != last_rendered_state:
            just_render_fn(self, *args, **kwargs)
            # remember a snapshot, not a reference, of what we drew
            last_rendered_state = state.copy()

    return guarded
class GsShowStatusCommand(WindowCommand, GitCommand):
    """
    Open a status view for the active git repository.
    """
    def run(self):
        # Instantiating the interface creates (or re-focuses) the dashboard
        # view for this repo.
        StatusInterface(repo_path=self.repo_path)
class StatusInterface(ui.Interface, GitCommand):
    """
    Status dashboard.
    """
    interface_type = "status"
    read_only = True
    syntax_file = "Packages/GitSavvy/syntax/status.sublime-syntax"
    word_wrap = False
    tab_size = 2
    template = """\
      BRANCH:  {branch_status}
      ROOT:    {git_root}
      HEAD:    {head}
    {< unstaged_files}
    {< untracked_files}
    {< staged_files}
    {< merge_conflicts}
    {< no_status_message}
    {< stashes}
    {< help}
    """
    template_help = """
      ###################                   ###############
      ## SELECTED FILE ##                   ## ALL FILES ##
      ###################                   ###############
      [o] open file                         [a] stage all unstaged files
      [s] stage file                        [A] stage all unstaged and untracked files
      [u] unstage file                      [U] unstage all staged files
      [d] discard changes to file           [D] discard all unstaged changes
      [h] open file on remote
      [M] launch external merge tool
      [l] diff file inline                  [f] diff all files
      [e] diff file                         [F] diff all cached files
      #############                         #############
      ## ACTIONS ##                         ## STASHES ##
      #############                         #############
      [c] commit                            [t][a] apply stash
      [C] commit, including unstaged        [t][p] pop stash
      [m] amend previous commit             [t][s] show stash
      [p] push current branch               [t][c] create stash
                                            [t][u] create stash including untracked files
      [i] ignore file                       [t][g] create stash of staged changes only
      [I] ignore pattern                    [t][d] drop stash
      [B] abort merge
      ###########
      ## OTHER ##
      ###########
      [g] show graph repop history
      [?] toggle this help menu
      [tab]       transition to next dashboard
      [SHIFT-tab] transition to previous dashboard
      [.]         move cursor to next file
      [,]         move cursor to previous file
    {conflicts_bindings}
    -
    """
    conflicts_keybindings = """
      ###############
      ## CONFLICTS ##
      ###############
      [y] use version from your commit
      [b] use version from the base
    """
    template_staged = """
      STAGED:
    {}
    """
    template_unstaged = """
      UNSTAGED:
    {}
    """
    template_untracked = """
      UNTRACKED:
    {}
    """
    template_merge_conflicts = """
      MERGE CONFLICTS:
    {}
    """
    template_stashes = """
      STASHES:
    {}
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): `_initialized` is presumably set by `ui.Interface`
        # when it reuses an instance for an existing view — confirm in
        # common/ui.py.  Skip re-initialization in that case.
        if self._initialized:
            return
        # Strip the two-space indent so the bindings line up when spliced
        # into the help template via {conflicts_bindings}.
        self.conflicts_keybindings = \
            "\n".join(line[2:] for line in self.conflicts_keybindings.split("\n"))
        # Guards `self.state` against concurrent update/render (see
        # `update_state` and `just_render`).
        self._lock = threading.Lock()
        self.state = {
            'staged_files': [],
            'unstaged_files': [],
            'untracked_files': [],
            'merge_conflicts': [],
            'branch_status': '',
            'git_root': '',
            'show_help': True,
            'head': '',
            'stashes': []
        }
        super().__init__(*args, **kwargs)
    def title(self):
        # Tab title shown by Sublime for this dashboard view.
        return "STATUS: {}".format(os.path.basename(self.repo_path))
    def refresh_view_state(self):
        """Update all view state.
        Note: For every possible long running process, we enqueue a task
        in a worker thread. We re-render as soon as we receive meaningful
        data which implies that the view is only _eventual_ consistent
        with the real world.
        """
        for thunk in (
            self.fetch_repo_status,
            lambda: {'head': self.get_latest_commit_msg_for_head()},
            lambda: {'stashes': self.get_stashes()},
        ):
            sublime.set_timeout_async(
                partial(self.update_state, thunk, then=self.just_render)
            )
        # These are cheap to compute, so we just do it!
        self.update_state({
            'git_root': self.short_repo_path,
            'show_help': not self.view.settings().get("git_savvy.help_hidden")
        })
    def update_state(self, data, then=None):
        """Update internal view state and maybe invoke a callback.
        `data` can be a mapping or a callable ("thunk") which returns
        a mapping.
        Note: We invoke the "sink" without any arguments. TBC.
        """
        if callable(data):
            data = data()
        with self._lock:
            self.state.update(data)
        if callable(then):
            then()
    def render(self, nuke_cursors=False):
        """Refresh view state and render."""
        self.refresh_view_state()
        self.just_render(nuke_cursors)
        if hasattr(self, "reset_cursor") and nuke_cursors:
            self.reset_cursor()
    @distinct_until_state_changed
    def just_render(self, nuke_cursors=False):
        # TODO: Rewrite to "pureness" so that we don't need a lock here
        # Note: It is forbidden to `update_state` during render, e.g. in
        # any partials.
        with self._lock:
            self.clear_regions()
            rendered = self._render_template()
        self.view.run_command("gs_new_content_and_regions", {
            "content": rendered,
            "regions": self.regions,
            "nuke_cursors": nuke_cursors
        })
        # If no cursor sits on a selectable row, move it to the first one.
        on_special_symbol = any(
            self.view.match_selector(
                s.begin(),
                'meta.git-savvy.section.body.row'
            )
            for s in self.view.sel()
        )
        if not on_special_symbol:
            self.view.run_command("gs_status_navigate_goto")
    def fetch_repo_status(self, delim=None):
        # Thunk: gather the complete `git status` snapshot for `update_state`.
        # NOTE(review): the `delim` parameter is unused; the call below
        # hard-codes its own delimiter — confirm and consider removing.
        lines = self._get_status()
        files_statuses = self._parse_status_for_file_statuses(lines)
        branch_status = self._get_branch_status_components(lines)
        (staged_files,
         unstaged_files,
         untracked_files,
         merge_conflicts) = self.sort_status_entries(files_statuses)
        branch_status = self._format_branch_status(branch_status, delim="\n           ")
        return {
            'staged_files': staged_files,
            'unstaged_files': unstaged_files,
            'untracked_files': untracked_files,
            'merge_conflicts': merge_conflicts,
            'branch_status': branch_status
        }
    def refresh_repo_status_and_render(self):
        """Refresh `git status` state and render.
        Most actions in the status dashboard only affect the `git status`.
        So instead of calling `render` it is a good optimization to just
        ask this method if appropriate.
        """
        self.update_state(self.fetch_repo_status, self.just_render)
    def after_view_creation(self, view):
        # Enable Sublime's goto-result machinery for the filenames we render
        # (see EXTRACT_FILENAME_RE at module level).
        view.settings().set("result_file_regex", EXTRACT_FILENAME_RE)
        view.settings().set("result_base_dir", self.repo_path)
    @ui.partial("branch_status")
    def render_branch_status(self):
        return self.state['branch_status']
    @ui.partial("git_root")
    def render_git_root(self):
        return self.state['git_root']
    @ui.partial("head")
    def render_head(self):
        return self.state['head']
    @ui.partial("staged_files")
    def render_staged_files(self):
        staged_files = self.state['staged_files']
        if not staged_files:
            return ""
        def get_path(file_status):
            """ Display full file_status path, including path_alt if exists """
            if file_status.path_alt:
                return '{} -> {}'.format(file_status.path_alt, file_status.path)
            return file_status.path
        return self.template_staged.format("\n".join(
            "  {} {}".format("-" if f.index_status == "D" else " ", get_path(f))
            for f in staged_files
        ))
    @ui.partial("unstaged_files")
    def render_unstaged_files(self):
        unstaged_files = self.state['unstaged_files']
        if not unstaged_files:
            return ""
        return self.template_unstaged.format("\n".join(
            "  {} {}".format("-" if f.working_status == "D" else " ", f.path)
            for f in unstaged_files
        ))
    @ui.partial("untracked_files")
    def render_untracked_files(self):
        untracked_files = self.state['untracked_files']
        if not untracked_files:
            return ""
        return self.template_untracked.format(
            "\n".join("    " + f.path for f in untracked_files))
    @ui.partial("merge_conflicts")
    def render_merge_conflicts(self):
        merge_conflicts = self.state['merge_conflicts']
        if not merge_conflicts:
            return ""
        return self.template_merge_conflicts.format(
            "\n".join("    " + f.path for f in merge_conflicts))
    @ui.partial("conflicts_bindings")
    def render_conflicts_bindings(self):
        # Show the extra CONFLICTS key bindings only when relevant.
        return self.conflicts_keybindings if self.state['merge_conflicts'] else ""
    @ui.partial("no_status_message")
    def render_no_status_message(self):
        return ("\n    Your working directory is clean.\n"
                if not (self.state['staged_files'] or
                        self.state['unstaged_files'] or
                        self.state['untracked_files'] or
                        self.state['merge_conflicts'])
                else "")
    @ui.partial("stashes")
    def render_stashes(self):
        stash_list = self.state['stashes']
        if not stash_list:
            return ""
        return self.template_stashes.format("\n".join(
            "    ({}) {}".format(stash.id, stash.description) for stash in stash_list))
    @ui.partial("help")
    def render_help(self):
        show_help = self.state['show_help']
        if not show_help:
            return ""
        return self.template_help.format(conflicts_bindings=self.render_conflicts_bindings())
ui.register_listeners(StatusInterface)
def get_subjects(view, *sections):
    # type: (sublime.View, str) -> Iterable[sublime.Region]
    """Yield the subject regions of every requested status section, in order."""
    selector_tpl = 'meta.git-savvy.status.section.{} meta.git-savvy.status.subject'
    return (
        region
        for section in sections
        for region in view.find_by_selector(selector_tpl.format(section))
    )
def region_as_tuple(region):
    # type: (sublime.Region) -> Tuple[int, int]
    """Unpack a region into a hashable ``(begin, end)`` pair."""
    begin, end = region.begin(), region.end()
    return begin, end
def region_from_tuple(tuple_):
    # type: (Tuple[int, int]) -> sublime.Region
    """Rebuild a ``sublime.Region`` from a ``(begin, end)`` pair."""
    begin, end = tuple_
    return sublime.Region(begin, end)
def unique_regions(regions):
    # type: (Iterable[sublime.Region]) -> Iterator[sublime.Region]
    """De-duplicate regions.

    Regions are not hashable, so each is unpacked to a tuple, deduped via
    a set, and finally packed again.
    """
    distinct = {region_as_tuple(region) for region in regions}
    return map(region_from_tuple, distinct)
def unique_selected_lines(view):
    # type: (sublime.View) -> List[sublime.Region]
    """Return every line touched by any selection, without duplicates."""
    touched = (line for s in view.sel() for line in view.lines(s))
    return list(unique_regions(touched))
def get_selected_subjects(view, *sections):
    # type: (sublime.View, str) -> List[str]
    """Extract the subject texts (filenames/stash ids) under the selection."""
    selected_lines = unique_selected_lines(view)
    result = []
    for subject in get_subjects(view, *sections):
        if any(line.contains(subject) for line in selected_lines):
            result.append(view.substr(subject))
    return result
def get_selected_files(view, base_path, *sections):
    # type: (sublime.View, str, str) -> List[str]
    """Like `get_selected_subjects`, but as absolute normalized paths.

    With no explicit sections, all file-bearing sections are searched.
    """
    if not sections:
        sections = ('staged', 'unstaged', 'untracked', 'merge-conflicts')
    return [
        os.path.normpath(os.path.join(base_path, filename))
        for filename in get_selected_subjects(view, *sections)
    ]
def get_interface(view):
    # type: (sublime.View) -> Optional[StatusInterface]
    """Return the StatusInterface bound to `view`, or None if it has another kind."""
    interface = ui.get_interface(view.id())
    return interface if isinstance(interface, StatusInterface) else None
class GsStatusOpenFileCommand(TextCommand, GitCommand):
    """
    For every file that is selected or under a cursor, open that
    file in a new view.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        window = self.view.window()
        if not window:
            # View is detached (e.g. being closed); nothing sensible to do.
            return
        for fpath in get_selected_files(self.view, self.repo_path):
            window.open_file(fpath)
class GsStatusOpenFileOnRemoteCommand(TextCommand, GitCommand):
    """
    For every file that is selected or under a cursor, open a new browser
    window to that file on GitHub.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        # Untracked files are excluded: they do not exist on the remote yet.
        file_paths = get_selected_subjects(self.view, 'staged', 'unstaged', 'merge-conflicts')
        if file_paths:
            self.view.run_command("gs_github_open_file_on_remote", {"fpath": file_paths})
class GsStatusDiffInlineCommand(TextCommand, GitCommand):
    """
    For every file selected or under a cursor, open a new inline-diff view for
    that file. If the file is staged, open the inline-diff in cached mode.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        window = self.view.window()
        if not window:
            return
        repo_path = self.repo_path
        # Staged files diff against the index ("cached"); everything else
        # diffs against the working tree.
        non_cached_files = get_selected_files(self.view, repo_path, 'unstaged', 'merge-conflicts')
        cached_files = get_selected_files(self.view, repo_path, 'staged')
        # Defer to a worker thread; opening diff views can be slow.
        sublime.set_timeout_async(
            lambda: self.load_inline_diff_views(window, non_cached_files, cached_files)
        )
    def load_inline_diff_views(self, window, non_cached_files, cached_files):
        # type: (sublime.Window, List[str], List[str]) -> None
        for fpath in non_cached_files:
            syntax = util.file.guess_syntax_for_file(window, fpath)
            window.run_command("gs_inline_diff_open", {
                "repo_path": self.repo_path,
                "file_path": fpath,
                "syntax": syntax,
                "cached": False,
            })
        for fpath in cached_files:
            syntax = util.file.guess_syntax_for_file(window, fpath)
            window.run_command("gs_inline_diff_open", {
                "repo_path": self.repo_path,
                "file_path": fpath,
                "syntax": syntax,
                "cached": True,
            })
class GsStatusDiffCommand(TextCommand, GitCommand):
    """
    For every file selected or under a cursor, open a new diff view for
    that file. If the file is staged, open the diff in cached mode.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        window = self.view.window()
        if not window:
            return
        repo_path = self.repo_path
        non_cached_files = get_selected_files(
            self.view, repo_path, 'unstaged', 'untracked', 'merge-conflicts'
        )
        cached_files = get_selected_files(self.view, repo_path, 'staged')
        # Defer to a worker thread; opening diff views can be slow.
        sublime.set_timeout_async(
            lambda: self.load_diff_windows(
                window,  # type: ignore  # https://github.com/python/mypy/issues/4297
                non_cached_files,
                cached_files
            )
        )
    def load_diff_windows(self, window, non_cached_files, cached_files):
        # type: (sublime.Window, List[str], List[str]) -> None
        for fpath in non_cached_files:
            window.run_command("gs_diff", {
                "file_path": fpath,
                "in_cached_mode": False,
            })
        for fpath in cached_files:
            window.run_command("gs_diff", {
                "file_path": fpath,
                "in_cached_mode": True,
            })
class GsStatusStageFileCommand(TextCommand, GitCommand):
    """
    For every file that is selected or under a cursor, if that file is
    unstaged, stage it.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        window, interface = self.view.window(), get_interface(self.view)
        if not (window and interface):
            return
        file_paths = get_selected_subjects(self.view, 'unstaged', 'untracked', 'merge-conflicts')
        if file_paths:
            self.stage_file(*file_paths, force=False)
            window.status_message("Staged files successfully.")
            # Only `git status` changed; a full render is unnecessary.
            interface.refresh_repo_status_and_render()
class GsStatusUnstageFileCommand(TextCommand, GitCommand):
    """
    For every file that is selected or under a cursor, if that file is
    staged, unstage it.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        window, interface = self.view.window(), get_interface(self.view)
        if not (window and interface):
            return
        file_paths = get_selected_subjects(self.view, 'staged', 'merge-conflicts')
        if file_paths:
            self.unstage_file(*file_paths)
            window.status_message("Unstaged files successfully.")
            # Only `git status` changed; a full render is unnecessary.
            interface.refresh_repo_status_and_render()
class GsStatusDiscardChangesToFileCommand(TextCommand, GitCommand):
    """
    For every file that is selected or under a cursor, if that file is
    unstaged, reset the file to HEAD. If it is untracked, delete it.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        window, interface = self.view.window(), get_interface(self.view)
        if not (window and interface):
            return
        untracked_files = self.discard_untracked()
        unstaged_files = self.discard_unstaged()
        if untracked_files or unstaged_files:
            window.status_message("Successfully discarded changes.")
            interface.refresh_repo_status_and_render()
    def discard_untracked(self):
        # type: () -> Optional[List[str]]
        file_paths = get_selected_subjects(self.view, 'untracked')
        # `destructive` wraps the action in a user confirmation prompt.
        @util.actions.destructive(description="discard one or more untracked files")
        def do_discard():
            self.discard_untracked_file(*file_paths)
            return file_paths
        if file_paths:
            return do_discard()
        return None
    def discard_unstaged(self):
        # type: () -> Optional[List[str]]
        file_paths = get_selected_subjects(self.view, 'unstaged', 'merge-conflicts')
        # `destructive` wraps the action in a user confirmation prompt.
        @util.actions.destructive(description="discard one or more unstaged files")
        def do_discard():
            self.checkout_file(*file_paths)
            return file_paths
        if file_paths:
            return do_discard()
        return None
class GsStatusStageAllFilesCommand(TextCommand, GitCommand):
    """
    Stage all unstaged files.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        interface = get_interface(self.view)
        if not interface:
            return
        # Tracked files only; untracked files need the "WithUntracked" variant.
        self.add_all_tracked_files()
        interface.refresh_repo_status_and_render()
class GsStatusStageAllFilesWithUntrackedCommand(TextCommand, GitCommand):
    """
    Stage all unstaged files, including new files.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        interface = get_interface(self.view)
        if not interface:
            return
        self.add_all_files()
        interface.refresh_repo_status_and_render()
class GsStatusUnstageAllFilesCommand(TextCommand, GitCommand):
    """
    Unstage all staged changes.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        interface = get_interface(self.view)
        if not interface:
            return
        self.unstage_all_files()
        interface.refresh_repo_status_and_render()
class GsStatusDiscardAllChangesCommand(TextCommand, GitCommand):
    """
    Reset all unstaged files to HEAD.
    """
    # `destructive` prompts the user for confirmation before running.
    @util.actions.destructive(description="discard all unstaged changes, "
                                          "and delete all untracked files")
    def run(self, edit):
        # type: (sublime.Edit) -> None
        interface = get_interface(self.view)
        if not interface:
            return
        self.discard_all_unstaged()
        interface.refresh_repo_status_and_render()
class GsStatusIgnoreFileCommand(TextCommand, GitCommand):
    """
    For each file that is selected or under a cursor, add an
    entry to the git root's `.gitignore` file.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        window, interface = self.view.window(), get_interface(self.view)
        if not (window and interface):
            return
        file_paths = get_selected_subjects(
            self.view, 'staged', 'unstaged', 'untracked', 'merge-conflicts'
        )
        if file_paths:
            for fpath in file_paths:
                # Leading "/" anchors the pattern to the repo root.
                self.add_ignore(os.path.join("/", fpath))
            window.status_message("Successfully ignored files.")
            interface.refresh_repo_status_and_render()
class GsStatusIgnorePatternCommand(TextCommand, GitCommand):
    """
    For the first file that is selected or under a cursor (other
    selections/cursors will be ignored), prompt the user for
    a new pattern to `.gitignore`, prefilled with the filename.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        window, interface = self.view.window(), get_interface(self.view)
        if not (window and interface):
            return
        file_paths = get_selected_subjects(
            self.view, 'staged', 'unstaged', 'untracked', 'merge-conflicts'
        )
        if file_paths:
            # Only the first selected file seeds the prompt.
            window.run_command("gs_ignore_pattern", {"pre_filled": file_paths[0]})
class GsStatusStashCommand(TextCommand, GitCommand):
    """
    Dispatch a stash action chosen in the status dashboard to the matching
    `gs_stash_*` command, reading the selected stash ids from the view.
    Whether multiple stashes may be selected depends on the action:
      action    multiple stashes
      show      True
      apply     False
      pop       False
      discard   False
    """
    def run(self, edit, action=None):
        # type: (sublime.Edit, str) -> None
        window = self.view.window()
        if not window:
            return
        ids = get_selected_subjects(self.view, 'stashes')
        if not ids:
            return
        if action == "show":
            # `show` is the only action that accepts several stashes at once.
            window.run_command("gs_stash_show", {"stash_ids": ids})
            return
        if len(ids) > 1:
            window.status_message("You can only {} one stash at a time.".format(action))
            return
        if action == "apply":
            window.run_command("gs_stash_apply", {"stash_id": ids[0]})
        elif action == "pop":
            window.run_command("gs_stash_pop", {"stash_id": ids[0]})
        elif action == "drop":
            window.run_command("gs_stash_drop", {"stash_id": ids[0]})
class GsStatusLaunchMergeToolCommand(TextCommand, GitCommand):
    """
    Launch external merge tool for selected file.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        file_paths = get_selected_subjects(
            self.view, 'staged', 'unstaged', 'untracked', 'merge-conflicts'
        )
        # Bug fix: with no selection, `file_paths[0]` below raised an
        # IndexError inside the worker thread.  Bail out early instead.
        if not file_paths:
            return
        if len(file_paths) > 1:
            sublime.error_message("You can only launch merge tool for a single file at a time.")
            return
        # Merge tools block; run them off the UI thread.
        sublime.set_timeout_async(lambda: self.launch_tool_for_file(file_paths[0]), 0)
class GsStatusUseCommitVersionCommand(TextCommand, GitCommand):
    # TODO: refactor this alongside interfaces.rebase.GsRebaseUseCommitVersionCommand
    """
    Resolve selected merge conflicts by taking "their" (the commit's) side.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        interface = get_interface(self.view)
        if not interface:
            return
        conflicts = interface.state['merge_conflicts']
        file_paths = get_selected_subjects(self.view, 'merge-conflicts')
        for fpath in file_paths:
            if self.is_commit_version_deleted(fpath, conflicts):
                # Their side deleted the file: resolve by removing it.
                self.git("rm", "--", fpath)
            else:
                self.git("checkout", "--theirs", "--", fpath)
                self.stage_file(fpath)
        interface.refresh_repo_status_and_render()
    def is_commit_version_deleted(self, path, conflicts):
        # type: (str, List[FileStatus]) -> bool
        # "D" in the working status means their side removed the file.
        for conflict in conflicts:
            if conflict.path == path:
                return conflict.working_status == "D"
        return False
class GsStatusUseBaseVersionCommand(TextCommand, GitCommand):
    """
    Resolve selected merge conflicts by taking "our" (the base) side.
    """
    def run(self, edit):
        # type: (sublime.Edit) -> None
        interface = get_interface(self.view)
        if not interface:
            return
        conflicts = interface.state['merge_conflicts']
        file_paths = get_selected_subjects(self.view, 'merge-conflicts')
        for fpath in file_paths:
            if self.is_base_version_deleted(fpath, conflicts):
                # Our side deleted the file: resolve by removing it.
                self.git("rm", "--", fpath)
            else:
                self.git("checkout", "--ours", "--", fpath)
                self.stage_file(fpath)
        interface.refresh_repo_status_and_render()
    def is_base_version_deleted(self, path, conflicts):
        # type: (str, List[FileStatus]) -> bool
        # "D" in the index status means our side removed the file.
        for conflict in conflicts:
            if conflict.path == path:
                return conflict.index_status == "D"
        return False
class GsStatusNavigateFileCommand(GsNavigate):
    """
    Move cursor to the next (or previous) selectable item in the dashboard.
    """
    offset = 0
    def get_available_regions(self):
        # All entities except the "renamed to" halves, which are not
        # independently selectable.
        return self.view.find_by_selector(
            "meta.git-savvy.entity - meta.git-savvy.entity.filename.renamed.to"
        )
class GsStatusNavigateGotoCommand(GsNavigate):
    """
    Move cursor to the next (or previous) selectable file in the dashboard.
    """
    offset = 0
    def get_available_regions(self):
        # Goto symbols plus the "clean" message, so the cursor always has a
        # sensible landing spot even in an empty status.
        return (
            self.view.find_by_selector("gitsavvy.gotosymbol")
            + self.view.find_all("Your working directory is clean", sublime.LITERAL)
        )
| {
"repo_name": "divmain/GitSavvy",
"path": "core/interfaces/status.py",
"copies": "1",
"size": "27966",
"license": "mit",
"hash": 2542343746415027700,
"line_mean": 29.8335170893,
"line_max": 98,
"alpha_frac": 0.5770936137,
"autogenerated": false,
"ratio": 3.913518052057095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4990611665757095,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
from itertools import ifilter, imap
from operator import not_
def list_subtract(a, b):
    """Return a list of ``a``'s elements with those of ``b`` removed.

    Each occurrence in ``b`` cancels at most one matching occurrence in
    ``a``: if a value appears twice in ``a`` and once in ``b``, it appears
    once in the result.
    """
    remaining = list(a)
    for item in b:
        if item in remaining:
            remaining.remove(item)
    return remaining
def identity(x):
    """Return ``x`` unchanged (the identity function)."""
    return x
def caller(method_name, *args, **kwargs):
    """Return a unary function that invokes ``method_name`` on its argument.

    Any extra positional/keyword arguments are forwarded to the method.
    """
    def invoke(obj):
        method = getattr(obj, method_name)
        return method(*args, **kwargs)
    return invoke
def compose(*functions):
    """Compose ``functions`` right-to-left: ``compose(f, g)(x) == f(g(x))``.

    :raises ValueError: if called with no functions.
    """
    if not functions:
        raise ValueError("Must specify functions to compose")
    def composed(*args, **kwargs):
        # The right-most function receives the arguments; the rest fold
        # leftward over its result.
        pipeline = list(reversed(functions))
        result = pipeline[0](*args, **kwargs)
        for fn in pipeline[1:]:
            result = fn(result)
        return result
    return composed
def wrap_result(wrapper):
    """Decorator factory: post-process a function's return value with ``wrapper``."""
    def decorate(f):
        return wraps(f)(compose(wrapper, f))
    return decorate
# Decorator that logically negates a predicate's result.
negate = wrap_result(not_)
def on_items(f, d):
    """Apply ``f`` to ``d``'s item pairs and rebuild a dict from the result."""
    return dict(f(d.items()))
def dichotomy(p, xs):
    """Split ``xs`` into two lazy streams: (items failing ``p``, items passing ``p``).

    Rewritten with generator expressions so it runs on both Python 2 and
    Python 3 (``itertools.ifilter`` does not exist on Python 3).  As with
    the original, passing a one-shot iterator makes the two streams compete
    for its elements — pass a re-iterable sequence.
    """
    return (x for x in xs if not p(x)), (x for x in xs if p(x))
def map_dict(f, d):
    """Apply ``f`` to each ``(key, value)`` pair of ``d``, building a new dict.

    ``f`` receives a 2-tuple and must return a 2-tuple.  Uses the builtin
    ``map`` instead of Python-2-only ``itertools.imap`` so it runs on both
    Python 2 and Python 3.
    """
    return dict(map(f, d.items()))
def filter_dict(p, d):
    """Keep the ``(key, value)`` pairs of ``d`` for which ``p(pair)`` is true.

    Uses the builtin ``filter`` instead of Python-2-only
    ``itertools.ifilter`` so it runs on both Python 2 and Python 3.
    """
    return dict(filter(p, d.items()))
def map_keys(f, d):
    """Return a dict like ``d`` with every key passed through ``f``.

    The original tuple-unpacking lambda (``lambda (k, v): ...``) is a
    syntax error on Python 3 (PEP 3113); the pair is now unpacked in a
    generator expression, which is valid on both Python 2 and Python 3.
    """
    return dict((f(k), v) for k, v in d.items())
def filter_keys(f, d):
    """Keep only the entries of ``d`` whose key satisfies ``f``.

    Rewritten without the Python-2-only tuple-unpacking lambda (PEP 3113)
    so it runs on both Python 2 and Python 3.
    """
    return dict((k, v) for k, v in d.items() if f(k))
def map_values(f, d):
    """Map ``f`` across the values of ``d``.

    Rewritten without the Python-2-only tuple-unpacking lambda (PEP 3113)
    so it runs on both Python 2 and Python 3.

    :return: A dict with the same keys as ``d``, where the value
        of each key ``k`` is ``f(d[k])``.
    """
    return dict((k, f(v)) for k, v in d.items())
def filter_values(f, d):
    """Keep only the entries of ``d`` whose value satisfies ``f``.

    Rewritten without the Python-2-only tuple-unpacking lambda (PEP 3113)
    so it runs on both Python 2 and Python 3.  (The old docstring referred
    to parameters ``dictionary``/``function`` that never existed.)
    """
    return dict((k, v) for k, v in d.items() if f(v))
def dict_subtract(a, b):
    """Return the entries of ``a`` whose keys do not appear in ``b``."""
    return dict((k, a[k]) for k in a if k not in b)
| {
"repo_name": "jml/perfidy",
"path": "perfidy/_func.py",
"copies": "1",
"size": "1969",
"license": "mit",
"hash": 8530123999908043000,
"line_mean": 21.1235955056,
"line_max": 76,
"alpha_frac": 0.5855764347,
"autogenerated": false,
"ratio": 3.165594855305466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42511712900054655,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
from mimetypes import guess_type
from os import path
from re import sub
from time import gmtime, strftime
from urllib.parse import unquote
from sanic.compat import stat_async
from sanic.exceptions import (
ContentRangeError,
FileNotFound,
HeaderNotFound,
InvalidUsage,
)
from sanic.handlers import ContentRangeHandler
from sanic.response import HTTPResponse, file, file_stream
async def _static_request_handler(
    file_or_directory,
    use_modified_since,
    use_content_range,
    stream_large_files,
    request,
    content_type=None,
    file_uri=None,
):
    """Serve one static file request.

    The leading parameters are pre-bound via ``functools.partial`` in
    ``register``; Sanic supplies ``request`` and the optional ``file_uri``
    route parameter.  Honors If-Modified-Since, Range and HEAD requests,
    and streams files above a size threshold.
    """
    # Using this to determine if the URL is trying to break out of the path
    # served. os.path.realpath seems to be very slow
    if file_uri and "../" in file_uri:
        raise InvalidUsage("Invalid URL")
    # Merge served directory and requested file if provided
    # Strip all / that in the beginning of the URL to help prevent python
    # from herping a derp and treating the uri as an absolute path
    root_path = file_path = file_or_directory
    if file_uri:
        file_path = path.join(file_or_directory, sub("^[/]*", "", file_uri))
    # URL decode the path sent by the browser otherwise we won't be able to
    # match filenames which got encoded (filenames with spaces etc)
    file_path = path.abspath(unquote(file_path))
    # Second traversal defense: the resolved path must stay under the root.
    if not file_path.startswith(path.abspath(unquote(root_path))):
        raise FileNotFound(
            "File not found", path=file_or_directory, relative_url=file_uri
        )
    try:
        headers = {}
        # Check if the client has been sent this file before
        # and it has not been modified since
        stats = None
        if use_modified_since:
            stats = await stat_async(file_path)
            modified_since = strftime(
                "%a, %d %b %Y %H:%M:%S GMT", gmtime(stats.st_mtime)
            )
            if request.headers.get("If-Modified-Since") == modified_since:
                return HTTPResponse(status=304)
            headers["Last-Modified"] = modified_since
        _range = None
        if use_content_range:
            _range = None
            if not stats:
                # `stats` may already be populated by the block above;
                # only stat once per request.
                stats = await stat_async(file_path)
            headers["Accept-Ranges"] = "bytes"
            headers["Content-Length"] = str(stats.st_size)
            if request.method != "HEAD":
                try:
                    _range = ContentRangeHandler(request, stats)
                except HeaderNotFound:
                    # No Range header: fall through and serve the whole file.
                    pass
                else:
                    # Partial response: the range handler supplies its own
                    # Content-Length / Content-Range headers.
                    del headers["Content-Length"]
                    for key, value in _range.headers.items():
                        headers[key] = value
        headers["Content-Type"] = (
            content_type or guess_type(file_path)[0] or "text/plain"
        )
        if request.method == "HEAD":
            return HTTPResponse(headers=headers)
        else:
            if stream_large_files:
                # `stream_large_files` may be True (use default threshold)
                # or an int (a custom byte threshold).
                if type(stream_large_files) == int:
                    threshold = stream_large_files
                else:
                    threshold = 1024 * 1024
                if not stats:
                    stats = await stat_async(file_path)
                if stats.st_size >= threshold:
                    return await file_stream(
                        file_path, headers=headers, _range=_range
                    )
            return await file(file_path, headers=headers, _range=_range)
    except ContentRangeError:
        # Invalid Range requests surface to the client as-is.
        raise
    except Exception:
        # Any other failure (missing file, permission error, ...) is
        # reported as a generic 404.
        raise FileNotFound(
            "File not found", path=file_or_directory, relative_url=file_uri
        )
def register(
    app,
    uri,
    file_or_directory,
    pattern,
    use_modified_since,
    use_content_range,
    stream_large_files,
    name="static",
    host=None,
    strict_slashes=None,
    content_type=None,
):
    # TODO: Though sanic is not a file server, I feel like we should at least
    #       make a good effort here. Modified-since is nice, but we could
    #       also look into etags, expires, and caching
    """
    Register a static directory handler with Sanic by adding a route to the
    router and registering a handler.
    :param app: Sanic
    :param file_or_directory: File or directory path to serve from
    :param uri: URL to serve from
    :param pattern: regular expression used to match files in the URL
    :param use_modified_since: If true, send file modified time, and return
                               not modified if the browser's matches the
                               server's
    :param use_content_range: If true, process header for range requests
                              and sends the file part that is requested
    :param stream_large_files: If true, use the file_stream() handler rather
                               than the file() handler to send the file
                               If this is an integer, this represents the
                               threshold size to switch to file_stream()
    :param name: user defined name used for url_for
    :param content_type: user defined content type for header
    :return: registered static routes
    :rtype: List[sanic.router.Route]
    """
    # If we're not trying to match a file directly,
    # serve from the folder
    if not path.isfile(file_or_directory):
        uri += "<file_uri:" + pattern + ">"
    # special prefix for static files
    if not name.startswith("_static_"):
        name = f"_static_{name}"
    # Pre-bind the static-file configuration into the shared request
    # handler; `wraps` preserves the handler's name for introspection.
    _handler = wraps(_static_request_handler)(
        partial(
            _static_request_handler,
            file_or_directory,
            use_modified_since,
            use_content_range,
            stream_large_files,
            content_type=content_type,
        )
    )
    _routes, _ = app.route(
        uri,
        methods=["GET", "HEAD"],
        name=name,
        host=host,
        strict_slashes=strict_slashes,
    )(_handler)
    return _routes
| {
"repo_name": "channelcat/sanic",
"path": "sanic/static.py",
"copies": "1",
"size": "5976",
"license": "mit",
"hash": -8968757582895643000,
"line_mean": 34.7844311377,
"line_max": 77,
"alpha_frac": 0.5881860776,
"autogenerated": false,
"ratio": 4.368421052631579,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5456607130231579,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
from urllib.parse import quote
from django.conf import settings
from django.contrib import messages
from django.urls import reverse
from django.shortcuts import redirect
from apostello.models import Keyword
def keyword_access_check(method):
    """Check a user can access a specific keyword.

    View decorator.  Staff always pass; for locked keywords, users without
    access are warned and redirected to the keyword list.  Expects the
    wrapped view to receive the keyword name as the ``keyword`` kwarg.
    """
    @wraps(method)
    def wrapper(request, *args, **kwargs):
        if request.user.is_staff:
            return method(request, *args, **kwargs)
        try:
            keyword = Keyword.objects.get(keyword=kwargs["keyword"])
            if keyword.is_locked:
                if not keyword.can_user_access(request.user):
                    messages.warning(request, settings.NO_ACCESS_WARNING)
                    return redirect("/keyword/all/")
        except Keyword.DoesNotExist:
            # Unknown keyword: fall through and let the view handle it.
            pass
        return method(request, *args, **kwargs)
    return wrapper
def check_user_perms(view=None, require=None):
    """Check a user has the specified permissions.

    View decorator, usable bare (``@check_user_perms``) or parameterized
    (``@check_user_perms(require=[...])``).  Staff always pass.  With
    ``require=None`` the view is staff-only; otherwise ``require`` names
    boolean attributes of ``request.user.profile`` that must all be true.
    """
    if view is None:
        # Parameterized usage: return the actual decorator.
        return partial(check_user_perms, require=require)

    @wraps(view)
    def f(*args, **kwargs):
        request = args[0]
        # Staff bypass all further checks.  (The original repeated this
        # check inside the `require is None` branch, where it was
        # unreachable — dead code removed.)
        if request.user.is_staff:
            return view(*args, **kwargs)
        if require is not None:
            # Check for anon users:
            # this should not be necessary, but it works...
            if not request.user.is_authenticated:
                redirect_url = settings.LOGIN_URL + "?next=" + quote(request.get_full_path())
                return redirect(redirect_url)
            # Check approval status:
            if not request.user.profile.approved:
                return redirect(reverse("not_approved"))
            # Check user has all required profile permission flags.
            tested_perms = [getattr(request.user.profile, x) for x in require]
            if all(tested_perms):
                return view(*args, **kwargs)
        # Non-staff without (sufficient) permissions: warn and bounce home.
        messages.warning(request, settings.NO_ACCESS_WARNING)
        return redirect("/")

    return f
| {
"repo_name": "monty5811/apostello",
"path": "apostello/decorators.py",
"copies": "1",
"size": "2175",
"license": "mit",
"hash": 8079062136100856000,
"line_mean": 32.4615384615,
"line_max": 93,
"alpha_frac": 0.6068965517,
"autogenerated": false,
"ratio": 4.429735234215886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5536631785915885,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
import json
from xml.etree.ElementTree import ParseError
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.forms.formsets import formset_factory
from vecnet.openmalaria.scenario import Scenario
from ts_om.models import Scenario as ScenarioModel
from ts_om.forms import ScenarioDeploymentsForm, ScenarioDeploymentForm
from ts_om.views.ScenarioBaseFormView import ScenarioBaseFormView
from ts_om.views.ScenarioValidationView import rest_validate
__author__ = 'nreed'
class ScenarioDeploymentsView(ScenarioBaseFormView):
    """Edit the deployments of an OpenMalaria scenario via a formset."""
    template_name = "ts_om/deployments.html"
    form_class = ScenarioDeploymentsForm
    def get_success_url(self):
        return reverse('ts_om.summary', kwargs={'scenario_id': self.kwargs['scenario_id']})
    def get_context_data(self, **kwargs):
        context = super(ScenarioDeploymentsView, self).get_context_data(**kwargs)
        # Collect the intervention component ids as (value, label) choices.
        component_ids = []
        for intervention in self.scenario.interventions.human:
            component_ids.append((intervention.id, intervention.id))
        # Bind the choices into each deployment form; `wraps` keeps the
        # partial looking like the form class for formset_factory.
        ScenarioDeploymentFormSet = formset_factory(wraps(ScenarioDeploymentForm)
                                                    (partial(ScenarioDeploymentForm, components=component_ids)),
                                                    extra=0, can_delete=True)
        deployment_formset = ScenarioDeploymentFormSet(initial=parse_deployments(self.scenario),
                                                       prefix='deployment')
        context["deployment_formset"] = deployment_formset
        context["has_components"] = len(component_ids) > 0
        return context
    def form_valid(self, form, **kwargs):
        # Same formset construction as get_context_data, this time bound
        # to the POST data.
        component_ids = []
        for intervention in self.scenario.interventions.human:
            component_ids.append((intervention.id, intervention.id))
        ScenarioDeploymentFormSet = formset_factory(wraps(ScenarioDeploymentForm)
                                                    (partial(ScenarioDeploymentForm, components=component_ids)),
                                                    extra=0, can_delete=True)
        deployment_formset = ScenarioDeploymentFormSet(self.request.POST, prefix='deployment')
        if not deployment_formset.is_valid():
            return super(ScenarioDeploymentsView, self).form_invalid(form)
        deployments = []
        # NOTE(review): the loop variable below shadows the `form`
        # parameter, so the final `form_valid` call receives the last
        # formset form instead of the original one — confirm intended.
        for form in deployment_formset:
            deployment_info = {
                'name': '',
                'components': form.cleaned_data["components"]
            }
            if 'name' in form.cleaned_data:
                deployment_info['name'] = form.cleaned_data["name"]
            times = form.cleaned_data["timesteps"].split(',')
            coverages = form.cleaned_data["coverages"].split(',')
            timesteps = []
            # Pair each time with its coverage; if fewer coverages were
            # given, the first coverage is reused.
            for index, time in enumerate(times):
                timesteps.append({
                    "time": time,
                    "coverage": coverages[index] if len(coverages) > index else coverages[0]
                })
            deployment_info["timesteps"] = timesteps
            deployments.append(deployment_info)
        self.scenario.interventions.human.deployments = deployments
        return super(ScenarioDeploymentsView, self).form_valid(form, kwargs={'xml': self.scenario.xml})
def parse_deployments(scenario):
    """Convert scenario deployments into initial data for deployment forms.

    Each deployment becomes a dict carrying its name, its component ids, and
    the timestep times/coverages flattened into comma-separated strings.
    """
    parsed = []
    for dep in scenario.interventions.human.deployments:
        steps = dep.timesteps
        parsed.append({
            'name': dep.name,
            'components': dep.components,
            'timesteps': ','.join(str(step["time"]) for step in steps),
            'coverages': ','.join(str(step["coverage"]) for step in steps),
        })
    return parsed
| {
"repo_name": "vecnet/vnetsource",
"path": "ts_om/views/ScenarioDeploymentsView.py",
"copies": "2",
"size": "3931",
"license": "mpl-2.0",
"hash": 7497070295754225000,
"line_mean": 39.5257731959,
"line_max": 112,
"alpha_frac": 0.6387687611,
"autogenerated": false,
"ratio": 4.565621370499419,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6204390131599419,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
import json
import attr
from minion.http import Headers, MediaRange
from minion.request import Response
class JSON(object):
    """
    An 'intelligent' JSON renderer that pretty-prints JSON for humans.

    When the request's most-specific accepted media type is exactly
    ``application/json`` the output is compact (machine client); otherwise
    it is indented and key-sorted for readability.
    """

    def __init__(self, **kwargs):
        # Base dumper with caller-supplied options; render() layers the
        # compact/pretty options on top per request.
        self._dumps = partial(json.dumps, **kwargs)

    def render(self, request, jsonable):
        machine_json = request.accept.media_types[-1] == MediaRange(
            type="application", subtype="json",
        )
        if machine_json:
            # json.dumps expects an (item_separator, key_separator) pair;
            # pass an explicit tuple instead of relying on the ",:" string
            # happening to unpack into two one-character strings.
            content = self._dumps(jsonable, separators=(",", ":"))
        else:
            content = self._dumps(jsonable, indent=2, sort_keys=True)
        return Response(
            content=content,
            headers=Headers([("Content-Type", ["application/json"])]),
        )
class SimpleJSON(object):
    """
    A simple JSON renderer: dump the body with preset ``json.dumps`` options.
    """

    def __init__(self, **kwargs):
        # Freeze the dump options once; every render call reuses them.
        self._dumps = partial(json.dumps, **kwargs)

    def render(self, request, jsonable):
        body = self._dumps(jsonable)
        return Response(body)
@attr.s
class Unicode(object):
    # Renderer that encodes unicode text into bytes for the response body.
    encoding = attr.ib()  # codec name passed to str.encode, e.g. "utf-8"
    errors = attr.ib(default="strict")  # error handler passed to str.encode

    def render(self, request, text):
        # Body is simply the encoded text; *request* is unused and no
        # Content-Type header is set here.
        return Response(
            text.encode(encoding=self.encoding, errors=self.errors),
        )


# Ready-made shared UTF-8 renderer instance.
UTF8 = Unicode(encoding="utf-8")
def bind(renderer, to):
    """
    Bind a renderer to the given callable by constructing a new rendering view.

    The view calls *to* with the request, then renders the result with
    *renderer*.  If either step raises, the renderer's optional
    ``view_error`` / ``render_error`` hook handles the exception; when no
    hook is defined the exception propagates unchanged.
    """
    @wraps(to)
    def view(request, **kwargs):
        try:
            returned = to(request, **kwargs)
        except Exception as error:
            handler = getattr(renderer, "view_error", None)
            if handler is not None:
                return handler(request, error)
            raise
        try:
            return renderer.render(request, returned)
        except Exception as error:
            handler = getattr(renderer, "render_error", None)
            if handler is not None:
                return handler(request, returned, error)
            raise
    return view
| {
"repo_name": "Julian/Minion",
"path": "minion/renderers.py",
"copies": "1",
"size": "2123",
"license": "mit",
"hash": -7673801123038807000,
"line_mean": 23.9764705882,
"line_max": 79,
"alpha_frac": 0.5967969854,
"autogenerated": false,
"ratio": 4.212301587301587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5309098572701587,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
import queue
import threading
class OP:
    """
    A client operation.

    :param client: The client this operation is executed from.
    :type client: :class:`leaf.client.Client`
    :param func: The operation function that is going to be executed.
    :type func: function
    :param require_connection: When true and the client is not connected,
                               an error message is drawn on the UI instead
                               of running the operation.
    :type require_connection: bool
    """

    def __init__(self, client, func, require_connection):
        self.client = client
        self.func = func
        self.require_connection = require_connection

    def __call__(self, *args, **kwargs):
        blocked = self.require_connection and not self.client.connected
        if blocked:
            # Surface the problem on the UI rather than running the operation.
            self.client.ui.draw_client_info("Error: Not connected to server")
            self.client.show_help("connect")
        else:
            self.func(self.client, *args, **kwargs)
def op(func=None, require_connection=True):
    """
    Decorator that defers the decorated client method to the
    :class:`OPExecutor` thread instead of running it inline.

    The object the decorated method belongs to must expose an
    :class:`OPExecutor` attribute named ``op_executor``.

    :param require_connection: Whether the client must be connected to the
                               server when the queued operation runs.
    :type require_connection: bool
    """
    if func is None:
        # Invoked as ``@op(require_connection=...)`` -- return the actual
        # decorator with the option baked in.
        return partial(op, require_connection=require_connection)

    @wraps(func)
    def decorator(self, *args, **kwargs):
        queued = OP(self, func, require_connection)
        self.op_executor.enqueue(queued, *args, **kwargs)
    return decorator
class OPExecutor(threading.Thread):
    """
    Thread used for executing long-running operations in the client.

    :var exception_handler: Callback which handles exceptions raised by
                            operation executions.
    :vartype exception_handler: function
    """

    def __init__(self, exception_handler):
        super().__init__()
        self.stop_event = threading.Event()
        self.queue = queue.Queue()
        self.exception_handler = exception_handler

    def enqueue(self, op, *args, **kwargs):
        """
        Push an operation into the queue to be executed later.

        :param op: The operation to be queued for execution
        :type op: :class:`OP`
        """
        self.queue.put(partial(op, *args, **kwargs))

    def run(self):
        # Poll with a timeout so a stop() request is noticed within 0.5s
        # even when the queue is empty.
        # BUGFIX: Event.isSet() was removed in Python 3.12 -- use is_set().
        while not self.stop_event.is_set():
            try:
                self.queue.get(timeout=0.5)()
            except queue.Empty:
                pass
            except Exception as exc:
                # A failing operation must not kill the executor thread;
                # delegate to the configured handler instead.
                self.exception_handler(exc)

    def stop(self):
        """
        Stop the thread.
        """
        # BUGFIX: Thread.isAlive() was removed in Python 3.9 -- use is_alive().
        if self.is_alive():
            # Toggle shutdown of the executor
            self.stop_event.set()
            # Wait for all queued operations to finish
            self.join()
| {
"repo_name": "simonklb/matrix-leaf",
"path": "leaf/client/op.py",
"copies": "1",
"size": "3050",
"license": "mit",
"hash": -8977653101772317000,
"line_mean": 30.4432989691,
"line_max": 78,
"alpha_frac": 0.6049180328,
"autogenerated": false,
"ratio": 4.5051698670605616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 97
} |
from functools import partial, wraps
from crispy_forms.layout import Layout, HTML
from django.contrib import messages
from django.forms import BaseFormSet
from django.forms.models import formset_factory
from django.http import HttpResponseRedirect
from mezzanine.pages.page_processors import processor_for
from hs_app_netCDF.forms import ModalDialogLayoutAddVariable, OriginalCoverageForm, \
OriginalCoverageMetaDelete, VariableForm, VariableLayoutEdit
from hs_app_netCDF.models import NetcdfResource
from hs_core import page_processors
from hs_core.forms import MetaDataElementDeleteForm
from hs_core.views import add_generic_context
def _ori_coverage_dict(ori_cov_obj):
    """Flatten an original-coverage metadata element into a template dict.

    Returns an empty dict when *ori_cov_obj* is None, which is what the
    coverage form expects as ``initial`` data.  Extracted so the view and
    edit branches of ``landing_page`` build the dict identically.
    """
    if not ori_cov_obj:
        return {}
    value = ori_cov_obj.value
    return {
        'name': value.get('name', None),
        'units': value['units'],
        'projection': value.get('projection', None),
        'northlimit': value['northlimit'],
        'eastlimit': value['eastlimit'],
        'southlimit': value['southlimit'],
        'westlimit': value['westlimit'],
        'projection_string_type': ori_cov_obj.projection_string_type,
        'projection_string_text': ori_cov_obj.projection_string_text,
        'datum': ori_cov_obj.datum,
    }


@processor_for(NetcdfResource)
# when the resource is created this page will be shown
def landing_page(request, page):
    """Build the template context for a NetCDF resource landing page.

    Handles both the read-only view mode and the metadata edit mode; edit
    mode wires up crispy-forms layouts for the original coverage element
    and the variable metadata elements.
    """
    content_model = page.get_content_model()
    edit_resource = page_processors.check_resource_mode(request)
    if content_model.metadata.is_dirty:
        messages.info(request, "NetCDF file is out of sync with resource metadata changes.")

    if not edit_resource:  # not editing mode
        # get the base context from hs_core
        context = page_processors.get_page_context(page, request.user, resource_edit=edit_resource,
                                                   extended_metadata_layout=None, request=request)
        if isinstance(context, HttpResponseRedirect):
            # sending user to login page
            return context
        extended_metadata_exists = False
        if content_model.metadata.variables.all() or content_model.metadata.ori_coverage.all():
            extended_metadata_exists = True
        elif content_model.files.all():
            # a generated header file also counts as extended metadata
            for f in content_model.files.all():
                if '_header_info.txt' in f.resource_file.name:
                    extended_metadata_exists = True
        context['extended_metadata_exists'] = extended_metadata_exists
        # 'variables' is the field name from the NetCDFMetaData model
        context['variables'] = content_model.metadata.variables.all()
        # add the original coverage context
        ori_cov_obj = content_model.metadata.ori_coverage.all().first()
        context['original_coverage'] = _ori_coverage_dict(ori_cov_obj) if ori_cov_obj else None
    else:  # editing mode
        # Original Coverage form in editing mode
        ori_cov_obj = content_model.metadata.ori_coverage.all().first()
        ori_cov_form = OriginalCoverageForm(initial=_ori_coverage_dict(ori_cov_obj),
                                            allow_edit=edit_resource,
                                            res_short_id=content_model.short_id,
                                            element_id=ori_cov_obj.id if ori_cov_obj else None)
        ori_cov_form.delete_modal_form = OriginalCoverageMetaDelete(
            content_model.short_id, 'originalcoverage', ori_cov_obj.id if ori_cov_obj else None)
        # Variable forms in editing mode; wraps() keeps the partial looking
        # like VariableForm so formset_factory can read its attributes.
        VariableFormSetEdit = formset_factory(
            wraps(VariableForm)(partial(VariableForm, allow_edit=edit_resource)),
            formset=BaseFormSet, extra=0)
        variable_formset = VariableFormSetEdit(
            initial=list(content_model.metadata.variables.all().values()), prefix='variable')
        add_variable_modal_form = VariableForm(
            allow_edit=edit_resource, res_short_id=content_model.short_id)
        for form in variable_formset.forms:
            if len(form.initial) > 0:
                # existing variable: update endpoint plus a delete dialog
                form.delete_modal_form = MetaDataElementDeleteForm(
                    content_model.short_id, 'variable', form.initial['id'])
                form.action = "/hsapi/_internal/%s/variable/%s/update-metadata/" % \
                              (content_model.short_id, form.initial['id'])
                form.number = form.initial['id']
            else:
                form.action = "/hsapi/_internal/%s/variable/add-metadata/" % content_model.short_id
        # netcdf file update notification in editing mode
        UpdateNetcdfLayout = HTML(content_model.metadata.get_update_netcdf_file_html_form())
        ext_md_layout = Layout(
            UpdateNetcdfLayout,
            HTML(
                """
                <div class="form-group col-xs-12" id="originalcoverage">
                    {% load crispy_forms_tags %}
                    {% crispy original_coverage_form %}
                </div>
                """
            ),
            VariableLayoutEdit,
            ModalDialogLayoutAddVariable,)
        # get the base context from hs_core
        context = page_processors.get_page_context(page, request.user, resource_edit=edit_resource,
                                                   extended_metadata_layout=ext_md_layout,
                                                   request=request)
        context['variable_formset'] = variable_formset
        context['add_variable_modal_form'] = add_variable_modal_form
        context['original_coverage_form'] = ori_cov_form
    # merge the generic hs_core context
    hs_core_context = add_generic_context(request, page)
    context.update(hs_core_context)
    return context
| {
"repo_name": "hydroshare/hydroshare",
"path": "hs_app_netCDF/page_processors.py",
"copies": "1",
"size": "6814",
"license": "bsd-3-clause",
"hash": -6399414974906306000,
"line_mean": 47.3262411348,
"line_max": 99,
"alpha_frac": 0.6099207514,
"autogenerated": false,
"ratio": 3.9801401869158877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016310270085742465,
"num_lines": 141
} |
from functools import partial, wraps
from crispy_forms.layout import Layout, HTML
from django.forms import formset_factory
from django.http import HttpResponseRedirect
from mezzanine.pages.page_processors import processor_for
from hs_core import page_processors
from hs_core.forms import BaseFormSet, MetaDataElementDeleteForm
from hs_core.views import add_generic_context
from hs_modflow_modelinstance.forms import ModelOutputForm, ExecutedByForm, StudyAreaForm, \
GridDimensionsForm, StressPeriodForm, GroundWaterFlowForm, BoundaryConditionForm, \
ModelCalibrationForm, ModelInputForm, GeneralElementsForm, ModelInputLayoutEdit, \
ModalDialogLayoutAddModelInput
from hs_modflow_modelinstance.models import MODFLOWModelInstanceResource
def _element_form(form_cls, element, res_short_id):
    """Instantiate a metadata element form bound to *element* (may be None).

    Every single-valued MODFLOW metadata element form takes the same
    ``instance`` / ``res_short_id`` / ``element_id`` arguments; this helper
    removes the nine-fold repetition in ``landing_page``.
    """
    return form_cls(instance=element,
                    res_short_id=res_short_id,
                    element_id=element.id if element else None)


@processor_for(MODFLOWModelInstanceResource)
def landing_page(request, page):
    """Build the template context for a MODFLOW model instance resource page.

    Read-only mode exposes the metadata elements directly; edit mode builds
    one crispy form per element plus a formset for the model inputs.
    """
    content_model = page.get_content_model()
    edit_resource = page_processors.check_resource_mode(request)
    metadata = content_model.metadata

    if not edit_resource or content_model.raccess.published:
        # get the context from hs_core
        context = page_processors.get_page_context(page,
                                                   request.user,
                                                   request=request,
                                                   resource_edit=edit_resource,
                                                   extended_metadata_layout=None)
        if isinstance(context, HttpResponseRedirect):
            # sending user to login page
            return context
        extended_metadata_exists = bool(
            metadata.model_output or
            metadata.executed_by or
            metadata.study_area or
            metadata.grid_dimensions or
            metadata.stress_period or
            metadata.ground_water_flow or
            metadata.boundary_condition or
            metadata.model_calibration or
            metadata.model_inputs or
            metadata.general_elements)
        context['extended_metadata_exists'] = extended_metadata_exists
        context['model_output'] = metadata.model_output
        context['executed_by'] = metadata.executed_by
        context['study_area'] = metadata.study_area
        context['grid_dimensions'] = metadata.grid_dimensions
        context['stress_period'] = metadata.stress_period
        context['ground_water_flow'] = metadata.ground_water_flow
        context['boundary_condition'] = metadata.boundary_condition
        context['model_calibration'] = metadata.model_calibration
        context['model_inputs'] = metadata.model_inputs
        context['general_elements'] = metadata.general_elements
    # add MODFLOW Model parameters context in edit mode
    else:
        short_id = content_model.short_id
        # one bound form per single-valued metadata element
        model_output_form = _element_form(ModelOutputForm, metadata.model_output, short_id)
        executed_by_form = _element_form(ExecutedByForm, metadata.executed_by, short_id)
        study_area_form = _element_form(StudyAreaForm, metadata.study_area, short_id)
        grid_dimensions_form = _element_form(GridDimensionsForm, metadata.grid_dimensions,
                                             short_id)
        stress_period_form = _element_form(StressPeriodForm, metadata.stress_period, short_id)
        ground_water_flow_form = _element_form(GroundWaterFlowForm, metadata.ground_water_flow,
                                               short_id)
        boundary_condition_form = _element_form(BoundaryConditionForm,
                                                metadata.boundary_condition, short_id)
        model_calibration_form = _element_form(ModelCalibrationForm, metadata.model_calibration,
                                               short_id)
        general_elements_form = _element_form(GeneralElementsForm, metadata.general_elements,
                                              short_id)
        # formset for the repeatable model input element; wraps() keeps the
        # partial looking like ModelInputForm for formset_factory.
        ModelInputFormSetEdit = formset_factory(
            wraps(ModelInputForm)(partial(ModelInputForm, allow_edit=True)),
            formset=BaseFormSet, extra=0)
        model_input_formset = ModelInputFormSetEdit(
            initial=list(metadata.model_inputs.values()),
            prefix='modelinput')
        for model_input_form in model_input_formset.forms:
            if len(model_input_form.initial) > 0:
                # existing element: update endpoint plus a delete dialog
                model_input_form.action = "/hsapi/_internal/%s/modelinput/%s/update-metadata/" % \
                                          (short_id, model_input_form.initial['id'])
                model_input_form.delete_modal_form = MetaDataElementDeleteForm(
                    short_id, 'modelinput',
                    model_input_form.initial['id'])
                model_input_form.number = model_input_form.initial['id']
            else:
                model_input_form.action = "/hsapi/_internal/%s/modelinput/add-metadata/" % \
                                          short_id
        add_modelinput_modal_form = ModelInputForm(allow_edit=False,
                                                   res_short_id=short_id)
        ext_md_layout = Layout(HTML("<div class='col-xs-12 col-sm-6'>"
                                    "<div class='form-group' id='modeloutput'> "
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy model_output_form %} '
                                    '</div>'),
                               HTML('<div class="form-group" id="executedby"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy executed_by_form %} '
                                    '</div> '),
                               HTML('<div class="form-group" id="boundarycondition"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy boundary_condition_form %} '
                                    '</div>'),
                               HTML('<div class="form-group" id="generalelements"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy general_elements_form %} '
                                    '</div>'),
                               HTML("</div>"),
                               ModelInputLayoutEdit,
                               ModalDialogLayoutAddModelInput,
                               HTML('<div class="col-xs-12 col-sm-6">'
                                    '<div class="form-group" id="studyarea"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy study_area_form %} '
                                    '</div> '),
                               HTML('<div class="form-group" id="griddimensions"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy grid_dimensions_form %} '
                                    '</div>'),
                               HTML('<div class="form-group" id="stressperiod"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy stress_period_form %} '
                                    '</div>'),
                               HTML('<div class="form-group" id="groundwaterflow"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy ground_water_flow_form %} '
                                    '</div>'),
                               HTML('<div class="form-group" id="modelcalibration"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy model_calibration_form %} '
                                    '</div></div>')
                               )
        # get the context from hs_core
        context = page_processors.get_page_context(page,
                                                   request.user,
                                                   resource_edit=edit_resource,
                                                   extended_metadata_layout=ext_md_layout,
                                                   request=request)
        context['resource_type'] = 'MODFLOW Model Instance Resource'
        context['model_output_form'] = model_output_form
        context['executed_by_form'] = executed_by_form
        context['study_area_form'] = study_area_form
        context['grid_dimensions_form'] = grid_dimensions_form
        context['stress_period_form'] = stress_period_form
        context['ground_water_flow_form'] = ground_water_flow_form
        context['boundary_condition_form'] = boundary_condition_form
        context['model_calibration_form'] = model_calibration_form
        context['model_input_formset'] = model_input_formset
        context['add_modelinput_modal_form'] = add_modelinput_modal_form
        context['general_elements_form'] = general_elements_form

    hs_core_context = add_generic_context(request, page)
    context.update(hs_core_context)
    return context
| {
"repo_name": "hydroshare/hydroshare",
"path": "hs_modflow_modelinstance/page_processors.py",
"copies": "1",
"size": "11364",
"license": "bsd-3-clause",
"hash": 8947570394892241000,
"line_mean": 53.8985507246,
"line_max": 98,
"alpha_frac": 0.5341429074,
"autogenerated": false,
"ratio": 4.833687792428754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5867830699828754,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
from crispy_forms.layout import Layout, HTML
from django.forms.models import formset_factory
from django.http import HttpResponseRedirect
from mezzanine.pages.page_processors import processor_for
from .forms import CellInfoForm, BandInfoForm, BaseBandInfoFormSet, OriginalCoverageSpatialForm, \
BandInfoLayoutEdit
from hs_core import page_processors
from hs_core.views import add_generic_context
from .models import RasterResource
# page processor to populate raster resource specific metadata into my-resources template page
def _ori_coverage_value_dict(ori_cov_obj):
    """Flatten an original spatial coverage element into a plain dict.

    Returns an empty dict when *ori_cov_obj* is None so the result can be
    passed straight to a form's ``initial``.  Extracted so the view and
    edit branches of ``landing_page`` build the dict identically.
    """
    if not ori_cov_obj:
        return {}
    value = ori_cov_obj.value
    return {
        'units': value['units'],
        'projection': value.get('projection', None),
        'northlimit': value['northlimit'],
        'eastlimit': value['eastlimit'],
        'southlimit': value['southlimit'],
        'westlimit': value['westlimit'],
        'projection_string': value.get('projection_string', None),
        'datum': value.get('datum', None),
    }


@processor_for(RasterResource)
# TODO: problematic permissions
def landing_page(request, page):
    """Build the template context for a raster resource landing page.

    Read-only mode exposes cell/band/coverage metadata directly; edit mode
    builds the crispy forms and the band-information formset.
    """
    content_model = page.get_content_model()
    edit_resource = page_processors.check_resource_mode(request)
    context = page_processors.get_page_context(page, request.user, resource_edit=edit_resource,
                                               extended_metadata_layout=None, request=request)
    if not edit_resource and isinstance(context, HttpResponseRedirect):
        # sending user to login page
        return context
    extended_metadata_exists = False
    if content_model.metadata.cellInformation or content_model.metadata.bandInformations:
        extended_metadata_exists = True
    context['extended_metadata_exists'] = extended_metadata_exists

    if not edit_resource:
        # get the context from content_model
        ori_cov_obj = content_model.metadata.originalCoverage
        if ori_cov_obj:
            context['originalCoverage'] = _ori_coverage_value_dict(ori_cov_obj)
        context['cellInformation'] = content_model.metadata.cellInformation
        context['bandInformation'] = content_model.metadata.bandInformations
    else:
        # cellinfo_form
        cellinfo_form = CellInfoForm(instance=content_model.metadata.cellInformation,
                                     res_short_id=content_model.short_id,
                                     allow_edit=True,
                                     element_id=content_model.metadata.cellInformation.id
                                     if content_model.metadata.cellInformation else None)
        cellinfo_layout = HTML("<div class='form-group col-lg-6 col-xs-12' id='CellInformation'> "
                               '{% load crispy_forms_tags %} '
                               '{% crispy cellinfo_form %} '
                               '</div>')
        # bandinfo_formset; wraps() keeps the partial looking like
        # BandInfoForm so formset_factory can read its attributes.
        BandInfoFormSetEdit = formset_factory(
            wraps(BandInfoForm)(partial(BandInfoForm, allow_edit=edit_resource)),
            formset=BaseBandInfoFormSet, extra=0)
        bandinfo_formset = BandInfoFormSetEdit(
            initial=list(content_model.metadata.bandInformations.values()),
            prefix='BandInformation')
        for form in bandinfo_formset.forms:
            if len(form.initial) > 0:
                form.action = "/hsapi/_internal/%s/bandinformation/%s/update-metadata/" % \
                              (content_model.short_id, form.initial['id'])
                form.number = form.initial['id']
        # original coverage form
        ori_cov_obj = content_model.metadata.originalCoverage
        ori_coverage_form = OriginalCoverageSpatialForm(
            initial=_ori_coverage_value_dict(ori_cov_obj),
            res_short_id=content_model.short_id,
            allow_edit=edit_resource,
            element_id=ori_cov_obj.id if ori_cov_obj else None)
        ori_coverage_layout = HTML("""
            <div class="form-group col-lg-6 col-xs-12" id="originalcoverage">
                {% load crispy_forms_tags %}
                {% crispy ori_coverage_form %}
            </div>
            """)
        # update context with the edit-mode layout
        ext_md_layout = Layout(HTML("<div>"),
                               ori_coverage_layout,
                               cellinfo_layout,
                               BandInfoLayoutEdit,
                               HTML("</div>")
                               )
        context = page_processors.get_page_context(page, request.user, resource_edit=edit_resource,
                                                   extended_metadata_layout=ext_md_layout,
                                                   request=request)
        context['ori_coverage_form'] = ori_coverage_form
        context['cellinfo_form'] = cellinfo_form
        context['bandinfo_formset'] = bandinfo_formset

    hs_core_context = add_generic_context(request, page)
    context.update(hs_core_context)
    return context
| {
"repo_name": "hydroshare/hydroshare",
"path": "hs_geo_raster_resource/page_processors.py",
"copies": "1",
"size": "6675",
"license": "bsd-3-clause",
"hash": 5325800640805224000,
"line_mean": 52.4,
"line_max": 100,
"alpha_frac": 0.57917603,
"autogenerated": false,
"ratio": 4.444074567243675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5523250597243675,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
from django.core.exceptions import PermissionDenied
from django.forms.models import formset_factory
from mezzanine.pages.page_processors import processor_for
from hs_core.models import AbstractResource, GenericResource, Relation
from hs_core import languages_iso
from forms import CreatorForm, ContributorForm, SubjectsForm, AbstractForm, RelationForm, \
SourceForm, FundingAgencyForm, BaseCreatorFormSet, BaseContributorFormSet, BaseFormSet, \
MetaDataElementDeleteForm, CoverageTemporalForm, CoverageSpatialForm, ExtendedMetadataForm
from hs_tools_resource.models import SupportedResTypes, ToolResource
from hs_core.views.utils import authorize, ACTION_TO_AUTHORIZE, show_relations_section, \
can_user_copy_resource
from hs_core.hydroshare.resource import METADATA_STATUS_SUFFICIENT, METADATA_STATUS_INSUFFICIENT
from hs_tools_resource.utils import parse_app_url_template
@processor_for(GenericResource)
def landing_page(request, page):
    """Render a generic resource landing page in view or edit mode."""
    return get_page_context(page, request.user,
                            resource_edit=check_resource_mode(request),
                            request=request)
# resource type specific app needs to call this method to inject a crispy_form layout
# object for displaying metadata UI for the extended metadata for their resource
def get_page_context(page, user, resource_edit=False, extended_metadata_layout=None, request=None):
    """
    Build the basic template context (a python dict) used to render a resource
    page. Can and should be extended by page/resource-specific page_processors.

    :param page: which page to get the template context for
    :param user: the user who is viewing the page
    :param resource_edit: True if and only if the page should render in edit mode
    :param extended_metadata_layout: layout information used to build an ExtendedMetadataForm
    :param request: the Django request associated with the page load
    :return: the template context dict
    :raises PermissionDenied: if the user may not view (or, in edit mode,
        change) the resource

    TODO: refactor to make it clear that there are two different modes = EDITABLE | READONLY
        - split into two functions: get_readonly_page_context(...) and
          get_editable_page_context(...)
    """
    # Pop the one-shot file-type error a previous upload may have left in the
    # session; it is shown once and then cleared.
    file_type_error = ''
    if request:
        file_type_error = request.session.get("file_type_error", None)
        if file_type_error:
            del request.session["file_type_error"]

    content_model = page.get_content_model()

    # whether the user has permission to view this resource
    can_view = content_model.can_view(request)
    if not can_view:
        raise PermissionDenied()

    discoverable = content_model.raccess.discoverable
    validation_error = None
    resource_is_mine = False
    if user.is_authenticated():
        resource_is_mine = content_model.rlabels.is_mine(user)

    metadata_status = _get_metadata_status(content_model)
    belongs_to_collections = content_model.collections.all()

    relevant_tools = None
    tool_homepage_url = None
    if not resource_edit:  # In view mode
        content_model_str = str(content_model.content_model).lower()
        # content_model_str is already lower-cased above, so compare directly
        if content_model_str == "toolresource":
            if content_model.metadata.homepage_url.exists():
                tool_homepage_url = content_model.metadata.homepage_url.first().value

        # Collect webapp tools that declare support for this resource type and
        # that this user is allowed to see.
        relevant_tools = []
        # loop through all SupportedResTypes objs (one webapp resource has one
        # SupportedResTypes obj)
        for res_type in SupportedResTypes.objects.all():
            supported_flag = False
            for supported_type in res_type.supported_res_types.all():
                if content_model_str == supported_type.description.lower():
                    supported_flag = True
                    break

            if supported_flag:
                # reverse lookup: metadata obj --> res obj
                tool_res_obj = ToolResource.objects.get(object_id=res_type.object_id)
                if tool_res_obj:
                    sharing_status_supported = False
                    supported_sharing_status_obj = tool_res_obj.metadata.\
                        supported_sharing_status.first()
                    if supported_sharing_status_obj is not None:
                        supported_sharing_status_str = supported_sharing_status_obj.\
                            get_sharing_status_str()
                        if len(supported_sharing_status_str) > 0:
                            res_sharing_status = content_model.raccess.sharing_status
                            if supported_sharing_status_str.lower().\
                                    find(res_sharing_status.lower()) != -1:
                                sharing_status_supported = True
                    else:
                        # backward compatible: webapp without supported_sharing_status metadata
                        # is considered to support all sharing status
                        sharing_status_supported = True

                    if sharing_status_supported:
                        is_authorized = authorize(
                            request, tool_res_obj.short_id,
                            needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE,
                            raises_exception=False)[1]
                        if is_authorized:
                            tool_url = tool_res_obj.metadata.url_bases.first().value \
                                if tool_res_obj.metadata.url_bases.first() else None
                            tool_icon_url = tool_res_obj.metadata.tool_icon.first().value \
                                if tool_res_obj.metadata.tool_icon.first() else "raise-img-error"
                            hs_term_dict_user = {}
                            hs_term_dict_user["HS_USR_NAME"] = request.user.username if \
                                request.user.is_authenticated() else "anonymous"
                            tool_url_new = parse_app_url_template(
                                tool_url, [content_model.get_hs_term_dict(), hs_term_dict_user])
                            if tool_url_new is not None:
                                tl = {'title': str(tool_res_obj.metadata.title.value),
                                      'icon_url': tool_icon_url,
                                      'url': tool_url_new}
                                relevant_tools.append(tl)

    # Pop the one-shot status flags a previous request may have stored in the
    # session (each is displayed once on the next page load, then cleared).
    just_created = False
    just_copied = False
    create_resource_error = None
    just_published = False
    if request:
        validation_error = check_for_validation(request)

        just_created = request.session.get('just_created', False)
        if 'just_created' in request.session:
            del request.session['just_created']

        just_copied = request.session.get('just_copied', False)
        if 'just_copied' in request.session:
            del request.session['just_copied']

        create_resource_error = request.session.get('resource_creation_error', None)
        if 'resource_creation_error' in request.session:
            del request.session['resource_creation_error']

        just_published = request.session.get('just_published', False)
        if 'just_published' in request.session:
            del request.session['just_published']

    bag_url = AbstractResource.bag_url(content_model.short_id)

    if user.is_authenticated():
        show_content_files = user.uaccess.can_view_resource(content_model)
    else:
        # if anonymous user getting access to a private resource (since resource is discoverable),
        # then don't show content files
        show_content_files = content_model.raccess.public

    allow_copy = can_user_copy_resource(content_model, user)

    # user requested the resource in READONLY mode
    if not resource_edit:
        temporal_coverages = content_model.metadata.coverages.all().filter(type='period')
        if len(temporal_coverages) > 0:
            temporal_coverage_data_dict = {}
            temporal_coverage = temporal_coverages[0]
            temporal_coverage_data_dict['start_date'] = temporal_coverage.value['start']
            temporal_coverage_data_dict['end_date'] = temporal_coverage.value['end']
            temporal_coverage_data_dict['name'] = temporal_coverage.value.get('name', '')
        else:
            temporal_coverage_data_dict = None

        spatial_coverages = content_model.metadata.coverages.all().exclude(type='period')
        if len(spatial_coverages) > 0:
            spatial_coverage_data_dict = {}
            spatial_coverage = spatial_coverages[0]
            spatial_coverage_data_dict['name'] = spatial_coverage.value.get('name', None)
            spatial_coverage_data_dict['units'] = spatial_coverage.value['units']
            spatial_coverage_data_dict['zunits'] = spatial_coverage.value.get('zunits', None)
            spatial_coverage_data_dict['projection'] = spatial_coverage.value.get('projection',
                                                                                 None)
            spatial_coverage_data_dict['type'] = spatial_coverage.type
            if spatial_coverage.type == 'point':
                spatial_coverage_data_dict['east'] = spatial_coverage.value['east']
                spatial_coverage_data_dict['north'] = spatial_coverage.value['north']
                spatial_coverage_data_dict['elevation'] = spatial_coverage.value.get('elevation',
                                                                                     None)
            else:
                # box coverage
                spatial_coverage_data_dict['northlimit'] = spatial_coverage.value['northlimit']
                spatial_coverage_data_dict['eastlimit'] = spatial_coverage.value['eastlimit']
                spatial_coverage_data_dict['southlimit'] = spatial_coverage.value['southlimit']
                spatial_coverage_data_dict['westlimit'] = spatial_coverage.value['westlimit']
                spatial_coverage_data_dict['uplimit'] = spatial_coverage.value.get('uplimit', None)
                spatial_coverage_data_dict['downlimit'] = spatial_coverage.value.get('downlimit',
                                                                                     None)
        else:
            spatial_coverage_data_dict = None

        keywords = ",".join([sub.value for sub in content_model.metadata.subjects.all()])
        languages_dict = dict(languages_iso.languages)
        language = languages_dict[content_model.metadata.language.code] if \
            content_model.metadata.language else None
        title = content_model.metadata.title.value if content_model.metadata.title else None
        abstract = content_model.metadata.description.abstract if \
            content_model.metadata.description else None
        missing_metadata_elements = content_model.metadata.get_required_missing_elements()

        context = {
            'resource_edit_mode': resource_edit,
            'metadata_form': None,
            'citation': content_model.get_citation(),
            'title': title,
            'abstract': abstract,
            'creators': content_model.metadata.creators.all(),
            'contributors': content_model.metadata.contributors.all(),
            'temporal_coverage': temporal_coverage_data_dict,
            'spatial_coverage': spatial_coverage_data_dict,
            'language': language,
            'keywords': keywords,
            'rights': content_model.metadata.rights,
            'sources': content_model.metadata.sources.all(),
            'relations': content_model.metadata.relations.all(),
            'show_relations_section': show_relations_section(content_model),
            'fundingagencies': content_model.metadata.funding_agencies.all(),
            'metadata_status': metadata_status,
            'missing_metadata_elements': missing_metadata_elements,
            'validation_error': validation_error if validation_error else None,
            'resource_creation_error': create_resource_error,
            'relevant_tools': relevant_tools,
            'tool_homepage_url': tool_homepage_url,
            'file_type_error': file_type_error,
            'just_created': just_created,
            'just_copied': just_copied,
            'just_published': just_published,
            'bag_url': bag_url,
            'show_content_files': show_content_files,
            'discoverable': discoverable,
            'resource_is_mine': resource_is_mine,
            'allow_resource_copy': allow_copy,
            'is_resource_specific_tab_active': False,
            'belongs_to_collections': belongs_to_collections
        }

        # One-shot task/download info left in the session by an earlier
        # request; surfaced once, then cleared.
        if 'task_id' in request.session:
            task_id = request.session.get('task_id', None)
            if task_id:
                context['task_id'] = task_id
            del request.session['task_id']

        if 'download_path' in request.session:
            download_path = request.session.get('download_path', None)
            if download_path:
                context['download_path'] = download_path
            del request.session['download_path']

        return context

    # user requested the resource in EDIT MODE

    # whether the user has permission to change the model
    can_change = content_model.can_change(request)
    if not can_change:
        raise PermissionDenied()

    # Modal "add element" forms -- one blank form per repeatable element type.
    add_creator_modal_form = CreatorForm(allow_edit=can_change, res_short_id=content_model.short_id)
    add_contributor_modal_form = ContributorForm(allow_edit=can_change,
                                                 res_short_id=content_model.short_id)
    add_relation_modal_form = RelationForm(allow_edit=can_change,
                                           res_short_id=content_model.short_id)
    add_source_modal_form = SourceForm(allow_edit=can_change, res_short_id=content_model.short_id)
    add_fundingagency_modal_form = FundingAgencyForm(allow_edit=can_change,
                                                     res_short_id=content_model.short_id)

    keywords = ",".join([sub.value for sub in content_model.metadata.subjects.all()])
    subjects_form = SubjectsForm(initial={'value': keywords}, allow_edit=can_change,
                                 res_short_id=content_model.short_id, element_id=None)

    abstract_form = AbstractForm(instance=content_model.metadata.description,
                                 allow_edit=can_change, res_short_id=content_model.short_id,
                                 element_id=content_model.metadata.description.id if
                                 content_model.metadata.description else None)

    # Edit formsets: one pre-populated form per existing metadata element.
    # Each form's action URL embeds the element's database id so the client
    # posts updates to the right element.
    CreatorFormSetEdit = formset_factory(wraps(CreatorForm)(partial(CreatorForm,
                                                                    allow_edit=can_change)),
                                         formset=BaseCreatorFormSet, extra=0)
    creator_formset = CreatorFormSetEdit(initial=content_model.metadata.creators.all().values(),
                                         prefix='creator')
    for creator_form in creator_formset.forms:
        creator_form.action = "/hsapi/_internal/%s/creator/%s/update-metadata/" % \
                              (content_model.short_id, creator_form.initial['id'])
        creator_form.number = creator_form.initial['id']

    ContributorFormSetEdit = formset_factory(wraps(ContributorForm)(partial(ContributorForm,
                                                                            allow_edit=can_change)),
                                             formset=BaseContributorFormSet, extra=0)
    contributor_formset = ContributorFormSetEdit(initial=content_model.metadata.contributors.all().
                                                 values(), prefix='contributor')
    for contributor_form in contributor_formset.forms:
        contributor_form.action = "/hsapi/_internal/%s/contributor/%s/update-metadata/" % \
                                  (content_model.short_id, contributor_form.initial['id'])
        contributor_form.number = contributor_form.initial['id']

    RelationFormSetEdit = formset_factory(wraps(RelationForm)(partial(RelationForm,
                                                                      allow_edit=can_change)),
                                          formset=BaseFormSet, extra=0)
    relation_formset = RelationFormSetEdit(initial=content_model.metadata.relations.all().values(),
                                           prefix='relation')
    for relation_form in relation_formset.forms:
        relation_form.action = "/hsapi/_internal/%s/relation/%s/update-metadata/" % \
                               (content_model.short_id, relation_form.initial['id'])
        relation_form.number = relation_form.initial['id']

    SourceFormSetEdit = formset_factory(wraps(SourceForm)(partial(SourceForm,
                                                                  allow_edit=can_change)),
                                        formset=BaseFormSet, extra=0)
    source_formset = SourceFormSetEdit(initial=content_model.metadata.sources.all().values(),
                                       prefix='source')
    for source_form in source_formset.forms:
        source_form.action = "/hsapi/_internal/%s/source/%s/update-metadata/" % \
                             (content_model.short_id, source_form.initial['id'])
        source_form.delete_modal_form = MetaDataElementDeleteForm(content_model.short_id,
                                                                  'source',
                                                                  source_form.initial['id'])
        source_form.number = source_form.initial['id']

    FundingAgencyFormSetEdit = formset_factory(wraps(FundingAgencyForm)(partial(
        FundingAgencyForm, allow_edit=can_change)), formset=BaseFormSet, extra=0)
    fundingagency_formset = FundingAgencyFormSetEdit(
        initial=content_model.metadata.funding_agencies.all().values(), prefix='fundingagency')
    for fundingagency_form in fundingagency_formset.forms:
        # NOTE(review): 'fundingagnecy' is misspelled, but the server route is
        # presumably registered with the same spelling -- confirm before fixing.
        action = "/hsapi/_internal/{}/fundingagnecy/{}/update-metadata/"
        action = action.format(content_model.short_id, fundingagency_form.initial['id'])
        fundingagency_form.action = action
        fundingagency_form.number = fundingagency_form.initial['id']

    temporal_coverages = content_model.metadata.coverages.all().filter(type='period')
    temporal_coverage_data_dict = {}
    if len(temporal_coverages) > 0:
        temporal_coverage = temporal_coverages[0]
        temporal_coverage_data_dict['start'] = temporal_coverage.value['start']
        temporal_coverage_data_dict['end'] = temporal_coverage.value['end']
        temporal_coverage_data_dict['name'] = temporal_coverage.value.get('name', '')
        temporal_coverage_data_dict['id'] = temporal_coverage.id
    else:
        temporal_coverage = None

    coverage_temporal_form = CoverageTemporalForm(initial=temporal_coverage_data_dict,
                                                  allow_edit=can_change,
                                                  res_short_id=content_model.short_id,
                                                  element_id=temporal_coverage.id if
                                                  temporal_coverage else None)

    spatial_coverages = content_model.metadata.coverages.all().exclude(type='period')
    spatial_coverage_data_dict = {'type': 'point'}
    if len(spatial_coverages) > 0:
        spatial_coverage = spatial_coverages[0]
        spatial_coverage_data_dict['name'] = spatial_coverage.value.get('name', None)
        spatial_coverage_data_dict['units'] = spatial_coverage.value['units']
        spatial_coverage_data_dict['zunits'] = spatial_coverage.value.get('zunits', None)
        spatial_coverage_data_dict['projection'] = spatial_coverage.value.get('projection', None)
        spatial_coverage_data_dict['type'] = spatial_coverage.type
        spatial_coverage_data_dict['id'] = spatial_coverage.id
        if spatial_coverage.type == 'point':
            spatial_coverage_data_dict['east'] = spatial_coverage.value['east']
            spatial_coverage_data_dict['north'] = spatial_coverage.value['north']
            spatial_coverage_data_dict['elevation'] = spatial_coverage.value.get('elevation', None)
        else:
            # box coverage
            spatial_coverage_data_dict['northlimit'] = spatial_coverage.value['northlimit']
            spatial_coverage_data_dict['eastlimit'] = spatial_coverage.value['eastlimit']
            spatial_coverage_data_dict['southlimit'] = spatial_coverage.value['southlimit']
            spatial_coverage_data_dict['westlimit'] = spatial_coverage.value['westlimit']
            spatial_coverage_data_dict['uplimit'] = spatial_coverage.value.get('uplimit', None)
            spatial_coverage_data_dict['downlimit'] = spatial_coverage.value.get('downlimit', None)
    else:
        spatial_coverage = None

    coverage_spatial_form = CoverageSpatialForm(initial=spatial_coverage_data_dict,
                                                allow_edit=can_change,
                                                res_short_id=content_model.short_id,
                                                element_id=spatial_coverage.id if
                                                spatial_coverage else None)

    metadata_form = ExtendedMetadataForm(resource_mode='edit' if can_change else 'view',
                                         extended_metadata_layout=extended_metadata_layout)

    context = {
        'resource_edit_mode': resource_edit,
        'metadata_form': metadata_form,
        'creator_formset': creator_formset,
        'add_creator_modal_form': add_creator_modal_form,
        'creator_profilelink_formset': None,
        'title': content_model.metadata.title,
        'abstract_form': abstract_form,
        'contributor_formset': contributor_formset,
        'add_contributor_modal_form': add_contributor_modal_form,
        'relation_formset': relation_formset,
        'add_relation_modal_form': add_relation_modal_form,
        'source_formset': source_formset,
        'add_source_modal_form': add_source_modal_form,
        # NOTE(review): the next two keys are misspelled, but templates are
        # presumably looking them up by these exact names -- confirm before
        # renaming.
        'fundingagnency_formset': fundingagency_formset,
        'add_fundinagency_modal_form': add_fundingagency_modal_form,
        'coverage_temporal_form': coverage_temporal_form,
        'coverage_spatial_form': coverage_spatial_form,
        'spatial_coverage': spatial_coverage_data_dict,
        'subjects_form': subjects_form,
        'metadata_status': metadata_status,
        'missing_metadata_elements': content_model.metadata.get_required_missing_elements(),
        'citation': content_model.get_citation(),
        'extended_metadata_layout': extended_metadata_layout,
        'bag_url': bag_url,
        'current_user': user,
        'show_content_files': show_content_files,
        'validation_error': validation_error if validation_error else None,
        'discoverable': discoverable,
        'resource_is_mine': resource_is_mine,
        # exclude these relation types from the user-selectable list
        'relation_source_types': tuple((type_value, type_display)
                                       for type_value, type_display in Relation.SOURCE_TYPES
                                       if type_value != 'isReplacedBy' and
                                       type_value != 'isVersionOf' and
                                       type_value != 'hasPart'),
        'is_resource_specific_tab_active': False,
        'belongs_to_collections': belongs_to_collections
    }
    return context
def check_resource_mode(request):
    """
    Decide whether `request` is an attempt to edit a resource.

    A request counts as an edit attempt when any of the following holds:
    1. the HTTP verb is not "GET"
    2. the HTTP verb is "GET" and 'resource-mode' (in the session or in the
       query string) is set to 'edit'

    The 'resource-mode' session key, if present and set to 'edit', is
    consumed (deleted) by this function.

    :param request: the `request` for a resource
    :return: True for an edit attempt, False otherwise.
    """
    if request.method != "GET":
        return True
    if request.session.get('resource-mode', None) == 'edit':
        del request.session['resource-mode']
        return True
    return request.GET.get('resource-mode', None) == 'edit'
def check_for_validation(request):
    """Pop and return the 'validation_error' session entry for GET requests.

    Returns None for non-GET requests or when no error is stored; a truthy
    stored error is consumed (deleted from the session) on read.
    """
    if request.method != "GET":
        return None
    error = request.session.get('validation_error', None)
    if error:
        del request.session['validation_error']
    return error
def _get_metadata_status(resource):
    """Return the metadata sufficiency label for *resource*: SUFFICIENT when
    all required metadata elements are present, INSUFFICIENT otherwise."""
    has_required = resource.metadata.has_all_required_elements()
    return METADATA_STATUS_SUFFICIENT if has_required else METADATA_STATUS_INSUFFICIENT
| {
"repo_name": "FescueFungiShare/hydroshare",
"path": "hs_core/page_processors.py",
"copies": "1",
"size": "25522",
"license": "bsd-3-clause",
"hash": -8755338842669869000,
"line_mean": 51.9502074689,
"line_max": 100,
"alpha_frac": 0.5926259698,
"autogenerated": false,
"ratio": 4.460328556448794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0028008454516234263,
"num_lines": 482
} |
from functools import partial, wraps
from django import http
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.utils.decorators import available_attrs
from hilbert.middleware import _redirect
# Names exported by ``from hilbert.decorators import *`` -- the public view
# decorators of this module.
__all__ = (
    'ajax_login_required',
    'ajax_only',
    'anonymous_required',
    'secure',
)
def ajax_login_required(view_func):
    """Handle non-authenticated users differently if it is an AJAX request."""
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view(request, *args, **kwargs):
        # Non-AJAX requests fall back to Django's normal login_required flow.
        if not request.is_ajax():
            return login_required(view_func)(request, *args, **kwargs)
        if request.user.is_authenticated():
            return view_func(request, *args, **kwargs)
        # AJAX + anonymous: signal the client via headers instead of a
        # redirect, so the JS side can show a login prompt.
        response = http.HttpResponse()
        response['X-Django-Requires-Auth'] = True
        response['X-Django-Login-Url'] = settings.LOGIN_URL
        return response
    return _wrapped_view
def ajax_only(view_func):
    """Require that the view is only accessed via AJAX; other requests get
    a 400 Bad Request response."""
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view(request, *args, **kwargs):
        if not request.is_ajax():
            return http.HttpResponseBadRequest()
        return view_func(request, *args, **kwargs)
    return _wrapped_view
def anonymous_required(func=None, url=None):
    """Require that the user is NOT logged in; authenticated users are
    redirected to *url* (default "/").

    Usable both bare (``@anonymous_required``) and with arguments
    (``@anonymous_required(url=...)``).
    """
    target = url or "/"

    def _dec(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if request.user.is_authenticated():
                return redirect(target)
            return view_func(request, *args, **kwargs)
        return _wrapped_view

    # Bare decorator usage passes the view as func; parameterized usage
    # returns the decorator itself.
    return _dec if func is None else _dec(func)
def secure(view_func):
    """Handles SSL redirect on the view level.

    Insecure requests are redirected to HTTPS via hilbert's middleware
    helper; if SSL is not enabled the view runs normally.
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view(request, *args, **kwargs):
        if not request.is_secure():
            # Renamed from 'redirect' to avoid shadowing the module-level
            # django.shortcuts.redirect import.
            ssl_redirect = _redirect(request, True)
            if ssl_redirect:
                # Redirect might be None if SSL is not enabled
                return ssl_redirect
        return view_func(request, *args, **kwargs)
    return _wrapped_view
| {
"repo_name": "mlavin/django-hilbert",
"path": "hilbert/decorators.py",
"copies": "1",
"size": "2442",
"license": "bsd-2-clause",
"hash": -5705691431875865000,
"line_mean": 28.7804878049,
"line_max": 78,
"alpha_frac": 0.6171171171,
"autogenerated": false,
"ratio": 4.090452261306533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012195121951219513,
"num_lines": 82
} |
from functools import partial, wraps
from psycopg2 import connect as _connect
from psycopg2.extensions import register_adapter
from spans import *
from ._utils import adapt_range, register_range_caster, query_range_oids, floatrange_preprocess
# Public API of this package.
__all__ = [
    "connect",
    "register_range_type"
]
@wraps(_connect)
def connect(*keys, **kwargs):
    """Open a psycopg2 connection and register casters for the built-in
    PostgreSQL range types (int4range, int8range, numrange, daterange,
    tsrange) on that connection.

    :raises RuntimeError: if the server predates PostgreSQL 9.2 (server
        version 90200), the first release with range types.
    """
    conn = _connect(*keys, **kwargs)

    if conn.server_version < 90200:
        # Bug fix: the placeholder is named, so format() must receive the
        # value as a keyword argument; the old positional call raised
        # KeyError instead of this RuntimeError.
        raise RuntimeError("Range types not available in version {version}".format(
            version=conn.server_version))

    # Register range types
    register_range_caster(
        "int4range", intrange, oid=3904, subtype_oid=23, array_oid=3905, scope=conn)
    register_range_caster(
        "int8range", intrange, oid=3926, subtype_oid=20, array_oid=3927, scope=conn)
    register_range_caster(
        "numrange", floatrange_preprocess, oid=3906, subtype_oid=1700, array_oid=3907, scope=conn)
    register_range_caster(
        "daterange", daterange, oid=3912, subtype_oid=1082, array_oid=3913, scope=conn)
    register_range_caster(
        "tsrange", datetimerange, oid=3908, subtype_oid=1114, array_oid=3909, scope=conn)

    return conn
def register_range_type(pgrange, pyrange, conn):
    """
    Register a new range type as a PostgreSQL range.

    >>> register_range_type("int4range", intrange, conn)

    The above will make sure intrange is regarded as an int4range for queries
    and that int4ranges will be cast into intrange when fetching rows.

    pgrange should be the full name including schema for the custom range type.

    Note that adaption is global, meaning if a range type is passed to a regular
    psycopg2 connection it will adapt it to its proper range type. Parsing of
    rows from the database however is not global and just set on a per connection
    basis.
    """
    # Outgoing direction: adapt the Python range type globally.
    register_adapter(pyrange, partial(adapt_range, pgrange))
    # Incoming direction: look up the type's OIDs and cast per-connection.
    oids = query_range_oids(pgrange, conn)
    register_range_caster(pgrange, pyrange, *oids, scope=conn)
# Globally adapt the spans range types to their built-in PostgreSQL
# counterparts so they can be used as query parameters on any connection.
register_adapter(intrange, partial(adapt_range, "int4range"))
register_adapter(floatrange, partial(adapt_range, "numrange"))
register_adapter(daterange, partial(adapt_range, "daterange"))
register_adapter(datetimerange, partial(adapt_range, "tsrange"))
| {
"repo_name": "runfalk/psycospans",
"path": "psycospans/__init__.py",
"copies": "1",
"size": "2248",
"license": "mit",
"hash": -3028472899252626000,
"line_mean": 36.4666666667,
"line_max": 98,
"alpha_frac": 0.7153024911,
"autogenerated": false,
"ratio": 3.608346709470305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48236492005703047,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
from toolz import merge
import pandas as pd
from ..base import tokenize
def rolling_chunk(func, part1, part2, window, *args):
    """Apply a rolling *func* to *part2*, using the trailing ``window - 1``
    rows of *part1* as warm-up context so results at the partition boundary
    match a non-chunked computation."""
    if part1.shape[0] < window:
        raise NotImplementedError("Window larger than partition size")
    if window <= 1:
        # No overlap needed; the chunk stands alone.
        return func(part2, window, *args)
    overlap = window - 1
    stitched = pd.concat([part1.iloc[-overlap:], part2])
    # Drop the warm-up rows so only part2's positions are returned.
    return func(stitched, window, *args).iloc[overlap:]
def wrap_rolling(func):
    """Create a chunked version of a pandas.rolling_* function"""
    @wraps(func)
    def rolling(arg, window, *args, **kwargs):
        # Validate arguments before touching the dask graph.
        if not isinstance(window, int):
            raise TypeError('Window must be an integer')
        if window < 0:
            raise ValueError('Window must be a positive integer')
        if 'freq' in kwargs or 'how' in kwargs:
            raise NotImplementedError('Resampling before rolling computations '
                                      'not supported')
        old_name = arg._name
        new_name = 'rolling-' + tokenize(func, arg, window, args, kwargs)
        bound = partial(func, **kwargs)
        # First partition has no predecessor: apply directly. Every later
        # partition borrows trailing rows from its predecessor via
        # rolling_chunk so boundary results are exact.
        dsk = {(new_name, 0): (bound, (old_name, 0), window) + args}
        for part in range(1, arg.npartitions + 1):
            dsk[(new_name, part)] = ((rolling_chunk, bound, (old_name, part - 1),
                                      (old_name, part), window) + args)
        return type(arg)(merge(arg.dask, dsk), new_name,
                         arg.column_info, arg.divisions)
    return rolling
# Chunked counterparts of the legacy pandas.rolling_* module-level functions
# (these pandas functions were removed in later pandas versions; this module
# targets the era where they exist).
rolling_count = wrap_rolling(pd.rolling_count)
rolling_sum = wrap_rolling(pd.rolling_sum)
rolling_mean = wrap_rolling(pd.rolling_mean)
rolling_median = wrap_rolling(pd.rolling_median)
rolling_min = wrap_rolling(pd.rolling_min)
rolling_max = wrap_rolling(pd.rolling_max)
rolling_std = wrap_rolling(pd.rolling_std)
rolling_var = wrap_rolling(pd.rolling_var)
rolling_skew = wrap_rolling(pd.rolling_skew)
rolling_kurt = wrap_rolling(pd.rolling_kurt)
rolling_quantile = wrap_rolling(pd.rolling_quantile)
rolling_apply = wrap_rolling(pd.rolling_apply)
rolling_window = wrap_rolling(pd.rolling_window)
| {
"repo_name": "wiso/dask",
"path": "dask/dataframe/rolling.py",
"copies": "4",
"size": "2175",
"license": "bsd-3-clause",
"hash": -2217907968090794800,
"line_mean": 37.1578947368,
"line_max": 79,
"alpha_frac": 0.6340229885,
"autogenerated": false,
"ratio": 3.6189683860232944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 57
} |
from functools import partial, wraps
from toolz import merge
import pandas as pd
from .core import tokenize
def rolling_chunk(func, part1, part2, window, *args):
    """Apply a rolling *func* to *part2*, prepending the last ``window - 1``
    rows of *part1* so values at the partition boundary are computed over a
    full window."""
    if part1.shape[0] < window:
        raise NotImplementedError("Window larger than partition size")
    if window <= 1:
        # A unit window needs no context from the previous partition.
        return func(part2, window, *args)
    overlap = window - 1
    stitched = pd.concat([part1.iloc[-overlap:], part2])
    # Trim off the borrowed warm-up rows before returning.
    return func(stitched, window, *args).iloc[overlap:]
def wrap_rolling(func):
    """Create a chunked version of a pandas.rolling_* function"""
    @wraps(func)
    def rolling(arg, window, *args, **kwargs):
        # Argument validation happens before any graph construction.
        if not isinstance(window, int):
            raise TypeError('Window must be an integer')
        if window < 0:
            raise ValueError('Window must be a positive integer')
        if 'freq' in kwargs or 'how' in kwargs:
            raise NotImplementedError('Resampling before rolling computations '
                                      'not supported')
        old_name = arg._name
        new_name = 'rolling-' + tokenize((arg._name, window, args, sorted(kwargs.items())))
        bound = partial(func, **kwargs)
        # Partition 0 is applied directly; later partitions pull trailing
        # rows from their predecessor via rolling_chunk so the boundary
        # values are exact.
        dsk = {(new_name, 0): (bound, (old_name, 0), window) + args}
        for part in range(1, arg.npartitions + 1):
            dsk[(new_name, part)] = ((rolling_chunk, bound, (old_name, part - 1),
                                      (old_name, part), window) + args)
        return type(arg)(merge(arg.dask, dsk), new_name,
                         arg.column_info, arg.divisions)
    return rolling
# Chunked counterparts of the legacy pandas.rolling_* module-level functions
# (removed in later pandas releases; this module targets the era where they
# exist).
rolling_count = wrap_rolling(pd.rolling_count)
rolling_sum = wrap_rolling(pd.rolling_sum)
rolling_mean = wrap_rolling(pd.rolling_mean)
rolling_median = wrap_rolling(pd.rolling_median)
rolling_min = wrap_rolling(pd.rolling_min)
rolling_max = wrap_rolling(pd.rolling_max)
rolling_std = wrap_rolling(pd.rolling_std)
rolling_var = wrap_rolling(pd.rolling_var)
rolling_skew = wrap_rolling(pd.rolling_skew)
rolling_kurt = wrap_rolling(pd.rolling_kurt)
rolling_quantile = wrap_rolling(pd.rolling_quantile)
rolling_apply = wrap_rolling(pd.rolling_apply)
rolling_window = wrap_rolling(pd.rolling_window)
| {
"repo_name": "jayhetee/dask",
"path": "dask/dataframe/rolling.py",
"copies": "1",
"size": "2192",
"license": "bsd-3-clause",
"hash": 5056795725002110000,
"line_mean": 37.4561403509,
"line_max": 79,
"alpha_frac": 0.6341240876,
"autogenerated": false,
"ratio": 3.6231404958677684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9757264583467768,
"avg_score": 0,
"num_lines": 57
} |
from functools import partial, wraps
import numpy as np
from matplotlib import pyplot as plt
from core.config import Config, ALGORITHMS
from utils import generate_static_nodes
from experiments import evaluation
# Human-readable plot-legend label for each algorithm key in ALGORITHMS
# (assumes ALGORITHMS has exactly these three entries, in this order).
LABELS = dict(zip(ALGORITHMS, ['Classical MDS', 'Anchored MDS', 'MDS-RFID']))
def runexperiment(func):
    """Decorator: run the wrapped experiment once per algorithm in
    ALGORITHMS, plot first/third-quartile bands and medians of the returned
    errors, and show a combined legend."""
    @wraps(func)
    def wrapper(**kwargs):
        legend_handles = []
        legend_labels = []
        for algorithm in ALGORITHMS:
            kwargs['algorithm'] = algorithm
            x_axis, errors = func(**kwargs)
            errors = np.array(errors)
            first_q, median, third_q = evaluation.first_third_quartile_and_median(errors)
            # Dispatch to the plot helper named after the experiment function.
            plot_fn = getattr(evaluation, 'plot_{}'.format(func.__name__))
            handles = plot_fn(first_q, median, third_q, x_axis=x_axis)
            legend_handles.extend(handles)
            legend_labels.extend([LABELS[algorithm], 'IQR boundaries'])
        plt.legend(loc='upper left', fontsize=12, labels=legend_labels, handles=legend_handles)
        plt.show()
    return wrapper
@runexperiment
def rmse_vs_noise(algorithm=None, config=None, no_of_trials=10):
    """Sample RMSE over a sweep of noise levels (sigma in [0, 4])."""
    sigmas = np.linspace(0, 4, 40)
    errors = []
    for sigma in sigmas:
        config.sigma = sigma
        # generate rmse from coordinates, remembering to not pass last_n_coords to function
        trials = [evaluation.rmse(*generate_static_nodes(config, algorithm)[:2])
                  for _ in range(no_of_trials)]
        errors.append(trials)
    return sigmas, errors
@runexperiment
def rmse_vs_anchors(algorithm=None, config=None, no_of_trials=10):
    """Sample RMSE over a sweep of anchor counts (3 through 8)."""
    anchors = range(3, 9)
    errors = []
    for anc in anchors:
        config.no_of_anchors = anc
        # generate rmse from coordinates, remembering to not pass last_n_coords to function
        trials = [evaluation.rmse(*generate_static_nodes(config, algorithm)[:2])
                  for _ in range(no_of_trials)]
        errors.append(trials)
    return anchors, errors
| {
"repo_name": "mkoledoye/mds_experiments",
"path": "experiments/comparisons.py",
"copies": "2",
"size": "1735",
"license": "mit",
"hash": 7086825199432404000,
"line_mean": 33.7,
"line_max": 106,
"alpha_frac": 0.7291066282,
"autogenerated": false,
"ratio": 3.0817051509769096,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.481081177917691,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
class Assert:
    """Collects error and warning messages from validator callables.

    Calling the instance records an error when the given expression is
    falsy; `warn` records a warning instead. Length, truthiness and
    iteration reflect errors only -- warnings never cause a failure.
    """

    def __init__(self):
        # accumulated error messages (these make the validation fail)
        self.messages_error = []
        # accumulated warning messages (informational only)
        self.messages_warning = []

    def __call__(self, expression, error_message):
        """Record *error_message* as an error if *expression* is falsy."""
        if not expression:
            self.messages_error.append(error_message)

    def warn(self, expression, warning_message):
        """Record *warning_message* as a warning if *expression* is falsy."""
        if not expression:
            self.messages_warning.append(warning_message)

    def __len__(self):
        # Only errors count; warnings do not affect len()/truthiness.
        return len(self.messages_error)

    def __iter__(self):
        return iter(self.messages_error)

    def __repr__(self):
        # Fixed typo: "oject" -> "object".
        return 'Assert object with {} error messages'.format(len(self))

    def __str__(self):
        if not self.messages_error:
            str_ = 'No errors found'
        elif len(self.messages_error) == 1:
            str_ = '1 error found: {}'.format(self.messages_error[0])
        else:
            str_ = ('{} errors found: \n * {}'
                    .format(len(self.messages_error),
                            '\n * '.join(self.messages_error)))

        if len(self.messages_warning) == 1:
            str_ += '\n\n 1 warning: {}'.format(self.messages_warning[0])
        elif len(self.messages_warning) > 1:
            # Bug fix: this branch previously joined self.messages_error,
            # so the warnings section listed error messages instead of the
            # collected warnings.
            str_ += ('\n\n {} warnings: \n * {}'
                     .format(len(self.messages_warning),
                             '\n * '.join(self.messages_warning)))

        return str_
def validator(fn):
    """Decorator for validator functions.

    The wrapped callable accepts only keyword options and returns a partial
    of *fn* with those options pre-bound; the pipeline later supplies the
    reserved ``assert_`` and ``data`` keywords when the validator runs.
    """
    # TODO: verify fn signature
    @wraps(fn)
    def wrapped(**kwargs):
        reserved_messages = {
            'assert_': 'Do not include the assert_ parameter in '
                       'validator functions',
            'data': 'Do not include the data parameter in '
                    'validator functions',
        }
        for reserved_name, message in reserved_messages.items():
            if reserved_name in kwargs:
                raise TypeError(message)
        return partial(fn, **kwargs)
    return wrapped
@validator
def validate_schema(assert_, data, schema, error_on_extra_cols=False):
    """Check if a data frame complies with a schema

    Missing columns are always errors; extra columns are errors only when
    *error_on_extra_cols* is set, otherwise warnings. Column dtypes are
    compared (as strings) against the schema where the schema names a type.
    """
    actual_cols = set(data.columns)
    schema_cols = set(schema)
    missing = schema_cols - actual_cols
    unexpected = actual_cols - schema_cols

    assert_(not missing, 'Missing columns: {missing}.'.format(missing=missing))

    unexpected_msg = 'Unexpected columns {unexpected}'.format(unexpected=unexpected)
    if error_on_extra_cols:
        assert_(not unexpected, unexpected_msg)
    else:
        assert_.warn(not unexpected, unexpected_msg)

    # validate column types (as many as you can)
    for name, dtype in data.dtypes.astype(str).to_dict().items():
        wanted_dtype = schema.get(name)
        if wanted_dtype is None:
            continue
        assert_(dtype == wanted_dtype,
                'Wrong dtype for column "{name}". '
                'Expected: "{expected}". Got: "{dtype}"'
                .format(name=name, expected=wanted_dtype, dtype=dtype))

    return assert_
def data_frame_validator(df, validators):
    """Run every configured validator against *df*.

    Each entry of *validators* is the partial produced by a @validator
    function, e.g. ``validate_schema(schema={'x': 'int'})``. Raises
    AssertionError with the collected messages if any errors were recorded.
    """
    collector = Assert()
    for check in validators:
        check(assert_=collector, data=df)
    if len(collector):
        raise AssertionError(str(collector))
| {
"repo_name": "edublancas/python-ds-tools",
"path": "src/dstools/pipeline/validators/validators.py",
"copies": "2",
"size": "3348",
"license": "mit",
"hash": -175116121769602000,
"line_mean": 28.1130434783,
"line_max": 79,
"alpha_frac": 0.557646356,
"autogenerated": false,
"ratio": 4.133333333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5690979689333334,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
# This wrapper takes a class, and wraps all methods within the class with the
# specified decorator.
# This only supports instance methods, not classmethods and static methods???
def printer():
    """Return a decorator that logs entry and exit of the wrapped function.

    Fixed: the original used Python-2-only ``print`` statements; ``print()``
    as a function call produces the same output and is valid on both
    Python 2 and Python 3 for a single argument.
    """
    def handler(_func):
        @wraps(_func)
        def wrapper(*args, **kargs):
            print("Start {}".format(_func.__name__))
            resp = _func(*args, **kargs)
            print("End {}".format(_func.__name__))
            return resp
        return wrapper
    return handler
# NOTE: staticmethods and class methods are NOT CALLABLE
def wrap_all_methods(cls=None, wrapper=None):
    """Class decorator applying *wrapper* to every callable attribute of *cls*.

    Supports both ``@wrap_all_methods(wrapper=w)`` (cls is None on the first
    call, so a partial awaiting the class is returned) and a direct call
    ``wrap_all_methods(SomeClass, wrapper=w)``. Returns the class, mutated
    in place.

    Fixed: removed a leftover ``import pdb;pdb.set_trace()`` debugging line;
    the class namespace is snapshotted with list() before setattr mutates it.

    NOTE(review): classmethod/staticmethod objects fail the callable() test
    here and are skipped — but on Python >= 3.10 staticmethod objects became
    callable, so they may get wrapped there; confirm the intended behavior.
    """
    if not cls:
        # Decorator-factory form: capture the wrapper, wait for the class.
        return partial(wrap_all_methods, wrapper=wrapper)
    for attr, value in list(vars(cls).items()):
        if callable(value):
            setattr(cls, attr, wrapper(value))
    return cls
@wrap_all_methods(wrapper=printer())
class X(object):
    # Demo class: only the plain instance methods a/b get wrapped, because
    # classmethod/staticmethod objects are descriptors and fail callable().
    # NOTE(review): on Python >= 3.10 staticmethod objects are callable, so
    # neither_will_i may get wrapped there — behavior differs by version.
    @classmethod
    def i_wont_get_wrapped(cls):
        pass
    @staticmethod
    def neither_will_i():
        pass
    def a(self):
        return
    def b(self):
        return
# Demo: a()/b() print Start/End via the wrapper; the classmethod and
# staticmethod calls run unwrapped (see NOTE on class X above).
x = X()
x.a()
x.b()
x.i_wont_get_wrapped()
x.neither_will_i()
| {
"repo_name": "mr-uuid/snippets",
"path": "python/wrappers/class_wrappers_continued.py",
"copies": "1",
"size": "1177",
"license": "mit",
"hash": -4357111427546172000,
"line_mean": 20.7962962963,
"line_max": 77,
"alpha_frac": 0.6108751062,
"autogenerated": false,
"ratio": 3.784565916398714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4895441022598714,
"avg_score": null,
"num_lines": null
} |
from functools import partial, wraps
from django.forms import formset_factory
from crispy_forms.layout import Layout, HTML
from hs_core import page_processors
from hs_core.views import add_generic_context
from hs_core.forms import BaseFormSet, MetaDataElementDeleteForm
from hs_modflow_modelinstance.models import MODFLOWModelInstanceResource
from hs_modflow_modelinstance.forms import ModelOutputForm, ExecutedByForm, StudyAreaForm, \
GridDimensionsForm, StressPeriodForm, GroundWaterFlowForm, BoundaryConditionForm, \
ModelCalibrationForm, ModelInputForm, GeneralElementsForm, ModelInputLayoutEdit, \
ModalDialogLayoutAddModelInput
from mezzanine.pages.page_processors import processor_for
@processor_for(MODFLOWModelInstanceResource)
def landing_page(request, page):
    """Mezzanine page processor for MODFLOW Model Instance landing pages.

    View mode: copies each extended-metadata element of the resource into the
    template context. Edit mode: builds one crispy form per metadata element
    (plus a formset for the repeatable Model Input element) and a two-column
    crispy Layout arranging them, then hands everything to hs_core.
    """
    content_model = page.get_content_model()
    edit_resource = page_processors.check_resource_mode(request)
    if not edit_resource:
        # get the context from hs_core
        context = page_processors.get_page_context(page,
                                                   request.user,
                                                   request=request,
                                                   resource_edit=edit_resource,
                                                   extended_metadata_layout=None)
        extended_metadata_exists = False
        # Any single populated element is enough to show the metadata section.
        if content_model.metadata.model_output or \
                content_model.metadata.executed_by or \
                content_model.metadata.study_area or \
                content_model.metadata.grid_dimensions or \
                content_model.metadata.stress_period or \
                content_model.metadata.ground_water_flow or \
                content_model.metadata.boundary_condition or \
                content_model.metadata.model_calibration or \
                content_model.metadata.model_inputs or \
                content_model.metadata.general_elements:
            extended_metadata_exists = True
        context['extended_metadata_exists'] = extended_metadata_exists
        context['model_output'] = content_model.metadata.model_output
        context['executed_by'] = content_model.metadata.executed_by
        context['study_area'] = content_model.metadata.study_area
        context['grid_dimensions'] = content_model.metadata.grid_dimensions
        context['stress_period'] = content_model.metadata.stress_period
        context['ground_water_flow'] = content_model.metadata.ground_water_flow
        context['boundary_condition'] = content_model.metadata.boundary_condition
        context['model_calibration'] = content_model.metadata.model_calibration
        context['model_inputs'] = content_model.metadata.model_inputs
        context['general_elements'] = content_model.metadata.general_elements
    # add MODFLOW Model parameters context
    else:
        # Edit mode: one form per single-cardinality element; element_id is
        # None when the element does not exist yet (form then acts as "add").
        model_output_form = ModelOutputForm(instance=content_model.metadata.model_output,
                                            res_short_id=content_model.short_id,
                                            element_id=content_model.metadata.model_output.id
                                            if content_model.metadata.model_output else None)
        executed_by_form = ExecutedByForm(instance=content_model.metadata.executed_by,
                                          res_short_id=content_model.short_id,
                                          element_id=content_model.metadata.executed_by.id
                                          if content_model.metadata.executed_by else None)
        study_area_form = StudyAreaForm(instance=content_model.metadata.study_area,
                                        res_short_id=content_model.short_id,
                                        element_id=content_model.metadata.study_area.id
                                        if content_model.metadata.study_area else None)
        grid_dimensions_form = GridDimensionsForm(
            instance=content_model.metadata.grid_dimensions,
            res_short_id=content_model.short_id,
            element_id=content_model.metadata.grid_dimensions.id
            if content_model.metadata.grid_dimensions else None)
        stress_period_form = StressPeriodForm(instance=content_model.metadata.stress_period,
                                              res_short_id=content_model.short_id,
                                              element_id=content_model.metadata.stress_period.id
                                              if content_model.metadata.stress_period else None)
        ground_water_flow_form = GroundWaterFlowForm(
            instance=content_model.metadata.ground_water_flow,
            res_short_id=content_model.short_id,
            element_id=content_model.metadata.ground_water_flow.id
            if content_model.metadata.ground_water_flow else None)
        boundary_condition_form = BoundaryConditionForm(
            instance=content_model.metadata.boundary_condition,
            res_short_id=content_model.short_id,
            element_id=content_model.metadata.boundary_condition.id
            if content_model.metadata.boundary_condition else None)
        model_calibration_form = ModelCalibrationForm(
            instance=content_model.metadata.model_calibration,
            res_short_id=content_model.short_id,
            element_id=content_model.metadata.model_calibration.id
            if content_model.metadata.model_calibration else None)
        # Model Input is repeatable, so it gets a formset; wraps() keeps the
        # partial looking like ModelInputForm for formset_factory.
        ModelInputFormSetEdit = formset_factory(wraps(ModelInputForm)(partial(ModelInputForm,
                                                                              allow_edit=True)),
                                                formset=BaseFormSet, extra=0)
        model_input_formset = ModelInputFormSetEdit(
            initial=content_model.metadata.model_inputs.values(),
            prefix='modelinput')
        for model_input_form in model_input_formset.forms:
            if len(model_input_form.initial) > 0:
                # Existing element: wire the update URL and a delete dialog.
                model_input_form.action = "/hydroshare/hsapi/_internal/%s/modelinput/%s/update-metadata/" % \
                                          (content_model.short_id, model_input_form.initial['id'])
                model_input_form.delete_modal_form = MetaDataElementDeleteForm(
                    content_model.short_id, 'modelinput',
                    model_input_form.initial['id'])
                model_input_form.number = model_input_form.initial['id']
            else:
                # Blank form: posting it creates a new element.
                model_input_form.action = "/hydroshare/hsapi/_internal/%s/modelinput/add-metadata/" % \
                                          content_model.short_id
        add_modelinput_modal_form = ModelInputForm(allow_edit=False,
                                                   res_short_id=content_model.short_id)
        general_elements_form = GeneralElementsForm(
            instance=content_model.metadata.general_elements,
            res_short_id=content_model.short_id,
            element_id=content_model.metadata.general_elements.id
            if content_model.metadata.general_elements else None)
        # Two-column layout: left column (model output, executed by, boundary
        # condition, general elements, model inputs), right column (study
        # area, grid dimensions, stress period, groundwater flow, calibration).
        ext_md_layout = Layout(HTML("<div class='row'><div class='col-xs-12 col-sm-6'>"
                                    "<div class='form-group' id='modeloutput'> "
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy model_output_form %} '
                                    '</div>'),
                               HTML('<div class="form-group" id="executedby"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy executed_by_form %} '
                                    '</div> '),
                               HTML('<div class="form-group" id="boundarycondition"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy boundary_condition_form %} '
                                    '</div>'),
                               HTML('<div class="form-group" id="generalelements"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy general_elements_form %} '
                                    '</div>'),
                               HTML("</div>"),
                               ModelInputLayoutEdit,
                               ModalDialogLayoutAddModelInput,
                               HTML('<div class="col-xs-12 col-sm-6">'
                                    '<div class="form-group" id="studyarea"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy study_area_form %} '
                                    '</div> '),
                               HTML('<div class="form-group" id="griddimensions"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy grid_dimensions_form %} '
                                    '</div>'),
                               HTML('<div class="form-group" id="stressperiod"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy stress_period_form %} '
                                    '</div>'),
                               HTML('<div class="form-group" id="groundwaterflow"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy ground_water_flow_form %} '
                                    '</div>'),
                               HTML('<div class="form-group" id="modelcalibration"> '
                                    '{% load crispy_forms_tags %} '
                                    '{% crispy model_calibration_form %} '
                                    '</div></div></div>')
                               )
        # get the context from hs_core
        context = page_processors.get_page_context(page,
                                                   request.user,
                                                   resource_edit=edit_resource,
                                                   extended_metadata_layout=ext_md_layout,
                                                   request=request)
        context['resource_type'] = 'MODFLOW Model Instance Resource'
        context['model_output_form'] = model_output_form
        context['executed_by_form'] = executed_by_form
        context['study_area_form'] = study_area_form
        context['grid_dimensions_form'] = grid_dimensions_form
        context['stress_period_form'] = stress_period_form
        context['ground_water_flow_form'] = ground_water_flow_form
        context['boundary_condition_form'] = boundary_condition_form
        context['model_calibration_form'] = model_calibration_form
        context['model_input_formset'] = model_input_formset
        context['add_modelinput_modal_form'] = add_modelinput_modal_form
        context['general_elements_form'] = general_elements_form
    hs_core_context = add_generic_context(request, page)
    context.update(hs_core_context)
    return context
| {
"repo_name": "ResearchSoftwareInstitute/MyHPOM",
"path": "hs_modflow_modelinstance/page_processors.py",
"copies": "1",
"size": "11404",
"license": "bsd-3-clause",
"hash": 4346826761021114400,
"line_mean": 54.1773399015,
"line_max": 109,
"alpha_frac": 0.5222728867,
"autogenerated": false,
"ratio": 4.7635756056808685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.578584849238087,
"avg_score": null,
"num_lines": null
} |
from functools import reduce as ft_reduce
from .errors import CraftAiError
from .formatters import format_decision_rules
from .operators import OPERATORS
def _is_is_reducer(rule_1, rule_2):
    """Merge two IS rules on the same property; their operands must agree
    (a truthy first operand that differs from the second is a conflict)."""
    operand_1 = rule_1["operand"]
    operand_2 = rule_2["operand"]
    if operand_1 and operand_1 != operand_2:
        raise CraftAiError(
            "Operator '{}' can't have different value. Set to '{}' and receive '{}'".format(
                OPERATORS["IS"], operand_1, operand_2
            )
        )
    return {
        "property": rule_1["property"],
        "operator": OPERATORS["IS"],
        "operand": operand_2,
    }
def _in_in_reducer(rule_1, rule_2):
    """Intersect two IN_INTERVAL rules on the same property.

    Intervals may be "cyclic" (from > to, wrapping around — presumably for
    periodic properties such as time of day; TODO confirm), in which case
    membership is an OR of the two bound tests instead of an AND.
    Raises CraftAiError when the intervals are disjoint.
    """
    op_1_from = rule_1["operand"][0]
    op_1_to = rule_1["operand"][1]
    op_2_from = rule_2["operand"][0]
    op_2_to = rule_2["operand"][1]
    # A cyclic interval wraps past the period boundary.
    op_1_is_cyclic = op_1_from > op_1_to
    op_2_is_cyclic = op_2_from > op_2_to
    # Membership of each endpoint of one interval in the other interval.
    op_2_from_in_op_1 = (
        (op_2_from >= op_1_from or op_2_from <= op_1_to)
        if op_1_is_cyclic
        else (op_2_from >= op_1_from and op_2_from <= op_1_to)
    )
    op_2_to_in_op_1 = (
        (op_2_to >= op_1_from or op_2_to <= op_1_to)
        if op_1_is_cyclic
        else (op_2_to >= op_1_from and op_2_to <= op_1_to)
    )
    op_1_from_in_op_2 = (
        (op_1_from >= op_2_from or op_1_from <= op_2_to)
        if op_2_is_cyclic
        else (op_1_from >= op_2_from and op_1_from <= op_2_to)
    )
    op_1_to_in_op_2 = (
        (op_1_to >= op_2_from or op_1_to <= op_2_to)
        if op_2_is_cyclic
        else (op_1_to >= op_2_from and op_1_to <= op_2_to)
    )
    if op_1_from_in_op_2 and op_1_to_in_op_2:
        # op_1 belongs to op_2
        #    | op_1 |
        #  |    op_2    |
        return rule_1
    if op_2_from_in_op_1 and op_2_to_in_op_1:
        # op_2 belongs to op_1
        #  |    op_1    |
        #    | op_2 |
        return rule_2
    if op_2_from_in_op_1 and op_1_to_in_op_2:
        # overlap 1
        #  | op_1 |
        #      | op_2 |
        return {
            "property": rule_1["property"],
            "operator": OPERATORS["IN_INTERVAL"],
            "operand": [op_2_from, op_1_to],
        }
    if op_2_to_in_op_1 and op_1_from_in_op_2:
        # overlap 2
        #      | op_1 |
        #  | op_2 |
        return {
            "property": rule_1["property"],
            "operator": OPERATORS["IN_INTERVAL"],
            "operand": [op_1_from, op_2_to],
        }
    # disjointed
    #  | op_1 |
    #            | op_2 |
    raise CraftAiError(
        """Unable to reduce decision rules '{}' and '{}': """
        """the resulting rule is not fulfillable.""".format(
            format_decision_rules([rule_1]), format_decision_rules([rule_2])
        )
    )
def _in_gte_reducer(rule_1, rule_2):
    """Merge an IN_INTERVAL rule (rule_1) with a GTE rule (rule_2),
    tightening the interval's lower bound when the threshold falls inside."""
    lower = rule_1["operand"][0]
    upper = rule_1["operand"][1]
    threshold = rule_2["operand"]
    def _unfulfillable():
        return CraftAiError(
            "Unable to reduce decision rules '{}' and '{}': "
            "the resulting rule is not fulfillable.".format(
                format_decision_rules([rule_1]), format_decision_rules([rule_2])
            )
        )
    # A cyclic interval (lower > upper) makes no sense with a one-sided bound.
    if lower > upper:
        raise _unfulfillable()
    # Threshold at/above the upper bound: nothing is left of the interval.
    #  | rule_1 |
    #           |threshold
    if threshold >= upper:
        raise _unfulfillable()
    if lower <= threshold < upper:
        # Threshold inside: raise the lower bound to the threshold.
        #  |  rule_1  |
        #     |threshold
        return {
            "property": rule_1["property"],
            "operator": OPERATORS["IN_INTERVAL"],
            "operand": [threshold, upper],
        }
    # Threshold below the interval: the interval already satisfies GTE.
    #       | rule_1 |
    #  |threshold
    return rule_1
def _in_lt_reducer(rule_1, rule_2):
    """Merge an IN_INTERVAL rule (rule_1) with a LT rule (rule_2),
    lowering the interval's upper bound when the threshold falls inside."""
    lower = rule_1["operand"][0]
    upper = rule_1["operand"][1]
    threshold = rule_2["operand"]
    def _unfulfillable():
        return CraftAiError(
            "Unable to reduce decision rules '{}' and '{}': "
            "the resulting rule is not fulfillable.".format(
                format_decision_rules([rule_1]), format_decision_rules([rule_2])
            )
        )
    # A cyclic interval (lower > upper) makes no sense with a one-sided bound.
    if lower > upper:
        raise _unfulfillable()
    # Threshold below the interval: nothing is left of the interval.
    #            | rule_1 |
    #  threshold|
    if threshold < lower:
        raise _unfulfillable()
    if lower <= threshold < upper:
        # Threshold inside: cap the interval at the threshold.
        #  |  rule_1  |
        #     |threshold
        return {
            "property": rule_1["property"],
            "operator": OPERATORS["IN_INTERVAL"],
            "operand": [lower, threshold],
        }
    # Threshold at/after the interval: the interval already satisfies LT.
    #  | rule_1 |
    #            threshold|
    return rule_1
def _gte_lt_reducer(rule_1, rule_2):
    """Combine a GTE rule (lower bound) with a LT rule (upper bound) into a
    single IN_INTERVAL rule; fails when the bounds cross."""
    lower_bound = rule_1["operand"]
    upper_bound = rule_2["operand"]
    if upper_bound < lower_bound:
        raise CraftAiError(
            "Unable to reduce decision rules '{}' and '{}': "
            "the resulting rule is not fulfillable.".format(
                format_decision_rules([rule_1]), format_decision_rules([rule_2])
            )
        )
    return {
        "property": rule_1["property"],
        "operator": OPERATORS["IN_INTERVAL"],
        "operand": [lower_bound, upper_bound],
    }
# Dispatch table: REDUCER_FROM_DECISION_RULE[op_1][op_2] merges a rule using
# operator op_1 with a rule using op_2 on the same property. Asymmetric
# pairs (GTE/IN, LT/IN, LT/GTE) reuse the reducers above with arguments
# swapped; symmetric GTE/GTE and LT/LT just keep the tighter bound.
REDUCER_FROM_DECISION_RULE = {
    OPERATORS["IS"]: {OPERATORS["IS"]: _is_is_reducer},
    OPERATORS["IN_INTERVAL"]: {
        OPERATORS["IN_INTERVAL"]: _in_in_reducer,
        OPERATORS["GTE"]: _in_gte_reducer,
        OPERATORS["LT"]: _in_lt_reducer,
    },
    OPERATORS["GTE"]: {
        OPERATORS["IN_INTERVAL"]: lambda rule_1, rule_2: _in_gte_reducer(
            rule_2, rule_1
        ),
        # GTE + GTE: the larger threshold wins.
        OPERATORS["GTE"]: lambda rule_1, rule_2: {
            "property": rule_1["property"],
            "operator": OPERATORS["GTE"],
            "operand": max(rule_1["operand"], rule_2["operand"]),
        },
        OPERATORS["LT"]: _gte_lt_reducer,
    },
    OPERATORS["LT"]: {
        OPERATORS["IN_INTERVAL"]: lambda rule_1, rule_2: _in_lt_reducer(rule_2, rule_1),
        OPERATORS["GTE"]: lambda rule_1, rule_2: _gte_lt_reducer(rule_2, rule_1),
        # LT + LT: the smaller threshold wins.
        OPERATORS["LT"]: lambda rule_1, rule_2: {
            "property": rule_1["property"],
            "operator": OPERATORS["LT"],
            "operand": min(rule_1["operand"], rule_2["operand"]),
        },
    },
}
def _decision_rules_reducer(rule_1, rule_2):
    """Reduce two rules on the same property into one via the dispatch table;
    None inputs pass the other rule through unchanged."""
    if rule_1 is None:
        return rule_2
    if rule_2 is None:
        return rule_1
    table_for_first = REDUCER_FROM_DECISION_RULE.get(rule_1["operator"], {})
    reducer = table_for_first.get(rule_2["operator"])
    if reducer is None:
        raise CraftAiError(
            "Unable to reduce decision rules '{}' and '{}': "
            "incompatible operators.".format(
                format_decision_rules([rule_1]), format_decision_rules([rule_2])
            )
        )
    return reducer(rule_1, rule_2)
def _unique_seq(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def reduce_decision_rules(rules):
    """Collapse *rules* into one reduced rule per distinct property,
    preserving the order in which properties first appear."""
    reduced = []
    for prop in _unique_seq([rule["property"] for rule in rules]):
        matching = [rule for rule in rules if rule["property"] == prop]
        reduced.append(ft_reduce(_decision_rules_reducer, matching))
    return reduced
| {
"repo_name": "craft-ai/craft-ai-client-python",
"path": "craft_ai/reducer.py",
"copies": "1",
"size": "8085",
"license": "bsd-3-clause",
"hash": -3451230810799561700,
"line_mean": 30.2162162162,
"line_max": 92,
"alpha_frac": 0.4979591837,
"autogenerated": false,
"ratio": 3.161908486507626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9157326032098904,
"avg_score": 0.0005083276217445617,
"num_lines": 259
} |
from functools import reduce, lru_cache
import numpy as np
from typing import List, Any, Callable
import scipy.integrate as integrate
from mule_local.rexi.pcirexi.gauss_cache import GaussCache
from mule_local.rexi.pcirexi.section import section
def _complex_quad(func, a, b):
real = integrate.quad((lambda a: func(a).real), a, b)[0]
imag = integrate.quad((lambda a: func(a).imag), a, b)[0]
return real + 1j * imag
class CauchyIntegrator:
    """Rational approximation of *target_function* via the Cauchy integral.

    The contour (a list of Section objects) is discretized with the chosen
    quadrature scheme; each quadrature point contributes one term, giving a
    REXI-style approximation  target(x) ~= sum_s bs[s] / (a_s[s] + x).
    """
    target_function: Callable[[Any], Any]  # exp for REXI
    bs: List[complex]  # numerator of terms
    a_s: List[complex]  # part of the denominator
    terms_number: int  # overall number of REXI terms
    terms_section_list: List[int]  # REXI terms per section (annotation fixed: was `int`)
    sections: List[section.Section]  # list of contour functions
    def __init__(self, sections, target_function, terms_number=20, integral='trapeze', arc_length_distribution=False,
                 normalize_for_zero=False, g_c=GaussCache()):
        # NOTE(review): the GaussCache() default is evaluated once and shared
        # by every instance; kept as-is since sharing a quadrature cache looks
        # intentional — confirm before changing.
        self.g_c = g_c
        self.target_function = target_function
        self.sections = sections
        self.terms_number = terms_number
        self.integral = integral
        self._calculate_quad_points_for_sections(arc_length_distribution)
        if integral.startswith("trapeze") or integral.startswith("lobatto"):
            # Edge-to-edge schemes share endpoints between adjacent sections.
            self.edge_to_edge = True
            # Temporarily increase terms. Later undone by merging values from bs and a_s
            self.terms_section_list = [t + 1 for t in self.terms_section_list]
        else:
            self.edge_to_edge = False
        self._calculateTable()
        if normalize_for_zero:
            self._normalize_for_zero()
    @staticmethod
    @lru_cache(maxsize=32)
    def get_quadrature_points_and_weights(g_c, integral, terms_number):
        """Return (base_nodes, weights) on [0, 1] for the requested scheme.

        Declared @staticmethod so lru_cache no longer keys on — and keeps
        alive — the instance (all inputs are explicit parameters); instance
        calls like self.get_quadrature_points_and_weights(...) still work.

        Bare `raise` statements outside an except block (which produced
        "RuntimeError: No active exception to re-raise") are replaced with
        explicit exceptions carrying the former print messages.
        """
        if (not integral.startswith('trapeze')) and (not integral.startswith('midpoint')):
            if integral.startswith('lobatto'):
                if terms_number < 2:
                    raise ValueError("Lobatto needs at least 2 base_nodes")
                print("Ig: lobatto")
                base_nodes, weights_original = g_c.gauss_lobatto(terms_number, 20)
            elif integral.startswith('legendre'):
                base_nodes, weights_original = g_c.gauss_legendre(terms_number, 20)
            elif integral.startswith('chebychev'):
                raise NotImplementedError("chebychev quadrature is not implemented")
            else:
                raise ValueError("Unknown integration scheme: {}".format(integral))
            # Map Gauss nodes/weights from [-1, 1] onto [0, 1].
            weights = [float(w) / 2 for w in weights_original]
            base_nodes = [float(b) / 2 + 0.5 for b in base_nodes]
        elif integral.startswith('trapeze'):
            if terms_number == 0:
                base_nodes = []
                weights = []
            else:
                base_nodes = np.linspace(0, 1, terms_number)
                if terms_number <= 2:
                    weights = [1 / terms_number] * terms_number
                else:
                    # Composite trapezoid: half weight at the two endpoints.
                    devisor = (2 * (terms_number - 1))
                    weights = [1 / devisor] + [2 / devisor] * (terms_number - 2) + [1 / devisor]
        elif integral.startswith('midpoint'):
            if terms_number == 0:
                base_nodes = []
                weights = []
            else:
                base_nodes = np.linspace(1 / (2 * terms_number), 1 - 1 / (2 * terms_number),
                                         terms_number)
                weights = [1 / (terms_number)] * terms_number
        else:
            # Unreachable given the branches above; kept as a safety net.
            raise ValueError("Unknown integration scheme: {}".format(integral))
        return (base_nodes, weights)
    def _calculateTable(self):
        # calculates numerator (bs) and denominator addend (as)
        self.bs = []
        self.a_s = []
        for j in range(len(self.sections)):
            current_section = self.sections[j]
            terms = self.terms_section_list[j]
            base_nodes, weights = self.get_quadrature_points_and_weights(self.g_c, self.integral, terms)
            for i in range(terms):
                # prepare one REXI term
                alpha_for_current_term = base_nodes[i]
                contour_pos = current_section.interpolate(alpha_for_current_term)
                contour_derivative = current_section.evaluateDerivative(alpha_for_current_term)
                function_evaluation_at_contour_pos = self.target_function(contour_pos)
                b = -1 / (2j * np.pi) * function_evaluation_at_contour_pos * contour_derivative * weights[i]
                self.bs.append(b)
                self.a_s.append(-contour_pos)
        if self.edge_to_edge:
            # Undo temporary increase of terms
            current_transition = 0
            for i in range(len(self.sections)):
                # Merge values at equal contour position
                self.bs[current_transition] += self.bs[current_transition - 1]
                self.a_s[current_transition] = self.a_s[current_transition] / 2 + self.a_s[current_transition - 1] / 2
                current_transition += self.terms_section_list[i]
            current_unwanted = 0
            for i in range(len(self.sections)):
                # Pop unwanted values
                current_unwanted += self.terms_section_list[i] - 1
                self.bs.pop(current_unwanted)
                self.a_s.pop(current_unwanted)
    def _normalize_for_zero(self):
        """Rescale the numerators so the approximation matches the target's
        real part exactly at x == 0."""
        current = self.approximate_target_function(0)
        actual = self.target_function(0)
        factor = actual.real / current.real
        self.bs = [b * factor for b in self.bs]
    def approximate_target_function(self, x):
        """Evaluate the rational approximation sum_s bs[s] / (a_s[s] + x)."""
        total = 0j  # renamed from `sum`, which shadowed the builtin
        for s in range(0, len(self.bs)):
            total += self.bs[s] / (self.a_s[s] + x)
        return total
    def approximate_target_function_using_scipy_quad(self, x):
        """Reference evaluation of the Cauchy integral using adaptive
        quadrature (slow, used to validate the discrete table)."""
        sections_sum = 0j
        for current_section in self.sections:
            def cauchy_integrand(alpha):
                contour_pos = current_section.interpolate(alpha)
                contour_derivative = current_section.evaluateDerivative(alpha)
                return self.target_function(contour_pos) * contour_derivative / (contour_pos - x)
            sections_sum += _complex_quad(cauchy_integrand, 0, 1)
        return sections_sum / (2j * np.pi)
    def _get_section(self, a):
        """Locate the index of the section whose [start_a, end_a) range
        contains parameter *a*, using a shrinking jump search."""
        jump_size = len(self.sections) // 2
        current_pos = jump_size
        jump_size //= 2
        if jump_size == 0:
            jump_size = 1
        while True:
            if a < self.sections[current_pos].start_a:
                current_pos -= jump_size
            elif current_pos == len(self.sections) - 1 or a < self.sections[current_pos].end_a:
                return current_pos
            else:
                current_pos += jump_size
            jump_size //= 2
            if jump_size == 0:
                jump_size = 1
    def calc_max_error_in_interval(self, lower_i=-8, higher_i=8, samples=1000):
        """Max |target - approximation| sampled on the imaginary axis."""
        values = np.linspace(lower_i * 1j, higher_i * 1j, samples)
        deviations = [abs(self.target_function(a) - self.approximate_target_function(a)) for a in values]
        max_deviation = max(deviations)
        return max_deviation
    def calc_max_error_in_intervall_via_scipy_quad(self, lower_i=-8, higher_i=8, samples=1000):
        """Same sampling as calc_max_error_in_interval, but against the
        adaptive-quadrature reference evaluation."""
        values = np.linspace(lower_i * 1j, higher_i * 1j, samples)
        deviations = [abs(self.target_function(a) - self.approximate_target_function_using_scipy_quad(a)) for a in
                      values]
        max_deviation = max(deviations)
        return max_deviation
    def _calculate_quad_points_for_sections(self, arc_length_distribution):
        """Distribute self.terms_number quadrature points over the sections,
        uniformly or proportionally to each section's arc length."""
        if not arc_length_distribution:
            self.terms_section_list = [self.terms_number // len(self.sections)] * len(self.sections)
            return
        self.terms_section_list = [0] * len(self.sections)
        section_lengths = [s.arc_length_start_end(0, 1) for s in self.sections]
        contour_length = reduce(float.__add__, section_lengths, 0.0)
        length_per_quad_point = contour_length / self.terms_number
        self.terms_section_list = [int(l / length_per_quad_point) for l in section_lengths]
        current_terms = reduce(int.__add__, self.terms_section_list, 0)
        # Hand out the leftover points (lost to int truncation) one at a
        # time to the sections with the most uncovered arc length.
        for _ in range(0, self.terms_number - current_terms):
            max_value = -1
            max_i_so_far = 0
            for i in range(0, len(section_lengths)):
                current_length = section_lengths[i] - self.terms_section_list[i] * length_per_quad_point
                if max_value < current_length:
                    max_i_so_far = i
                    max_value = current_length
            self.terms_section_list[max_i_so_far] += 1
| {
"repo_name": "schreiberx/sweet",
"path": "mule_local/python/mule_local/rexi/pcirexi/cauchy_integrator.py",
"copies": "1",
"size": "8699",
"license": "mit",
"hash": 3628331937517759000,
"line_mean": 45.2712765957,
"line_max": 118,
"alpha_frac": 0.5763880906,
"autogenerated": false,
"ratio": 3.764171354392038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4840559444992038,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
class AST:
    """Minimal expression-tree node; __repr__ renders the tree as SVG via
    the Buchheim layout functions defined later in this module."""
    def __init__(self, result, op=None, children=None):
        # children=None instead of children=[] avoids the shared
        # mutable-default-argument pitfall (backward compatible: the
        # default is still an empty list per instance).
        self._res = result
        self._opr = op
        self._chd = [] if children is None else children
        # Node label: the full number for ints, the first character otherwise.
        self._chr = str(result) if type(result) is int else str(result)[0]
    def __repr__(self):
        # Lay out the tree with Buchheim's algorithm and emit SVG markup.
        return ''.join(buchheim(self)._svg())
class DrawTree:
    """Layout node wrapping one AST node for Buchheim's tree-drawing
    algorithm; holds the working state (x/mod/thread/shift/change) the
    walks below mutate, plus SVG emission helpers."""
    def __init__(self, tree, parent=None, depth=0, number=1):
        self._x = -1.                 # final x coordinate (set by the walks)
        self._y = depth               # depth doubles as the y coordinate
        self._tre = tree              # the wrapped AST node
        self._chd = [DrawTree(c, self, depth+1, i+1) for i, c in
                     enumerate(tree._chd)]
        self._pnt = parent
        self._thd = None              # "thread" link to the next contour node
        self._mod = 0                 # deferred x shift for the whole subtree
        self._anc = self              # ancestor pointer used by apportion()
        self._chg = self._sft = 0     # change/shift amounts for execute_shifts()
        self._lmost_sibling = None
        self._num = number            # 1-based index among siblings
    def left(self):
        # Thread link if present, else the leftmost child (contour walk).
        return self._thd or self._chd and self._chd[0]
    def right(self):
        # Thread link if present, else the rightmost child (contour walk).
        return self._thd or self._chd and self._chd[-1]
    def lbrother(self):
        # The sibling immediately to the left, or None for the leftmost.
        n = None
        if self._pnt:
            for node in self._pnt._chd:
                if node == self: return n
                n = node
        return n
    def get_lmost_sibling(self):
        # Cached leftmost sibling (None when self is leftmost or a root).
        if not self._lmost_sibling and self._pnt and self != self._pnt._chd[0]:
            self._lmost_sibling = self._pnt._chd[0]
        return self._lmost_sibling
    lmost_sibling = property(get_lmost_sibling)
    def _pairs(self):
        # Yield (x, y, ast_node) for self and all descendants, pre-order.
        yield (self._x, self._y, self._tre)
        for t in self._chd:
            yield from t._pairs()
    def _svg_edges(self, xfac, yfac, xofs, yofs):
        # Emit SVG path commands for the edges of the subtree.
        x, y = self._x, self._y
        if self._chd:
            b = 'L%i %i' % (xofs + int(x*xfac), yofs - int(y*yfac))
            for t in self._chd:
                yield from t._svg_edges(xfac, yfac, xofs, yofs)
                yield b
        else:
            yield 'M%i %i' % (xofs + int(x*xfac), yofs - int(y*yfac))
    def _svg(self, xh=700, yh=700):
        # Yield the full SVG document: edges first, then node glyphs on top.
        xmax, ymax = self.corner()
        xofs = 100
        yofs = yh - xofs
        # Scale factors mapping layout coordinates into the drawing area.
        xfac, yfac = ((xh-2*xofs) / xmax if xmax else xh/2), (yh-2*xofs) / ymax if ymax else yh/2
        yield '<svg height="%i" width="%i">\n' % (xh, yh)
        yield '<path stroke-linecap="round" d="'
        yield from self._svg_edges(xfac, yfac, xofs, yofs)
        yield '" stroke="black" fill="none" stroke-width="3"/>\n'
        for x, y, t in self._pairs():
            x, y, c = xofs + int(x*xfac), yofs - int(y*yfac), t._chr
            if t._chd:
                # Inner node: draw a triangle labelled with the operator.
                ht,bs,ts,r = 30,20,5,t._opr
                yield '<polygon points='
                yield '"%i,%i %i,%i %i,%i"' % (x-bs,y-ht,x+bs,y-ht,x,y-ts)
                yield ' stroke="black" stroke-width="2" fill="yellow"/>\n'
                yield '<text x="%i" y="%i" ' % (x - len(r) * 3, y-18)
                yield 'style="font-family: courier; font-size:12">%s</text>' % r
            # Every node gets a circle with its value label.
            yield '<circle cx="%i" cy="%i" ' % (x, y)
            yield 'r="15" stroke="black" stroke-width="2" fill="white"/>\n'
            yield '<text x="%i" y="%i" ' % (x - len(c) * 4, y+5)
            yield 'style="font-family: helvetica; font-size:15">%s</text>' % c
        yield "</svg>"
    def corner(self):
        # Maximum (x, y) over the subtree — the drawing's extent.
        xmax, ymax = self._x, self._y
        for t in self._chd:
            txmx, tymx = t.corner()
            xmax = max(txmx, xmax)
            ymax = max(tymx, ymax)
        return xmax, ymax
def buchheim(tree):
    """Compute layout coordinates for *tree*; returns the root DrawTree."""
    layout = firstwalk(DrawTree(tree))
    leftmost = second_walk(layout)  # renamed from `min`, which shadowed the builtin
    if leftmost < 0:
        # Shift the whole drawing right so no node has a negative x.
        third_walk(layout, -leftmost)
    return layout
def third_walk(tree, n):
    """Shift every node of the subtree rooted at *tree* right by *n*.

    Bug fix: the original did ``tree._x += 1`` regardless of *n*, so the
    shift amount requested by buchheim() (``-min`` so the leftmost node
    lands at x == 0) was ignored; the recursion already passed *n* down.
    """
    tree._x += n
    for c in tree._chd:
        third_walk(c, n)
def firstwalk(v, dis=1.):
    """First (bottom-up) Buchheim pass: assign preliminary x positions and
    subtree mod values; returns *v*.

    Fixed: removed the unused ``ell``/``arr`` locals. NOTE(review): the
    recursive call uses the default *dis* rather than propagating the
    caller's value — kept as-is to preserve behavior; confirm intent.
    """
    if not v._chd:
        # Leaf: sit one unit right of the left sibling, or at 0 if leftmost.
        if v.lmost_sibling:
            v._x = v.lbrother()._x + dis
        else:
            v._x = 0.
    else:
        default_anc = v._chd[0]
        for w in v._chd:
            firstwalk(w)
            default_anc = apportion(w, default_anc, dis)
        execute_shifts(v)
        # Center the parent over its children, or place it next to its
        # left brother and defer the difference via _mod.
        midpoint = (v._chd[0]._x + v._chd[-1]._x) / 2
        w = v.lbrother()
        if w:
            v._x = w._x + dis
            v._mod = v._x - midpoint
        else:
            v._x = midpoint
    return v
def apportion(v, default_anc, dis):
    """Buchheim conflict-resolution step for subtree *v*.

    Walks the right contour of the left siblings (vil/vol) and the left
    contour of *v*'s subtree (vir/vor) level by level; wherever the two
    contours would overlap, the whole subtree is shifted right and the
    shift is distributed over the intermediate siblings (via _chg/_sft,
    applied later by execute_shifts). Threads (_thd) splice the shorter
    contour onto the longer one. Returns the updated default ancestor.
    """
    w = v.lbrother()
    if w == None: return default_anc
    # vir/vor: inner/outer pointers on v's (right) subtree;
    # vil/vol: inner/outer pointers on the left sibling's side.
    vir = vor = v
    vil, vol = w, v.lmost_sibling
    # s*: accumulated mod sums along each of the four contours.
    sir = sor = v._mod
    sil, sol = vil._mod, vol._mod
    while vil.right() and vir.left():
        vil, vir, vol, vor = vil.right(), vir.left(), vol.left(), vor.right()
        vor._anc = v
        # Required separation between the two contours at this level.
        shift = (vil._x + sil) - (vir._x + sir) + dis
        if shift > 0:
            wl = ancestor(vil, v, default_anc)
            # Spread the shift over the siblings between wl and v.
            subtrees = v._num - wl._num
            v._chg -= shift / subtrees
            v._sft += shift
            wl._chg += shift / subtrees
            v._x += shift
            v._mod += shift
            sir += shift
            sor += shift
        sil += vil._mod
        sir += vir._mod
        sol += vol._mod
        sor += vor._mod
    # Thread the leftover contour of the deeper subtree onto the shallower one.
    if vil.right() and not vor.right():
        vor._thd = vil.right()
        vor._mod += sil - sor
    else:
        if vir.left() and not vol.left():
            vol._thd = vir.left()
            vol._mod += sir - sol
        default_anc = v
    return default_anc
def execute_shifts(v):
    """Apply the shift/change amounts accumulated by apportion() to the
    children of *v*, walking right-to-left so shifts cascade leftwards."""
    pending_shift = 0
    pending_change = 0
    for child in reversed(v._chd):
        child._x += pending_shift
        child._mod += pending_shift
        pending_change += child._chg
        pending_shift += child._sft + pending_change
def ancestor(vil, v, default_anc):
    """Return vil's recorded ancestor when it is a sibling of *v*,
    otherwise fall back to *default_anc*."""
    candidate = vil._anc
    if candidate in v._pnt._chd:
        return candidate
    return default_anc
def second_walk(v, m=0, depth=0, minv=None):
    """Second (top-down) Buchheim pass: fold the accumulated mod sum *m*
    into each x, set y to the depth, and return the minimum x seen."""
    v._x += m
    v._y = depth
    if minv is None:
        minv = v._x
    else:
        minv = min(v._x, minv)
    for child in v._chd:
        minv = second_walk(child, m + v._mod, depth + 1, minv)
    return minv
| {
"repo_name": "oisdk/PyParse",
"path": "AST.py",
"copies": "1",
"size": "5730",
"license": "mit",
"hash": -264436776750541920,
"line_mean": 30.6574585635,
"line_max": 97,
"alpha_frac": 0.4856893543,
"autogenerated": false,
"ratio": 3.0592632140950347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4044952568395035,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
class Solution:
    """LeetCode 152 — maximum product subarray."""
    def maxProduct(self, A):
        """Return the largest product over all contiguous subarrays of A.

        Prefix/suffix running-product trick: scan running products from the
        left and from the right, restarting after zeros (the ``or 1``); the
        answer is the maximum value seen in either direction. O(n) time.

        Fixed: removed leftover debug print() calls, and the scans now work
        on copies so the caller's list is no longer mutated in place.
        (A dead commented-out alternative implementation was removed.)
        """
        forward = A[:]          # copy: keep the caller's list intact
        backward = A[::-1]
        for i in range(1, len(forward)):
            forward[i] *= forward[i - 1] or 1   # `or 1` restarts after a 0
            backward[i] *= backward[i - 1] or 1
        return max(forward + backward)
# Ad-hoc manual checks left by the original author; only one is active.
# print(Solution().maxProduct([-3,0,1,-2]))
# print(Solution().maxProduct([2,3,-2,4]))
# print(Solution().maxProduct([2,3,4,-2]))
print(Solution().maxProduct([-2,0,-1]))  # expected: 0
# print(Solution().maxProduct([-2,-1,0]))
# print(Solution().maxProduct([-2,-1,0, 1, 2, 3, 0, 4, 5]))
| {
"repo_name": "zuun77/givemegoogletshirts",
"path": "leetcode/python/152_maximum-product-subarray.py",
"copies": "1",
"size": "1326",
"license": "apache-2.0",
"hash": 8819540323817469000,
"line_mean": 33.8947368421,
"line_max": 66,
"alpha_frac": 0.4607843137,
"autogenerated": false,
"ratio": 2.8333333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8770762383875439,
"avg_score": 0.004671052631578947,
"num_lines": 38
} |
from functools import reduce
def factorial(n):
    """Return n! for non-negative n, computed recursively."""
    return 1 if n in (0, 1) else n * factorial(n - 1)
def jc(n):
    """Return n! ("jc" = jiecheng, Chinese for factorial).

    The original body referenced ``operator.mul`` without importing
    ``operator`` (NameError at call time) and ignored *n* entirely,
    always reducing range(1, 10). Both defects are fixed; jc(0) == 1.
    """
    return reduce(lambda acc, k: acc * k, range(1, n + 1), 1)
# Quick sanity checks for factorial.
print(factorial(0))
print(factorial(5))
def is_palindromes(s):
    """Return True when the sequence reads the same forwards and backwards."""
    reversed_s = s[::-1]
    return s == reversed_s
# Quick sanity checks for is_palindromes.
print(is_palindromes('abba'))
print(is_palindromes('abba'))
print(is_palindromes('abca'))
def rabbits(n):
    """Rabbit population in month n: Fibonacci-style growth, with each
    generation dying after five months (subtract the n-5 term once n > 5)."""
    if n in (1, 2):
        return 1
    grown = rabbits(n - 1) + rabbits(n - 2)
    return grown if n <= 5 else grown - rabbits(n - 5)
# Quick sanity check for rabbits.
print(rabbits(10))
def hexes_to_udaciousness(n, spread, target):
    """Number of growth steps (multiplying by 1+spread) needed for n to
    come within one step of target; 0 when target is already below n."""
    if target < n:
        return 0
    steps = 1
    while target > n * (1 + spread):
        n *= (1 + spread)
        steps += 1
    return steps
# demo: growth steps for various start values, spreads and targets
print(hexes_to_udaciousness(100000, 2, 36230) )
print (hexes_to_udaciousness(50000, 2, 150001))
print (hexes_to_udaciousness(20000, 2, 7 * 10 ** 9) )
print(hexes_to_udaciousness(15000, 3, 7 * 10 ** 9))
def is_list(p):
    """Predicate: is p a Python list (including list subclasses)?"""
    return isinstance(p, list)
def deep_count(p):
    """Count the elements of p at every nesting level.

    Bug fix: the original returned from inside the loop at the first
    nested list it met, so any sibling lists after the first were never
    descended into (e.g. deep_count([[1], [2]]) gave 3 instead of 4).
    Uses isinstance directly so the function is self-contained.
    """
    total = len(p)
    for entry in p:
        if isinstance(entry, list):
            total += deep_count(entry)
    return total
# demo: recursive element counts at all nesting levels
print(deep_count([1, 2, 3]))
print(deep_count([1, [], 3]))
print(deep_count([1, [1, 2, [3, 4]]]))
print(deep_count([[[[[[[[1, 2, 3]]]]]]]]))
| {
"repo_name": "coodoing/udacity-searchengine",
"path": "course6_Recursive.py",
"copies": "1",
"size": "1352",
"license": "apache-2.0",
"hash": -6544932827625335000,
"line_mean": 21.5333333333,
"line_max": 72,
"alpha_frac": 0.5813609467,
"autogenerated": false,
"ratio": 2.6509803921568627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8663131464375562,
"avg_score": 0.013841974896260132,
"num_lines": 60
} |
from functools import reduce
from botlang.ast.ast_visitor import ASTVisitor
from botlang.evaluation.values import *
class ExecutionStack(list):
    """Stack of AST nodes currently being evaluated; renders stack traces."""
    def print_trace(self):
        """Format one line per frame, skipping frames that originate from
        the default macros source."""
        from botlang.macros.default_macros import DefaultMacros
        visible_frames = [
            frame for frame in self
            if frame.s_expr.source_reference.source_id !=
            DefaultMacros.DEFAULT_MACROS_SOURCE_ID
        ]
        return ''.join(
            self.frame_message(frame) + '\n' for frame in visible_frames
        )
    @classmethod
    def frame_message(cls, frame):
        """One human-readable trace line for a single stack frame."""
        reference = frame.s_expr.source_reference
        first_code_line = frame.s_expr.code.split('\n')[0]
        return '\tModule "{0}", line {1}, in {2}:\n\t\t{3}'.format(
            reference.source_id,
            reference.start_line,
            frame.print_node_type(),
            first_code_line
        )
class Evaluator(ASTVisitor):
    """
    AST visitor for evaluation
    """
    # NOTE(review): every visit_* method pushes its node onto
    # self.execution_stack before evaluating and pops it only on the
    # success path.  When evaluation raises, the frames are deliberately
    # left on the stack so ExecutionStack.print_trace() can report where
    # execution stopped.
    def __init__(self, module_resolver=None):
        # The None default exists only to raise a clear message here; a
        # module resolver is effectively a required collaborator.
        if module_resolver is None:
            raise Exception('Module resolver required')
        self.module_resolver = module_resolver
        self.execution_stack = ExecutionStack()
    def visit_val(self, val_node, env):
        """
        Value expression evaluation
        """
        return val_node.value
    def visit_list(self, literal_list, env):
        # Evaluate the elements of a literal list in order.
        return [
            element.accept(self, env) for element in literal_list.elements
        ]
    def visit_if(self, if_node, env):
        """
        'If' construct evaluation
        """
        self.execution_stack.append(if_node)
        if if_node.cond.accept(self, env):
            # Popped before the chosen branch runs, so a failure inside the
            # branch is attributed to the branch's own frames.
            self.execution_stack.pop()
            return if_node.if_true.accept(self, env)
        else:
            self.execution_stack.pop()
            return if_node.if_false.accept(self, env)
    def visit_cond(self, cond_node, env):
        """
        'Cond' conditional evaluation
        """
        self.execution_stack.append(cond_node)
        value = None
        # First clause that evaluates to a non-None value wins.
        for clause in cond_node.cond_clauses:
            value = clause.accept(self, env)
            if value is not None:
                break
        self.execution_stack.pop()
        return value
    def visit_cond_predicate_clause(self, predicate_node, env):
        """
        'Cond' predicate clause evaluation
        """
        self.execution_stack.append(predicate_node)
        value = None
        # Returns None when the predicate is falsy, which visit_cond treats
        # as "clause did not match".
        if predicate_node.predicate.accept(self, env):
            value = predicate_node.then_body.accept(self, env)
        self.execution_stack.pop()
        return value
    def visit_cond_else_clause(self, else_node, env):
        """
        'Cond' else clause evaluation
        """
        self.execution_stack.append(else_node)
        value = else_node.then_body.accept(self, env)
        self.execution_stack.pop()
        return value
    def visit_and(self, and_node, env):
        """
        Logical 'and' evaluation
        """
        self.execution_stack.append(and_node)
        # Python's `and` provides the short-circuit: cond2 is evaluated only
        # when cond1 is truthy.
        left_branch = and_node.cond1.accept(self, env)
        result = left_branch and and_node.cond2.accept(self, env)
        self.execution_stack.pop()
        return result
    def visit_or(self, or_node, env):
        """
        Logical 'or' evaluation
        """
        self.execution_stack.append(or_node)
        # Short-circuits like Python's `or`.
        left_branch = or_node.cond1.accept(self, env)
        result = left_branch or or_node.cond2.accept(self, env)
        self.execution_stack.pop()
        return result
    def visit_id(self, id_node, env):
        """
        Identifier (variable name) resolution
        """
        self.execution_stack.append(id_node)
        identifier = env.lookup(id_node.identifier)
        self.execution_stack.pop()
        return identifier
    def visit_fun(self, fun_node, env):
        """
        Function expression evaluation.
        Returns closure
        """
        self.execution_stack.append(fun_node)
        closure = Closure(fun_node, env, self)
        self.execution_stack.pop()
        return closure
    def visit_bot_node(self, bot_node, env):
        """
        Bot node expression evaluation.
        Returns bot-node closure
        """
        self.execution_stack.append(bot_node)
        bot_node = BotNodeValue(bot_node, env, self)  # rebinds the parameter
        self.execution_stack.pop()
        return bot_node
    def visit_bot_result(self, bot_result_node, env):
        """
        Bot result evaluation. Returns a BotResultValue which can be used
        to resume execution in the future.
        """
        self.execution_stack.append(bot_result_node)
        data = bot_result_node.data.accept(self, env)
        message = bot_result_node.message.accept(self, env)
        next_node = bot_result_node.next_node.accept(self, env)
        bot_result_value = BotResultValue(
            data,
            message,
            next_node
        )
        self.execution_stack.pop()
        return bot_result_value
    def visit_app(self, app_node, env):
        """
        Function application evaluation.
        """
        self.execution_stack.append(app_node)
        fun_val = app_node.fun_expr.accept(self, env)
        if not isinstance(fun_val, FunVal):
            # Raised with the app node still on the stack so it shows up in
            # the printed trace.
            raise Exception(
                'Invalid function application: {0} is not a function'.format(
                    fun_val
                )
            )
        arg_vals = [arg.accept(self, env) for arg in app_node.arg_exprs]
        if fun_val.is_reflective():
            # Reflective functions additionally receive the current
            # environment as their first argument.
            result = fun_val.apply(env, *arg_vals)
        else:
            result = fun_val.apply(*arg_vals)
        self.execution_stack.pop()
        return result
    def visit_body(self, body_node, env):
        """
        Evaluation of a sequence of expressions
        """
        self.execution_stack.append(body_node)
        # All but the last expression are evaluated for side effects only;
        # the last expression's value is the body's value.
        for expr in body_node.expressions[0:-1]:
            expr.accept(self, env)
        result = body_node.expressions[-1].accept(self, env)
        self.execution_stack.pop()
        return result
    def visit_definition(self, def_node, env):
        """
        Definition evaluation.
        Mutates the environment with this definition.
        Evaluates the definition body with the same environment
        that is mutated, which allows recursion.
        Doesn't return a value.
        """
        self.execution_stack.append(def_node)
        env.update(
            {def_node.name: def_node.expr.accept(self, env)}
        )
        self.execution_stack.pop()
    def visit_local(self, local_node, env):
        """
        Local definition evaluation
        """
        self.execution_stack.append(local_node)
        # Definitions go into a fresh child environment so they stay local.
        new_env = env.new_environment()
        for definition in local_node.definitions:
            definition.accept(self, new_env)
        result = local_node.body.accept(self, new_env)
        self.execution_stack.pop()
        return result
    def visit_module_definition(self, module_node, env):
        """
        Module definition
        """
        self.execution_stack.append(module_node)
        from botlang.modules.module import BotlangModule
        module = BotlangModule(
            module_node.name.accept(self, env),
            module_node.body
        )
        self.module_resolver.add_module(module)
        self.execution_stack.pop()
        return module
    def visit_module_function_export(self, provide_node, env):
        """
        Module function's export
        """
        # 'provide' only makes sense while the module machinery evaluates a
        # module body; reaching it from this evaluator is an error.
        raise NotInModuleContextException()
    def visit_module_import(self, require_node, env):
        """
        Import a module into scope
        """
        self.execution_stack.append(require_node)
        module_name = require_node.module_name.accept(self, env)
        bindings = self.module_resolver.get_bindings(self, module_name)
        env.update(bindings)
        self.execution_stack.pop()
        return Nil
class NotInModuleContextException(Exception):
    """Raised when 'provide' appears outside a top-level module body."""
    def __init__(self):
        message = 'The "provide" keyword must appear in a top-level module context'
        super().__init__(message)
| {
"repo_name": "PostCenter/botlang",
"path": "botlang/evaluation/evaluator.py",
"copies": "1",
"size": "8096",
"license": "mit",
"hash": -9071233875668719000,
"line_mean": 29.4360902256,
"line_max": 77,
"alpha_frac": 0.5805335968,
"autogenerated": false,
"ratio": 4.074484146955209,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5155017743755209,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from collections import defaultdict
class Ref(tuple):
    """Immutable named reference: a (name, id) pair."""
    def __new__(cls, name, id):
        return super().__new__(cls, (name, id))
    @property
    def name(self):
        return self[0]
    @property
    def id(self):
        return self[1]
    def __str__(self):
        # Bug fix: the original had a bare `return`, so __str__ returned
        # None and str(ref) raised TypeError.
        return '{}:{}'.format(self.name, self.id)
class Container:
"""
containter class creating element ids sparsly
>>> c = Container(['a','b','c'])
>>> c
[0: 'a',
1: 'b',
2: 'c']
>>> c[2]
'c'
>>> c.lookup('a')
0
>>> c.remove('b')
1
>>> c
[0: 'a',
2: 'c']
>>> c.add('d')
1
>>> c
[0: 'a',
1: 'd',
2: 'c']
>>> c.add('d')
1
"""
def __init__(self, items=[]):
self._free = []
self._items = {}
self._nums = {}
self.extend(items)
def add(self, item):
try:
return self._nums[item]
except KeyError:
if self._free:
i = self._free.pop()
else:
i = len(self._nums)
self._nums[item] = i
self._items[i] = item
return i
def extend(self, items):
for i in items:
self.add(i)
def lookup(self, item):
return self._nums[item]
def remove(self, item):
i = self._nums.pop(item)
self._items.pop(i)
self._free.append(i)
return i
def __getitem__(self, i):
return self._items[i]
def __delitem__(self, i):
item = self[i]
self.remove(item)
def __iter__(self):
return map(lambda x: x[1], self.items())
def items(self):
return sorted(self._items.items())
def __str__(self):
return '[{}]'.format(', '.join(map(str, self)))
def __repr__(self):
its = ('{}: {}'.format(*it) for it in self.items())
return '[{}]'.format('\n '.join(its))
class RefersMixin:
    """Shared bookkeeping helpers for reference-tracking containers."""
    def _remove_ref(self, ref, i, refdct=None):
        """Drop index i from refdct[ref] (default: self._refers), deleting
        the key once its index set becomes empty; returns i."""
        target = self._refers if refdct is None else refdct
        target[ref].remove(i)
        if not target[ref]:
            del target[ref]
        return i
    def _reduce_refs(self, refsets):
        """Intersect all index sets; with one set, returns that set itself."""
        return reduce(lambda acc, cur: acc & cur, refsets)
class RefContainer(RefersMixin, Container):
    """
    container whose elements are referred to by an outside reference
    """
    def __init__(self, items={}):
        # items: mapping of item -> reference
        self._refers = defaultdict(set)   # ref -> set of item indices
        self._refered = defaultdict(set)  # index -> set of refs
        super().__init__(items=items)
    def add(self, item, ref):
        """Insert item under reference ref; returns the item's index."""
        idx = super().add(item)
        self._refers[ref].add(idx)
        self._refered[idx].add(ref)
        return idx
    def extend(self, items):
        for element, ref in items.items():
            self.add(element, ref)
    def remove(self, item):
        """Remove item and unregister it from all of its references."""
        idx = super().remove(item)
        for ref in self._refered[idx]:
            self._remove_ref(ref, idx)
        return idx
    def refers(self, ref):
        """Set of indices registered under ref."""
        return self._reduce_refs([self._refers[ref]])
class AssocContainer(RefersMixin, Container):
    """
    container of namedtuple elements, indexed by every field value
    """
    def __init__(self, items=[]):
        # field name -> (field value -> set of element indices)
        self._assocs = defaultdict(lambda: defaultdict(set))
        super().__init__(items=items)
    def add(self, item):
        idx = super().add(item)
        for field, value in item._asdict().items():
            self._assocs[field][value].add(idx)
        return idx
    def remove(self, item):
        idx = super().remove(item)
        for field, value in item._asdict().items():
            self._remove_ref(value, idx, refdct=self._assocs[field])
        return idx
    def refers(self, **refs):
        """Indices of elements matching every given field=value pair."""
        return self._reduce_refs(self._assocs[field].get(value, set())
                                 for field, value in refs.items())
class SetsContainer(RefersMixin, Container):
    """
    container whose elements are sets; each member item refers back to the
    set(s) containing it
    """
    def __init__(self, items=()):
        # bug fix: default was a shared mutable list ([]); use a tuple
        self._refers = defaultdict(set)  # item -> indices of sets holding it
        super().__init__(items=items)
    def add(self, items):
        # sets are stored as deduplicated tuples so they stay hashable
        items = tuple(set(items))
        i = super().add(items)
        for ref in items:
            self._refers[ref].add(i)
        return i
    def lookup(self, items):
        items = tuple(set(items))
        return super().lookup(items)
    def __getitem__(self, i):
        return set(super().__getitem__(i))
    def remove(self, items):
        items = tuple(set(items))
        i = super().remove(items)
        for ref in items:
            self._remove_ref(ref, i)
        return i
    def refers(self, *items):
        """Indices of the stored sets containing every given item."""
        return self._reduce_refs(self._refers[ref] for ref in items)
    def dismiss(self, item):
        """Remove item from every stored set, yielding (index, new_set) for
        each set that remains non-empty afterwards."""
        # Bug fix: refers() can return the *live* index set from
        # self._refers (reduce over a single set hands back that set
        # unchanged), and remove() below mutates it -- iterating it
        # directly raised RuntimeError('Set changed size during
        # iteration').  Snapshot the indices first.
        for i in list(self.refers(item)):
            items = self[i]
            self.remove(items)
            items.remove(item)
            if items:
                j = self.add(items)
                assert i == j  # free-list reuse keeps the index stable
                yield i, items
class GroupedContainer(SetsContainer):
    """Partition-like container: an item belongs to at most one group."""
    def join(self, *items):
        """Merge the given items into a single group."""
        incoming = set(items)
        touching = [grp for grp in self
                    if incoming.intersection(grp)]
        if len(touching) > 1:
            raise IndexError('items of different groups cannot be joined')
        elif touching:
            existing, = touching
            removed = self.remove(existing)
            added = self.add(incoming.union(existing))
            assert removed == added
        else:
            self.add(items)
    def part(self, *items):
        """Ensure each given item sits in its own (possibly new) group."""
        remaining = set(items)
        for grp in self:
            if len(remaining.intersection(grp)) > 1:
                raise IndexError('cannot part items of the same group')
            remaining -= set(grp)
        for item in remaining:
            self.add({item})
| {
"repo_name": "wabu/zeroflo",
"path": "zeroflo/top/container.py",
"copies": "1",
"size": "5718",
"license": "mit",
"hash": 9039788506574943000,
"line_mean": 23.4358974359,
"line_max": 79,
"alpha_frac": 0.5050717034,
"autogenerated": false,
"ratio": 3.771767810026385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4776839513426385,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from collections import defaultdict
from ..utils.common import getCurieFromVal, dict_set2list
from .bioentity import BioEntity
class BioThingsAPI:
    """Helper describing one BioThings endpoint: builds batched queries and
    maps response records back to database ids."""
    MAX_BATCH_SIZE = 1000  # BioThings POST queries accept at most 1000 ids
    def __init__(self, metadata):
        self._process_metadata(metadata)
    def _process_metadata(self, metadata):
        # metadata keys: url, mapping (prefix -> field names), id_ranks
        self.url = metadata.get("url")
        self.mapping = metadata.get("mapping")
        self.rank = metadata.get("id_ranks")
    def _get_all_fields(self):
        """Flatten every field name configured in the mapping."""
        return [field for fields in self.mapping.values() for field in fields]
    def _get_scope(self, prefix):
        return self.mapping.get(prefix)
    def build_query(self, prefix, ids):
        """Yield one POST query string per batch of MAX_BATCH_SIZE ids."""
        template = "q={inputs}&scopes={scopes}&fields={fields}&dotfield=true&species=human"
        for start in range(0, len(ids), self.MAX_BATCH_SIZE):
            batch = ids[start:start + self.MAX_BATCH_SIZE]
            yield (
                template.replace("{inputs}", ",".join(batch))
                .replace("{scopes}", ",".join(self._get_scope(prefix)))
                .replace("{fields}", ",".join(self._get_all_fields()))
            )
    def __get_db_ids_helper(self, record):
        """Collect per-prefix id values found in one response record."""
        collected = defaultdict(set)
        for prefix, field_names in self.mapping.items():
            for field_name in field_names:
                if field_name not in record:
                    continue
                value = record[field_name]
                if isinstance(value, list):
                    collected[prefix].update(str(v) for v in value)
                else:
                    collected[prefix].add(str(value))
        return dict_set2list(collected)
    def get_db_ids(self, prefix, semantic_type, response):
        """Map each found response record to a BioEntity keyed by curie."""
        result = {}
        for record in response:
            if "notfound" in record:
                continue
            curie = getCurieFromVal(record.get("query"), prefix)
            result[curie] = BioEntity(semantic_type,
                                      self.__get_db_ids_helper(record))
        return result
| {
"repo_name": "biothings/biothings_explorer",
"path": "biothings_explorer/resolve_ids/api.py",
"copies": "1",
"size": "1882",
"license": "apache-2.0",
"hash": 2542285772423409000,
"line_mean": 35.1923076923,
"line_max": 88,
"alpha_frac": 0.5632306057,
"autogenerated": false,
"ratio": 3.8329938900203664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.989181532212678,
"avg_score": 0.0008818347187173945,
"num_lines": 52
} |
from functools import reduce
from collections import deque
from operator import getitem, setitem
def nested_enumerate(lst):
    """An analogue of enumerate for nested lists.
    Yields (index, element) pairs of `lst` where `index` is a list of
    integers [x0, x1,.., xn] such that `lst[x0][x1]...[xn]==element`
    >>> for i, e in nested_enumerate([0, [[[1, [2, [[[3]]]]]]], [[[4]]]]):
            print('%s %s'%(str(i), str(e)))
    [0] 0
    [1, 0, 0, 0] 1
    [1, 0, 0, 1, 0] 2
    [1, 0, 0, 1, 1, 0, 0, 0] 3
    [2, 0, 0, 0] 4
    """
    # explicit stack (pop from the end); children are pushed reversed so
    # the leftmost child is processed first -> depth-first, left-to-right
    stack = [([i], e) for (i, e) in enumerate(lst)]
    stack.reverse()
    while stack:
        index, obj = stack.pop()
        if isinstance(obj, list):
            children = [(index + [i], e) for (i, e) in enumerate(obj)]
            stack.extend(reversed(children))
        else:
            yield index, obj
# complementary functions #
def nested_getitem(lst, index):
    """Returns lst[index[0]]...[index[n]]; an empty index returns lst."""
    current = lst
    for i in index:
        current = current[i]
    return current
def nested_setitem(lst, index, value):
    """Equivalent to the statement lst[index[0]]...[index[n]]=value"""
    parent = lst
    for i in index[:-1]:
        parent = parent[i]
    parent[index[-1]] = value
# quick test #
deeplist = [0, [[[1, [2, [[[3]]]]]]], [[[4]]]]
for index, element in nested_enumerate(deeplist):
    assert nested_getitem(deeplist, index)==element
# example usage: applying a function to each element in a nested list #
square = lambda x: x**2
for index, element in nested_enumerate(deeplist):
    nested_setitem(deeplist, index, square(element))
# NOTE: deeplist is mutated in place above; the checks below rely on the
# squared values
assert deeplist==[0, [[[1, [4, [[[9]]]]]]], [[[16]]]]
# not recommended, but demonstrates different ways of traversing a list
# (plus, we all love flatten, right? ;-)
def flatten(lst):
    """Return the leaf elements of a nested list in traversal order."""
    return [element for (_, element) in nested_enumerate(lst)]
def flatten2(lst):
    """Same result as flatten, but re-resolves every index from scratch."""
    return [nested_getitem(lst, idx) for (idx, _) in nested_enumerate(lst)]
assert flatten(deeplist) == flatten2(deeplist) == [0, 1, 4, 9, 16]
# sort elements based on their depth of nesting, with deepest first
# Bug fix: `lambda (i, e): ...` is Python-2 tuple-parameter syntax, removed
# in Python 3 (PEP 3113) -- it was a SyntaxError here; unpack by indexing.
depthfirst = [e for (i, e) in sorted(nested_enumerate(deeplist),
                                     key=lambda pair: -len(pair[0]))]
assert depthfirst == [9, 4, 1, 16, 0]
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578376_analogue_enumerate_nested/recipe-578376.py",
"copies": "1",
"size": "2423",
"license": "mit",
"hash": -2062685325794573800,
"line_mean": 29.2875,
"line_max": 92,
"alpha_frac": 0.5984316962,
"autogenerated": false,
"ratio": 3.256720430107527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4355152126307527,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from collections import KeysView
class Node:
    """A leaf-like tree node holding a single value."""
    def __init__(self, value):
        self.value = value
    def get_child(self, key):
        """Plain nodes have no children; always answer the null node."""
        return NoneNode()
    def keys(self):
        return KeysView([])
    def __str__(self):
        return "<{}>".format(self.value)
    def __eq__(self, other):
        if not isinstance(other, Node):
            return False
        return self.value == other.value
    def __len__(self):
        return 1
    def __hash__(self):
        # offset keeps node hashes distinct from their raw values
        return 19 + hash(self.value)
    def __getitem__(self, key):
        return self.get_child(key)
class Tree(Node):
    """Internal tree node: a value plus a list of child nodes."""
    def __init__(self, value, children):
        super().__init__(value)
        self.children = children
        # subtree size/hash are cached once at construction time
        self.__children_length = sum(len(child) for child in children)
        self.__children_hash = sum(hash(child) for child in children)
    def get(self, path):
        """Walk a '/'-separated path of child values from this node."""
        node = self
        for key in path.split('/'):
            node = node.get_child(key)
        return node
    def get_child(self, key):
        for node in self.children:
            if node.value == key:
                return node
        return NoneNode()
    def keys(self):
        return KeysView([child.value for child in self.children])
    def __str__(self):
        return '<T: {} (with {} nodes)>'.format(self.value, len(self.children))
    def __eq__(self, other):
        if not isinstance(other, Tree):
            return False
        return super().__eq__(other) and other.children == self.children
    def __len__(self):
        return super().__len__() + self.__children_length
    def __hash__(self):
        return 13 + super().__hash__() + self.__children_hash
class LeafTree(Tree):
    """A tree wrapping exactly one leaf node, kept on `self.leaf`."""
    def __init__(self, value, leaf):
        children = [leaf]
        super().__init__(value, children)
        self.leaf = leaf
class LeafNode(Node):
    """A node explicitly marked as a leaf in its string form."""
    def __str__(self):
        return '<L: %s>' % (self.value,)
class NoneNode(Node):
    """Null-object node returned when a child lookup finds nothing."""
    def __init__(self):
        super().__init__(None)
| {
"repo_name": "hiroara/tree-python",
"path": "tree/structs.py",
"copies": "1",
"size": "1857",
"license": "mit",
"hash": -3524719577075760000,
"line_mean": 24.4383561644,
"line_max": 100,
"alpha_frac": 0.577813678,
"autogenerated": false,
"ratio": 3.8288659793814435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4906679657381443,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from collections import namedtuple
from itertools import chain
from graphics import save_svg
Point = namedtuple("Point", ["x", "y"])
class Map():
    """
    Representing a map which contains walls, a starting point and a target.
    """
    def __init__(self, max_acceleration, filename=""):
        """
        Creates a map.
        max_acceleration: the maximum acceleration allowed on the map
        filename: if given loads the map directly
        """
        self.map = list()          # list of (Point, Point) wall segments
        self.start = Point(0, 0)
        self.target = Point(0, 0)
        self.size_x = 0
        self.size_y = 0
        self.max_acceleration = int(max_acceleration)
        if filename:
            self.load(filename)
    def __repr__(self):
        return "Dimensions: {}, {} Walls: {}".format(self.size_x, self.size_y, self.map)
    def add_line(self, p, q):
        """
        Adds a line from p to q.
        p: line point
        q: line point
        """
        self.map.append((p, q))
    def load(self, filename):
        """
        Loads a map from a file.
        filename: the filename of the map file

        Line format: '# comment', 'w x1 y1 x2 y2' (wall),
        's x y' (start), 't x y' (target).
        Raises Exception on any other leading token.
        """
        # bug fix: the file handle was previously leaked (bare open())
        with open(filename) as map_file:
            for line in map_file:
                split_line = line.split()
                # bug fix: blank lines previously raised IndexError
                if not split_line or line[0] == "#":
                    continue
                elif split_line[0].lower() == "w":
                    # update dimensions
                    self.size_x = max(self.size_x, int(split_line[1]))
                    self.size_x = max(self.size_x, int(split_line[3]))
                    # bug fix: size_y was computed from size_x (copy-paste)
                    self.size_y = max(self.size_y, int(split_line[2]))
                    self.size_y = max(self.size_y, int(split_line[4]))
                    p = Point(int(split_line[1]), int(split_line[2]))
                    q = Point(int(split_line[3]), int(split_line[4]))
                    self.add_line(p, q)
                elif split_line[0].lower() == "s":
                    self.start = Point(int(split_line[1]), int(split_line[2]))
                elif split_line[0].lower() == "t":
                    self.target = Point(int(split_line[1]), int(split_line[2]))
                else:
                    raise Exception("Unsupported character at the beginning of line: " + line)
| {
"repo_name": "Faerbit/evol-racer",
"path": "map.py",
"copies": "1",
"size": "2164",
"license": "mit",
"hash": 2312067548752440000,
"line_mean": 31.7878787879,
"line_max": 90,
"alpha_frac": 0.5231053604,
"autogenerated": false,
"ratio": 3.770034843205575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9783723061877063,
"avg_score": 0.0018834283457025598,
"num_lines": 66
} |
from functools import reduce
from datetime import datetime
from operator import and_
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from celery import states
from pytz import utc
from vms.models import Dc, Vm, Node, NodeStorage, Subnet, Image, VmTemplate, Iso, TaskLogEntry
from pdns.models import Domain
from gui.models import User, Role
from que import TT_AUTO
from api import serializers as s
from api.task.log import get_task_types
# Models whose instances can appear as the object of a task log entry.
TASK_LOG_MODELS = (Dc, Vm, Node, NodeStorage, Subnet, Image, VmTemplate, Iso, Domain, User, Role)
# (value, label) choices for the status filter; '' means "all statuses".
TASK_STATES = (
    ('', _('Status (all)')),
    (states.PENDING, _(states.PENDING)),
    (states.SUCCESS, _(states.SUCCESS)),
    (states.FAILURE, _(states.FAILURE)),
    (states.REVOKED, _(states.REVOKED)),
)
# (model_name, verbose_name) choices for the object-type filter; '' = all.
# TODO: Switch to filtering based on object_type => add index(content_type_model) + remove content_type field
# TODO: That means that in the meantime we don't have filters on logs for dummy task models
# noinspection PyProtectedMember,PyUnresolvedReferences
TASK_OBJECT_TYPES = [('', _('Object type (all)'))] + \
                    [(m._meta.model_name, m._meta.verbose_name) for m in TASK_LOG_MODELS]
class TaskLogEntrySerializer(s.ModelSerializer):
    """
    Serializes vms.models.TaskLogEntry
    """
    # Read-only fields resolved from model attributes/helper methods.
    username = s.Field(source='get_username')
    object_name = s.Field(source='get_object_name')
    object_alias = s.Field(source='get_object_alias')
    object_type = s.Field(source='object_type')
    class Meta:
        model = TaskLogEntry
        fields = ('time', 'task', 'status', 'username', 'msg', 'detail',
                  'object_name', 'object_alias', 'object_type', 'flag')
class TaskCancelSerializer(s.Serializer):
    """Input for the task-cancel API call (force defaults to False)."""
    force = s.BooleanField(default=False)
class TaskLogFilterSerializer(s.Serializer):
    """Validated filter form for task log listings.

    validate() caches the resolved ContentType and the matching object
    primary keys on the instance; get_filters() then builds one combined
    Q expression (or False for "no results", or None for "no filter").
    """
    _content_type = None  # cache filled by validate() when object_name is used
    _object_pks = None    # cache of matching object PKs; [] => empty result
    status = s.ChoiceField(label=_('Status'), required=False, choices=TASK_STATES)
    object_type = s.ChoiceField(source='content_type', label=_('Object type'), required=False,
                                choices=TASK_OBJECT_TYPES)
    object_name = s.CharField(label=_('Object name'), max_length=2048, required=False)
    show_running = s.BooleanField(label=_('Show only running tasks'), required=False, default=False)
    hide_auto = s.BooleanField(label=_('Hide automatic tasks'), required=False, default=False)
    date_from = s.DateField(label=_('Since'), required=False)
    date_to = s.DateField(label=_('Until'), required=False)
    def validate(self, attrs):
        """Resolve object_name into primary keys (requires object_type)."""
        object_type = attrs.get('content_type', None)
        object_name = attrs.get('object_name', None)
        # object_name depends on object_type
        if object_name:
            if not object_type:
                self._errors['object_type'] = s.ErrorList([_('object_type attribute is required when '
                                                             'filtering by object_name.')])
                return attrs
            self._content_type = content_type = ContentType.objects.get(model=object_type)
            model_class = content_type.model_class()
            lookup_kwargs = model_class.get_log_name_lookup_kwargs(object_name)
            filter_kwargs = {key + '__icontains': val for key, val in lookup_kwargs.items()}
            self._object_pks = list(model_class.objects.filter(**filter_kwargs).values_list('pk', flat=True))
        return attrs
    def get_filters(self, pending_tasks=()):
        """Combine all requested filters into one Q via AND.

        Returns False to signal an empty result set, None when there is
        nothing to filter on. Call only after validation.
        """
        if self._object_pks is not None and not self._object_pks:  # Means that we want to return empty filter results
            return False
        tz = timezone.get_current_timezone()
        data = self.object
        query = []
        date_from = data.get('date_from')
        if date_from:
            # dates are interpreted as UTC midnight, then shown in local tz
            date_from = datetime.combine(date_from, datetime.min.time())
            query.append(Q(time__gte=date_from.replace(tzinfo=utc).astimezone(tz)))
        date_to = data.get('date_to')
        if date_to:
            date_to = datetime.combine(date_to, datetime.min.time())
            query.append(Q(time__lte=date_to.replace(tzinfo=utc).astimezone(tz)))
        if self._object_pks:
            query.append(Q(object_pk__in=self._object_pks))
        status = data.get('status')
        if status:
            query.append(Q(status=status))
        if data.get('show_running'):
            query.append(Q(task__in=pending_tasks))
        object_type = data.get('content_type')
        if object_type:
            if self._content_type:
                content_type = self._content_type
            else:
                content_type = ContentType.objects.get(model=object_type)
            query.append(Q(content_type=content_type))
        if data.get('hide_auto'):
            query.append(~Q(task_type__in=get_task_types(tt=(TT_AUTO,))))
        if query:
            return reduce(and_, query)
        else:
            return None
| {
"repo_name": "erigones/esdc-ce",
"path": "api/task/serializers.py",
"copies": "1",
"size": "5023",
"license": "apache-2.0",
"hash": -6604186334997036000,
"line_mean": 38.2421875,
"line_max": 118,
"alpha_frac": 0.6336850488,
"autogenerated": false,
"ratio": 3.8285060975609757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4962191146360976,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from datetime import datetime
import operator
import re
from django_select2.views import AutoResponseView
from django.contrib.auth.models import Group
from django.conf.urls import url
from django.db.models import Q, Count
from fiscal.models import MembershipPersonRole
from workshops import models
from workshops.util import OnlyForAdminsNoRedirectMixin, LoginNotRequiredMixin
class TagLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """Select2 lookup returning tags whose name contains the term."""
    def get_queryset(self):
        queryset = models.Tag.objects.all()
        if not self.term:
            return queryset
        return queryset.filter(name__icontains=self.term)
class BadgeLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """Select2 lookup matching badges on name or title substring."""
    def get_queryset(self):
        queryset = models.Badge.objects.all()
        if not self.term:
            return queryset
        term_match = Q(name__icontains=self.term) | Q(title__icontains=self.term)
        return queryset.filter(term_match)
class LessonLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """Select2 lookup returning lessons whose name contains the term."""
    def get_queryset(self):
        queryset = models.Lesson.objects.all()
        if not self.term:
            return queryset
        return queryset.filter(name__icontains=self.term)
class EventLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """Select2 lookup matching events on slug substring."""
    def get_queryset(self):
        queryset = models.Event.objects.all()
        if self.term:
            queryset = queryset.filter(slug__icontains=self.term)
        return queryset
class TTTEventLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """Select2 lookup restricted to TTT-tagged events, matched on slug."""
    def get_queryset(self):
        queryset = models.Event.objects.filter(tags__name="TTT")
        if self.term:
            queryset = queryset.filter(slug__icontains=self.term)
        return queryset
class OrganizationLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """Select2 lookup matching organizations on domain or full name."""
    def get_queryset(self):
        queryset = models.Organization.objects.order_by("fullname")
        if not self.term:
            return queryset
        term_match = Q(domain__icontains=self.term) | Q(fullname__icontains=self.term)
        return queryset.filter(term_match)
class AdministratorOrganizationLookupView(
    OnlyForAdminsNoRedirectMixin, AutoResponseView
):
    """Select2 lookup over administrator organizations only."""
    def get_queryset(self):
        queryset = models.Organization.objects.administrators()
        if not self.term:
            return queryset
        term_match = Q(domain__icontains=self.term) | Q(fullname__icontains=self.term)
        return queryset.filter(term_match)
class MembershipLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """Select2 lookup for memberships by organization, variant, or -- when
    the term parses as YYYY-MM-DD -- by agreement date range."""
    def get_queryset(self):
        results = models.Membership.objects.all()
        if self.term:
            # parse query into date
            try:
                date = datetime.strptime(self.term, "%Y-%m-%d").date()
            except ValueError:
                date = None
            # filter by organization name
            org_q = Q(organization__domain__icontains=self.term) | Q(
                organization__fullname__icontains=self.term
            )
            # filter by variant
            variant_q = Q(variant__icontains=self.term)
            if date:
                # filter by agreement date range
                agreement_q = Q(agreement_start__lte=date, agreement_end__gte=date)
                results = results.filter(org_q | variant_q | agreement_q)
            else:
                results = results.filter(org_q | variant_q)
        return results
class MemberRoleLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """Select2 lookup matching member roles on name or verbose name."""
    def get_queryset(self):
        queryset = models.MemberRole.objects.all()
        if not self.term:
            return queryset
        term_match = Q(name__icontains=self.term) | Q(verbose_name__icontains=self.term)
        return queryset.filter(term_match)
class MembershipPersonRoleLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """Select2 lookup matching membership person roles on (verbose) name."""
    def get_queryset(self):
        queryset = MembershipPersonRole.objects.all()
        if not self.term:
            return queryset
        term_match = Q(name__icontains=self.term) | Q(verbose_name__icontains=self.term)
        return queryset.filter(term_match)
class PersonLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """Select2 lookup for persons by name, e-mail or username.

    A two-word term is additionally tried as a personal/family name pair
    in either order.
    """
    def get_queryset(self):
        results = models.Person.objects.all()
        if self.term:
            filters = [
                Q(personal__icontains=self.term),
                Q(family__icontains=self.term),
                Q(email__icontains=self.term),
                Q(secondary_email__icontains=self.term),
                Q(username__icontains=self.term),
            ]
            # split query into first and last names
            tokens = re.split(r"\s+", self.term)
            if len(tokens) == 2:
                name1, name2 = tokens
                complex_q = (
                    Q(personal__icontains=name1) & Q(family__icontains=name2)
                ) | (Q(personal__icontains=name2) & Q(family__icontains=name1))
                filters.append(complex_q)
            # this is brilliant: it applies OR to all search filters
            results = results.filter(reduce(operator.or_, filters))
        return results
class AdminLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """The same as PersonLookup, but allows only to select administrators.
    Administrator is anyone with superuser power or in "administrators" group.
    """
    def get_queryset(self):
        # NOTE(review): raises Group.DoesNotExist if the "administrators"
        # group is missing -- presumably created by fixtures/migrations.
        admin_group = Group.objects.get(name="administrators")
        results = models.Person.objects.filter(
            Q(is_superuser=True) | Q(groups__in=[admin_group])
        )
        if self.term:
            results = results.filter(
                Q(personal__icontains=self.term)
                | Q(family__icontains=self.term)
                | Q(email__icontains=self.term)
                | Q(secondary_email__icontains=self.term)
                | Q(username__icontains=self.term)
            )
        return results
class AirportLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """Select2 lookup matching airports on IATA code or full name."""
    def get_queryset(self):
        queryset = models.Airport.objects.all()
        if not self.term:
            return queryset
        term_match = Q(iata__icontains=self.term) | Q(fullname__icontains=self.term)
        return queryset.filter(term_match)
class LanguageLookupView(LoginNotRequiredMixin, AutoResponseView):
    """Public Select2 lookup for languages by name or ISO subtag."""
    def dispatch(self, request, *args, **kwargs):
        # a "subtag" GET parameter switches to exact-subtag matching
        self.subtag = "subtag" in request.GET.keys()
        return super().dispatch(request, *args, **kwargs)
    def get_queryset(self):
        results = models.Language.objects.all()
        if self.term:
            results = results.filter(
                Q(name__icontains=self.term) | Q(subtag__icontains=self.term)
            )
            if self.subtag:
                # exact (case-insensitive) subtag match only
                return results.filter(subtag__iexact=self.term)
        # order the remaining languages by popularity (speaker count)
        results = results.annotate(person_count=Count("person")).order_by(
            "-person_count"
        )
        return results
class TrainingRequestLookupView(OnlyForAdminsNoRedirectMixin, AutoResponseView):
    """Select2 lookup for training requests by requester name or e-mail.

    NOTE(review): the previous docstring was copy-pasted from
    AdminLookupView and wrongly described administrator filtering, which
    this view does not do.
    """
    def get_queryset(self):
        results = models.TrainingRequest.objects.all()
        if self.term:
            # search for name if two words provided
            tok = re.split(r"\s+", self.term)
            if len(tok) == 2:
                name_q = Q(personal__icontains=tok[0], family__icontains=tok[1]) | Q(
                    personal__icontains=tok[1], family__icontains=tok[0]
                )
            else:
                # empty Q
                name_q = Q(id=0)
            results = results.filter(
                Q(personal__icontains=self.term)
                | Q(family__icontains=self.term)
                | Q(email__icontains=self.term)
                | Q(secondary_email__icontains=self.term)
                | name_q
            )
        return results
# URL routing for the AJAX lookup endpoints (used by select2-style widgets).
# Each entry maps a named route to one of the lookup views defined above.
urlpatterns = [
    url(r"^tags/$", TagLookupView.as_view(), name="tag-lookup"),
    url(r"^badges/$", BadgeLookupView.as_view(), name="badge-lookup"),
    url(r"^lessons/$", LessonLookupView.as_view(), name="lesson-lookup"),
    url(r"^events/$", EventLookupView.as_view(), name="event-lookup"),
    url(r"^ttt_events/$", TTTEventLookupView.as_view(), name="ttt-event-lookup"),
    url(
        r"^organizations/$",
        OrganizationLookupView.as_view(),
        name="organization-lookup",
    ),
    url(
        r"^admin_orgs/$",
        AdministratorOrganizationLookupView.as_view(),
        name="administrator-org-lookup",
    ),
    url(r"^memberships/$", MembershipLookupView.as_view(), name="membership-lookup"),
    url(r"^member-roles/$", MemberRoleLookupView.as_view(), name="memberrole-lookup"),
    url(
        r"^membership-person-roles/$",
        MembershipPersonRoleLookupView.as_view(),
        name="membershippersonrole-lookup",
    ),
    url(r"^persons/$", PersonLookupView.as_view(), name="person-lookup"),
    url(r"^admins/$", AdminLookupView.as_view(), name="admin-lookup"),
    url(r"^airports/$", AirportLookupView.as_view(), name="airport-lookup"),
    url(r"^languages/$", LanguageLookupView.as_view(), name="language-lookup"),
    url(
        r"^training_requests/$",
        TrainingRequestLookupView.as_view(),
        name="trainingrequest-lookup",
    ),
]
| {
"repo_name": "swcarpentry/amy",
"path": "amy/workshops/lookups.py",
"copies": "1",
"size": "9342",
"license": "mit",
"hash": -7664379429956278000,
"line_mean": 31.4375,
"line_max": 86,
"alpha_frac": 0.6125026761,
"autogenerated": false,
"ratio": 4.070588235294117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5183090911394117,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from datetime import datetime
import re
from flask import Blueprint, render_template, request, redirect, url_for
from flask.ext.login import login_required, current_user
from bnd.forms import CheckpointEvaluationForm
from bnd.models import Checkpoint, Team, Goal, Evaluation, \
CheckpointEvaluation, db
# Blueprint grouping all checkpoint-related routes; templates are resolved
# from templates/checkpoint.
checkpoint_module = Blueprint(
    'checkpoint', __name__, template_folder='templates/checkpoint')
@checkpoint_module.route('/<int:checkpoint_id>')
@login_required
def view(checkpoint_id):
    """Render a checkpoint page with the current user's goals and evaluations.

    Expects ``team_id`` (and optionally ``goal_id``) as query-string
    arguments; returns 404 when the checkpoint or team does not exist.
    """
    team_id, goal_id = map(request.args.get, ['team_id', 'goal_id'])
    checkpoint = Checkpoint.get_or_404(checkpoint_id)
    team = Team.get_or_404(team_id)
    goals = Goal.query.filter_by(team_id=team.id, user_id=current_user.id)
    # BUG FIX: filter_by() returns a query object, not a record; the form's
    # ``obj=`` argument (and the template) need the actual instance, so
    # fetch the first (only) matching row.
    checkpoint_eval = CheckpointEvaluation.query.filter_by(
        user_id=current_user.id, checkpoint_id=checkpoint_id).first()
    form = CheckpointEvaluationForm(obj=checkpoint_eval)

    evaluations = {}
    # FIXME: Revise the following section; use a JOIN statement
    for goal in goals:
        evaluations[goal.id] = Evaluation.fetch(current_user.id,
                                                checkpoint.id, goal.id)
    # BUG FIX: reduce() without an initializer raises TypeError when the
    # user has no goals; all() handles the empty case (vacuously True).
    has_evaluations = all(v is not None for v in evaluations.values())

    context = dict(
        form=form,
        checkpoint=checkpoint,
        team=team,
        goals=goals,
        evaluations=evaluations,
        has_evaluations=has_evaluations,
        checkpoint_evaluation=checkpoint_eval,
    )
    return render_template('checkpoint/view.html', **context)
@checkpoint_module.route('/<int:checkpoint_id>/evaluate', methods=['POST'])
@login_required
def evaluate(checkpoint_id):
    """This will be called when a user clicks a submit button on the
    checkpoint.view page.

    Upserts the user's CheckpointEvaluation (attendance + essay) and one
    Evaluation per submitted "goal-<id>" form field, then redirects back to
    the checkpoint view.
    """
    team_id = request.args.get('team_id')
    checkpoint = Checkpoint.get_or_404(checkpoint_id)
    team = Team.get_or_404(team_id)

    # Process CheckpointEvaluation first
    # Missing form fields default to the sentinel string 'na'.
    attendance = request.form.get('attendance', 'na')
    essay = request.form.get('essay', 'na')
    data = dict(attendance=attendance, essay=essay)
    checkpoint_evaluation = CheckpointEvaluation.query.filter_by(
        user_id=current_user.id,
        checkpoint_id=checkpoint.id,
    ).first()
    if checkpoint_evaluation is None:
        # NOTE(review): .create() presumably commits internally — confirm,
        # since the update branch below commits explicitly.
        CheckpointEvaluation.create(
            user_id=current_user.id,
            checkpoint_id=checkpoint.id,
            data=data,
        )
    else:
        # Refresh the existing record in place with a new timestamp.
        checkpoint_evaluation.timestamp = datetime.utcnow()
        checkpoint_evaluation.data = data
        db.session.commit()

    # Then process Evaluation
    # Form fields named "goal-<id>" carry per-goal scores.
    for k, v in request.form.items():
        m = re.match(r'goal-(?P<goal_id>\d+)', k)
        if m is not None:
            goal_id = m.group('goal_id')
            evaluation = Evaluation.query.filter_by(
                user_id=current_user.id,
                checkpoint_id=checkpoint.id,
                goal_id=goal_id).first()
            if evaluation is None:
                Evaluation.create(
                    score=v,
                    user=current_user,
                    checkpoint=checkpoint,
                    goal_id=goal_id,
                )
            else:
                evaluation.timestamp = datetime.utcnow()
                evaluation.score = v
                db.session.commit()

    return redirect(url_for('checkpoint.view', checkpoint_id=checkpoint.id,
                            team_id=team.id))
| {
"repo_name": "suminb/bnd",
"path": "bnd/checkpoint.py",
"copies": "1",
"size": "3521",
"license": "mit",
"hash": -1881775025371914500,
"line_mean": 31.6018518519,
"line_max": 75,
"alpha_frac": 0.6197103096,
"autogenerated": false,
"ratio": 3.9253065774804905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 108
} |
from functools import reduce
from dateutil.parser import parse
from django.db.models import Case, Count, F, IntegerField, Manager, Max, Sum, When
from kolibri.auth.models import FacilityUser
from kolibri.content.models import ContentNode
from kolibri.logger.models import ContentSummaryLog
from kolibri.core.lessons.models import Lesson
from le_utils.constants import content_kinds
from rest_framework import serializers
from .utils.return_users import get_members_or_user
class UserReportSerializer(serializers.ModelSerializer):
    """Serializes a FacilityUser together with progress and last-activity
    information for the content node named in the view kwargs."""

    progress = serializers.SerializerMethodField()
    last_active = serializers.SerializerMethodField()

    class Meta:
        model = FacilityUser
        fields = (
            'pk', 'username', 'full_name', 'progress', 'last_active',
        )

    def get_progress(self, target_user):
        """Return per-kind progress summaries for the node in the URL.

        Topic nodes aggregate over their descendants; leaf nodes report
        their own summary log (or zeroed defaults when no log exists).
        """
        content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])
        # progress details for a topic node and everything under it
        if content_node.kind == content_kinds.TOPIC:
            # BUG FIX: values_list(...).distinct() yields a QuerySet, which has
            # no remove(); materialize it as a list so the bookkeeping below works.
            kinds = list(content_node.get_descendants().values_list('kind', flat=True).distinct())
            topic_details = ContentSummaryLog.objects \
                .filter_by_topic(content_node) \
                .filter(user=target_user) \
                .values('kind') \
                .annotate(total_progress=Sum('progress')) \
                .annotate(log_count_total=Count('pk')) \
                .annotate(log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())))
            # evaluate queryset so we can add data for kinds that do not have logs
            topic_details = list(topic_details)
            for kind in topic_details:
                kinds.remove(kind['kind'])
            for kind in kinds:
                topic_details.append({'kind': kind, 'total_progress': 0.0, 'log_count_total': 0, 'log_count_complete': 0})
            return topic_details
        else:
            # progress details for a leaf node (exercise, video, etc.)
            leaf_details = ContentSummaryLog.objects \
                .filter(user=target_user) \
                .filter(content_id=content_node.content_id) \
                .annotate(total_progress=F('progress')) \
                .values('kind', 'time_spent', 'total_progress')
            return leaf_details if leaf_details else [{'kind': content_node.kind, 'time_spent': 0, 'total_progress': 0.0}]

    def get_last_active(self, target_user):
        """Return the most recent end_timestamp for the node, or None."""
        content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])
        try:
            if content_node.kind == content_kinds.TOPIC:
                return ContentSummaryLog.objects \
                    .filter_by_topic(content_node) \
                    .filter(user=target_user) \
                    .latest('end_timestamp').end_timestamp
            else:
                return ContentSummaryLog.objects \
                    .filter(user=target_user) \
                    .get(content_id=content_node.content_id).end_timestamp
        except ContentSummaryLog.DoesNotExist:
            return None
def sum_progress_dicts(total_progress, progress_dict):
    """Reducer: add one progress dict's 'total_progress' to a running sum.

    Dicts without the key contribute 0.0.
    """
    increment = progress_dict.get('total_progress', 0.0)
    return total_progress + increment
def get_progress_and_last_active(target_nodes, **kwargs):
    """Batch-compute progress and last-activity info for content nodes.

    :param target_nodes: a ContentNode queryset/list, or a single ContentNode
    :param kwargs: must contain 'collection_kind' and 'collection_id';
        may contain 'last_active_time' (parseable timestamp string)
    :return: tuple (output_progress_dict, output_last_active_dict), both
        keyed by content_id
    """
    # Prepare dictionaries to output the progress and last active, keyed by content_id
    output_progress_dict = {}
    output_last_active_dict = {}
    # Get a list of all the users that we are querying
    users = list(get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))
    # Get a list of all content ids for all target nodes and their descendants
    content_ids = target_nodes.get_descendants(include_self=True).order_by().values_list("content_id", flat=True)
    # get all summary logs for the current user that correspond to the content nodes and descendant content nodes
    # Filter by users and the content ids
    progress_query = ContentSummaryLog.objects\
        .filter(user__in=users, content_id__in=content_ids)
    # Conditionally filter by last active time
    if kwargs.get('last_active_time'):
        progress_query = progress_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))
    # Get an annotated list of dicts of type:
    # {
    #     'content_id': <content_id>,
    #     'kind': <kind>,
    #     'total_progress': <sum of all progress for this content>,
    #     'log_count_total': <number of summary logs for this content>,
    #     'log_count_complete': <number of complete summary logs for this content>,
    #     'last_active': <most recent end_timestamp for this content>,
    # }
    progress_list = progress_query.values('content_id', 'kind').annotate(
        total_progress=Sum('progress'),
        log_count_total=Count('pk'),
        log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())),
        last_active=Max('end_timestamp'))
    # Evaluate query and make a loop dict of all progress
    progress_dict = {item.get('content_id'): item for item in progress_list}
    if isinstance(target_nodes, ContentNode):
        # Have been passed an individual model
        target_nodes = [target_nodes]
    # Loop through each node to add progress and last active information to the output dicts
    for target_node in target_nodes:
        # In the case of a topic, we need to look at the progress and last active from each of its descendants
        if target_node.kind == content_kinds.TOPIC:
            # Get all the content_ids and kinds of each leaf node as a tuple
            # (about half the size of the dict from 'values' method)
            # Remove topics in generator comprehension, rather than using .exclude as kind is not indexed
            # Use set to remove repeated content
            leaf_nodes = set(node for node in target_node.get_descendants(include_self=False).order_by().values_list(
                'content_id', 'kind') if node[1] != content_kinds.TOPIC)
            # Get a unique set of all non-topic content kinds
            leaf_kinds = sorted(set(leaf_node[1] for leaf_node in leaf_nodes))
            # Create a list of progress summary dicts for each content kind
            progress = [{
                # For total progress sum across all the progress dicts for the descendant content leaf nodes
                'total_progress': reduce(
                    # Reduce with a function that just adds the total_progress of the passed in dict to the accumulator
                    sum_progress_dicts,
                    # Get all dicts of progress for every leaf_id that has some progress recorded
                    # and matches the kind we are aggregating over
                    (progress_dict.get(leaf_node[0]) for leaf_node in leaf_nodes
                        if leaf_node[0] in progress_dict and leaf_node[1] == kind),
                    # Pass in an initial value of total_progress as zero to initialize the reduce
                    0.0,
                ),
                'kind': kind,
                # Count the number of leaf nodes of this particular kind
                'node_count': reduce(lambda x, y: x + int(y[1] == kind), leaf_nodes, 0)
            } for kind in leaf_kinds]
            # Set the output progress for this topic to this list of progress dicts
            output_progress_dict[target_node.content_id] = progress
            # Create a generator of last active times for the leaf_ids
            last_active_times = map(
                # Return the last active time for this leaf_node
                lambda leaf_node: progress_dict[leaf_node[0]]['last_active'],
                filter(
                    # Filter leaf_nodes to those that are in the progress_dict
                    lambda leaf_node: leaf_node[0] in progress_dict,
                    leaf_nodes))
            # Max does not handle empty iterables, so try this
            try:
                # If it is not empty, great!
                output_last_active_dict[target_node.content_id] = max(last_active_times)
            except (ValueError, TypeError):
                # If it is empty, catch the value error and set the last active time to None
                # If they are all none, catch the TypeError and also set to None
                output_last_active_dict[target_node.content_id] = None
        else:
            if target_node.content_id in progress_dict:
                progress = progress_dict.pop(target_node.content_id)
                output_last_active_dict[target_node.content_id] = progress.pop('last_active')
                # return as array for consistency in api
                output_progress_dict[target_node.content_id] = [{
                    'total_progress': progress['total_progress'],
                    'log_count_total': progress['log_count_total'],
                    'log_count_complete': progress['log_count_complete'],
                }]
            elif target_node.content_id not in output_progress_dict:
                # Not in the progress dict, but also not in our output, so supply default values
                output_last_active_dict[target_node.content_id] = None
                output_progress_dict[target_node.content_id] = [{
                    'total_progress': 0.0,
                    'log_count_total': 0,
                    'log_count_complete': 0,
                }]
    return output_progress_dict, output_last_active_dict
class ContentReportListSerializer(serializers.ListSerializer):
    """List serializer that batches progress/last-active lookups for all nodes."""

    def to_representation(self, data):
        if not data:
            return data
        if 'request' not in self.context:
            # BUG FIX: the original initialized only progress_dict here,
            # leaving last_active_dict undefined and raising NameError in the
            # comprehension below.
            progress_dict, last_active_dict = {}, {}
        else:
            kwargs = self.context['view'].kwargs
            progress_dict, last_active_dict = get_progress_and_last_active(data, **kwargs)
        # Dealing with nested relationships, data can be a Manager,
        # so, first get a queryset from the Manager if needed
        iterable = data.all() if isinstance(data, Manager) else data
        return [
            self.child.to_representation(
                item,
                progress=progress_dict.get(item.content_id),
                last_active=last_active_dict.get(item.content_id)) for item in iterable
        ]
class ContentReportSerializer(serializers.ModelSerializer):
    """Serializes a ContentNode plus 'progress' and 'last_active' entries."""

    class Meta:
        model = ContentNode
        fields = (
            'pk', 'content_id', 'title', 'kind',
        )
        # Batch the progress lookups across the whole list instead of per item.
        list_serializer_class = ContentReportListSerializer

    def to_representation(self, instance, progress=None, last_active=None):
        # progress/last_active are pre-computed by the list serializer when
        # serializing many nodes; when absent, compute them for this node only.
        if progress is None:
            if 'request' not in self.context:
                # No request context (e.g. schema generation): zeroed defaults.
                progress = [{'total_progress': 0, 'log_count_total': 0, 'log_count_complete': 0}]
            else:
                kwargs = self.context['view'].kwargs
                progress_dict, last_active_dict = get_progress_and_last_active(instance, **kwargs)
                progress = progress_dict.get(instance.content_id)
                last_active = last_active_dict.get(instance.content_id)
        value = super(ContentReportSerializer, self).to_representation(instance)
        value['progress'] = progress
        value['last_active'] = last_active
        return value
class ContentSummarySerializer(ContentReportSerializer):
    """ContentReportSerializer extended with ancestors and member count."""

    ancestors = serializers.SerializerMethodField()
    num_users = serializers.SerializerMethodField()

    class Meta:
        model = ContentNode
        fields = (
            'pk', 'content_id', 'title', 'kind', 'ancestors', 'num_users',
        )
        list_serializer_class = ContentReportListSerializer

    def get_ancestors(self, target_node):
        """
        in descending order (root ancestor first, immediate parent last)
        """
        return target_node.get_ancestors().values('pk', 'title')

    def get_num_users(self, target_node):
        # Number of members in the collection identified by the view kwargs.
        kwargs = self.context['view'].kwargs
        return get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']).count()
class LessonReportSerializer(serializers.ModelSerializer):
    """
    Annotates a Lesson with a 'progress' array, which maps 1-to-1 with Lesson.resources.
    Each entry in the 'progress' array gives the total number of Learners who have
    been assigned the Lesson and have 'mastered' the Resource.
    """
    progress = serializers.SerializerMethodField()
    total_learners = serializers.SerializerMethodField()

    class Meta:
        model = Lesson
        fields = ('id', 'title', 'progress', 'total_learners',)

    def get_progress(self, instance):
        learners = instance.get_all_learners()
        # BUG FIX: the original compared with "is 0", which relies on
        # CPython's small-int caching; use value equality.
        if learners.count() == 0:
            return []
        return [self._resource_progress(r, learners) for r in instance.resources]

    def get_total_learners(self, instance):
        return instance.get_all_learners().count()

    def _resource_progress(self, resource, learners):
        """Count learners who fully completed *resource* (progress == 1.0)."""
        response = {
            'contentnode_id': resource['contentnode_id'],
            'num_learners_completed': 0,
        }
        completed_content_logs = ContentSummaryLog.objects \
            .filter(
                content_id=resource['content_id'],
                user__in=learners,
                progress=1.0,
            ) \
            .values('content_id') \
            .annotate(total=Count('pk'))
        # If no logs for the Content Item,
        # BUG FIX: same "is 0" identity comparison replaced with "== 0".
        if completed_content_logs.count() == 0:
            return response
        else:
            response['num_learners_completed'] = completed_content_logs[0]['total']
            return response
| {
"repo_name": "christianmemije/kolibri",
"path": "kolibri/plugins/coach/serializers.py",
"copies": "1",
"size": "13613",
"license": "mit",
"hash": -7244049393111873000,
"line_mean": 46.5979020979,
"line_max": 122,
"alpha_frac": 0.6157349592,
"autogenerated": false,
"ratio": 4.246101060511541,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5361836019711541,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from .definition import (
GraphQLInputObjectType,
GraphQLInterfaceType,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLUnionType,
)
from .directives import GraphQLIncludeDirective, GraphQLSkipDirective
from .introspection import IntrospectionSchema
class GraphQLSchema(object):
    """Schema Definition

    A Schema is created by supplying the root types of each type of
    operation, query and mutation (optional). A schema definition is then
    supplied to the validator and executor.

    Example:

        MyAppSchema = GraphQLSchema(
            query=MyAppQueryRootType,
            mutation=MyAppMutationRootType
        )
    """

    def __init__(self, query, mutation=None):
        self.query = query
        self.mutation = mutation
        # Both caches are populated lazily on first access.
        self._type_map = None
        self._directives = None

    def get_query_type(self):
        return self.query

    def get_mutation_type(self):
        return self.mutation

    def get_type_map(self):
        if self._type_map is None:
            self._type_map = self._build_type_map()
        return self._type_map

    def get_type(self, name):
        return self.get_type_map().get(name)

    def get_directives(self):
        if self._directives is None:
            self._directives = [GraphQLIncludeDirective, GraphQLSkipDirective]
        return self._directives

    def get_directive(self, name):
        # First directive with a matching name, or None.
        return next(
            (directive for directive in self.get_directives()
             if directive.name == name),
            None,
        )

    def _build_type_map(self):
        # TODO: make pythonic
        roots = [
            self.get_query_type(),
            self.get_mutation_type(),
            IntrospectionSchema,
        ]
        return reduce(type_map_reducer, roots, {})
def type_map_reducer(map, type):
    """Fold ``type`` (and every type reachable from it) into ``map``.

    Wrapper types are unwrapped; composite types recurse into their
    possible types, interfaces, and field/argument types.
    """
    if not type:
        return map
    # Unwrap list / non-null modifiers down to the inner type.
    if isinstance(type, (GraphQLList, GraphQLNonNull)):
        return type_map_reducer(map, type.of_type)
    if type.name in map:
        assert map[type.name] == type, (
            'Schema must contain unique named types but contains multiple types named "{}".'
            .format(type.name)
        )
        return map
    map[type.name] = type

    result = map
    if isinstance(type, (GraphQLUnionType, GraphQLInterfaceType)):
        result = reduce(type_map_reducer, type.get_possible_types(), result)
    if isinstance(type, GraphQLObjectType):
        result = reduce(type_map_reducer, type.get_interfaces(), result)
    if isinstance(type, (GraphQLObjectType, GraphQLInterfaceType, GraphQLInputObjectType)):
        for field_name, field in type.get_fields().items():
            # Input object fields have no args attribute.
            if hasattr(field, 'args'):
                arg_types = [arg.type for arg in field.args]
                result = reduce(type_map_reducer, arg_types, result)
            result = type_map_reducer(result, getattr(field, 'type', None))
    return result
| {
"repo_name": "dittos/graphqllib",
"path": "graphql/core/type/schema.py",
"copies": "2",
"size": "3152",
"license": "mit",
"hash": 1365237225388880600,
"line_mean": 28.1851851852,
"line_max": 109,
"alpha_frac": 0.6116751269,
"autogenerated": false,
"ratio": 4.141918528252299,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003920469394174777,
"num_lines": 108
} |
from functools import reduce
from .definition import (
GraphQLObjectType,
GraphQLInterfaceType,
GraphQLUnionType,
GraphQLList,
GraphQLNonNull,
)
from .introspection import IntrospectionSchema
from .directives import GraphQLIncludeDirective, GraphQLSkipDirective
class GraphQLSchema(object):
    """Schema Definition

    A Schema is created by supplying the root types of each type of
    operation, query and mutation (optional). A schema definition is then
    supplied to the validator and executor.

    Example:

        MyAppSchema = GraphQLSchema(
            query=MyAppQueryRootType,
            mutation=MyAppMutationRootType
        )
    """

    def __init__(self, query, mutation=None):
        self.query = query
        self.mutation = mutation
        # Lazily-built caches.
        self._type_map = None
        self._directives = None

    def get_query_type(self):
        return self.query

    def get_mutation_type(self):
        return self.mutation

    def get_type_map(self):
        if self._type_map is None:
            self._type_map = self._build_type_map()
        return self._type_map

    def get_type(self, name):
        return self.get_type_map().get(name)

    def get_directives(self):
        if self._directives is None:
            self._directives = [
                GraphQLIncludeDirective,
                GraphQLSkipDirective,
            ]
        return self._directives

    def get_directive(self, name):
        # Linear scan; the directive list is tiny.
        for candidate in self.get_directives():
            if candidate.name == name:
                return candidate
        return None

    def _build_type_map(self):
        # TODO: make pythonic
        root_types = [
            self.get_query_type(),
            self.get_mutation_type(),
            IntrospectionSchema,
        ]
        return reduce(type_map_reducer, root_types, {})
def type_map_reducer(map, type):
    """Collect ``type`` and every type reachable from it into ``map``."""
    # Strip list / non-null wrappers before anything else.
    if isinstance(type, (GraphQLList, GraphQLNonNull)):
        return type_map_reducer(map, type.of_type)
    if not type or type.name in map:
        return map
    map[type.name] = type

    result = map
    if isinstance(type, (GraphQLUnionType, GraphQLInterfaceType)):
        result = reduce(type_map_reducer, type.get_possible_types(), result)
    if isinstance(type, GraphQLObjectType):
        result = reduce(type_map_reducer, type.get_interfaces(), result)
    if isinstance(type, (GraphQLObjectType, GraphQLInterfaceType)):
        for field_name, field in type.get_fields().items():
            arg_types = [arg.type for arg in field.args]
            result = reduce(type_map_reducer, arg_types, result)
            result = type_map_reducer(result, field.type)
    return result
| {
"repo_name": "elastic-coders/graphqllib",
"path": "graphql/core/type/schema.py",
"copies": "2",
"size": "2826",
"license": "mit",
"hash": -5746812159142902000,
"line_mean": 27.8367346939,
"line_max": 109,
"alpha_frac": 0.618895966,
"autogenerated": false,
"ratio": 4.083815028901734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00009276437847866419,
"num_lines": 98
} |
from functools import reduce
from django import forms
from django.contrib.auth import login
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import ValidationError
from django.shortcuts import render, redirect
from django.utils.translation import gettext as _
from django.views import View
from problem.models import ProblemAuthLog, ProblemInstance, ProblemList
from problem.helpers.problem_info import get_problem_list_user_info
from .models import User, Notification
class PlusMemberCheck(UserPassesTestMixin):
    """Restrict a view to approved ("plus") members; others go to /login."""
    # pylint: disable=no-member

    permission_denied_message = "Please wait until the administrator approves your registration."
    login_url = '/login'

    def test_func(self):
        user = self.request.user
        return not user.is_anonymous and user.is_plus_member
class HomeView(PlusMemberCheck, View):
    """Landing page: notifications, recent/first solves, and total score."""

    def get(self, request):
        notifications = Notification.objects.order_by('-datetime')

        # One queryset of correct-auth-key logs per problem instance, plus
        # the earliest ("first blood") solve for each instance that has one.
        per_problem_solves = []
        first_solves = []
        for instance in ProblemInstance.objects.all():
            key = instance.problem.auth_key
            logs = ProblemAuthLog.objects.filter(
                problem_instance=instance, auth_key=key
            ).order_by('-datetime')
            per_problem_solves.append(logs)
            if logs.exists():
                first_solves.append(logs.last())

        user_last_solved = None
        # this is brilliant: it applies OR to all search filters
        recent_solves = reduce(
            lambda acc, qs: acc | qs,
            per_problem_solves,
            ProblemAuthLog.objects.none(),
        ).order_by('-datetime')
        own_solves = recent_solves.filter(user=request.user)
        if own_solves.exists():
            user_last_solved = own_solves.first()
        recent_solves = recent_solves[:10]

        total_score = 0
        for problem_list in ProblemList.objects.all():
            _, list_score = get_problem_list_user_info(problem_list, request.user)
            total_score += list_score

        return render(request, 'index.html', {
            'notifications': notifications,
            'recent_solves': recent_solves,
            'first_solves': first_solves,
            'user_total_score': total_score,
            'user_last_solved': user_last_solved
        })
def validate_unique_username(value):
    """Form validator rejecting usernames that already exist (case-insensitive).

    :raises ValidationError: if a user with this username (ignoring case) exists.
    """
    # exists() lets the database stop at the first match instead of counting
    # every row, which is what the original ``count() > 0`` form did.
    if User.objects.filter(username__iexact=value).exists():
        raise ValidationError(
            _('username already exist')
        )
class RegisterForm(forms.Form):
    """Sign-up form; username must be unique (case-insensitive)."""
    username = forms.CharField(validators=[validate_unique_username])
    password = forms.CharField(min_length=8)
    email = forms.EmailField()
    # Optional external account identifier — presumably a POVIS id; confirm
    # its exact semantics against the User model.
    povis_id = forms.CharField(max_length=20, required=False)
class RegisterView(View):
    """Display the registration form and create + log in new users."""

    def get(self, request):
        return render(request, 'registration/register.html', {
            'form': RegisterForm()
        })

    def post(self, request):
        form = RegisterForm(request.POST)
        if not form.is_valid():
            # Re-render with validation errors attached.
            return render(request, 'registration/register.html', {
                'form': form
            })
        cleaned = form.cleaned_data
        user = User.objects.create_user(
            cleaned['username'],
            email=cleaned['email'],
            password=cleaned['password'],
            povis_id=cleaned['povis_id'],
        )
        login(request, user)
        return redirect('/')
| {
"repo_name": "PLUS-POSTECH/study.plus.or.kr",
"path": "src/website/views.py",
"copies": "1",
"size": "3550",
"license": "apache-2.0",
"hash": -6612703814118607000,
"line_mean": 36.3684210526,
"line_max": 118,
"alpha_frac": 0.6529577465,
"autogenerated": false,
"ratio": 4.006772009029345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005288778553222356,
"num_lines": 95
} |
from functools import reduce
from ...exceptions import SmtlibError
import uuid
import re
import copy
from typing import Union, Type, Optional, Dict, Any
class ExpressionException(SmtlibError):
    """
    Expression exception

    Raised for invalid operations on expressions (e.g. copying a Variable).
    """

    pass
class Expression:
    """Abstract taintable Expression."""

    def __init__(self, taint: Union[tuple, frozenset] = ()):
        # Abstract base: only concrete subclasses may be instantiated.
        if self.__class__ is Expression:
            raise TypeError
        super().__init__()
        self._taint = frozenset(taint)

    def __repr__(self):
        # Tainted expressions are marked with a "-T" suffix.
        marker = "-T" if self.taint else ""
        return "<{:s} at {:x}{:s}>".format(type(self).__name__, id(self), marker)

    @property
    def is_tainted(self):
        return bool(self._taint)

    @property
    def taint(self):
        return self._taint
def issymbolic(value) -> bool:
    """
    Helper to determine whether an object is symbolic (e.g checking
    if data read from memory is symbolic)

    :param object value: object to check
    :return: True iff `value` is an Expression instance
    :rtype: bool
    """
    return isinstance(value, Expression)
def istainted(arg, taint=None):
    """
    Helper to determine whether an object is tainted.

    :param arg: a value or Expression
    :param taint: a regular expression matching a taint value (eg. 'IMPORTANT.*').
        If None, this function checks for any taint value.
    """
    if not issymbolic(arg):
        return False
    if taint is None:
        return bool(arg.taint)
    return any(
        re.match(taint, label, re.DOTALL | re.IGNORECASE)
        for label in arg.taint
    )
def get_taints(arg, taint=None):
    """
    Generator over an object's taint labels.

    :param arg: a value or Expression
    :param taint: a regular expression matching a taint value (eg. 'IMPORTANT.*').
        If None, every taint label is yielded.
    """
    if not issymbolic(arg):
        return
    for label in arg.taint:
        if taint is None or re.match(taint, label, re.DOTALL | re.IGNORECASE):
            yield label
def taint_with(arg, *taints, value_bits=256, index_bits=256):
    """
    Helper to taint a value.

    :param arg: a value or Expression
    :param taints: taint labels to attach to `arg`
    :param value_bits: bit width used when promoting a plain int to a BitVecConstant
    :param index_bits: unused in this body — NOTE(review): confirm whether
        callers rely on it elsewhere
    """
    tainted_fset = frozenset(tuple(taints))
    if not issymbolic(arg):
        if isinstance(arg, int):
            # Promote the concrete int to a constant carrying the taint.
            arg = BitVecConstant(value_bits, arg)
            arg._taint = tainted_fset
        else:
            raise ValueError("type not supported")
    else:
        if isinstance(arg, BitVecVariable):
            # Variables must not be mutated/copied; wrap in a no-op add whose
            # constant operand carries the taint.
            arg = arg + BitVecConstant(value_bits, 0, taint=tainted_fset)
        else:
            # Copy so the original expression's taint is left untouched.
            arg = copy.copy(arg)
            arg._taint |= tainted_fset
    return arg
class Variable(Expression):
    """Abstract named Expression; variables are unique and must not be copied."""

    def __init__(self, name: str, *args, **kwargs):
        # Abstract base: only concrete subclasses may be instantiated.
        if self.__class__ is Variable:
            raise TypeError
        assert " " not in name
        super().__init__(*args, **kwargs)
        self._name = name

    @property
    def name(self):
        return self._name

    @property
    def declaration(self):
        pass

    def __copy__(self, memo):
        raise ExpressionException("Copying of Variables is not allowed.")

    def __deepcopy__(self, memo):
        raise ExpressionException("Copying of Variables is not allowed.")

    def __repr__(self):
        return f"<{type(self).__name__:s}({self.name:s}) at {id(self):x}>"
class Constant(Expression):
    """Abstract literal value wrapped as an Expression."""

    def __init__(self, value: Union[bool, int], *args, **kwargs):
        # Abstract base: only concrete subclasses may be instantiated.
        if self.__class__ is Constant:
            raise TypeError
        super().__init__(*args, **kwargs)
        self._value = value

    @property
    def value(self):
        return self._value
class Operation(Expression):
    """Abstract n-ary operation over Expressions.

    Unless a taint is forced via the ``taint`` keyword, the operation's
    taint defaults to the union of its operands' taints.
    """

    def __init__(self, *operands, **kwargs):
        if self.__class__ is Operation:
            raise TypeError
        self._operands = operands
        # If taint was not forced by a keyword argument, calculate default
        if "taint" not in kwargs:
            combined = frozenset()
            for operand in operands:
                combined = combined.union(operand.taint)
            kwargs["taint"] = combined
        super().__init__(**kwargs)

    @property
    def operands(self):
        return self._operands
###############################################################################
# Booleans
class Bool(Expression):
    """Abstract Boolean expression.

    Operator overloads do not evaluate anything: each builds and returns a
    new symbolic AST node (BoolAnd, BoolOr, ...), with non-Bool operands
    promoted via ``cast``.
    """

    def __init__(self, *operands, **kwargs):
        super().__init__(*operands, **kwargs)

    def cast(self, value: Union[int, bool], **kwargs) -> Union["BoolConstant", "Bool"]:
        # Promote a native value to a BoolConstant; Bool instances pass through.
        if isinstance(value, Bool):
            return value
        return BoolConstant(bool(value), **kwargs)

    def __cmp__(self, *args):
        raise NotImplementedError("CMP for Bool")

    def __invert__(self):
        return BoolNot(self)

    def __eq__(self, other):
        # NOTE: returns a symbolic BoolEqual node, not a native bool.
        return BoolEqual(self, self.cast(other))

    def __hash__(self):
        # Identity hash, since __eq__ is symbolic rather than a true equivalence.
        return object.__hash__(self)

    def __ne__(self, other):
        return BoolNot(self == self.cast(other))

    def __and__(self, other):
        return BoolAnd(self, self.cast(other))

    def __or__(self, other):
        return BoolOr(self, self.cast(other))

    def __xor__(self, other):
        return BoolXor(self, self.cast(other))

    def __rand__(self, other):
        return BoolAnd(self.cast(other), self)

    def __ror__(self, other):
        return BoolOr(self.cast(other), self)

    def __rxor__(self, other):
        return BoolXor(self.cast(other), self)

    def __bool__(self):
        # Concrete truthiness is undefined for a symbolic Bool.
        raise NotImplementedError("__bool__ for Bool")
class BoolVariable(Bool, Variable):
    """A free Boolean symbol."""

    def __init__(self, name, *args, **kwargs):
        super().__init__(name, *args, **kwargs)

    @property
    def declaration(self):
        # SMT-LIBv2 declaration for this symbol.
        return f"(declare-fun {self.name} () Bool)"
class BoolConstant(Bool, Constant):
    """A concrete True/False value."""

    def __init__(self, value: bool, *args, **kwargs):
        super().__init__(value, *args, **kwargs)

    def __bool__(self):
        # Unlike the abstract Bool, a constant collapses to a native bool.
        return self.value
class BoolOperation(Operation, Bool):
    """Base for Boolean-valued operation nodes."""

    def __init__(self, *operands, **kwargs):
        super().__init__(*operands, **kwargs)
class BoolNot(BoolOperation):
    """Logical negation node."""

    def __init__(self, value, **kwargs):
        super().__init__(value, **kwargs)
class BoolAnd(BoolOperation):
    """Logical conjunction node."""

    def __init__(self, a, b, **kwargs):
        super().__init__(a, b, **kwargs)
class BoolOr(BoolOperation):
    """Logical disjunction node."""

    def __init__(self, a: "Bool", b: "Bool", **kwargs):
        super().__init__(a, b, **kwargs)
class BoolXor(BoolOperation):
    """Logical exclusive-or node."""

    def __init__(self, a, b, **kwargs):
        super().__init__(a, b, **kwargs)
class BoolITE(BoolOperation):
    """If-then-else node over Boolean branches."""

    def __init__(self, cond: "Bool", true: "Bool", false: "Bool", **kwargs):
        super().__init__(cond, true, false, **kwargs)
class BitVec(Expression):
    """A fixed-width bitvector expression.

    Adds a bit ``size`` to Expression and overloads arithmetic, bitwise and
    comparison operators so that ordinary Python expressions over BitVecs
    build symbolic AST nodes.
    """

    def __init__(self, size, *operands, **kwargs):
        super().__init__(*operands, **kwargs)
        # Width of this bitvector, in bits.
        self.size = size

    @property
    def mask(self):
        # All-ones value of this width, e.g. 0xff for size 8.
        return (1 << self.size) - 1

    @property
    def signmask(self):
        # Bit pattern with only the sign (top) bit set.
        return 1 << (self.size - 1)

    def cast(
        self, value: Union["BitVec", str, int, bytes], **kwargs
    ) -> Union["BitVecConstant", "BitVec"]:
        """Coerce *value* to a BitVec of this width.

        BitVecs pass through (width-checked); single-char str/bytes become
        their ordinal; anything else is converted through int().
        """
        if isinstance(value, BitVec):
            assert value.size == self.size
            return value
        if isinstance(value, (str, bytes)) and len(value) == 1:
            value = ord(value)
        # Try to support not Integral types that can be casted to int
        if not isinstance(value, int):
            value = int(value)
        # FIXME? Assert it fits in the representation
        return BitVecConstant(self.size, value, **kwargs)

    def __add__(self, other):
        return BitVecAdd(self, self.cast(other))

    def __sub__(self, other):
        return BitVecSub(self, self.cast(other))

    def __mul__(self, other):
        return BitVecMul(self, self.cast(other))

    def __mod__(self, other):
        return BitVecMod(self, self.cast(other))

    # object.__divmod__(self, other)
    # object.__pow__(self, other[, modulo])
    def __lshift__(self, other):
        return BitVecShiftLeft(self, self.cast(other))

    def __rshift__(self, other):
        return BitVecShiftRight(self, self.cast(other))

    def __and__(self, other):
        return BitVecAnd(self, self.cast(other))

    def __xor__(self, other):
        return BitVecXor(self, self.cast(other))

    def __or__(self, other):
        return BitVecOr(self, self.cast(other))

    # NOTE: __div__/__rdiv__ are Python-2 hooks; on Python 3 only
    # __truediv__/__rtruediv__ are invoked by the / operator. Both are kept
    # for callers that may invoke them directly. Division maps to the signed
    # BitVecDiv node.
    def __div__(self, other):
        return BitVecDiv(self, self.cast(other))

    def __truediv__(self, other):
        return BitVecDiv(self, self.cast(other))

    def __floordiv__(self, other):
        # Integer bitvector semantics: floor division is plain division.
        return self / other

    # Reflected (swapped-operand) forms: called when the left operand does
    # not support the operation, e.g. 1 + bv.
    def __radd__(self, other):
        return BitVecAdd(self.cast(other), self)

    def __rsub__(self, other):
        return BitVecSub(self.cast(other), self)

    def __rmul__(self, other):
        return BitVecMul(self.cast(other), self)

    def __rmod__(self, other):
        return BitVecMod(self.cast(other), self)

    def __rtruediv__(self, other):
        return BitVecDiv(self.cast(other), self)

    def __rdiv__(self, other):
        return BitVecDiv(self.cast(other), self)

    # object.__rdivmod__(self, other)
    # object.__rpow__(self, other)
    def __rlshift__(self, other):
        return BitVecShiftLeft(self.cast(other), self)

    def __rrshift__(self, other):
        return BitVecShiftRight(self.cast(other), self)

    def __rand__(self, other):
        return BitVecAnd(self.cast(other), self)

    def __rxor__(self, other):
        return BitVecXor(self.cast(other), self)

    def __ror__(self, other):
        return BitVecOr(self.cast(other), self)

    def __invert__(self):
        # Bitwise NOT expressed as XOR with the all-ones mask.
        return BitVecXor(self, self.cast(self.mask))

    # Rich comparisons build symbolic Bool nodes; default signedness is
    # signed (see the u* methods below for unsigned variants).
    def __lt__(self, other):
        return LessThan(self, self.cast(other))

    def __le__(self, other):
        return LessOrEqual(self, self.cast(other))

    def __eq__(self, other):
        return BoolEqual(self, self.cast(other))

    def __hash__(self):
        # __eq__ returns an AST node, so identity hashing keeps BitVecs
        # usable in dicts/sets.
        return object.__hash__(self)

    def __ne__(self, other):
        return BoolNot(BoolEqual(self, self.cast(other)))

    def __gt__(self, other):
        return GreaterThan(self, self.cast(other))

    def __ge__(self, other):
        return GreaterOrEqual(self, self.cast(other))

    def __neg__(self):
        return BitVecNeg(self)

    # Unsigned comparisons
    def ugt(self, other):
        return UnsignedGreaterThan(self, self.cast(other))

    def uge(self, other):
        return UnsignedGreaterOrEqual(self, self.cast(other))

    def ult(self, other):
        return UnsignedLessThan(self, self.cast(other))

    def ule(self, other):
        return UnsignedLessOrEqual(self, self.cast(other))

    def udiv(self, other):
        return BitVecUnsignedDiv(self, self.cast(other))

    def rudiv(self, other):
        # Reflected unsigned division: other / self.
        return BitVecUnsignedDiv(self.cast(other), self)

    def sdiv(self, other):
        return BitVecDiv(self, self.cast(other))

    def rsdiv(self, other):
        return BitVecDiv(self.cast(other), self)

    def srem(self, other):
        return BitVecRem(self, self.cast(other))

    def rsrem(self, other):
        return BitVecRem(self.cast(other), self)

    def urem(self, other):
        return BitVecUnsignedRem(self, self.cast(other))

    def rurem(self, other):
        return BitVecUnsignedRem(self.cast(other), self)

    def sar(self, other):
        # Arithmetic (sign-preserving) shift right.
        return BitVecArithmeticShiftRight(self, self.cast(other))

    def sal(self, other):
        return BitVecArithmeticShiftLeft(self, self.cast(other))

    def Bool(self):
        # Truthiness as an expression: nonzero test.
        return self != 0
class BitVecVariable(BitVec, Variable):
    """A named free bitvector variable."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @property
    def declaration(self):
        # SMT-LIB declaration for this variable.
        return f"(declare-fun {self.name} () (_ BitVec {self.size}))"
class BitVecConstant(BitVec, Constant):
    """A concrete bitvector value of a fixed width."""

    def __init__(self, size: int, value: int, *args, **kwargs):
        super().__init__(size, value, *args, **kwargs)

    def __bool__(self):
        return self.value != 0

    def __eq__(self, other):
        # Tainted constants keep symbolic equality (an AST node); untainted
        # ones compare their concrete value directly.
        if self.taint:
            return super().__eq__(other)
        return self.value == other

    def __hash__(self):
        return super().__hash__()
class BitVecOperation(BitVec, Operation):
    """Base class for bitvector AST operation nodes; carries the result width."""

    def __init__(self, size, *operands, **kwargs):
        super().__init__(size, *operands, **kwargs)


# Binary arithmetic/logic nodes. Each takes two operands and inherits the
# result width from its first operand `a`.
class BitVecAdd(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecSub(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecMul(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecDiv(BitVecOperation):
    # Signed division.
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecUnsignedDiv(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecMod(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecRem(BitVecOperation):
    # Signed remainder.
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecUnsignedRem(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecShiftLeft(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecShiftRight(BitVecOperation):
    # Logical (zero-fill) shift right.
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecArithmeticShiftLeft(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecArithmeticShiftRight(BitVecOperation):
    # Sign-preserving shift right.
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecAnd(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecOr(BitVecOperation):
    def __init__(self, a: BitVec, b: BitVec, *args, **kwargs):
        # NOTE(review): only this node checks operand widths; the sibling
        # nodes above assume callers pass equal-width operands.
        assert a.size == b.size
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecXor(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecNot(BitVecOperation):
    # Bitwise complement of a single operand.
    def __init__(self, a, **kwargs):
        super().__init__(a.size, a, **kwargs)


class BitVecNeg(BitVecOperation):
    # Two's-complement negation of a single operand.
    def __init__(self, a, *args, **kwargs):
        super().__init__(a.size, a, *args, **kwargs)
# Comparing two bitvectors results in a Bool
# Comparing two bitvectors results in a Bool. All comparison nodes now check
# operand widths up front (previously LessThan/LessOrEqual skipped the check
# and UnsignedLessThan checked only after building the node).
class LessThan(BoolOperation):
    """Signed a < b over two equal-width bitvectors."""

    def __init__(self, a, b, *args, **kwargs):
        assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class LessOrEqual(BoolOperation):
    """Signed a <= b over two equal-width bitvectors."""

    def __init__(self, a, b, *args, **kwargs):
        assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class BoolEqual(BoolOperation):
    """a == b; operands may be Bools, or BitVecs of equal width."""

    def __init__(self, a, b, *args, **kwargs):
        if isinstance(a, BitVec) or isinstance(b, BitVec):
            assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class GreaterThan(BoolOperation):
    """Signed a > b over two equal-width bitvectors."""

    def __init__(self, a, b, *args, **kwargs):
        assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class GreaterOrEqual(BoolOperation):
    """Signed a >= b over two equal-width bitvectors."""

    def __init__(self, a, b, *args, **kwargs):
        assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class UnsignedLessThan(BoolOperation):
    """Unsigned a < b over two equal-width bitvectors."""

    def __init__(self, a, b, *args, **kwargs):
        # Validate before constructing the node (was asserted after super()).
        assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class UnsignedLessOrEqual(BoolOperation):
    """Unsigned a <= b over two equal-width bitvectors."""

    def __init__(self, a, b, *args, **kwargs):
        assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class UnsignedGreaterThan(BoolOperation):
    """Unsigned a > b over two equal-width bitvectors."""

    def __init__(self, a, b, *args, **kwargs):
        assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class UnsignedGreaterOrEqual(BoolOperation):
    """Unsigned a >= b over two equal-width bitvectors."""

    def __init__(self, a, b, *args, **kwargs):
        assert a.size == b.size
        # Modern zero-argument super() (was the legacy two-argument form).
        super().__init__(a, b, *args, **kwargs)
###############################################################################
# Array BV32 -> BV8 or BV64 -> BV8
class Array(Expression):
    """Symbolic array mapping fixed-width bitvector indexes to bitvector
    values (e.g. BV32 -> BV8 or BV64 -> BV8). Abstract base class.
    """

    def __init__(
        self, index_bits: int, index_max: Optional[int], value_bits: int, *operands, **kwargs
    ):
        assert index_bits in (32, 64, 256)
        assert value_bits in (8, 16, 32, 64, 256)
        assert index_max is None or index_max >= 0 and index_max < 2 ** index_bits
        self._index_bits = index_bits
        self._index_max = index_max
        self._value_bits = value_bits
        super().__init__(*operands, **kwargs)
        assert type(self) is not Array, "Abstract class"

    def _get_size(self, index):
        """Return the concrete length of the slice *index*."""
        start, stop = self._fix_index(index)
        size = stop - start
        if isinstance(size, BitVec):
            from .visitors import simplify

            size = simplify(size)
        else:
            size = BitVecConstant(self.index_bits, size)
        assert isinstance(size, BitVecConstant)
        return size.value

    def _fix_index(self, index):
        """Normalize a slice: missing start -> 0, missing stop -> len(self).

        :param slice index:
        """
        stop, start = index.stop, index.start
        if start is None:
            start = 0
        if stop is None:
            stop = len(self)
        return start, stop

    def cast(self, possible_array):
        """Coerce a bytearray to an Array by storing each byte."""
        if isinstance(possible_array, bytearray):
            # FIXME This should be related to a constrainSet
            arr = ArrayVariable(self.index_bits, len(possible_array), 8)
            for pos, byte in enumerate(possible_array):
                arr = arr.store(pos, byte)
            return arr
        raise ValueError  # cast not implemented

    def cast_index(self, index: Union[int, "BitVec"]) -> Union["BitVecConstant", "BitVec"]:
        """Coerce *index* to a BitVec of this array's index width."""
        if isinstance(index, int):
            # assert self.index_max is None or index >= 0 and index < self.index_max
            return BitVecConstant(self.index_bits, index)
        assert index.size == self.index_bits
        return index

    def cast_value(
        self, value: Union["BitVec", str, bytes, int]
    ) -> Union["BitVecConstant", "BitVec"]:
        """Coerce *value* to a BitVec of this array's value width."""
        if isinstance(value, BitVec):
            assert value.size == self.value_bits
            return value
        if isinstance(value, (str, bytes)) and len(value) == 1:
            value = ord(value)
        if not isinstance(value, int):
            value = int(value)
        return BitVecConstant(self.value_bits, value)

    def __len__(self):
        if self.index_max is None:
            raise ExpressionException("Array max index not set")
        return self.index_max

    @property
    def index_bits(self):
        return self._index_bits

    @property
    def value_bits(self):
        return self._value_bits

    @property
    def index_max(self):
        return self._index_max

    def select(self, index):
        """Symbolic read of one element."""
        index = self.cast_index(index)
        return ArraySelect(self, index)

    def store(self, index, value):
        """Return a new array with *value* stored at *index*."""
        return ArrayStore(self, self.cast_index(index), self.cast_value(value))

    def write(self, offset, buf):
        """Store every element of *buf* starting at *offset*; returns the new array."""
        if not isinstance(buf, (Array, bytearray)):
            # Fixed: the original used "{:s}".format(type(buf)), which itself
            # raises TypeError (types don't accept the 's' format spec) and so
            # masked the intended error message.
            raise TypeError("Array or bytearray expected got {}".format(type(buf)))
        arr = self
        for i, val in enumerate(buf):
            arr = arr.store(offset + i, val)
        return arr

    def read(self, offset, size):
        """Return a view of *size* elements starting at *offset*."""
        return ArraySlice(self, offset, size)

    def __getitem__(self, index):
        if isinstance(index, slice):
            start, stop = self._fix_index(index)
            size = self._get_size(index)
            return ArraySlice(self, start, size)
        else:
            if self.index_max is not None:
                if not isinstance(index, Expression) and index >= self.index_max:
                    raise IndexError
            return self.select(self.cast_index(index))

    def __eq__(self, other):
        # FIXME taint
        def compare_buffers(a, b):
            if len(a) != len(b):
                return BoolConstant(False)
            cond = BoolConstant(True)
            for i in range(len(a)):
                cond = BoolAnd(cond.cast(a[i] == b[i]), cond)
                # Fixed: was `cond is BoolConstant(False)`, an identity
                # comparison against a freshly built object — always False,
                # so the early exit never fired.
                if isinstance(cond, BoolConstant) and not cond.value:
                    return BoolConstant(False)
            return cond

        return compare_buffers(self, other)

    def __ne__(self, other):
        return BoolNot(self == other)

    def __hash__(self):
        return super().__hash__()

    @property
    def underlying_variable(self):
        # Walk the store chain down to the root ArrayVariable.
        array = self
        while not isinstance(array, ArrayVariable):
            array = array.array
        return array

    def read_BE(self, address, size):
        """Read *size* elements at *address*, concatenated big-endian.

        NOTE(review): relies on self.get, which is defined on ArrayProxy —
        presumably only called through a proxy; verify callers.
        """
        # Cast the address for consistency with read_LE.
        address = self.cast_index(address)
        bytes = []
        for offset in range(size):
            bytes.append(self.get(address + offset, 0))
        return BitVecConcat(size * self.value_bits, *bytes)

    def read_LE(self, address, size):
        """Read *size* elements at *address*, concatenated little-endian."""
        address = self.cast_index(address)
        bytes = []
        for offset in range(size):
            bytes.append(self.get(address + offset, 0))
        return BitVecConcat(size * self.value_bits, *reversed(bytes))

    def write_BE(self, address, value, size):
        """Split *value* into *size* elements and store them big-endian."""
        address = self.cast_index(address)
        value = BitVec(size * self.value_bits).cast(value)
        array = self
        for offset in range(size):
            array = array.store(
                address + offset,
                BitVecExtract(value, (size - 1 - offset) * self.value_bits, self.value_bits),
            )
        return array

    def write_LE(self, address, value, size):
        """Split *value* into *size* elements and store them little-endian."""
        address = self.cast_index(address)
        value = BitVec(size * self.value_bits).cast(value)
        array = self
        for offset in reversed(range(size)):
            array = array.store(
                address + offset,
                BitVecExtract(value, (size - 1 - offset) * self.value_bits, self.value_bits),
            )
        return array

    def __add__(self, other):
        # Accept bytes too, matching __radd__ (previously only Array/bytearray).
        if not isinstance(other, (Array, bytearray, bytes)):
            raise TypeError("can't concat Array to {}".format(type(other)))
        if isinstance(other, Array):
            if self.index_bits != other.index_bits or self.value_bits != other.value_bits:
                raise ValueError("Array sizes do not match for concatenation")
        from .visitors import simplify

        # FIXME This should be related to a constrainSet
        new_arr = ArrayProxy(
            ArrayVariable(
                self.index_bits,
                self.index_max + len(other),
                self.value_bits,
                "concatenation{}".format(uuid.uuid1()),
            )
        )
        for index in range(self.index_max):
            new_arr[index] = simplify(self[index])
        for index in range(len(other)):
            new_arr[index + self.index_max] = simplify(other[index])
        return new_arr

    def __radd__(self, other):
        if not isinstance(other, (Array, bytearray, bytes)):
            raise TypeError("can't concat Array to {}".format(type(other)))
        if isinstance(other, Array):
            if self.index_bits != other.index_bits or self.value_bits != other.value_bits:
                raise ValueError("Array sizes do not match for concatenation")
        from .visitors import simplify

        # FIXME This should be related to a constrainSet
        new_arr = ArrayProxy(
            ArrayVariable(
                self.index_bits,
                self.index_max + len(other),
                self.value_bits,
                "concatenation{}".format(uuid.uuid1()),
            )
        )
        for index in range(len(other)):
            new_arr[index] = simplify(other[index])
        # Preserve the concrete cache built while copying the prefix.
        _concrete_cache = new_arr._concrete_cache
        for index in range(self.index_max):
            new_arr[index + len(other)] = simplify(self[index])
        new_arr._concrete_cache.update(_concrete_cache)
        return new_arr
class ArrayVariable(Array, Variable):
    """A named free array variable (the root of every store chain)."""

    def __init__(self, index_bits, index_max, value_bits, name, *operands, **kwargs):
        super().__init__(index_bits, index_max, value_bits, name, **kwargs)

    @property
    def declaration(self):
        # SMT-LIB declaration for this array variable.
        return f"(declare-fun {self.name} () (Array (_ BitVec {self.index_bits}) (_ BitVec {self.value_bits})))"
class ArrayOperation(Array, Operation):
    """Base class for array AST operation nodes; inherits geometry from *array*."""

    def __init__(self, array: Array, *operands, **kwargs):
        super().__init__(
            array.index_bits, array.index_max, array.value_bits, array, *operands, **kwargs
        )
class ArrayStore(ArrayOperation):
    """Functional store node: *array* with *value* written at *index*."""

    def __init__(self, array: "Array", index: "BitVec", value: "BitVec", *args, **kwargs):
        assert index.size == array.index_bits
        assert value.size == array.value_bits
        super().__init__(array, index, value, *args, **kwargs)

    @property
    def array(self):
        # The array being written to (operand 0).
        return self.operands[0]

    @property
    def name(self):
        return self.operands[0].name

    @property
    def index(self):
        return self.operands[1]

    @property
    def value(self):
        return self.operands[2]
class ArraySlice(Array):
    """A shifted, bounded view over another Array.

    Selects/stores are translated by ``_slice_offset``; the slice itself
    reports ``_slice_size`` as its index_max.
    """

    def __init__(
        self, array: Union["Array", "ArrayProxy"], offset: int, size: int, *args, **kwargs
    ):
        if not isinstance(array, Array):
            raise ValueError("Array expected")
        if isinstance(array, ArrayProxy):
            # Unwrap the proxy; slices view the raw array.
            array = array._array
        super().__init__(array.index_bits, array.index_max, array.value_bits, *args, **kwargs)
        self._array = array
        self._slice_offset = offset
        self._slice_size = size

    @property
    def underlying_variable(self):
        return self._array.underlying_variable

    @property
    def operands(self):
        return self._array.operands

    @property
    def index_bits(self):
        return self._array.index_bits

    @property
    def index_max(self):
        # The view's length, not the underlying array's.
        return self._slice_size

    @property
    def value_bits(self):
        return self._array.value_bits

    @property
    def taint(self):
        return self._array.taint

    def select(self, index):
        # Translate into the underlying array's index space.
        return self._array.select(index + self._slice_offset)

    def store(self, index, value):
        # Store through to the underlying array, keeping the same view window.
        return ArraySlice(
            self._array.store(index + self._slice_offset, value),
            self._slice_offset,
            self._slice_size,
        )
class ArrayProxy(Array):
    """Mutable wrapper over a functional Array.

    Maintains a cache of concretely-indexed writes (``_concrete_cache``), a
    lazily computed set of written indexes (``written``) and an optional
    ``default`` value returned for never-written cells.
    """

    def __init__(self, array: Array, default: Optional[int] = None):
        self._default = default
        self._concrete_cache: Dict[int, int] = {}
        self._written = None
        if isinstance(array, ArrayProxy):
            # copy constructor
            super().__init__(array.index_bits, array.index_max, array.value_bits)
            self._array: Array = array._array
            self._name: str = array._name
            if default is None:
                self._default = array._default
            self._concrete_cache = dict(array._concrete_cache)
            self._written = set(array.written)
        elif isinstance(array, ArrayVariable):
            # fresh array proxy
            super().__init__(array.index_bits, array.index_max, array.value_bits)
            self._array = array
            self._name = array.name
        else:
            # arrayproxy for a prepopulated array
            super().__init__(array.index_bits, array.index_max, array.value_bits)
            self._name = array.underlying_variable.name
            self._array = array

    @property
    def underlying_variable(self):
        return self._array.underlying_variable

    @property
    def array(self):
        return self._array

    @property
    def name(self):
        return self._name

    @property
    def operands(self):
        return self._array.operands

    @property
    def index_bits(self):
        return self._array.index_bits

    @property
    def index_max(self):
        return self._array.index_max

    @property
    def value_bits(self):
        return self._array.value_bits

    @property
    def taint(self):
        return self._array.taint

    def select(self, index):
        # Reads go through get() so the cache and default apply.
        return self.get(index)

    def store(self, index, value):
        """Mutating store: updates the wrapped array in place and returns self."""
        if not isinstance(index, Expression):
            index = self.cast_index(index)
        if not isinstance(value, Expression):
            value = self.cast_value(value)
        from .visitors import simplify

        index = simplify(index)
        if isinstance(index, Constant):
            self._concrete_cache[index.value] = value
        else:
            # delete all cache as we do not know what this may overwrite.
            self._concrete_cache = {}
        # potentially generate and update .written set
        self.written.add(index)
        self._array = self._array.store(index, value)
        return self

    def __getitem__(self, index):
        if isinstance(index, slice):
            start, stop = self._fix_index(index)
            size = self._get_size(index)
            array_proxy_slice = ArrayProxy(ArraySlice(self, start, size), default=self._default)
            array_proxy_slice._concrete_cache = {}
            for k, v in self._concrete_cache.items():
                if k >= start and k < start + size:
                    array_proxy_slice._concrete_cache[k - start] = v
            # NOTE(review): every written index is rebased into the slice,
            # including ones outside [start, start+size) — confirm intended.
            for i in self.written:
                array_proxy_slice.written.add(i - start)
            return array_proxy_slice
        else:
            if self.index_max is not None:
                if not isinstance(index, Expression) and index >= self.index_max:
                    raise IndexError
            return self.get(index, self._default)

    def __setitem__(self, index, value):
        if isinstance(index, slice):
            start, stop = self._fix_index(index)
            size = self._get_size(index)
            assert len(value) == size
            for i in range(size):
                self.store(start + i, value[i])
        else:
            self.store(index, value)

    def __getstate__(self):
        # Pickle only the proxy's bookkeeping, not derived state.
        state = {}
        state["_default"] = self._default
        state["_array"] = self._array
        state["name"] = self.name
        state["_concrete_cache"] = self._concrete_cache
        state["_written"] = self._written
        return state

    def __setstate__(self, state):
        self._default = state["_default"]
        self._array = state["_array"]
        self._name = state["name"]
        self._concrete_cache = state["_concrete_cache"]
        self._written = state["_written"]

    def __copy__(self):
        return ArrayProxy(self)

    @property
    def written(self):
        """Set of indexes (possibly symbolic) written through this proxy."""
        # Calculate only first time
        if self._written is None:
            written = set()
            # take out Proxy sleve
            array = self._array
            offset = 0
            while isinstance(array, ArraySlice):
                # if it is a proxy over a slice take out the slice too
                offset += array._slice_offset
                array = array._array
            while not isinstance(array, ArrayVariable):
                # The index written to underlaying Array are displaced when sliced
                written.add(array.index - offset)
                array = array.array
            assert isinstance(array, ArrayVariable)
            self._written = written
        return self._written

    def is_known(self, index):
        """Bool expression: has *index* ever been written?"""
        if isinstance(index, Constant) and index.value in self._concrete_cache:
            return BoolConstant(True)
        is_known_index = BoolConstant(False)
        written = self.written
        for known_index in written:
            if isinstance(index, Constant) and isinstance(known_index, Constant):
                if known_index.value == index.value:
                    return BoolConstant(True)
            is_known_index = BoolOr(is_known_index.cast(index == known_index), is_known_index)
        return is_known_index

    def get(self, index, default=None):
        """Read *index*; unwritten cells yield *default* (or the proxy default)."""
        if default is None:
            default = self._default
        index = self.cast_index(index)
        if self.index_max is not None:
            from .visitors import simplify

            # Normalize negative indexes relative to the array length.
            index = simplify(
                BitVecITE(self.index_bits, index < 0, self.index_max + index + 1, index)
            )
        if isinstance(index, Constant) and index.value in self._concrete_cache:
            return self._concrete_cache[index.value]
        value = self._array.select(index)
        if default is None:
            # No default configured: plain symbolic select.
            return value
        is_known = self.is_known(index)
        default = self.cast_value(default)
        return BitVecITE(self._array.value_bits, is_known, value, default)
class ArraySelect(BitVec, Operation):
    """Symbolic read node: yields a BitVec of the array's value width."""

    def __init__(self, array: "Array", index: "BitVec", *args, **kwargs):
        assert index.size == array.index_bits
        super().__init__(array.value_bits, array, index, *args, **kwargs)

    @property
    def array(self):
        return self.operands[0]

    @property
    def index(self):
        return self.operands[1]

    def __repr__(self):
        return f"<ArraySelect obj with index={self.index}:\n{self.array}>"
class BitVecSignExtend(BitVecOperation):
    """Sign-extend *operand* to *size_dest* bits.

    NOTE(review): parameter order here is (operand, size_dest) while
    BitVecZeroExtend takes (size_dest, operand) — inconsistent, but fixing it
    would break existing callers.
    """

    def __init__(self, operand: "BitVec", size_dest: int, *args, **kwargs):
        assert size_dest >= operand.size
        super().__init__(size_dest, operand, *args, **kwargs)
        # Number of bits added.
        self.extend = size_dest - operand.size


class BitVecZeroExtend(BitVecOperation):
    """Zero-extend *operand* to *size_dest* bits."""

    def __init__(self, size_dest: int, operand: "BitVec", *args, **kwargs):
        assert size_dest >= operand.size
        super().__init__(size_dest, operand, *args, **kwargs)
        self.extend = size_dest - operand.size
class BitVecExtract(BitVecOperation):
    """Extract *size* bits of *operand* starting at bit *offset*."""

    def __init__(self, operand: "BitVec", offset: int, size: int, *args, **kwargs):
        assert offset >= 0 and offset + size <= operand.size
        super().__init__(size, operand, *args, **kwargs)
        # Inclusive bit range [begining, end]. ("begining" spelling is part of
        # the public API via the property below.)
        self._begining = offset
        self._end = offset + size - 1

    @property
    def value(self):
        return self.operands[0]

    @property
    def begining(self):
        return self._begining

    @property
    def end(self):
        return self._end
class BitVecConcat(BitVecOperation):
    """Concatenate bitvector operands; widths must sum to *size_dest*."""

    def __init__(self, size_dest: int, *operands, **kwargs):
        assert all(isinstance(x, BitVec) for x in operands)
        assert size_dest == sum(x.size for x in operands)
        super().__init__(size_dest, *operands, **kwargs)
class BitVecITE(BitVecOperation):
    """If-then-else over bitvectors: condition ? true_value : false_value."""

    def __init__(
        self,
        size: int,
        condition: Union["Bool", bool],
        true_value: "BitVec",
        false_value: "BitVec",
        *args,
        **kwargs,
    ):
        # Both branches must match the declared result width.
        assert true_value.size == size
        assert false_value.size == size
        super().__init__(size, condition, true_value, false_value, *args, **kwargs)
| {
"repo_name": "montyly/manticore",
"path": "manticore/core/smtlib/expression.py",
"copies": "1",
"size": "36789",
"license": "apache-2.0",
"hash": 5668414571755638000,
"line_mean": 29.5556478405,
"line_max": 133,
"alpha_frac": 0.5786512273,
"autogenerated": false,
"ratio": 3.8425945268435346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9919703849652517,
"avg_score": 0.0003083808982036364,
"num_lines": 1204
} |
from functools import reduce
from GF2 import *
# version code 75eb0ae74c69
coursera = 1
# Please fill out this stencil and submit using the provided submission script.
## 1: (Problem 1) Python Comprehensions: Filtering
def myFilter(L, num):
    '''
    Input:
        -L: a list of numbers
        -num: a positive integer
    Output:
        -a list of numbers not containing a multiple of num
    Examples:
        >>> myFilter([1,2,4,5,7],2)
        [1, 5, 7]
        >>> myFilter([10,15,20,25],10)
        [15, 25]
    '''
    # Keep every element whose remainder modulo num is nonzero.
    return list(filter(lambda elem: elem % num != 0, L))
## 2: (Problem 2) Python Comprehensions: Lists of Lists
def my_lists(L):
    '''
    For each nonnegative integer x in L, build the list [1, 2, ..., x]
    (empty when x is 0).

    >>> my_lists([1,2,4])
    [[1], [1, 2], [1, 2, 3, 4]]
    >>> my_lists([0,3])
    [[], [1, 2, 3]]
    '''
    return [[n for n in range(1, upper + 1)] for upper in L]
## 3: (Problem 3) Python Comprehensions: Function Composition
def myFunctionComposition(f, g):
    '''
    Input:
        -f: a function represented as a dictionary such that g of f exists
        -g: a function represented as a dictionary such that g of f exists
    Output:
        -a dictionary that represents a function g of f
    Examples:
        >>> f = {0:'a',1:'b'}
        >>> g = {'a':'apple','b':'banana'}
        >>> myFunctionComposition(f,g) == {0:'apple',1:'banana'}
        True
        >>> a = {'x':24,'y':25}
        >>> b = {24:'twentyfour',25:'twentyfive'}
        >>> myFunctionComposition(a,b) == {'x':'twentyfour','y':'twentyfive'}
        True
    '''
    # Map each domain element of f straight through to g's codomain.
    return {domain_elem: g[mid] for domain_elem, mid in f.items()}
## 4: (Problem 4) Summing numbers in a list
def mySum(L):
    '''
    Input:
        a list L of numbers
    Output:
        sum of the numbers in L
    Be sure your procedure works for the empty list.
    Examples:
        >>> mySum([1,2,3,4])
        10
        >>> mySum([3,5,10])
        18
    '''
    # The builtin already handles the empty list (returns 0).
    return sum(L)
## 5: (Problem 5) Multiplying numbers in a list
def myProduct(L):
    '''
    Input:
        -L: a list of numbers
    Output:
        -the product of the numbers in L
    Be sure your procedure works for the empty list.
    Examples:
        >>> myProduct([1,3,5])
        15
        >>> myProduct([-3,2,4])
        -24
        >>> myProduct([])
        1
    '''
    # Fold with initial value 1, the multiplicative identity. The previous
    # version returned 0 for the empty list, which is the *additive* identity
    # and makes the empty product mathematically wrong (cf. math.prod).
    return reduce(lambda acc, x: acc * x, L, 1)
## 6: (Problem 6) Minimum of a list
def myMin(L):
    '''
    Input:
        a list L of numbers
    Output:
        the minimum number in L
    Be sure your procedure works for the empty list.
    Hint: The value of the Python expression float('infinity') is infinity.
    Examples:
        >>> myMin([1,-100,2,3])
        -100
        >>> myMin([0,3,5,-2,-5])
        -5
    '''
    # min() with a default covers the empty-list case directly.
    return min(L, default=float('infinity'))
## 7: (Problem 7) Concatenation of a List
def myConcat(L):
    '''
    Input:
        -L:a list of strings
    Output:
        -the concatenation of all the strings in L
    Be sure your procedure works for the empty list.
    Examples:
        >>> myConcat(['hello','world'])
        'helloworld'
        >>> myConcat(['what','is','up'])
        'whatisup'
    '''
    # str.join yields "" for an empty list, matching the empty-case contract.
    return "".join(L)
## 8: (Problem 8) Union of Sets in a List
def myUnion(L):
    '''
    Input:
        -L:a list of sets
    Output:
        -the union of all sets in L
    Be sure your procedure works for the empty list.
    Examples:
        >>> myUnion([{1,2},{2,3}])
        {1, 2, 3}
        >>> myUnion([set(),{3,5},{3,5}])
        {3, 5}
        >>> myUnion([])
        set()
    '''
    # Fold with set() as the initial value. The previous version returned {}
    # for the empty list — an empty *dict*, not a set. The initializer also
    # guarantees the result is always a fresh set rather than an alias of L[0].
    return reduce(lambda acc, s: acc | s, L, set())
## 9: (Problem 9) Complex Addition Practice
# Each answer should be a Python expression whose value is a complex number.
complex_addition_a = (3 + 1j) + (2 + 2j)      # = 5 + 3j
complex_addition_b = (-1 + 2j) + (1 + -1j)    # = 0 + 1j
complex_addition_c = (2 + 0j) + (-3 + .001j)  # = -1 + 0.001j
complex_addition_d = 4 * (0 + 2j) + (.001 + 1j)  # = 0.001 + 9j
## 10: (Problem 10) Combining Complex Operations
#Write a procedure that evaluates ax+b for all elements in L
def transform(a, b, L):
    '''
    Input:
        -a: a number
        -b: a number
        -L: a list of numbers
    Output:
        -a list of elements where each element is ax+b where x is an element in L
    Examples:
        >>> transform(3,2,[1,2,3])
        [5, 8, 11]
    '''
    # Apply the affine map x -> a*x + b to every element.
    return list(map(lambda x: a * x + b, L))
## 11: (Problem 11) GF(2) Arithmetic
# `one` and `zero` come from the course-provided GF2 module; arithmetic is
# mod 2, so 1+1 = 0. Expected values assuming standard GF(2) semantics:
GF2_sum_1 = one + one + one + 0                              # 1+1+1 = 1 -> one
GF2_sum_2 = one * one + zero * one + zero * zero + one * one  # 1+0+0+1 = 0 -> zero
GF2_sum_3 = (one + one + one) * (one + one + one + one)       # 1 * 0 = 0 -> zero
| {
"repo_name": "josiah14/linear-algebra",
"path": "programming-the-matrix/0-week/the-field-problems/Python/The_Field_problems.py",
"copies": "1",
"size": "4322",
"license": "mit",
"hash": -1249987680513122000,
"line_mean": 25.0361445783,
"line_max": 80,
"alpha_frac": 0.5645534475,
"autogenerated": false,
"ratio": 3.0959885386819486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9098645694757397,
"avg_score": 0.012379258284910285,
"num_lines": 166
} |
from functools import reduce
from hearthbreaker.agents.trade.util import memoized
class FakeCard:
    """Lightweight snapshot of a minion's combat-relevant stats."""

    def __init__(self, card):
        self.health = card.health
        self.attack = card.base_attack
        self.base_attack = card.base_attack
        # Mirror the taunt flag only when the source card actually has one,
        # leaving the attribute absent otherwise (same as the hasattr check).
        try:
            self.taunt = card.taunt
        except AttributeError:
            pass
class Trade:
    """One attack of my_minion into opp_minion, scored by the change in
    board value it produces.
    """

    def __init__(self, player, my_minion, opp_minion):
        self.player = player
        self.my_minion = my_minion
        self.opp_minion = opp_minion

    @memoized
    def after_attack(self):
        """Post-combat snapshots of both minions (FakeCard copies)."""
        res = {}
        res["my_minion"] = self.after_damage(self.my_minion, self.opp_minion)
        res["opp_minion"] = self.after_damage(self.opp_minion, self.my_minion)
        return res

    def after_damage(self, target, attacker):
        # Copy the target so the real minion is untouched, then deal damage.
        res = FakeCard(target)
        res.health -= attacker.calculate_attack()
        return res

    def start_value(self):
        # Board-value difference (mine minus opponent's) before the trade.
        me = self.minion_value(self.my_minion)
        opp = self.minion_value(self.opp_minion)
        return me - opp

    def end_value(self):
        # Board-value difference after the simulated exchange.
        me = self.minion_value(self.after_attack()['my_minion'])
        opp = self.minion_value(self.after_attack()['opp_minion'])
        return me - opp

    @memoized
    def value(self):
        """Net value of the trade; +1.0 bonus for surviving while killing."""
        res = self.end_value() - self.start_value()
        if self.after_attack()['my_minion'].health > 0 and \
                self.after_attack()['opp_minion'].health <= 0:
            res += 1.0
        return round(res, 2)

    def minion_desc(self, minion):
        return "{} {}/{}".format(minion.try_name(), minion.base_attack,
                                 minion.health)

    def __str__(self):
        s = "Trade {} for {} Value {}"
        return s.format(self.minion_desc(self.my_minion),
                        self.minion_desc(self.opp_minion),
                        self.value())

    def minion_value(self, minion):
        """Heuristic worth of a minion; 0 when dead.

        NOTE(review): reads minion.taunt unconditionally — FakeCard only has
        that attribute when the source card did; confirm all inputs carry it.
        """
        if minion.health <= 0:
            return 0
        res = (minion.base_attack + 0.5) * minion.health ** 1.5
        if minion.taunt:
            res += 0.5
        return res ** 0.4

    def is_opp_dead(self):
        return self.after_attack()['opp_minion'].health <= 0

    def needs_sequence(self):
        # Minion-vs-minion trades affect follow-up options; see FaceTrade.
        return True
class TradeSequence:
    """Trades already taken (past_trades) plus the remaining board state
    (current_trades_obj), used to search for the best attack ordering.
    """

    def __init__(self, current_trades_obj, past_trades=None):
        # Fixed: the default was a shared mutable list (past_trades=[]);
        # use the None sentinel so every instance gets its own list.
        self.past_trades = [] if past_trades is None else past_trades
        self.current_trades_obj = current_trades_obj

    def after_next_trade(self, next_trade):
        """Return a new TradeSequence reflecting the board after next_trade."""
        past_trades = [t for t in self.past_trades]
        past_trades.append(next_trade)
        to = self.current_trades_obj
        trades_obj = Trades(to.player, to.attack_minions,
                            to.opp_minions, to.opp_hero.copy(None, None))
        # The attacker is spent; the defender leaves the board if it died.
        trades_obj.attack_minions.remove(next_trade.my_minion)
        if next_trade.is_opp_dead():
            trades_obj.opp_minions.remove(next_trade.opp_minion)
        res = TradeSequence(trades_obj, past_trades)
        return res

    def has_lethal(self):
        return self.current_trades_obj.has_lethal()

    def past_trade_value(self):
        # Lethal dominates any accumulated value.
        if self.has_lethal():
            return 99999999
        else:
            return reduce(lambda s, t: s + t.value(), self.past_trades, 0.0)

    @memoized
    def future_trade_value(self):
        """Best achievable value from the remaining attacks (bounded search)."""
        if self.has_lethal():
            return 9999999999
        if len(self.current_trades_obj.attack_minions) == 0:
            return 0.0
        # Depth limit: stop exploring past two completed trades.
        if len(self.past_trades) > 1:
            return 0
        next_trades = self.current_trades_obj.trades()
        if len(next_trades) == 0:
            return 0.0
        if len(next_trades) > 1000000:
            return 0.0
        if self.current_trades_obj.opp_has_taunt():
            # Taunts force choices: branch over every candidate trade.
            best_value = -99999999999.0
            for next_trade in next_trades:
                next_seq = self.after_next_trade(next_trade)
                full = next_trade.value() + next_seq.future_trade_value()
                if full > best_value:
                    best_value = full
            return best_value
        else:
            # No taunt: take the top-ranked trade greedily.
            return next_trades[0].value()

    @memoized
    def trade_value(self):
        return self.past_trade_value() + self.future_trade_value()
class FaceTrade(Trade):
    """A direct attack against the opposing hero.

    NOTE(review): opp_minion appears to hold the enemy hero here (see the
    FaceTrade construction in Trades.trades) — confirm.
    """
    def value(self):
        # Lethal face damage dominates every other consideration.
        if self.is_lethal():
            return 9999999
        return self.my_minion.base_attack * 0.2
    def __str__(self):
        description = self.minion_desc(self.my_minion)
        return "Face {} Value {}".format(description, self.value())
    def is_lethal(self):
        return self.my_minion.base_attack >= self.opp_minion.health
    def needs_sequence(self):
        # Going face does not change our board, so no ordering is needed.
        return False
class Trades:
    """Enumerates and ranks the possible attacks for one side of the board."""
    def __init__(self, player, attack_minions, opp_minions, opp_hero):
        self.player = player
        # Slicing takes a copy (capped at 99999 entries) so later removals
        # during trade simulation do not mutate the caller's lists.
        self.attack_minions = attack_minions[0:99999]
        self.opp_minions = opp_minions[0:99999]
        self.opp_hero = opp_hero
    def opp_has_taunt(self):
        # Any taunt minion forces attacks to go through it.
        for minion in self.opp_minions:
            if minion.taunt:
                return True
        return False
    def total_attack(self):
        # Sum of base attack across all ready minions.
        return reduce(lambda s, i: s + i.base_attack, self.attack_minions, 0)
    @memoized
    def has_lethal(self):
        # Lethal: no taunt in the way and enough total damage for the hero.
        return not self.opp_has_taunt() and \
            self.total_attack() >= self.opp_hero.health
    @memoized
    def trade_value(self, trade):
        # Value a trade, using a lookahead sequence when ordering matters.
        if not trade.needs_sequence() or len(self.attack_minions) <= 1:
            return trade.value()
        seq = TradeSequence(self).after_next_trade(trade)
        return seq.trade_value()
    @memoized
    def trades(self):
        """Return candidate trades, best first."""
        res = []
        me = self.attack_minions
        opp = self.targetable_minions(self.opp_minions)
        # Minion-on-minion trades are pointless once we already have lethal.
        if not self.has_lethal():
            for my_minion in me:
                for opp_minion in opp:
                    trade = Trade(self.player, my_minion, opp_minion)
                    res.append(trade)
        if not self.opp_has_taunt():
            for my_minion in me:
                trade = FaceTrade(self.player, my_minion, self.opp_hero)
                res.append(trade)
        if self.opp_has_taunt():
            # Lookahead sorting is expensive; prune large candidate sets
            # before ranking.  NOTE(review): the ascending sort keeps the
            # LOWEST-valued candidates — confirm this is intended.
            if len(res) >= 12:
                res = sorted(res, key=lambda t: t.value())[0:4]
            elif len(res) >= 8:
                res = sorted(res, key=lambda t: t.value())[0:3]
            else:
                res = sorted(res, key=self.trade_value)
        else:
            res = sorted(res, key=lambda t: t.value())
        res.reverse()
        return res
    def targetable_minions(self, all):
        # Taunts must die first; otherwise anything may be attacked.
        taunt = [m for m in filter(lambda m: m.taunt, all)]
        if len(taunt) > 0:
            return taunt
        else:
            return all
    def __str__(self):
        res = ["TRADES:"]
        for t in self.trades():
            s = t.__str__()
            s += " Root Value: {}".format(self.trade_value(t))
            res.append(s)
        return str.join("\n", res)
class TradeMixin:
    """Supplies ranked, playable trades against the opponent's board."""
    def trades(self, player):
        pool = Trades(player, self.attack_minions(player),
                      player.opponent.minions, player.opponent.hero)
        return [trade for trade in pool.trades() if trade.value() > -1]
class AttackMixin:
    """Drives an agent's attack phase using the ranked trades."""
    def attack_once(self, player):
        # Execute only the single highest-ranked trade, if any exists.
        trades = self.trades(player)
        if len(trades) > 0:
            self.current_trade = trades[0]
            self.current_trade.my_minion.attack()
    def attack(self, player):
        # Keep attacking while trades remain.  NOTE(review): recursion
        # assumes each attack eventually empties the trade list; otherwise
        # this recurses without bound — confirm.
        if len(self.trades(player)) > 0:
            self.attack_once(player)
            self.attack(player)
    def attack_minions(self, player):
        # Minions that are ready to attack this turn.
        res = [minion
               for minion
               in filter(lambda minion: minion.can_attack(), player.minions)]
        # NOTE(review): "and False" deliberately disables hero attacks here.
        if player.hero.can_attack() and False:
            res.append(player.hero)
        return res
| {
"repo_name": "anuragpapineni/Hearthbreaker-evolved-agent",
"path": "hearthbreaker/agents/trade/trade.py",
"copies": "2",
"size": "7732",
"license": "mit",
"hash": -6952166985474704000,
"line_mean": 28.7384615385,
"line_max": 78,
"alpha_frac": 0.5531557165,
"autogenerated": false,
"ratio": 3.4970601537765718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 260
} |
from functools import reduce
from . import Parameter
from re import compile, MULTILINE
import re
class Configuration:
    """Reads and rewrites ``NAME=value`` style configuration files.

    A leading ``#`` marks a parameter as commented out (disabled).
    """
    def __init__(self, file_path):
        self.file_path = file_path
    def load_parameters(self, names):
        """Return {name: (enabled, value)} for the requested names."""
        configs = self._get_parameters_from_config()
        return {name: self._get_param_status_and_value(text)
                for name, text in configs.items()
                if name in names}
    def _get_parameters_from_config(self):
        # Match lines shaped like "NAME=value" or "#NAME=value".
        regex = re.compile('^([A-Z]|_|[0-9]|#)*=.*')
        with open(self.file_path) as config_file:
            self.text = config_file.read()
        get_name = lambda text: text.split('=')[0].replace('#', '')
        return {get_name(row): row
                for row in self.text.splitlines()
                if regex.match(row)}
    def _get_param_status_and_value(self, text):
        # (enabled?, raw value); a "#" prefix means disabled.
        return (not text.startswith('#'), text.split('=')[1])
    def save_as(self, parameters, file_path):
        """Save under a new path; later saves also use that path."""
        self.file_path = file_path
        self.save(parameters)
    def save(self, parameters):
        """Apply each parameter's (old, new) text pair and rewrite the file.

        NOTE(review): relies on self.text being populated by an earlier
        load_parameters()/save() call — confirm callers guarantee this.
        """
        with open(self.file_path, 'w') as config_file:
            text = reduce(lambda text, param: self._write(text, param.to_text()),
                          [param for param in parameters],
                          self.text)
            config_file.write(text)
            self.text = text
    def _write(self, text, changes):
        old_parameter, new_parameter = changes
        if old_parameter in text:
            # Escape the needle so regex metacharacters in a parameter line
            # cannot corrupt the match, and replace via a callable so
            # backslashes in the new line are taken literally.
            regex = re.compile('^.*' + re.escape(old_parameter) + '.*$', MULTILINE)
            return regex.sub(lambda match: new_parameter, text)
        return text + '\n' + new_parameter
| {
"repo_name": "fholiveira/tlpconfig",
"path": "tlp/models/configuration.py",
"copies": "1",
"size": "1672",
"license": "bsd-3-clause",
"hash": -6344298432929767000,
"line_mean": 35.347826087,
"line_max": 81,
"alpha_frac": 0.5592105263,
"autogenerated": false,
"ratio": 3.9341176470588235,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49933281733588236,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import chain, groupby, islice
from operator import itemgetter
from typing import Any, Callable, Iterable, Sequence
# Public API; 'second' was defined below but missing from the list.
__all__ = [
    'all_equal',
    'butlast',
    'concat',
    'cons',
    'dedupe',
    'first',
    'flatten',
    'head',
    'init',
    'last',
    'nth',
    'partial_flatten',
    'quantify',
    'rest',
    'reverse',
    'second',
    'sorted_index',
    'tail',
    'take',
]
def first(seq: Sequence) -> Any:
    """Return the first element of *seq*.

    >>> first([1, 2, 3])
    1
    """
    iterator = iter(seq)
    return next(iterator)
def second(seq: Sequence) -> Any:
    """Return the element at index 1 of *seq*.

    >>> second([1, 2, 3])
    2
    """
    return seq[1]
def last(seq: Sequence) -> Any:
    """Return the final element of *seq*.

    >>> last([1, 2, 3])
    3
    """
    return seq[-1]
def butlast(seq: Sequence) -> Sequence:
    """Return every element of *seq* except the last.

    >>> butlast([1, 2, 3])
    [1, 2]
    """
    return seq[:-1]
def rest(seq: Sequence) -> Any:
    """Return every element of *seq* after the first.

    >>> rest([1, 2, 3])
    [2, 3]
    """
    return seq[1:]
def reverse(seq: Sequence) -> Sequence:
    """Return *seq* with its elements in reverse order.

    >>> reverse([1, 2, 3])
    [3, 2, 1]
    """
    return seq[::-1]
def cons(item: Any, seq: Sequence) -> chain:
    """Lazily prepend *item* to *seq*.

    >>> list(cons(1, [2, 3]))
    [1, 2, 3]
    """
    return chain((item,), seq)
def lazy_flatten(seq: Iterable) -> Iterable:
    """
    Lazily yield the items of an arbitrarily nested iterable in order.

    Strings and bytes are treated as atoms, not as nested iterables.
    """
    for item in seq:
        if isinstance(item, Iterable) and not isinstance(item, (str, bytes)):
            # Recurse through lazy_flatten itself; the old call to flatten()
            # materialized every nested level, defeating the generator.
            yield from lazy_flatten(item)
        else:
            yield item
def flatten(seq: Iterable) -> Iterable:
    """Return *seq* fully flattened, preserving its container type.

    >>> flatten([1, [2, [3, [4, 5], 6], 7]])
    [1, 2, 3, 4, 5, 6, 7]
    """
    container = type(seq)
    return container(lazy_flatten(seq))  # type: ignore
def partial_flatten(seq: Iterable) -> Iterable:
    """
    Flatten *seq* by exactly one level, preserving the outer type.

    >>> partial_flatten(((1,), [2, 3], (4, [5, 6])))
    (1, 2, 3, 4, [5, 6])
    """
    # Seed reduce with an empty container so an empty input returns an
    # empty container instead of raising TypeError.
    return type(seq)(reduce(concat, seq, type(seq)()))  # type: ignore
def lazy_dedupe(seq: Sequence, key: Callable=None) -> Iterable:
    """
    Yield the items of *seq* in order, skipping any already seen.

    *key*, when given, derives the value used for duplicate detection.
    """
    seen = set()  # type: set
    for item in seq:
        marker = key(item) if key is not None else item
        if marker in seen:
            continue
        seen.add(marker)
        yield item
def sorted_index(seq: Sequence, item: Any, key: str=None) -> int:
    """Return the index *item* would occupy in sorted *seq*.

    >>> sorted_index([10, 20, 30, 40, 50], 35)
    3
    """
    keyfn = None if key is None else itemgetter(key)
    # Put item first so a stable sort keeps it ahead of key-equal peers.
    ordered = sorted([item, *seq], key=keyfn)
    return ordered.index(item)
def dedupe(seq: Sequence, key: Callable=None) -> Iterable:
    """Drop duplicates, keeping first-seen order and the container type.

    >>> dedupe([1, 5, 2, 1, 9, 1, 5, 10])
    [1, 5, 2, 9, 10]
    """
    container = type(seq)
    return container(lazy_dedupe(seq, key))  # type: ignore
def concat(seqX: Sequence, seqY: Sequence) -> Sequence:
    """
    Join two sequences into one; keeps the type when both match,
    otherwise falls back to a list.

    >>> concat((1, 2, 3), (4, 5, 6))
    (1, 2, 3, 4, 5, 6)
    """
    combined = chain(seqX, seqY)
    if not isinstance(seqX, type(seqY)):
        return list(combined)
    return type(seqX)(combined)  # type: ignore
def take(n: int, iterable: Iterable) -> Iterable:
    """
    Return the first *n* items of *iterable* as a list.

    >>> take(2, range(1, 10))
    [1, 2]
    """
    return [*islice(iterable, n)]
def nth(iterable: Iterable, n: int, default: Any=None) -> Any:
    """
    Return the item at position *n*, or *default* when exhausted.

    >>> nth([1, 2, 3], 1)
    2
    """
    remainder = islice(iterable, n, None)
    return next(remainder, default)
def all_equal(iterable: Iterable) -> bool:
    """
    True when every element equals every other (vacuously true if empty).

    >>> all_equal([True, True])
    True
    """
    # groupby collapses runs of equal elements: at most one group overall
    # means all elements were equal.
    grouped = groupby(iterable)
    return next(grouped, True) and not next(grouped, False)  # type: ignore
def quantify(iterable: Iterable, pred: Callable=bool) -> int:
    """
    Count how many times *pred* is truthy across *iterable*.

    >>> quantify([True, False, True])
    2
    """
    return sum(pred(item) for item in iterable)
# Define some common aliases
head = first  # alias: first element
tail = rest  # alias: everything after the first element
init = butlast  # alias: everything before the last element
| {
"repo_name": "Jackevansevo/basic-utils",
"path": "basic_utils/seq_helpers.py",
"copies": "1",
"size": "4548",
"license": "mit",
"hash": 1156020217288231400,
"line_mean": 19.7671232877,
"line_max": 77,
"alpha_frac": 0.5672823219,
"autogenerated": false,
"ratio": 3.404191616766467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.946783340369729,
"avg_score": 0.000728106993835328,
"num_lines": 219
} |
from functools import reduce
from itertools import chain
from django.conf import settings
from django.apps import apps
from django.utils.text import capfirst
from ..utils.functools import factual, first, pick, omit
from ..utils.translation import get_string, get_model_names
from ..utils.urls import exists_import
from .library import register, Component, ComplexComponent, Tag
from .markup import Link, Caption
class MenuItem(Tag):
    """A single <li> entry of a navigation menu."""
    el = 'li'
    def get_defaults(self):
        defaults = {'divider': False, 'include': True, 'disabled': False}
        defaults['link'] = {'url': None}
        return defaults
    def get_nodelist(self):
        hidden = self.props.divider or not self.props.include
        if hidden:
            return ''
        return self.inject(Link, self.props.link, self.props.caption)
    def resolve_props(self):
        caption = self.inject(Caption, self.props.caption)
        return {'caption': caption}
    def tweak(self):
        if self.props.include:
            if self.props.divider:
                self.dom.add_class('nav-divider')
            else:
                self.dom.add_class(self.get_class_set('disabled', 'active'))
        else:
            self.dom.empty()
        super().tweak()
class Dropdown(Tag):
    """An <li> rendering a Bootstrap dropdown toggle plus its submenu."""
    el = 'li'
    def get_defaults(self):
        return {'active': False}
    def resolve_props(self):
        result = {}
        if self.props.active:
            result['class'] = 'active'
        # Show a caret only when there is a submenu to toggle; callers may
        # supply their own caret via props.
        if self.props.nodelist:
            if not 'caret' in self.props:
                result['caret'] = self.inject(
                    Tag, {'el': 'span', 'class': 'caret'})
        else:
            result['caret'] = ''
        return result
    def get_nodelist(self):
        # Toggle link (caption + caret) followed by the dropdown <ul>.
        return ''.join([
            self.inject(
                Link,
                self.props.link,
                ''.join([
                    self.inject(Caption, self.props.caption),
                    self.props.caret]),
                attrs={
                    'class': 'dropdown-toggle',
                    'data-toggle': 'dropdown'}),
            '' if not self.props.nodelist else self.inject(
                Tag,
                {'el': 'ul', 'class': 'dropdown-menu'},
                self.props.nodelist)])
class Menu(Tag):
    """Renders a Bootstrap nav <ul> from a declarative list of item dicts."""
    el = 'ul'
    inline = True
    def get_items(self):
        # Raw item dicts; subclasses override to supply their own.
        return self.props.get('items', [])
    def resolve_item(self, item):
        """Fill in derived keys (caption, link, active, include) for one item."""
        link = item.get('link', {})
        if 'divider' in item:
            return {}
        elif 'model' in item:
            # Derive caption title, url and active state from the model.
            model = apps.get_model(*item['model'].split('.'))
            if not 'title' in item['caption']:
                item['caption']['title'] = get_model_names(model)[-1]
            (prefix, url) = map(get_string, get_model_names(model))
            if not 'url' in link:
                link['url'] = url
            # Active when on the list url or the related add/edit urls.
            item['active'] = self.check_active(
                url, *map(
                    lambda suffix: '-'.join([prefix, suffix]),
                    ['add', 'edit']))
        elif not 'active' in item:
            if link.get('url') and link.get('reverse', True):
                item['active'] = self.check_active(link.get('url'))
            else:
                item['active'] = False
        # include=None means "only show while active".
        if 'include' in item and item['include'] is None:
            item['include'] = item.get('active')
        if item.get('disabled'):
            link.update({'url': '#', 'reverse': False})
        return dict(item, link=link)
    def resolve_items(self):
        return map(self.resolve_item, self.get_items())
    def filter_items(self, fn=lambda i: True, items=None):
        # Keep included items that also satisfy fn.
        if items is None:
            items = self.resolve_items()
        return list(filter(lambda i: i.get('include', True) and fn(i), items))
    def render_item(self, item):
        if 'dropdown' in item:
            return self.render_dropdown(item) or ''
        return self.inject(MenuItem, item)
    def render_items(self, items=None):
        if items is None:
            items = self.resolve_items()
        return ''.join(map(self.render_item, items))
    def render_dropdown(self, item):
        """Render a submenu, collapsing or lifting single/active entries."""
        items = list(item['dropdown'].resolve_items())
        include_items = self.filter_items(items=items)
        active_items = self.filter_items(lambda i: i.get('active'), items)
        if len(include_items):
            toggle = item['caption'].get('toggle', True)
            props = dict({
                'link': {},
                'nodelist': '',
                'active': item.get('active') or bool(active_items)},
                **pick(item, 'caption', 'attrs', 'link', 'caret'))
            display = first(props['active'] and active_items or include_items)
            # A one-entry dropdown collapses into a plain menu item.
            if item['caption'].get('collapse', True) and len(include_items) == 1:
                if not props['active']:
                    props['link'].update(pick(display, 'url'))
                if toggle:
                    props.update(pick(display, 'caption'))
                return self.inject(MenuItem, props)
            if toggle and props['active']:
                # Show the active entry as the toggle; list the rest below.
                props.update(pick(display, 'caption'))
                props['nodelist'] = self.render_items(self.filter_items(
                    lambda i: not i.get('active'),
                    items))
            else:
                props['nodelist'] = self.render_items(items)
            return self.inject(Dropdown, props)
    def get_defaults(self):
        return {'pills': False, 'tabs': False, 'stacked': False}
    def resolve_attrs(self):
        return {'class': self.get_class_set(
            'pills', 'tabs', 'stacked', prefix='nav', include='nav')}
    def get_nodelist(self):
        return self.render_items()
@register.era
class MainMenu(Menu):
    """Aggregates <App>Menu item definitions from apps in settings.MAIN_MENU."""
    def get_items(self):
        # Import each app's components module, if it exists.
        modules = factual(map(
            lambda app: exists_import('.'.join([app, 'components'])),
            getattr(settings, 'MAIN_MENU', [])))
        # Pick up the "<App>Menu" class from each imported module.
        menus = factual(map(
            lambda module: getattr(
                module,
                ''.join([capfirst(module.__name__.split('.')[0]), 'Menu']),
                None),
            modules))
        return list(chain(*map(
            lambda cls: self.insert(cls).get_items(),
            menus)))
| {
"repo_name": "doctorzeb8/django-era",
"path": "era/templatetags/menu.py",
"copies": "1",
"size": "6168",
"license": "mit",
"hash": -5668869941151844000,
"line_mean": 33.2666666667,
"line_max": 81,
"alpha_frac": 0.5230220493,
"autogenerated": false,
"ratio": 4.1202404809619235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5143262530261924,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import chain
from math import sqrt
from operator import mul
def even(n):
    """Return True when n is divisible by 2 (n ~ 2*k)."""
    return not n % 2
def odd(n):
    """Return True when n is not divisible by 2 (n ~ 2*k + 1)."""
    return bool(n % 2)
def gcd(a, b):
    """Greatest common divisor via the Euclidean algorithm.

    Repeatedly replaces (a, b) with (b, a mod b); gcd(a, 0) == a.
    """
    while b:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Least common multiple of a and b; lcm(0, 0) is defined as 0.

    Guard the a == b == 0 case, which previously divided by gcd(0, 0) == 0.
    """
    if a == 0 and b == 0:
        return 0
    return abs(a * b) // gcd(a, b)
def extended_euclidian(a, b):
    """Return (x, y, d) with x*a + y*b = d and d = gcd(a, b).

    Recursive, O(log^2 a): given (x1, y1) for (b, a % b) and the identity
    a % b = a - b*(a // b), we have
    b*x1 + (a % b)*y1 = a*y1 + b*(x1 - y1*(a // b)).
    """
    if a == b == 0:
        return 0, 0, 0
    if b == 0:
        return 1, 0, a
    x1, y1, d = extended_euclidian(b, a % b)
    return y1, x1 - y1 * (a // b), d
def binomial(n, k):
    """Number of ways to choose k items from n: n! / (k! * (n-k)!).

    Computed multiplicatively, one factor at a time; returns 0 for k > n.
    """
    result = 1
    for i in range(1, k + 1):
        result = result * (n - i + 1) // i
    return result
def binomial_table(n, k):
    """Compute (n choose k) by building Pascal's triangle: (n, k) = C[n][k]."""
    table = [[0] * (i + 1) for i in range(n + 1)]
    for row in range(n + 1):
        for col in range(min(row, k) + 1):
            if col in (0, row):
                table[row][col] = 1
            else:
                table[row][col] = table[row - 1][col - 1] + table[row - 1][col]
    return table[n][k]
def factorial(n):
    """Return n! = n*(n-1)*...*1, with 0! == 1.

    Bug fix: range(1, n) excluded n itself, so the old code returned (n-1)!.
    """
    return reduce(mul, range(1, n + 1), 1)
def fibonacci(n):
    """Return the nth Fibonacci number: F(n) = F(n-2) + F(n-1)."""
    if n < 2:
        return n
    prev, curr = 1, 1
    for _ in range(2, n):
        prev, curr = curr, prev + curr
    return curr
def isqrt(x):
    """Return floor(sqrt(x)).

    Uses math.isqrt for ints so large values stay exact (float sqrt loses
    precision beyond ~2**52); keeps the float path for non-int inputs.
    """
    if isinstance(x, int):
        import math
        return math.isqrt(x)
    return int(sqrt(x))
def coprime(a, b):
    """
    Return True when a and b are coprime, i.e. gcd(a, b) == 1 —
    the only integer dividing both of them is 1.

    Parameters
    ----------
    a, b : int
        Values to test.

    Returns
    -------
    bool
    """
    return gcd(a, b) == 1
def is_prime(n: int) -> bool:
    """Deterministic primality test by 6k +- 1 trial division."""
    if n in (2, 3):
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k +- 1; test both residues per step.
    candidate = 5
    limit = isqrt(n)
    while candidate <= limit:
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def sieve(n):
    """Return all primes <= n via the Sieve of Eratosthenes.

    Marking starts at i*i rather than 2*i: any smaller multiple of i has a
    prime factor below i and was already crossed out.
    """
    s = [True] * (n + 1)
    for i in range(2, isqrt(n) + 1):
        if s[i]:
            for j in range(i * i, n + 1, i):
                s[j] = False
    return [i for i in range(2, n + 1) if s[i]]
def factorize(n):
    """
    Prime decomposition: n = p1^a1 * p2^a2 * ... * pk^ak.

    Parameters
    ----------
    n : int
        Integer to factorize.

    Returns
    -------
    factors : list
        Prime factors with their exponents; a leading (-1, 1) for n < 0.

    Examples
    --------
    >>> factorize(2434500)
    [(2, 2), (3, 2), (5, 3), (541, 1)]
    """
    if n in (0, 1):
        return [(n, 1)]
    factors = []
    if n < 0:
        factors.append((-1, 1))
        n = - n

    def strip_factor(m, q):
        # Divide every power of q out of m, recording (q, exponent).
        # q is prime here because m was already divided by smaller primes.
        a = 0
        while m % q == 0:
            m //= q
            a += 1
        if a > 0:
            factors.append((q, a))
        return m

    # Trial division by 2, 3, then candidates of the form 6k - 1 / 6k + 1.
    for q in chain((2, 3), range(5, isqrt(n) + 1, 6)):
        n = strip_factor(n, q)
        n = strip_factor(n, q + 2)
    if n != 1:
        factors.append((n, 1))
    return factors
def prime_pi(n):
    """Count the primes <= n."""
    if n < 2:
        return 0
    return len(sieve(n))
def euler_phi(n):
    """Euler's totient: count of integers in [1, n] coprime to n."""
    if n == 1:
        return 1
    # phi(n) = n * prod(1 - 1/p) over the distinct prime factors p.
    phi = n
    for prime, _ in factorize(n):
        phi -= phi // prime
    return phi
def binpow(x, r):
    """Compute x**r for integer r >= 0 by binary exponentiation."""
    result = 1
    base, exponent = x, r
    while exponent > 0:
        if exponent % 2:  # low bit set: fold this power of the base in
            result *= base
        base *= base
        exponent //= 2
    return result
def linear_diophantine(a, b, c):
    """Solve a*x + b*y = c over the integers.

    Returns
    -------
    None when no solution exists (c not divisible by gcd(a, b)),
    otherwise (x0, y0, a', b'): every solution is (x0 + b'k, y0 - a'k).
    """
    # d = p*a + q*b from the extended Euclidean algorithm.
    p, q, d = extended_euclidian(a, b)
    if d == 0 or c % d != 0:
        return None
    scale = c // d
    return p * scale, q * scale, a // d, b // d
| {
"repo_name": "vadimadr/python-algorithms",
"path": "algorithms/number_theory.py",
"copies": "1",
"size": "5909",
"license": "mit",
"hash": -5165591191912861000,
"line_mean": 17.1815384615,
"line_max": 71,
"alpha_frac": 0.4313758673,
"autogenerated": false,
"ratio": 2.779397930385701,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3710773797685701,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import chain
from operator import attrgetter
from os import name, system
from typing import Any, List, Sequence, Tuple
# Public names exported by `from <module> import *`.
__all__ = [
    'clear',
    'getattrs',
    'map_getattr',
    'rgetattr',
    'rsetattr',
    'slurp',
    'to_string',
]
def slurp(fname: str) -> str:
    """Read the file at *fname* and return its entire contents."""
    with open(fname, 'r') as handle:
        return handle.read()
def clear() -> None:
    """Clear the terminal screen ('cls' on Windows, 'clear' elsewhere)."""
    command = 'cls' if name == 'nt' else 'clear'
    system(command)
def to_string(objects: List[object], sep: str=", ") -> str:
"""
Converts a list of objects into a single string
>>> to_string([1, 2, 3])
'1, 2, 3'
"""
return sep.join(map(str, objects))
def getattrs(obj: object, keys: Sequence[str]) -> Tuple[Any, ...]:
    """Fetch several attributes from *obj* at once, in the given order."""
    return tuple(map(lambda key: getattr(obj, key), keys))
def map_getattr(attr: str, object_seq: Sequence[object]) -> Tuple[Any, ...]:
    """
    Collect attribute *attr* from every object in *object_seq*.
    """
    getter = attrgetter(attr)
    return tuple(getter(obj) for obj in object_seq)
def rgetattr(obj: object, attrs: str) -> Any:
    """Resolve a dotted attribute path, e.g. rgetattr(o, 'a.b.c')."""
    target = obj
    for part in attrs.split('.'):
        target = getattr(target, part)
    return target
def rsetattr(obj: object, attr: str, val: Any) -> None:
    """Set a possibly dotted attribute path on *obj*."""
    parent_path, _, leaf = attr.rpartition('.')
    # Walk down to the owning object only when the path is actually nested.
    parent = rgetattr(obj, parent_path) if parent_path else obj
    return setattr(parent, leaf, val)
| {
"repo_name": "Jackevansevo/basic-utils",
"path": "basic_utils/core.py",
"copies": "1",
"size": "1624",
"license": "mit",
"hash": 8777713359020928000,
"line_mean": 24.375,
"line_max": 76,
"alpha_frac": 0.6256157635,
"autogenerated": false,
"ratio": 3.5929203539823007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47185361174823004,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import chain
from urllib.parse import unquote
from django.contrib.contenttypes.forms import BaseGenericInlineFormSet
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
from django.db.models import OneToOneField
from django.forms import Form as EmptyForm
from django.forms.fields import DateField, DateTimeField, TimeField
from django.forms.models import modelform_factory, modelformset_factory, inlineformset_factory
from django.http import Http404
from django.shortcuts import redirect
from django.utils.functional import cached_property
from django.views.generic.edit import FormMixin
from ..forms import FrozenSelect, DateTimePicker
from ..components import Form
from ..utils.functools import just, call, swap, throw, \
pluck, first, select, separate, factual, case, omit, pick, map_keys, map_values
from ..utils.translation import _, inflect, get_string, get_model_names
from .base import BaseView
class FormFieldsOverridesMixin:
def get_formfields_overrides(self):
return {
DateField: (DateTimePicker, {'key': 'date'}),
DateTimeField: (DateTimePicker, {'key': 'datetime'}),
TimeField: (DateTimePicker, {'key': 'time'})}
def override_form_fields(self, form):
overrides = self.get_formfields_overrides()
for field in form.fields.values():
match = overrides.get(field.__class__, None)
if match:
Widget, kw = match
field.widget = Widget(**kw)
class ModelFormMixin(FormFieldsOverridesMixin):
empty_form = False
@cached_property
def model(self):
return getattr(
self,
'model_class',
hasattr(self.form_class, '_meta') \
and self.form_class._meta.model or None)
@cached_property
def instance(self):
return call(getattr(self, 'get_instance', lambda: None))
def get_model_name(self, key='single', fn=get_string, model=None):
return case(key, dict(
zip(('single', 'plural'),
map(fn, get_model_names(model or self.model)))))
def get_model_fields(self, model=None):
return pluck((model or self.model)._meta.fields[1:], 'name')
def get_fields(self):
return self.form_class and self.form_class._meta.fields \
or getattr(self, 'fields', self.get_model_fields())
def get_choices(self, model, field_name):
field = model._meta.get_field(field_name)
if getattr(field, 'rel', None):
return [(obj.pk, str(obj)) for obj in field.rel.to.objects.all()]
return list(field.choices)
def get_relation_fields(self):
result = []
if self.model:
for field_name in self.get_fields():
try:
field = self.model._meta.get_field(field_name)
if isinstance(field, OneToOneField):
result.append(field)
except FieldDoesNotExist:
pass
return result
def get_form_class(self):
if self.form_class:
return self.form_class
elif self.empty_form:
return EmptyForm
else:
return modelform_factory(self.model, fields=self.get_fields())
def get_form_data(self, **kw):
result = pick(kw, 'prefix', 'initial', 'instance')
if self.request.POST:
result['data'] = self.request.POST
if self.request.FILES:
result['files'] = self.request.FILES
if self.empty_form:
return omit(result, 'instance')
return result
def get_form(self):
form = self.get_form_class()(**self.get_form_kwargs())
for field in pluck(self.get_relation_fields(), 'name'):
form.fields.pop(field)
return self.prepare_form(form)
def get_relation(self, field, **kw):
kw = kw or {'fields': self.get_model_fields(field.rel.to)}
form = modelform_factory(field.rel.to, **kw)(
**self.get_form_data(
prefix=field.name,
instance=getattr(self.instance, field.name, None)))
return {'field': field, 'form': form, 'required': not field.blank}
def get_relations(self):
return factual(map(
lambda field: swap(field, getattr(
self,
'get_{0}_relation'.format(field.name),
self.get_relation)),
self.get_relation_fields()))
def prepare_form(self, form):
if not self.empty_form:
self.override_form_fields(form)
return form
class FormsetsMixin(ModelFormMixin):
def get_formsets(self):
return []
def get_formset_factory(self, formset_model, **kw):
if 'matrix' in kw:
max_num = len(self.get_choices(formset_model, kw['matrix']))
kw = dict({k: max_num for k in kw.pop('auto_num', ('extra', 'max_num'))}, **kw)
kw['widgets'] = {kw.pop('matrix'): FrozenSelect()}
if not 'fields' in kw and not 'form' in kw:
kw['fields'] = self.get_model_fields(formset_model)
if not 'constructor' in kw:
kw['parent_model'] = kw.pop('model', self.model)
if hasattr(formset_model, 'content_object'):
ct_field = kw.pop('ct_field', 'content_type')
fk_field = kw.pop('fk_field', 'object_id')
result = modelformset_factory(
formset_model,
formset=BaseGenericInlineFormSet,
exclude=(ct_field, fk_field),
**omit(kw, 'parent_model', 'for_concrete_model'))
options = formset_model._meta
result.ct_field = options.get_field(ct_field)
result.ct_fk_field = options.get_field(fk_field)
result.for_concrete_model = kw.get('for_concrete_model', True)
return result
return kw.pop('constructor', inlineformset_factory)(model=formset_model, **kw)
def get_formset_data(self, factory, **kw):
result = self.get_form_data(instance=self.instance, **kw)
if 'matrix' in kw:
exclude = [] if not hasattr(factory, 'fk') else list(map(
lambda obj: getattr(obj, kw['matrix']),
factory.model.objects.filter(**{factory.fk.name: self.instance})))
result['initial'] = list(map(
lambda choice: {kw['matrix']: choice},
filter(
lambda choice: not choice in exclude,
map(first, self.get_choices(factory.model, kw['matrix'])))))
return result
def inline_formset(self, formset_model, **kw):
factory = self.get_formset_factory(formset_model, **kw.copy())
prefix = self.get_model_name('plural', model=factory.model)
get_data = getattr(self, 'get_{0}_formset_data'.format(prefix), self.get_formset_data)
formset = factory(**get_data(factory, **dict(omit(kw, 'constructor'), prefix=prefix)))
for form in formset.forms:
self.override_form_fields(form)
if formset.can_delete and formset.validate_min:
for form in formset.forms[:formset.min_num]:
form.fields['DELETE'].widget.attrs['disabled'] = True
return formset
class FormView(BaseView, FormsetsMixin, FormMixin):
use_prefix = False
long_term = False
components = {'content': Form}
form_props = {}
success_redirect = 'index'
success_message = None
actions = [{
'icon': 'check-square',
'title': _('Save'),
'level': 'success'}]
def get_attrs_dict(self, *attrs):
return dict(map(lambda x: (x, call(getattr(self, 'get_' + x))), attrs))
def get_prefix(self):
return self.prefix or (self.use_prefix and self.about or None)
def get_form_kwargs(self):
return self.get_form_data(**dict(
{} if not self.instance else {'instance': self.instance},
**self.get_attrs_dict('prefix', 'initial')))
def get_members(self):
result = self.get_attrs_dict('form', 'relations', 'formsets')
return result
def get_all_forms(self, **kw):
return chain(
[kw['form']],
pluck(kw['relations'], 'form'),
*pluck(kw['formsets'], 'forms'))
def get_media(self, **kw):
return reduce(
lambda x, y: x and (x + y) or y,
pluck(self.get_all_forms(**kw), 'media'))
def get_actions(self):
return self.actions
def get_form_props(self):
result = self.form_props
result['actions'] = self.get_actions()
if self.check_is_long():
result['spinner'] = 'spinner'
return result
def get_context_data(self, **kw):
members = kw or self.get_members()
return dict(
super().get_context_data(**members),
**map_keys(
lambda key: key if not self.use_prefix else '_'.join(
[self.get_prefix(), key]),
dict(
members,
props=self.get_form_props())))
def get_success_message(self, **kw):
return self.success_message
def get_success_redirect(self, **kw):
return reverse(getattr(self, 'success_redirect', self.url_match.url_name))
def success_finish(self, **kw):
self.send_message('success', self.get_success_message(**kw))
return redirect(self.get_success_redirect(**kw))
def save_relations(self, form, *relations):
for rel in relations:
if rel['form'].has_changed():
setattr(form.instance, rel['field'].name, rel['form'].save())
def save_form(self, form):
hasattr(form, 'instance') and form.instance.save()
def save_formsets(self, form, *formsets):
for formset in formsets:
formset.instance = form.instance
formset.save()
def process_valid(self, **kw):
self.save_relations(kw['form'], *kw['relations'])
self.save_form(kw['form'])
self.save_formsets(kw['form'], *kw['formsets'])
return self.success_finish(**kw)
def process_invalid(self, **kw):
errors = chain(*map(call, pluck(
self.get_all_forms(**kw), 'non_field_errors')))
for message in errors:
self.send_message('error', message)
return self.render_to_response(self.get_context_data(**kw), status=400)
def check_is_long(self):
return self.long_term
def check_is_valid(self, **kw):
return all(map(lambda x: x.is_valid(), self.get_all_forms(**kw)))
def process(self, **kw):
if self.check_is_valid(**kw):
map(
lambda x: x.save(commit=False),
chain(
hasattr(kw['form'], 'save') and [kw['form']] or [],
*pluck(kw['relations'], 'form')))
return self.process_valid(**kw)
return self.process_invalid(**kw)
def post(self, request, *args, **kw):
return self.process(**self.get_members())
class MatrixView(FormView):
empty_form = True
def get_formsets(self):
return list(map(
lambda matrix: self.inline_formset(
matrix[0],
constructor=modelformset_factory,
matrix=matrix[1]),
self.models))
def save_formsets(self, form, *formsets):
for formset in formsets:
formset.save()
class ObjectView(FormView):
def get_instance(self):
pk = self.url_match.kwargs.get('pk')
try:
return pk and self.model.objects.get(pk=pk)
except ObjectDoesNotExist:
raise Http404
def get_form_props(self):
return dict(
super().get_form_props(),
panels=True,
title=self.instance and str(self.instance) or self.model._meta.verbose_name)
def get_success_redirect(self, **kw):
if 'next' in self.request.GET:
return unquote(self.request.GET['next'])
return reverse(self.get_model_name('plural'))
def get_success_message(self, **kw):
return inflect(_(
'The %(name)s "%(obj)s" was changed successfully.' \
if self.url_match.kwargs else \
'The %(name)s "%(obj)s" was added successfully.') % {
'name': self.get_model_name(fn=just),
'obj': str(kw['form'].instance)})
| {
"repo_name": "doctorzeb8/django-era",
"path": "era/views/forms.py",
"copies": "1",
"size": "12534",
"license": "mit",
"hash": -8050496049777788000,
"line_mean": 35.5422740525,
"line_max": 94,
"alpha_frac": 0.5829743099,
"autogenerated": false,
"ratio": 3.9377945334590008,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5020768843359,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import chain
import networkx as nx
from lib.nodes import build_mac_table
def import_vis_data(graph, nodes, vis_data):
    """Populate *graph* with nodes and TQ-weighted edges from vis output."""
    macs = build_mac_table(nodes)
    # Primary entries map a MAC to itself; secondary entries map each
    # extra interface MAC to its owning primary ('of').
    nodes_a = map(lambda d: 2 * [d['primary']],
                  filter(lambda d: 'primary' in d, vis_data))
    nodes_b = map(lambda d: [d['secondary'], d['of']],
                  filter(lambda d: 'secondary' in d, vis_data))
    graph.add_nodes_from(map(lambda a, b:
                             (a, dict(primary=b, node_id=macs.get(b))),
                             *zip(*chain(nodes_a, nodes_b))))
    # Edge weight is the link quality parsed from the entry's label.
    edges = filter(lambda d: 'neighbor' in d, vis_data)
    graph.add_edges_from(map(lambda d: (d['router'], d['neighbor'],
                                        dict(tq=float(d['label']))), edges))
def import_alfred_vis_data(graph, nodes, vis_data):
    """Import alfred-based vis data into *graph*.

    Nodes are keyed by their primary MAC (looked up through *nodes*); one
    edge is added per vlan neighbour entry, with tq scaled as
    255 / reported value.

    Fix: removed the unused local ``node_ids`` the original computed and
    never consumed.
    """
    vis_data = [entry for entry in vis_data if 'vlans' in entry]

    def vd_to_mac(vd):
        # Primary MAC of the node that reported this vis entry.
        return nodes[vd['node_id']]['nodeinfo']['network']['mac']

    def add_two_to_mac(mac):
        # Bump the last MAC byte by two (hex). NOTE(review): no carry
        # handling - a last byte >= 0xfe yields a 3-digit tail.
        return "%s%x" % (mac[:-2], int(mac[-2:], 16) + 2)

    graph.add_nodes_from((vd_to_mac(vd),
                          dict(primary=vd_to_mac(vd), node_id=vd['node_id']))
                         for vd in vis_data)
    edges = [(vd_to_mac(data), add_two_to_mac(neighbor),
              dict(tq=255 / float(link['tq'])))
             for data in vis_data if data['vlans']
             for neighbor, link in data['vlans'].items()]
    graph.add_edges_from(edges)
def mark_vpn(graph, vpn_macs):
    """Set vpn=True on every edge of components containing a VPN MAC."""
    vpn_components = [component
                      for component in map(frozenset,
                                           nx.weakly_connected_components(graph))
                      if vpn_macs.intersection(component)]
    affected = set()
    for component in vpn_components:
        affected |= component
    for node in affected:
        for _, attrs in graph[node].items():
            attrs['vpn'] = True
def to_multigraph(graph):
    """Collapse nodes onto their primary MAC, keeping parallel edges."""
    def f(a):
        # networkx 1.x attribute-dict access; falls back to the raw name
        # when the node carries no data.
        node = graph.node[a]
        return node['primary'] if node else a

    def map_node(node, data):
        return (data['primary'],
                dict(node_id=data['node_id'])) if data else (node, dict())
    digraph = nx.MultiDiGraph()
    # zip(*...) transposes the (node, data) pairs into parallel sequences;
    # NOTE(review): raises TypeError on a graph with no nodes/edges.
    digraph.add_nodes_from(map(map_node, *zip(*graph.nodes_iter(data=True))))
    digraph.add_edges_from(map(lambda a, b, data: (f(a), f(b), data),
                               *zip(*graph.edges_iter(data=True))))
    return digraph
def merge_nodes(graph):
    """Collapse onto primary MACs and merge parallel edges into single ones."""
    def combine(parallel):
        # Worst (minimum) tq wins; vpn only when every parallel edge is vpn.
        return dict(tq=min(d['tq'] for d in parallel),
                    vpn=all(d.get('vpn', False) for d in parallel))

    multigraph = to_multigraph(graph)
    digraph = nx.DiGraph()
    digraph.add_nodes_from(multigraph.nodes_iter(data=True))
    digraph.add_edges_from(
        (src, dst, combine(multigraph[src][dst].values()))
        for src in multigraph for dst in multigraph[src])
    return digraph
def to_undirected(graph):
    """Fold a directed graph into an undirected one, merging both directions."""
    multigraph = nx.MultiGraph()
    multigraph.add_nodes_from(graph.nodes_iter(data=True))
    multigraph.add_edges_from(graph.edges_iter(data=True))

    def combine(parallel):
        # Best (maximum) tq; bidirect records that both directions existed.
        return dict(tq=max(d['tq'] for d in parallel),
                    vpn=all(d.get('vpn', False) for d in parallel),
                    bidirect=len(parallel) == 2)

    undirected = nx.Graph()
    undirected.add_nodes_from(multigraph.nodes_iter(data=True))
    undirected.add_edges_from(
        (src, dst, combine(multigraph[src][dst].values()))
        for src in multigraph for dst in multigraph[src])
    return undirected
| {
"repo_name": "FreifunkBremen/ffmap-backend",
"path": "lib/graph.py",
"copies": "1",
"size": "3515",
"license": "bsd-3-clause",
"hash": -1128570185275322600,
"line_mean": 35.2371134021,
"line_max": 207,
"alpha_frac": 0.5974395448,
"autogenerated": false,
"ratio": 3.160971223021583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4258410767821583,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import chain, product
from operator import methodcaller
import numpy
import FIAT
from FIAT.polynomial_set import mis
from FIAT.reference_element import TensorProductCell
import gem
from gem.utils import cached_property
from finat.finiteelementbase import FiniteElementBase
from finat.point_set import PointSingleton, PointSet, TensorPointSet
class TensorProductElement(FiniteElementBase):
    """Tensor product of FInAT elements on a TensorProductCell.

    At most one factor may have a non-scalar value shape; basis indices,
    entity dofs and evaluations are outer products of the factors'.
    """

    def __init__(self, factors):
        super(TensorProductElement, self).__init__()
        self.factors = tuple(factors)

        # Value shape comes from the single non-scalar factor, if any.
        shapes = [fe.value_shape for fe in self.factors if fe.value_shape != ()]
        if len(shapes) == 0:
            self._value_shape = ()
        elif len(shapes) == 1:
            self._value_shape = shapes[0]
        else:
            raise NotImplementedError("Only one nonscalar factor permitted!")

    @cached_property
    def cell(self):
        # Product cell assembled from the factor cells.
        return TensorProductCell(*[fe.cell for fe in self.factors])

    @property
    def degree(self):
        # Per-factor degrees; there is no single scalar degree here.
        return tuple(fe.degree for fe in self.factors)

    @cached_property
    def formdegree(self):
        # Form degrees add under tensor product; undefined if any factor's is.
        if any(fe.formdegree is None for fe in self.factors):
            return None
        else:
            return sum(fe.formdegree for fe in self.factors)

    @cached_property
    def _entity_dofs(self):
        return productise(self.factors, methodcaller("entity_dofs"))

    @cached_property
    def _entity_support_dofs(self):
        return productise(self.factors, methodcaller("entity_support_dofs"))

    def entity_dofs(self):
        return self._entity_dofs

    def space_dimension(self):
        # Product of the factor dimensions.
        return numpy.prod([fe.space_dimension() for fe in self.factors])

    @property
    def index_shape(self):
        return tuple(chain(*[fe.index_shape for fe in self.factors]))

    @property
    def value_shape(self):
        return self._value_shape

    @cached_property
    def fiat_equivalent(self):
        # FIAT TensorProductElement support only 2 factors
        A, B = self.factors
        return FIAT.TensorProductElement(A.fiat_equivalent, B.fiat_equivalent)

    def _factor_entity(self, entity):
        """Split a product-cell entity into one (dim, id) pair per factor."""
        # Default entity
        if entity is None:
            entity = (self.cell.get_dimension(), 0)
        entity_dim, entity_id = entity

        # Factor entity
        assert isinstance(entity_dim, tuple)
        assert len(entity_dim) == len(self.factors)

        # The flat entity id is unravelled over the per-factor entity counts.
        shape = tuple(len(c.get_topology()[d])
                      for c, d in zip(self.cell.cells, entity_dim))
        entities = list(zip(entity_dim, numpy.unravel_index(entity_id, shape)))
        return entities

    def _merge_evaluations(self, factor_results):
        """Combine per-factor tabulations into a product tabulation, one
        entry per multiindex of derivative orders."""
        # Spatial dimension
        dimension = self.cell.get_spatial_dimension()
        # Derivative order
        order = max(map(sum, chain(*factor_results)))
        # A list of slices that are used to select dimensions
        # corresponding to each subelement.
        dim_slices = TensorProductCell._split_slices([c.get_spatial_dimension()
                                                      for c in self.cell.cells])
        # A list of multiindices, one multiindex per subelement, each
        # multiindex describing the shape of basis functions of the
        # subelement.
        alphas = [fe.get_indices() for fe in self.factors]
        # A list of multiindices, one multiindex per subelement, each
        # multiindex describing the value shape of the subelement.
        zetas = [fe.get_value_indices() for fe in self.factors]
        result = {}
        for derivative in range(order + 1):
            for Delta in mis(dimension, derivative):
                # Split the multiindex for the subelements
                deltas = [Delta[s] for s in dim_slices]
                # GEM scalars (can have free indices) for collecting
                # the contributions from the subelements.
                scalars = []
                for fr, delta, alpha, zeta in zip(factor_results, deltas, alphas, zetas):
                    # Turn basis shape to free indices, select the
                    # right derivative entry, and collect the result.
                    scalars.append(gem.Indexed(fr[delta], alpha + zeta))
                # Multiply the values from the subelements and wrap up
                # non-point indices into shape.
                result[Delta] = gem.ComponentTensor(
                    reduce(gem.Product, scalars),
                    tuple(chain(*(alphas + zetas)))
                )
        return result

    def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None):
        """Tabulate by factoring the entity and point set per subelement."""
        entities = self._factor_entity(entity)
        entity_dim, _ = zip(*entities)
        ps_factors = factor_point_set(self.cell, entity_dim, ps)
        factor_results = [fe.basis_evaluation(order, ps_, e)
                          for fe, ps_, e in zip(self.factors, ps_factors, entities)]
        return self._merge_evaluations(factor_results)

    def point_evaluation(self, order, point, entity=None):
        """Evaluate at a single GEM point expression, split per subelement."""
        entities = self._factor_entity(entity)
        entity_dim, _ = zip(*entities)

        # Split point expression
        assert len(self.cell.cells) == len(entity_dim)
        point_dims = [cell.construct_subelement(dim).get_spatial_dimension()
                      for cell, dim in zip(self.cell.cells, entity_dim)]
        assert isinstance(point, gem.Node) and point.shape == (sum(point_dims),)
        slices = TensorProductCell._split_slices(point_dims)
        point_factors = []
        for s in slices:
            point_factors.append(gem.ListTensor(
                [gem.Indexed(point, (i,))
                 for i in range(s.start, s.stop)]
            ))

        # Subelement results
        factor_results = [fe.point_evaluation(order, p_, e)
                          for fe, p_, e in zip(self.factors, point_factors, entities)]
        return self._merge_evaluations(factor_results)

    @cached_property
    def mapping(self):
        # The product mapping is well-defined only when at most one factor
        # is non-affine; otherwise None signals "no single mapping".
        mappings = [fe.mapping for fe in self.factors if fe.mapping != "affine"]
        if len(mappings) == 0:
            return "affine"
        elif len(mappings) == 1:
            return mappings[0]
        else:
            return None
def productise(factors, method):
    '''Tensor product the dict mapping topological entities to dofs across factors.

    :arg factors: element factors.
    :arg method: instance method to call on each factor to get dofs.'''
    shape = tuple(fe.space_dimension() for fe in factors)
    dofs = {}
    for dim in product(*[fe.cell.get_topology().keys()
                         for fe in factors]):
        # Per-factor {entity_id: dof list} for this combined dimension.
        per_factor = [method(fe)[d] for fe, d in zip(factors, dim)]
        entity_dofs = []
        for entity_key in product(*[sorted(topd) for topd in per_factor]):
            dof_tuples = list(product(*[topd[ei]
                                        for topd, ei in zip(per_factor, entity_key)]))
            if dof_tuples:
                # Flatten per-factor dof indices into the product numbering.
                flat = list(numpy.ravel_multi_index(numpy.transpose(dof_tuples),
                                                    shape))
                entity_dofs.append((entity_key, flat))
            else:
                entity_dofs.append((entity_key, []))
        # flatten entity numbers
        dofs[dim] = dict(enumerate(v for _, v in sorted(entity_dofs)))
    return dofs
def factor_point_set(product_cell, product_dim, point_set):
    """Factors a point set for the product element into a point sets for
    each subelement.

    :arg product_cell: a TensorProductCell
    :arg product_dim: entity dimension for the product cell
    :arg point_set: point set for the product element
    """
    assert len(product_cell.cells) == len(product_dim)
    point_dims = [cell.construct_subelement(dim).get_spatial_dimension()
                  for cell, dim in zip(product_cell.cells, product_dim)]

    if isinstance(point_set, TensorPointSet):
        # Already factored: hand back the factors, checking dimensions.
        assert len(point_set.factors) == len(point_dims)
        assert all(ps.dimension == dim
                   for ps, dim in zip(point_set.factors, point_dims))
        return point_set.factors

    # Split the point coordinates along the point dimensions
    # required by the subelements.
    assert point_set.dimension == sum(point_dims)
    slices = TensorProductCell._split_slices(point_dims)

    if isinstance(point_set, PointSingleton):
        return [PointSingleton(point_set.point[sl]) for sl in slices]
    if isinstance(point_set, PointSet):
        def subset(sl):
            # Use the same point index for the new point sets.
            sub = PointSet(point_set.points[:, sl])
            sub.indices = point_set.indices
            return sub
        return [subset(sl) for sl in slices]

    raise NotImplementedError("How to tabulate TensorProductElement on %s?" % (type(point_set).__name__,))
| {
"repo_name": "FInAT/FInAT",
"path": "finat/tensor_product.py",
"copies": "1",
"size": "8822",
"license": "mit",
"hash": -817051972970562700,
"line_mean": 36.3813559322,
"line_max": 106,
"alpha_frac": 0.6115393335,
"autogenerated": false,
"ratio": 4.120504437178888,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5232043770678888,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import combinations
from pybbn.graph.edge import Edge, EdgeType
from pybbn.graph.graph import Ug
from pybbn.graph.node import Clique
class Triangulator(object):
    """
    Triangulator. Triangulates an undirected moralized graph and produces cliques in the process.
    """

    @staticmethod
    def triangulate(m):
        """
        Triangulates the specified moralized graph.

        :param m: Moralized undirected graph.
        :return: Array of cliques.
        """
        cliques = []
        working = Triangulator.duplicate(m)
        while len(working.get_nodes()) > 0:
            best = Triangulator.select_node(working)
            candidate = Clique(best.get_bbn_nodes())
            if not Triangulator.is_subset(cliques, candidate):
                cliques.append(candidate)
            working.remove_node(best.node.id)
            # Fill-in edges go into both the original and the working copy.
            for edge in best.edges:
                m.add_edge(edge)
                working.add_edge(edge)
        return cliques

    @staticmethod
    def duplicate(g):
        """
        Duplicates a undirected graph (shallow: node/edge objects are shared).

        :param g: Undirected graph.
        :return: Undirected graph.
        """
        copy = Ug()
        for node in g.get_nodes():
            copy.add_node(node)
        for edge in g.get_edges():
            copy.add_edge(edge)
        return copy

    @staticmethod
    def generate_cliques(m):
        """
        Generates a NodeClique for every node currently in the graph.

        :param m: Graph.
        :return: Iterable of NodeCliques.
        """
        for node in m.get_nodes():
            neighbors = [m.get_node(i) for i in m.get_neighbors(node.id)]
            yield NodeClique(node,
                            neighbors,
                            Triangulator.get_weight(node, m),
                            Triangulator.get_edges_to_add(node, m))

    @staticmethod
    def select_node(m):
        """
        Selects a clique from the specified graph. Cliques are sorted by number of edges, weight, and id (asc).

        :param m: Graph.
        :return: Clique.
        """
        ordered = sorted(Triangulator.generate_cliques(m),
                         key=lambda c: (len(c.edges), c.weight, c.node.id))
        return ordered[0]

    @staticmethod
    def get_weight(n, m):
        """
        Gets the weight of a BBN node. The weight of a node is the product of the its weight with all its
        neighbors' weight.

        :param n: BBN node.
        :param m: Graph.
        :return: Weight.
        """
        weight = n.get_weight()
        if len(m.neighbors[n.id]) == 0:
            return weight
        for neighbor_id in m.get_neighbors(n.id):
            weight = weight * m.get_node(neighbor_id).get_weight()
        return weight

    @staticmethod
    def get_edges_to_add(n, m):
        """
        Gets the fill-in edges: edges between neighbors of n that are not
        yet connected.

        :param n: BBN node.
        :param m: Graph.
        :return: Array of edges.
        """
        neighbors = [m.get_node(i) for i in m.get_neighbors(n.id)]
        missing = []
        for i, j in combinations(range(len(neighbors)), 2):
            if not m.edge_exists(neighbors[i].id, neighbors[j].id):
                missing.append(Edge(neighbors[i], neighbors[j], EdgeType.UNDIRECTED))
        return missing

    @staticmethod
    def is_subset(cliques, clique):
        """
        Checks if the specified clique is a subset of the specified list of cliques.

        :param cliques: List of cliques.
        :param clique: Clique.
        :return: A boolean indicating if the clique is a subset.
        """
        return any(existing.is_superset(clique) for existing in cliques)
class NodeClique:
    """
    Node clique: a node plus its neighborhood, with precomputed weight and
    fill-in edges.
    """

    def __init__(self, node, neighbors, weight, edges):
        """
        Ctor.

        :param node: BBN node.
        :param neighbors: BBN nodes (neighbors).
        :param weight: Weight.
        :param edges: Edges.
        """
        self.node = node
        self.neighbors = neighbors
        self.weight = weight
        self.edges = edges

    def get_bbn_nodes(self):
        """
        Gets all the BBN nodes in this node clique: the neighbors followed
        by the node itself.

        :return: Array of BBN nodes.
        """
        return list(self.neighbors) + [self.node]

    def __str__(self):
        return f'{self.node.id}|weight={self.weight}|edges={len(self.edges)}|neighbors={len(self.neighbors)}'
| {
"repo_name": "vangj/py-bbn",
"path": "pybbn/pptc/triangulator.py",
"copies": "1",
"size": "4535",
"license": "apache-2.0",
"hash": -2164917198056017200,
"line_mean": 27.7025316456,
"line_max": 111,
"alpha_frac": 0.5691289967,
"autogenerated": false,
"ratio": 3.699021207177814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4768150203877814,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import compress
from operator import and_
from typing import List, Tuple, Dict, Optional
from hwt.doc_markers import internal
from hwt.hdl.operatorUtils import replace_input_in_expr
from hwt.hdl.sensitivityCtx import SensitivityCtx
from hwt.hdl.statements.statement import HdlStatement, HwtSyntaxError
from hwt.hdl.statements.utils.comparison import isSameStatementList, statementsAreSame
from hwt.hdl.statements.utils.ioDiscovery import HdlStatement_discover_enclosure_for_statements
from hwt.hdl.statements.utils.reduction import HdlStatement_merge_statement_lists, \
HdlStatement_try_reduce_list, is_mergable_statement_list
from hwt.hdl.statements.utils.signalCut import HdlStatement_cut_off_drivers_of_list
from hwt.hdl.types.enum import HEnum
from hwt.hdl.value import HValue
from hwt.hdl.valueUtils import isSameHVal
from hwt.serializer.utils import RtlSignal_sort_key
from hwt.synthesizer.rtlLevel.fill_stm_list_with_enclosure import fill_stm_list_with_enclosure
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
class SwitchContainer(HdlStatement):
    """
    Structural container for switch statement for hdl rendering

    :ivar ~.switchOn: select signal of switch
    :ivar ~.cases: list of tuples (value, statements)
    :ivar ~.default: list of statements (for branch "default")
    :ivar ~._case_value_index: dictionary {value:index} for every case in cases
    :ivar ~._case_enclosed_for: list of sets of enclosed signal for each case branch
    :ivar ~._default_enclosed_for: set of enclosed signals for branch default
    """

    def __init__(self, switchOn: RtlSignal,
                 cases: List[Tuple[HValue, List[HdlStatement]]],
                 default: List[HdlStatement]=None,
                 parentStm: HdlStatement=None,
                 event_dependent_from_branch: Optional[int]=None):
        super(SwitchContainer, self).__init__(
            parentStm=parentStm,
            event_dependent_from_branch=event_dependent_from_branch)
        self.switchOn = switchOn
        self.cases = cases
        self.default = default
        self._case_value_index = {}
        for i, (v, _) in enumerate(cases):
            # case values must be unique
            assert v not in self._case_value_index, v
            self._case_value_index[v] = i
        self._case_enclosed_for = None
        self._default_enclosed_for = None

    @internal
    def _cut_off_drivers_of(self, sig: RtlSignalBase):
        """
        Doc on parent class :meth:`HdlStatement._cut_off_drivers_of`
        """
        if self._sensitivity is not None or self._enclosed_for is not None:
            raise NotImplementedError(
                "Sensitivity and enclosure has to be cleaned first")
        if len(self._outputs) == 1 and sig in self._outputs:
            # this statement has only this output, eject this statement from its parent
            self.parentStm = None  # because new parent will be assigned immediately after cutting off
            return self

        sig.drivers.discard(self)

        # try to cut off all statements which are drivers of specified signal
        # in all branches
        child_keep_mask = []

        all_cut_off = True
        new_default = None
        if self.default:
            new_default = []
            child_keep_mask.clear()
            case_eliminated = HdlStatement_cut_off_drivers_of_list(
                sig, self.default, child_keep_mask, new_default)
            all_cut_off &= case_eliminated
            if case_eliminated:
                self.rank -= 1
                self.default = None
            else:
                self.default = list(compress(self.default, child_keep_mask))

        new_cases = []
        case_keepmask = []
        for val, stms in self.cases:
            new_case = []
            child_keep_mask.clear()
            case_eliminated = HdlStatement_cut_off_drivers_of_list(
                sig, stms, child_keep_mask, new_case)
            if case_eliminated:
                self.rank -= 1
            all_cut_off &= case_eliminated
            case_keepmask.append(not case_eliminated)
            # drop the cut-off children from this branch in place
            _stms = list(compress(stms, child_keep_mask))
            stms.clear()
            stms.extend(_stms)
            if new_case or new_default:
                # if there is a default we need to add case even in empty
                # to prevent falling to default
                new_cases.append((val, new_case))
        self.cases = list(compress(self.cases, case_keepmask))
        assert not all_cut_off, "everything was cut of but this should be already known at start"

        if new_cases or new_default:
            # parts were cut off
            # generate new statement for them
            sel_sig = self.switchOn
            # NOTE(review): assumes self.__class__ is constructible from the
            # select signal alone (SwitchContainer.__init__ itself also
            # requires `cases`) - confirm against the dsl subclass used.
            n = self.__class__(sel_sig)
            n.add_cases(new_cases)
            if new_default:
                n.Default(*new_default)
            if self.parentStm is None:
                ctx = n._get_rtl_context()
                ctx.statements.add(n)
            self._cut_off_drivers_of_regenerate_io(sig, n)

            return n

    @internal
    def _clean_signal_meta(self):
        # Drop cached per-branch enclosure info along with the base metadata.
        self._case_enclosed_for = None
        self._default_enclosed_for = None
        HdlStatement._clean_signal_meta(self)

    @internal
    def _collect_io(self):
        # The select signal and any signal-valued case labels are inputs.
        if isinstance(self.switchOn, RtlSignalBase):
            self._inputs.append(self.switchOn)
        for c, _ in self.cases:
            if isinstance(c, RtlSignalBase):
                self._inputs.append(c)
        super(SwitchContainer, self)._collect_io()

    @internal
    def _collect_inputs(self) -> None:
        # Same input set as _collect_io, without touching outputs.
        if isinstance(self.switchOn, RtlSignalBase):
            self._inputs.append(self.switchOn)
        for c, _ in self.cases:
            if isinstance(c, RtlSignalBase):
                self._inputs.append(c)
        super(SwitchContainer, self)._collect_inputs()

    @internal
    def _discover_enclosure(self) -> None:
        # An output is "enclosed" (always driven) iff it is driven in every
        # case branch and the branches cover the select signal's domain.
        assert self._enclosed_for is None
        enclosure = self._enclosed_for = set()
        case_enclosures = self._case_enclosed_for = []
        outputs = self._outputs
        for _, stms in self.cases:
            c_e = HdlStatement_discover_enclosure_for_statements(stms, outputs)
            case_enclosures.append(c_e)
        self._default_enclosed_for = HdlStatement_discover_enclosure_for_statements(
            self.default, outputs)

        t = self.switchOn._dtype
        if not self.default and len(self.cases) < t.domain_size():
            # cases does not cover all branches
            return
        for s in outputs:
            enclosed = True
            for e in case_enclosures:
                if s not in e:
                    enclosed = False
                    break
            if enclosed and (not self.default or s in self._default_enclosed_for):
                enclosure.add(s)

    @internal
    def _discover_sensitivity(self, seen) -> None:
        """
        Doc on parent class :meth:`HdlStatement._discover_sensitivity`
        """
        assert self._sensitivity is None, self
        ctx = self._sensitivity = SensitivityCtx()

        casual_sensitivity = set()
        self.switchOn._walk_sensitivity(casual_sensitivity, seen, ctx)
        if ctx.contains_ev_dependency:
            # a switch cannot be clocked by its select expression
            raise HwtSyntaxError(
                "Can not switch on event operator result", self.switchOn)
        ctx.extend(casual_sensitivity)

        for stm in self._iter_stms():
            stm._discover_sensitivity(seen)
            ctx.extend(stm._sensitivity)

    @internal
    def _fill_enclosure(self, enclosure: Dict[RtlSignalBase, HdlStatement]) -> None:
        """
        :attention: enclosure has to be discovered first use _discover_enclosure() method
        """
        # Only outputs of this statement can receive enclosure assignments.
        select = []
        outputs = self._outputs
        for e in sorted(enclosure.keys(), key=RtlSignal_sort_key):
            if e in outputs:
                select.append(e)

        for (_, stms), e in zip(self.cases, self._case_enclosed_for):
            fill_stm_list_with_enclosure(self, e, stms, select, enclosure)
            e.update(select)

        # a default branch is required when the cases do not cover the domain
        t = self.switchOn._dtype
        default_required = len(self.cases) < t.domain_size()
        if self.default is not None or default_required:
            self.default = fill_stm_list_with_enclosure(
                self, self._default_enclosed_for, self.default, select, enclosure)
            self._default_enclosed_for.update(select)

        self._enclosed_for.update(select)

    def _iter_stms(self):
        """
        Doc on parent class :meth:`HdlStatement._iter_stms`
        """
        for _, stms in self.cases:
            yield from stms
        if self.default is not None:
            yield from self.default

    @internal
    def _is_mergable(self, other) -> bool:
        """
        :return: True if other can be merged into this statement else False
        """
        if not isinstance(other, SwitchContainer):
            return False

        # must switch on the same signal with pairwise mergable branches
        if not (self.switchOn is other.switchOn and
                len(self.cases) == len(other.cases) and
                is_mergable_statement_list(self.default, other.default)):
            return False

        for (vA, caseA), (vB, caseB) in zip(self.cases, other.cases):
            if vA != vB or not is_mergable_statement_list(caseA, caseB):
                return False

        return True

    @internal
    def _merge_with_other_stm(self, other: "IfContainer") -> None:
        """
        Merge other statement to this statement
        """
        merge = HdlStatement_merge_statement_lists
        newCases = []
        for (c, caseA), (_, caseB) in zip(self.cases, other.cases):
            newCases.append((c, merge(caseA, caseB)))
        self.cases = newCases

        if self.default is not None:
            self.default = merge(self.default, other.default)
        self._on_merge(other)

    @internal
    def _try_reduce(self) -> Tuple[List["HdlStatement"], bool]:
        """
        Doc on parent class :meth:`HdlStatement._try_reduce`
        """
        io_change = False
        # try reduce the content of the case branches
        new_cases = []
        for val, statements in self.cases:
            _statements, rank_decrease, _io_change = HdlStatement_try_reduce_list(
                statements)
            io_change |= _io_change
            self.rank -= rank_decrease
            new_cases.append((val, _statements))
        self.cases = new_cases

        # try reduce content of the default branch
        if self.default is not None:
            self.default, rank_decrease, _io_change = HdlStatement_try_reduce_list(
                self.default)
            self.rank -= rank_decrease
            io_change |= _io_change

        # try reduce self: when all branches do the same thing the switch
        # collapses to the statements of a single branch
        reduce_self = not self._condHasEffect()
        if reduce_self:
            if self.cases:
                res = self.cases[0][1]
            elif self.default is not None:
                res = self.default
            else:
                res = []
        else:
            res = [self, ]

        self._on_reduce(reduce_self, io_change, res)

        if not self.default:
            t = self.switchOn._dtype
            if isinstance(t, HEnum):
                dom_size = t.domain_size()
                val_cnt = len(t._allValues)
                if len(self.cases) == val_cnt and val_cnt < dom_size:
                    # bit representation is not fully matching enum description
                    # need to set last case as default to prevent latches
                    _, stms = self.cases.pop()
                    self.default = stms

        return res, io_change

    @internal
    def _condHasEffect(self) -> bool:
        """
        :return: True if statements in branches has different effect
        """
        if not self.cases:
            return False
        # [TODO]
        type_domain_covered = bool(self.default) or len(
            self.cases) == self.switchOn._dtype.domain_size()
        stmCnt = len(self.cases[0][1])
        # only when every branch has the same statement count can the
        # branches possibly be identical
        if type_domain_covered and reduce(
                and_,
                [len(stm) == stmCnt
                 for _, stm in self.cases],
                True) and (self.default is None
                           or len(self.default) == stmCnt):
            stms = list(self._iter_stms())
            if statementsAreSame(stms):
                return False
            else:
                return True
        return True

    @internal
    def _replace_input(self, toReplace: RtlSignalBase,
                       replacement: RtlSignalBase) -> None:
        isTopStatement = self.parentStm is None
        self.switchOn = replace_input_in_expr(self, self.switchOn, toReplace,
                                              replacement, isTopStatement)

        for (_, stms) in self.cases:
            for stm in stms:
                stm._replace_input(toReplace, replacement)
        if self.default is not None:
            for stm in self.default:
                stm._replace_input(toReplace, replacement)

        self._replace_input_update_sensitivity_and_enclosure(toReplace, replacement)

    @internal
    def _replace_child_statement(self, stm: HdlStatement,
                                 replacement:List[HdlStatement],
                                 update_io:bool) -> None:
        # Find stm in exactly one branch list and splice the replacement in.
        if update_io:
            raise NotImplementedError()

        for branch_list in (*(case_stms for _, case_stms in self.cases), self.default):
            if branch_list is None:
                continue
            try:
                i = branch_list.index(stm)
            except ValueError:
                # not in list
                continue
            self.rank -= stm.rank
            branch_list[i:i + 1] = replacement
            for rstm in replacement:
                rstm._set_parent_stm(self)
            # reset IO because it was shared with this statement
            stm._destroy()
            return

        raise ValueError("Statement", stm, "not found in ", self)

    def isSame(self, other: HdlStatement) -> bool:
        """
        Doc on parent class :meth:`HdlStatement.isSame`
        """
        if self is other:
            return True
        if self.rank != other.rank:
            return False

        if isinstance(other, SwitchContainer) \
                and isSameHVal(self.switchOn, other.switchOn)\
                and len(self.cases) == len(other.cases)\
                and isSameStatementList(self.default, other.default):
            for (ac, astm), (bc, bstm) in zip(self.cases, other.cases):
                if not isSameHVal(ac, bc)\
                        or not isSameStatementList(astm, bstm):
                    return False
            return True
        return False
| {
"repo_name": "Nic30/HWToolkit",
"path": "hwt/hdl/statements/switchContainer.py",
"copies": "1",
"size": "14839",
"license": "mit",
"hash": 7286600100256340000,
"line_mean": 34.8429951691,
"line_max": 100,
"alpha_frac": 0.5796886583,
"autogenerated": false,
"ratio": 4.027958740499457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5107647398799458,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import count
import GF2
def add(*vectors):
    """Element-wise sum of the given vectors."""
    summed = []
    for components in zip(*vectors):
        total = components[0]
        for component in components[1:]:
            total = total + component
        summed.append(total)
    return summed
def minus(*vectors):
    """Element-wise left-fold subtraction: first vector minus the rest."""
    result = []
    for components in zip(*vectors):
        remainder = components[0]
        for component in components[1:]:
            remainder = remainder - component
        result.append(remainder)
    return result
def scalar_mult(constant, vector):
    """Multiply every component of *vector* by *constant*."""
    return [constant * component for component in vector]
def inverse_factorial(num):
    """Return n such that n! == num; raise ValueError otherwise."""
    divisor = 1
    while True:
        num = num / divisor
        if num == 1:
            return divisor
        if num < 1:
            raise ValueError("number was not an even factorial.")
        divisor += 1
def inverse_natural_sum(num, start):
    """Subtract start, start-1, ... from *num* and return the term at which
    the running difference hits exactly zero.

    :param num: the natural sum to invert.
    :param start: the largest term of the sum.
    :raises ValueError: when *num* is not expressible as
        start + (start-1) + ... + i for some i >= 1.

    Fix: the original silently returned None when *num* exceeded the full
    sum start + ... + 1; that case now raises like the overshoot case.
    """
    for i in range(start, 0, -1):
        num = num - i
        if num == 0: return i
        if num < 0: raise ValueError("number is not a natural sum.")
    # Loop exhausted with a positive remainder: num was too large.
    raise ValueError("number is not a natural sum.")
def permutations_unordered_no_repeat(collection):
    """Build all unordered pairs of distinct-index elements of *collection*.

    NOTE(review): implemented as a reduce whose accumulator switches type -
    it is the first element on the first step and a list of pairs afterwards;
    inverse_natural_sum recovers, from the pair count so far, where the next
    element's partners start. Fragile: relies on `acc == collection[0]` only
    being true on the first reduction step.
    """
    def reducer(acc, x):
        if acc == collection[0]:
            # First step: emit pairs for both the first and second element.
            return [ (acc, y) for y in collection[1:] ] + [ (x, y) for y in collection[2:] ]
        else:
            start_index = len(collection) - 1 - inverse_natural_sum(len(acc), len(collection) - 1) + 2
            return acc + [ (x, y) for y in collection[start_index:] ]
    return reduce(reducer, collection)
def flatten2(collection):
    """Flatten one level of nesting."""
    flat = []
    for inner in collection:
        flat.extend(inner)
    return flat
def product(nums):
    """Left-fold product of *nums* (raises TypeError on an empty iterable)."""
    return reduce(lambda total, factor: total * factor, nums)
def dot_prod(*vectors):
    """Generalized dot product: sum of component-wise products."""
    return sum(product(components) for components in zip(*vectors))
def v_and(*vectors):
    """Component-wise chained `and`; each result is the last truthy operand
    or the first falsy one (not coerced to bool), mirroring Python's `and`."""
    result = []
    for components in zip(*vectors):
        acc = components[0]
        for component in components[1:]:
            acc = acc and component
        result.append(acc)
    return result
# This doesn't actually solve an arbitrary system of GF2 equations, but the law of diminishing returns encourages me to move on.
def GF2_solve_dot_prod_sys(goal, *vectors):
    """Find a GF2 vector whose dot product with each input vector is *goal*.

    Returns the all-zero vector when goal != GF2.one; otherwise puts a
    single GF2.one at the first position where v_and of all vectors yields
    GF2.one, or returns None when no such position exists.
    """
    result_list = list( map(lambda x: GF2.zero, vectors[0]) )
    if goal == GF2.one:
        and_vector = v_and(*vectors)
        if GF2.one in and_vector: result_list[and_vector.index(GF2.one)] = GF2.one
        else: result_list = None
    return result_list
| {
"repo_name": "josiah14/linear-algebra",
"path": "programming-the-matrix/1-week/the-vector-problems/Python/vector.py",
"copies": "1",
"size": "1918",
"license": "mit",
"hash": 6473707238706701000,
"line_mean": 34.5185185185,
"line_max": 128,
"alpha_frac": 0.6272158498,
"autogenerated": false,
"ratio": 3.2786324786324785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44058483284324784,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import groupby
import numpy as np
import itertools
def combination(d):
    """Encode a 5-card rank list *d* (ranks 0..12, deuce=0 - see
    eval_hand_true which passes card.rank - 2) as
    category * 10**7 + base-13 kicker code, so bigger numbers beat smaller.

    Final categories: 0 high card, 1 pair, 2 two pair, 3 trips, 4 straight,
    6 full house, 7 quads; -1 for five equal cards (invalid).
    """
    c = d.copy()
    # temp = number of equal pairs among the 5 cards:
    # 1 pair->1, two pair->2, trips->3, full house->4, quads->6,
    # all five equal->10 (rejected).
    temp = 0
    for j in range(5):
        for i in range(j + 1, 5):
            if c[i] == c[j]: temp += 1
    if temp == 10: return -1
    # Bubble sort the copy ascending.
    for j in range(5):
        for i in range(4):
            if c[i + 1] < c[i]:
                c_temp = c[i]
                c[i] = c[i + 1]
                c[i + 1] = c_temp
    # Base-13 place values for the kicker encoding.
    P = [1, 13, 169, 169 * 13, 169 * 169]
    k = 0
    if temp == 6 or temp == 4:
        # Quads / full house: dominant rank, then the remaining rank.
        if c[4] == c[2]:
            k = c[4] * P[1] + c[0]
        else:
            k = c[2] * P[1] + c[4]
    elif temp == 3:
        # Trips: triple rank, then the two kickers high-to-low.
        if c[4] == c[2]: k = c[4] * P[2] + c[1] * P[1] + c[0]
        if c[3] == c[1]: k = c[3] * P[2] + c[4] * P[1] + c[0]
        if c[2] == c[0]: k = c[2] * P[2] + c[4] * P[1] + c[3]
    elif temp == 2:
        # Two pair: high pair (c[3]), low pair (c[1]), then the kicker.
        if c[4] != c[3]: k = c[3] * P[2] + c[1] * P[1] + c[4]
        if c[0] != c[1]: k = c[3] * P[2] + c[1] * P[1] + c[0]
        if c[2] != c[3]: k = c[3] * P[2] + c[1] * P[1] + c[2]
    elif temp == 1:
        # One pair: pair rank, then three kickers high-to-low.
        if c[4] == c[3]: k = c[4] * P[3] + c[2] * P[2] + c[1] * P[1] + c[0]
        if c[3] == c[2]: k = c[3] * P[3] + c[4] * P[2] + c[1] * P[1] + c[0]
        if c[2] == c[1]: k = c[2] * P[3] + c[4] * P[2] + c[3] * P[1] + c[0]
        if c[1] == c[0]: k = c[1] * P[3] + c[4] * P[2] + c[3] * P[1] + c[2]
    elif temp == 0:
        # High card: all five ranks in base 13.
        k = c[4] * P[4] + c[3] * P[3] + c[2] * P[2] + c[1] * P[1] + c[0] * P[0]
    # Remap pair counts to final category codes.
    if temp == 6: temp = 7
    if temp == 4: temp = 6
    # Straights: the wheel (A-2-3-4-5 = ranks 0,1,2,3,12) gets the lowest
    # straight code k=1; otherwise 5 distinct ranks spanning exactly 4.
    if temp == 0 and c[4] == 12 and c[3] == 3 and c[0] == 0: temp = 4; k = 1
    if temp == 0 and (c[4] - c[0]) == 4: temp = 4
    return temp * 10 ** 7 + k
def combinationF5(cr, cs):
    """Rank a 5-card hand with ranks *cr* and suits *cs*.

    A flush adds 5 * 10**7 on top of combination(cr), placing flushes above
    straights (4e7) and below full houses (6e7), and pushing straight
    flushes (4e7 + 5e7) past quads (7e7).

    Fix: removed the unused local ``sf`` and the f=0/1 flag-int the
    original set and tested instead of using the boolean directly.
    """
    rank = combination(cr)
    # Flush iff every suit matches the first one.
    if all(suit == cs[0] for suit in cs):
        return 5 * 10 ** 7 + rank
    return rank
def combinationF7(cr, cs):
    """Best 5-card score over all C(7,5)=21 subsets of 7 cards.

    :param cr: 7 card ranks (0..12); must support fancy indexing with a
        list (numpy arrays in eval_hand_true).
    :param cs: 7 card suits, same indexing requirement.
    """
    comb = []
    idx_comb = list(itertools.combinations(range(7), 5))
    for i in idx_comb:
        try:
            comb.append(combinationF5(cr=cr[list(i)], cs=cs[list(i)]))
        except:
            # NOTE(review): debug leftover - a bare except that swallows any
            # failure and only prints the offending index tuple; if every
            # subset fails, max() below raises on the empty list.
            print(i)
    return max(comb)
class HandEvaluator:
HIGHCARD = 0
ONEPAIR = 1 << 8
TWOPAIR = 1 << 9
THREECARD = 1 << 10
STRAIGHT = 1 << 11
FLASH = 1 << 12
FULLHOUSE = 1 << 13
FOURCARD = 1 << 14
STRAIGHTFLASH = 1 << 15
HAND_STRENGTH_MAP = {
HIGHCARD: "HIGHCARD",
ONEPAIR: "ONEPAIR",
TWOPAIR: "TWOPAIR",
THREECARD: "THREECARD",
STRAIGHT: "STRAIGHT",
FLASH: "FLASH",
FULLHOUSE: "FULLHOUSE",
FOURCARD: "FOURCARD",
STRAIGHTFLASH: "STRAIGHTFLASH"
}
    @classmethod
    def gen_hand_rank_info(self, hole, community):
        """Return a nested dict with the hand's strength name and the
        high/low ranks of the full hand and of the hole cards.

        NOTE(review): the __mask_* helpers are defined later in this file;
        field semantics are inferred from their names.
        """
        hand = self.eval_hand(hole, community)
        row_strength = self.__mask_hand_strength(hand)
        strength = self.HAND_STRENGTH_MAP[row_strength]
        hand_high = self.__mask_hand_high_rank(hand)
        hand_low = self.__mask_hand_low_rank(hand)
        hole_high = self.__mask_hole_high_rank(hand)
        hole_low = self.__mask_hole_low_rank(hand)
        return {
            "hand" : {
                "strength" : strength,
                "high" : hand_high,
                "low" : hand_low
            },
            "hole" : {
                "high" : hole_high,
                "low" : hole_low
            }
        }
@classmethod
def eval_hand(self, hole, community):
ranks = sorted([card.rank for card in hole])
hole_flg = ranks[1] << 4 | ranks[0]
hand_flg = self.__calc_hand_info_flg(hole, community) << 8
return hand_flg | hole_flg
@classmethod
def eval_hand_true(self, hole, community):
cards = hole + community
if len(cards) < 7:
return 1
ranks = np.array([card.rank - 2 for card in cards])
suits = np.array([card.suit for card in cards])
return combinationF7(ranks, suits)
    # Return Format
    # [Bit flg of hand][rank1(4bit)][rank2(4bit)]
    # ex.)
    # HighCard hole card 3,4           =>           100 0011
    # OnePair of rank 3                =>        1 0011 0000
    # TwoPair of rank A, 4             =>       10 1110 0100
    # ThreeCard of rank 9              =>      100 1001 0000
    # Straight of rank 10              =>     1000 1010 0000
    # Flash of rank 5                  =>    10000 0101 0000
    # FullHouse of rank 3, 4           =>   100000 0011 0100
    # FourCard of rank 2               =>  1000000 0010 0000
    # straight flash of rank 7         => 10000000 0111 0000
    @classmethod
    def __calc_hand_info_flg(self, hole, community):
        # Check categories from strongest to weakest; first match wins.
        # NOTE(review): the straightflash/fourcard/fullhouse/flash/straight
        # helpers are defined later in this file, outside this excerpt.
        cards = hole + community
        if self.__is_straightflash(cards): return self.STRAIGHTFLASH | self.__eval_straightflash(cards)
        if self.__is_fourcard(cards): return self.FOURCARD | self.__eval_fourcard(cards)
        if self.__is_fullhouse(cards): return self.FULLHOUSE | self.__eval_fullhouse(cards)
        if self.__is_flash(cards): return self.FLASH | self.__eval_flash(cards)
        if self.__is_straight(cards): return self.STRAIGHT | self.__eval_straight(cards)
        if self.__is_threecard(cards): return self.THREECARD | self.__eval_threecard(cards)
        if self.__is_twopair(cards): return self.TWOPAIR | self.__eval_twopair(cards)
        if self.__is_onepair(cards): return self.ONEPAIR | (self.__eval_onepair(cards))
        return self.__eval_holecard(hole)
@classmethod
def __eval_holecard(self, hole):
ranks = sorted([card.rank for card in hole])
return ranks[1] << 4 | ranks[0]
@classmethod
def __is_onepair(self, cards):
return self.__eval_onepair(cards) != 0
@classmethod
def __eval_onepair(self, cards):
rank = 0
memo = 0 # bit memo
for card in cards:
mask = 1 << card.rank
if memo & mask != 0: rank = max(rank, card.rank)
memo |= mask
return rank << 4
@classmethod
def __is_twopair(self, cards):
return len(self.__search_twopair(cards)) == 2
@classmethod
def __eval_twopair(self, cards):
ranks = self.__search_twopair(cards)
return ranks[0] << 4 | ranks[1]
@classmethod
def __search_twopair(self, cards):
ranks = []
memo = 0
for card in cards:
mask = 1 << card.rank
if memo & mask != 0: ranks.append(card.rank)
memo |= mask
return sorted(ranks)[::-1][:2]
@classmethod
def __is_threecard(self, cards):
return self.__search_threecard(cards) != -1
@classmethod
def __eval_threecard(self, cards):
return self.__search_threecard(cards) << 4
@classmethod
def __search_threecard(self, cards):
rank = -1
bit_memo = reduce(lambda memo,card: memo + (1 << (card.rank-1)*3), cards, 0)
for r in range(2, 15):
bit_memo >>= 3
count = bit_memo & 7
if count >= 3: rank = r
return rank
@classmethod
def __is_straight(self, cards):
return self.__search_straight(cards) != -1
@classmethod
def __eval_straight(self, cards):
return self.__search_straight(cards) << 4
@classmethod
def __search_straight(self, cards):
bit_memo = reduce(lambda memo, card: memo | 1 << card.rank, cards, 0)
rank = -1
straight_check = lambda acc, i: acc & (bit_memo >> (r+i) & 1) == 1
for r in range(2, 15):
if reduce(straight_check, range(5), True): rank = r
return rank
@classmethod
def __is_flash(self, cards):
return self.__search_flash(cards) != -1
@classmethod
def __eval_flash(self, cards):
return self.__search_flash(cards) << 4
@classmethod
def __search_flash(self, cards):
best_suit_rank = -1
fetch_suit = lambda card: card.suit
fetch_rank = lambda card: card.rank
for suit, group_obj in groupby(sorted(cards, key=fetch_suit), key=fetch_suit):
g = list(group_obj)
if len(g) >= 5:
max_rank_card = max(g, key=fetch_rank)
best_suit_rank = max(best_suit_rank, max_rank_card.rank)
return best_suit_rank
@classmethod
def __is_fullhouse(self, cards):
r1, r2 = self.__search_fullhouse(cards)
return r1 and r2
@classmethod
def __eval_fullhouse(self, cards):
r1, r2 = self.__search_fullhouse(cards)
return r1 << 4 | r2
@classmethod
def __search_fullhouse(self, cards):
fetch_rank = lambda card: card.rank
three_card_ranks, two_pair_ranks = [], []
for rank, group_obj in groupby(sorted(cards, key=fetch_rank), key=fetch_rank):
g = list(group_obj)
if len(g) >= 3:
three_card_ranks.append(rank)
if len(g) >= 2:
two_pair_ranks.append(rank)
two_pair_ranks = [rank for rank in two_pair_ranks if not rank in three_card_ranks]
if len(three_card_ranks) == 2:
two_pair_ranks.append(min(three_card_ranks))
max_ = lambda l: None if len(l)==0 else max(l)
return max_(three_card_ranks), max_(two_pair_ranks)
@classmethod
def __is_fourcard(self, cards):
return self.__eval_fourcard(cards) != 0
@classmethod
def __eval_fourcard(self, cards):
rank = self.__search_fourcard(cards)
return rank << 4
@classmethod
def __search_fourcard(self, cards):
fetch_rank = lambda card: card.rank
for rank, group_obj in groupby(sorted(cards, key=fetch_rank), key=fetch_rank):
g = list(group_obj)
if len(g) >= 4:
return rank
return 0
@classmethod
def __is_straightflash(self, cards):
return self.__search_straightflash(cards) != -1
@classmethod
def __eval_straightflash(self, cards):
return self.__search_straightflash(cards) << 4
@classmethod
def __search_straightflash(self, cards):
flash_cards = []
fetch_suit = lambda card: card.suit
for suit, group_obj in groupby(sorted(cards, key=fetch_suit), key=fetch_suit):
g = list(group_obj)
if len(g) >= 5: flash_cards = g
return self.__search_straight(flash_cards)
@classmethod
def __mask_hand_strength(self, bit):
mask = 511 << 16
return (bit & mask) >> 8 # 511 = (1 << 9) -1
@classmethod
def __mask_hand_high_rank(self, bit):
mask = 15 << 12
return (bit & mask) >> 12
@classmethod
def __mask_hand_low_rank(self, bit):
mask = 15 << 8
return (bit & mask) >> 8
@classmethod
def __mask_hole_high_rank(self, bit):
mask = 15 << 4
return (bit & mask) >> 4
@classmethod
def __mask_hole_low_rank(self, bit):
mask = 15
return bit & mask
| {
"repo_name": "sberbank-ai/holdem-challenge",
"path": "PyPokerEngine/pypokerengine/engine/hand_evaluator.py",
"copies": "1",
"size": "9969",
"license": "mit",
"hash": 8302575163280334000,
"line_mean": 28.149122807,
"line_max": 99,
"alpha_frac": 0.5695656535,
"autogenerated": false,
"ratio": 2.8507291964541035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8790530664543483,
"avg_score": 0.025952837082124192,
"num_lines": 342
} |
from functools import reduce
from itertools import groupby
class HandEvaluator:
    """Score poker hands, encoding category and key ranks in one int.

    Layout (see the "Return Format" comment below): a one-hot category
    flag in the high bits followed by two 4-bit rank nibbles.  Ranks run
    2..14 with 14 as the Ace (the TwoPair example below encodes A as
    1110 = 14).

    NOTE(review): "FLASH" is this codebase's spelling of "flush"; the
    constant names are public API, so they are kept unchanged.
    """
    # One bit per hand category, ordered so that stronger categories
    # compare greater numerically.  The low 8 bits are reserved for the
    # two rank nibbles of the encoding.
    HIGHCARD = 0
    ONEPAIR = 1 << 8
    TWOPAIR = 1 << 9
    THREECARD = 1 << 10
    STRAIGHT = 1 << 11
    FLASH = 1 << 12
    FULLHOUSE = 1 << 13
    FOURCARD = 1 << 14
    STRAIGHTFLASH = 1 << 15
    # Human-readable label for each category flag.
    HAND_STRENGTH_MAP = {
        HIGHCARD: "HIGHCARD",
        ONEPAIR: "ONEPAIR",
        TWOPAIR: "TWOPAIR",
        THREECARD: "THREECARD",
        STRAIGHT: "STRAIGHT",
        FLASH: "FLASH",
        FULLHOUSE: "FULLHOUSE",
        FOURCARD: "FOURCARD",
        STRAIGHTFLASH: "STRAIGHTFLASH"
    }
    @classmethod
    def gen_hand_rank_info(self, hole, community):
        """Return a nested dict describing the evaluated hand.

        ``hand`` holds the category name plus the two hand-rank nibbles;
        ``hole`` holds the two hole-card rank nibbles.
        """
        hand = self.eval_hand(hole, community)
        row_strength = self.__mask_hand_strength(hand)
        strength = self.HAND_STRENGTH_MAP[row_strength]
        hand_high = self.__mask_hand_high_rank(hand)
        hand_low = self.__mask_hand_low_rank(hand)
        hole_high = self.__mask_hole_high_rank(hand)
        hole_low = self.__mask_hole_low_rank(hand)
        return {
            "hand" : {
                "strength" : strength,
                "high" : hand_high,
                "low" : hand_low
            },
            "hole" : {
                "high" : hole_high,
                "low" : hole_low
            }
        }
    @classmethod
    def eval_hand(self, hole, community):
        """Encode the hand flag (shifted up 8 bits) over the hole ranks."""
        ranks = sorted([card.rank for card in hole])
        # Higher hole rank goes in the upper nibble.
        hole_flg = ranks[1] << 4 | ranks[0]
        hand_flg = self.__calc_hand_info_flg(hole, community) << 8
        return hand_flg | hole_flg
    # Return Format
    # [Bit flg of hand][rank1(4bit)][rank2(4bit)]
    # ex.)
    # HighCard hole card 3,4 => 100 0011
    # OnePair of rank 3 => 1 0011 0000
    # TwoPair of rank A, 4 => 10 1110 0100
    # ThreeCard of rank 9 => 100 1001 0000
    # Straight of rank 10 => 1000 1010 0000
    # Flash of rank 5 => 10000 0101 0000
    # FullHouse of rank 3, 4 => 100000 0011 0100
    # FourCard of rank 2 => 1000000 0010 0000
    # straight flash of rank 7 => 10000000 0111 0000
    @classmethod
    def __calc_hand_info_flg(self, hole, community):
        # Probe categories strongest-first; the first match wins, so the
        # weaker detectors below may assume stronger hands are ruled out.
        cards = hole + community
        if self.__is_straightflash(cards): return self.STRAIGHTFLASH | self.__eval_straightflash(cards)
        if self.__is_fourcard(cards): return self.FOURCARD | self.__eval_fourcard(cards)
        if self.__is_fullhouse(cards): return self.FULLHOUSE | self.__eval_fullhouse(cards)
        if self.__is_flash(cards): return self.FLASH | self.__eval_flash(cards)
        if self.__is_straight(cards): return self.STRAIGHT | self.__eval_straight(cards)
        if self.__is_threecard(cards): return self.THREECARD | self.__eval_threecard(cards)
        if self.__is_twopair(cards): return self.TWOPAIR | self.__eval_twopair(cards)
        if self.__is_onepair(cards): return self.ONEPAIR | (self.__eval_onepair(cards))
        return self.__eval_holecard(hole)
    @classmethod
    def __eval_holecard(self, hole):
        # High-card fallback: just the two hole ranks, high rank first.
        ranks = sorted([card.rank for card in hole])
        return ranks[1] << 4 | ranks[0]
    @classmethod
    def __is_onepair(self, cards):
        return self.__eval_onepair(cards) != 0
    @classmethod
    def __eval_onepair(self, cards):
        # Track seen ranks in a bitmask; a repeat marks a pair.  Keeps
        # the highest paired rank, returned in the upper nibble.
        rank = 0
        memo = 0 # bit memo
        for card in cards:
            mask = 1 << card.rank
            if memo & mask != 0: rank = max(rank, card.rank)
            memo |= mask
        return rank << 4
    @classmethod
    def __is_twopair(self, cards):
        return len(self.__search_twopair(cards)) == 2
    @classmethod
    def __eval_twopair(self, cards):
        # Two highest paired ranks: bigger one in the upper nibble.
        ranks = self.__search_twopair(cards)
        return ranks[0] << 4 | ranks[1]
    @classmethod
    def __search_twopair(self, cards):
        # Collect ranks seen more than once, descending, top two only.
        # NOTE(review): a rank held three times is appended twice, which
        # would fake a "two pair"; safe under __calc_hand_info_flg's
        # strongest-first ordering, but misleading if called directly.
        ranks = []
        memo = 0
        for card in cards:
            mask = 1 << card.rank
            if memo & mask != 0: ranks.append(card.rank)
            memo |= mask
        return sorted(ranks)[::-1][:2]
    @classmethod
    def __is_threecard(self, cards):
        return self.__search_threecard(cards) != -1
    @classmethod
    def __eval_threecard(self, cards):
        return self.__search_threecard(cards) << 4
    @classmethod
    def __search_threecard(self, cards):
        # Pack a 3-bit occurrence counter per rank into one integer:
        # rank r occupies bits (r-1)*3 .. (r-1)*3+2.  Shift first, then
        # read, so iteration r exposes rank r's counter.  The ascending
        # loop keeps the highest rank with 3+ occurrences.
        rank = -1
        bit_memo = reduce(lambda memo,card: memo + (1 << (card.rank-1)*3), cards, 0)
        for r in range(2, 15):
            bit_memo >>= 3
            count = bit_memo & 7
            if count >= 3: rank = r
        return rank
    @classmethod
    def __is_straight(self, cards):
        return self.__search_straight(cards) != -1
    @classmethod
    def __eval_straight(self, cards):
        return self.__search_straight(cards) << 4
    @classmethod
    def __search_straight(self, cards):
        # One presence bit per rank.  straight_check closes over the
        # loop variable r (bound at call time inside the loop): True iff
        # ranks r..r+4 are all present.  Ascending loop keeps the
        # highest straight; the returned rank is the straight's low end.
        # NOTE(review): the ace-low wheel (A-2-3-4-5) is never detected:
        # only bits 2..14 are set and 5 consecutive bits are required.
        bit_memo = reduce(lambda memo, card: memo | 1 << card.rank, cards, 0)
        rank = -1
        straight_check = lambda acc, i: acc & (bit_memo >> (r+i) & 1) == 1
        for r in range(2, 15):
            if reduce(straight_check, range(5), True): rank = r
        return rank
    @classmethod
    def __is_flash(self, cards):
        return self.__search_flash(cards) != -1
    @classmethod
    def __eval_flash(self, cards):
        return self.__search_flash(cards) << 4
    @classmethod
    def __search_flash(self, cards):
        # Highest card rank among any suit holding 5+ cards, or -1.
        # cards are sorted by suit first because groupby only groups
        # consecutive equal keys.
        best_suit_rank = -1
        fetch_suit = lambda card: card.suit
        fetch_rank = lambda card: card.rank
        for suit, group_obj in groupby(sorted(cards, key=fetch_suit), key=fetch_suit):
            g = list(group_obj)
            if len(g) >= 5:
                max_rank_card = max(g, key=fetch_rank)
                best_suit_rank = max(best_suit_rank, max_rank_card.rank)
        return best_suit_rank
    @classmethod
    def __is_fullhouse(self, cards):
        r1, r2 = self.__search_fullhouse(cards)
        # Ranks are truthy ints; missing parts come back as None.
        return r1 and r2
    @classmethod
    def __eval_fullhouse(self, cards):
        r1, r2 = self.__search_fullhouse(cards)
        return r1 << 4 | r2
    @classmethod
    def __search_fullhouse(self, cards):
        # three_card_ranks: ranks with 3+ copies; two_pair_ranks: ranks
        # with 2+ copies excluding the trips.  With two sets of trips the
        # lower one serves as the pair part.  Returns (trip, pair) where
        # either slot may be None when absent.
        fetch_rank = lambda card: card.rank
        three_card_ranks, two_pair_ranks = [], []
        for rank, group_obj in groupby(sorted(cards, key=fetch_rank), key=fetch_rank):
            g = list(group_obj)
            if len(g) >= 3:
                three_card_ranks.append(rank)
            if len(g) >= 2:
                two_pair_ranks.append(rank)
        two_pair_ranks = [rank for rank in two_pair_ranks if not rank in three_card_ranks]
        if len(three_card_ranks) == 2:
            two_pair_ranks.append(min(three_card_ranks))
        max_ = lambda l: None if len(l)==0 else max(l)
        return max_(three_card_ranks), max_(two_pair_ranks)
    @classmethod
    def __is_fourcard(self, cards):
        return self.__eval_fourcard(cards) != 0
    @classmethod
    def __eval_fourcard(self, cards):
        rank = self.__search_fourcard(cards)
        return rank << 4
    @classmethod
    def __search_fourcard(self, cards):
        # First rank with 4+ copies, or 0 when none exists.
        fetch_rank = lambda card: card.rank
        for rank, group_obj in groupby(sorted(cards, key=fetch_rank), key=fetch_rank):
            g = list(group_obj)
            if len(g) >= 4:
                return rank
        return 0
    @classmethod
    def __is_straightflash(self, cards):
        return self.__search_straightflash(cards) != -1
    @classmethod
    def __eval_straightflash(self, cards):
        return self.__search_straightflash(cards) << 4
    @classmethod
    def __search_straightflash(self, cards):
        # Isolate the flush suit's cards (with at most 7 cards only one
        # suit can reach 5, so keeping the last qualifying group is
        # safe), then look for a straight within them.
        flash_cards = []
        fetch_suit = lambda card: card.suit
        for suit, group_obj in groupby(sorted(cards, key=fetch_suit), key=fetch_suit):
            g = list(group_obj)
            if len(g) >= 5: flash_cards = g
        return self.__search_straight(flash_cards)
    @classmethod
    def __mask_hand_strength(self, bit):
        # In the eval_hand encoding the category flag sits in bits
        # 16..24; shifting down 8 re-aligns it with the class constants
        # used as HAND_STRENGTH_MAP keys.
        mask = 511 << 16
        return (bit & mask) >> 8 # 511 = (1 << 9) -1
    @classmethod
    def __mask_hand_high_rank(self, bit):
        # Hand's high rank nibble: bits 12..15.
        mask = 15 << 12
        return (bit & mask) >> 12
    @classmethod
    def __mask_hand_low_rank(self, bit):
        # Hand's low rank nibble: bits 8..11.
        mask = 15 << 8
        return (bit & mask) >> 8
    @classmethod
    def __mask_hole_high_rank(self, bit):
        # Hole's high rank nibble: bits 4..7.
        mask = 15 << 4
        return (bit & mask) >> 4
    @classmethod
    def __mask_hole_low_rank(self, bit):
        # Hole's low rank nibble: bits 0..3.
        mask = 15
        return bit & mask
| {
"repo_name": "ishikota/PyPokerEngine",
"path": "pypokerengine/engine/hand_evaluator.py",
"copies": "1",
"size": "7794",
"license": "mit",
"hash": -232086526779720540,
"line_mean": 28.9769230769,
"line_max": 99,
"alpha_frac": 0.6050808314,
"autogenerated": false,
"ratio": 3.1288639100762747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9105471327905308,
"avg_score": 0.025694682714193548,
"num_lines": 260
} |
from functools import reduce
from itertools import groupby
from .functions import Compose
def mamdani(rules_output, defuzzy, functions, step=0.1):
    """Evaluate a Mamdani-style fuzzy inference system.

    :param rules_output: list of ``(weight, output_var, linguistic_val)``
        tuples, one per fired rule; sorted in place by output variable so
        rules for the same variable can be grouped
    :param defuzzy: defuzzification function applied to the composed
        membership function
    :param functions: dict mapping a linguistic value to its membership
        function
    :param step: discretization step for the function domains
    :return: dict mapping each output variable name to its crisp value
    """
    by_variable = lambda rule: rule[1]
    rules_output.sort(key=by_variable)
    crisp_values = {}
    for var_name, var_rules in groupby(rules_output, by_variable):
        # Pair each rule's truncation level with its membership function,
        # then merge them into a single aggregate function.
        truncated = [(weight, functions[ling]) for weight, _, ling in var_rules]
        crisp_values[var_name] = defuzzy(Compose(truncated), step)
    return crisp_values
def sugeno(rules_output):
    """Evaluate a Sugeno-style fuzzy system via weighted average.

    :param rules_output: list of ``(weight, output_var, z)`` tuples, one
        per fired rule
    :return: dict mapping each output variable to
        ``sum(w * z) / sum(w)`` over its rules
    """
    weighted_sums = {}
    weight_totals = {}
    for weight, variable, crisp in rules_output:
        if variable in weighted_sums:
            weighted_sums[variable] += weight * crisp
            weight_totals[variable] += weight
        else:
            weighted_sums[variable] = weight * crisp
            weight_totals[variable] = weight
    return _divide(weighted_sums, weight_totals)
def tsukamoto(rules_output, functions, step=0.1):
    """Evaluate a Tsukamoto-style fuzzy inference system.

    :param rules_output: list of ``(weight, output_var, linguistic_val)``
        tuples, one per fired rule
    :param functions: dict mapping a linguistic value to its membership
        function (must expose ``points(step)``)
    :param step: discretization step for the function domains
    :return: dict mapping each output variable to the weighted average
        ``sum(w * z) / sum(w)``, where ``z`` is the domain value whose
        image is closest to the rule weight ``w``
    """
    weighted_sums = {}
    weight_totals = {}
    for weight, variable, ling in rules_output:
        samples = list(functions[ling].points(step))
        # Pick the sample whose image lies closest to the rule weight;
        # on ties the later sample wins (matches a left fold keeping the
        # accumulator only when strictly closer).
        best = samples[0]
        for sample in samples:
            if abs(sample[1] - weight) <= abs(best[1] - weight):
                best = sample
        z = best[0]
        if variable in weighted_sums:
            weighted_sums[variable] += weight * z
            weight_totals[variable] += weight
        else:
            weighted_sums[variable] = weight * z
            weight_totals[variable] = weight
    return _divide(weighted_sums, weight_totals)
def _divide(num, den):
"""
Divide values corresponding to the same key in two dictionaries
:param num: Dividend dictionary
:param den: Divider dictionary
:return: New dictionary with the same keys as the parameters and the divisions results as values
"""
results = {}
for variable in num.keys():
results[variable] = num[variable] / den[variable]
return results
| {
"repo_name": "ealmuina/fuzzy-logic-evaluator",
"path": "fuzzy/models.py",
"copies": "1",
"size": "3081",
"license": "mit",
"hash": -3608028277340996000,
"line_mean": 37.5125,
"line_max": 119,
"alpha_frac": 0.6757546251,
"autogenerated": false,
"ratio": 3.9449423815621,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016245509490604919,
"num_lines": 80
} |
from functools import reduce
from itertools import groupby
from pypokerengine.engine.hand_evaluator import HandEvaluator
from pypokerengine.engine.pay_info import PayInfo
class GameEvaluator:
    """Settles a finished round: finds winners, builds pots, splits prizes."""
    @classmethod
    def judge(self, table):
        """Return ``(winners, hand_info, prize_map)`` for the showdown."""
        winners = self.__find_winners_from(table.get_community_card(), table.seats.players)
        hand_info = self.__gen_hand_info_if_needed(table.seats.players, table.get_community_card())
        prize_map = self.__calc_prize_distribution(table.get_community_card(), table.seats.players)
        return winners, hand_info, prize_map
    @classmethod
    def create_pot(self, players):
        """Return all pots: side pots (ascending all-in size) then the main pot."""
        side_pots = self.__get_side_pots(players)
        main_pot = self.__get_main_pot(players, side_pots)
        return side_pots + [main_pot]
    @classmethod
    def __calc_prize_distribution(self, community_card, players):
        # Award each pot to the winners among its eligible players.
        # Integer division: remainder chips are dropped, not redistributed.
        prize_map = self.__create_prize_map(len(players))
        pots = self.create_pot(players)
        for pot in pots:
            winners = self.__find_winners_from(community_card, pot["eligibles"])
            prize = int(pot["amount"] / len(winners))
            for winner in winners:
                prize_map[players.index(winner)] += prize
        return prize_map
    @classmethod
    def __create_prize_map(self, player_num):
        # Build {seat_index: 0} for every seat.
        def update(d, other): d.update(other); return d
        return reduce(update, [{i:0} for i in range(player_num)], {})
    @classmethod
    def __find_winners_from(self, community_card, players):
        # Winners = still-active players whose eval_hand_true score equals
        # the maximum; ties yield multiple winners.
        score_player = lambda player: HandEvaluator.eval_hand_true(player.hole_card, community_card)
        active_players = [player for player in players if player.is_active()]
        scores = [score_player(player) for player in active_players]
        best_score = max(scores)
        score_with_players = [(score, player) for score, player in zip(scores, active_players)]
        winners = [s_p[1] for s_p in score_with_players if s_p[0] == best_score]
        return winners
    @classmethod
    def __gen_hand_info_if_needed(self, players, community):
        # No showdown info when only one player remains active (everyone
        # else folded), since no hands need to be revealed.
        active_players = [player for player in players if player.is_active()]
        gen_hand_info = lambda player: { "uuid": player.uuid, "hand" : HandEvaluator.gen_hand_rank_info(player.hole_card, community) }
        return [] if len(active_players) == 1 else [gen_hand_info(player) for player in active_players]
    @classmethod
    def __get_main_pot(self, players, sidepots):
        # Main pot = total chips paid minus chips captured by side pots;
        # eligible are the players who matched the largest payment.
        max_pay = max([pay.amount for pay in self.__get_payinfo(players)])
        return {
            "amount": self.__get_players_pay_sum(players) - self.__get_sidepots_sum(sidepots),
            "eligibles": [player for player in players if player.pay_info.amount == max_pay]
        }
    @classmethod
    def __get_players_pay_sum(self, players):
        # Total chips paid into the round by every player.
        return sum([pay.amount for pay in self.__get_payinfo(players)])
    @classmethod
    def __get_side_pots(self, players):
        # One side pot per all-in amount, built in ascending order so
        # each pot only holds chips above the previous all-in level.
        pay_amounts = [payinfo.amount for payinfo in self.__fetch_allin_payinfo(players)]
        gen_sidepots = lambda sidepots, allin_amount: sidepots + [self.__create_sidepot(players, sidepots, allin_amount)]
        return reduce(gen_sidepots, pay_amounts, [])
    @classmethod
    def __create_sidepot(self, players, smaller_side_pots, allin_amount):
        return {
            "amount": self.__calc_sidepot_size(players, smaller_side_pots, allin_amount),
            "eligibles" : self.__select_eligibles(players, allin_amount)
        }
    @classmethod
    def __calc_sidepot_size(self, players, smaller_side_pots, allin_amount):
        # Each player contributes at most allin_amount to this pot; chips
        # already captured by smaller side pots are subtracted.
        add_chip_for_pot = lambda pot, player: pot + min(allin_amount, player.pay_info.amount)
        target_pot_size = reduce(add_chip_for_pot, players, 0)
        return target_pot_size - self.__get_sidepots_sum(smaller_side_pots)
    @classmethod
    def __get_sidepots_sum(self, sidepots):
        # Sum of the "amount" entries of the given pots.
        return reduce(lambda sum_, sidepot: sum_ + sidepot["amount"], sidepots, 0)
    @classmethod
    def __select_eligibles(self, players, allin_amount):
        return [player for player in players if self.__is_eligible(player, allin_amount)]
    @classmethod
    def __is_eligible(self, player, allin_amount):
        # Eligible for a pot when the player covered the all-in amount
        # and has not folded.
        return player.pay_info.amount >= allin_amount and \
            player.pay_info.status != PayInfo.FOLDED
    @classmethod
    def __fetch_allin_payinfo(self, players):
        # All-in payments, sorted ascending by amount.
        payinfo = self.__get_payinfo(players)
        allin_info = [info for info in payinfo if info.status == PayInfo.ALLIN]
        return sorted(allin_info, key=lambda info: info.amount)
    @classmethod
    def __get_payinfo(self, players):
        return [player.pay_info for player in players]
| {
"repo_name": "sberbank-ai/holdem-challenge",
"path": "PyPokerEngine/pypokerengine/engine/game_evaluator.py",
"copies": "1",
"size": "4390",
"license": "mit",
"hash": -376532597012173060,
"line_mean": 39.6481481481,
"line_max": 130,
"alpha_frac": 0.6958997722,
"autogenerated": false,
"ratio": 3.2639405204460967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9373816239610826,
"avg_score": 0.017204810607054033,
"num_lines": 108
} |
from functools import reduce
from itertools import permutations
from ..combinatorics import Permutation
from ..core import (Add, Basic, Dummy, Expr, Function, Integer, Mul, Pow,
Tuple, diff, symbols, sympify)
from ..core.numbers import Zero
from ..functions import factorial
from ..matrices import Matrix
from ..simplify import simplify
from ..solvers import solve
# TODO you are a bit excessive in the use of Dummies
# TODO dummy point, literal field
# TODO too often one needs to call doit or simplify on the output, check the
# tests and find out why
from ..tensor import ImmutableDenseNDimArray
class Manifold(Basic):
    """Object representing a mathematical manifold.
    The only role that this object plays is to keep a list of all patches
    defined on the manifold. It does not provide any means to study the
    topological characteristics of the manifold that it represents.
    """
    def __new__(cls, name, dim):
        name = sympify(name)
        dim = sympify(dim)
        obj = Basic.__new__(cls, name, dim)
        obj.name = name
        obj.dim = dim
        obj.patches = []
        # The patches list is necessary if a Patch instance needs to enumerate
        # other Patch instance on the same manifold.
        return obj
    def _latex(self, printer, *args):
        # Single uppercase letters render as blackboard bold with the
        # dimension as exponent (e.g. \mathbb{R}^{2}); everything else
        # falls back to upright \mathrm without the dimension.
        name = str(self.name)
        if len(name) == 1:
            if name.isupper():
                return r'\mathbb{%s}^{%s}' % (self.name, self.dim)
        return r'\mathrm{%s}' % self.name
class Patch(Basic):
    """Object representing a patch on a manifold.
    On a manifold one can have many patches that do not always include the
    whole manifold. On these patches coordinate charts can be defined that
    permit the parameterization of any point on the patch in terms of a tuple
    of real numbers (the coordinates).
    This object serves as a container/parent for all coordinate system charts
    that can be defined on the patch it represents.
    Examples
    ========
    Define a Manifold and a Patch on that Manifold:
    >>> m = Manifold('M', 3)
    >>> p = Patch('P', m)
    >>> p in m.patches
    True
    """
    # Contains a reference to the parent manifold in order to be able to access
    # other patches.
    def __new__(cls, name, manifold):
        name = sympify(name)
        obj = Basic.__new__(cls, name, manifold)
        obj.name = name
        obj.manifold = manifold
        # Register this patch with its parent manifold.
        obj.manifold.patches.append(obj)
        obj.coord_systems = []
        # The list of coordinate systems is necessary for an instance of
        # CoordSystem to enumerate other coord systems on the patch.
        return obj
    @property
    def dim(self):
        # A patch inherits its dimension from the manifold it lives on.
        return self.manifold.dim
    def _latex(self, printer, *args):
        return r'\mathrm{%s}_{%s}' % (self.name, self.manifold._latex(printer, *args))
class CoordSystem(Basic):
    """Contains all coordinate transformation logic.
    Examples
    ========
    Define a Manifold and a Patch, and then define two coord systems on that
    patch:
    >>> r, theta = symbols('r, theta')
    >>> m = Manifold('M', 2)
    >>> patch = Patch('P', m)
    >>> rect = CoordSystem('rect', patch)
    >>> polar = CoordSystem('polar', patch)
    >>> rect in patch.coord_systems
    True
    Connect the coordinate systems. An inverse transformation is automatically
    found by ``solve`` when possible:
    >>> polar.connect_to(rect, [r, theta], [r*cos(theta), r*sin(theta)])
    >>> polar.coord_tuple_transform_to(rect, [0, 2])
    Matrix([
    [0],
    [0]])
    >>> polar.coord_tuple_transform_to(rect, [2, pi/2])
    Matrix([
    [0],
    [2]])
    >>> rect.coord_tuple_transform_to(polar, [1, 1]).applyfunc(simplify)
    Matrix([
    [sqrt(2)],
    [   pi/4]])
    Calculate the jacobian of the polar to cartesian transformation:
    >>> polar.jacobian(rect, [r, theta])
    Matrix([
    [cos(theta), -r*sin(theta)],
    [sin(theta),  r*cos(theta)]])
    Define a point using coordinates in one of the coordinate systems:
    >>> p = polar.point([1, 3*pi/4])
    >>> rect.point_to_coords(p)
    Matrix([
    [-sqrt(2)/2],
    [ sqrt(2)/2]])
    Define a basis scalar field (i.e. a coordinate function), that takes a
    point and returns its coordinates. It is an instance of ``BaseScalarField``.
    >>> rect.coord_function(0)(p)
    -sqrt(2)/2
    >>> rect.coord_function(1)(p)
    sqrt(2)/2
    Define a basis vector field (i.e. a unit vector field along the coordinate
    line). Vectors are also differential operators on scalar fields. It is an
    instance of ``BaseVectorField``.
    >>> v_x = rect.base_vector(0)
    >>> x = rect.coord_function(0)
    >>> v_x(x)
    1
    >>> v_x(v_x(x))
    0
    Define a basis oneform field:
    >>> dx = rect.base_oneform(0)
    >>> dx(v_x)
    1
    If you provide a list of names the fields will print nicely:
    - without provided names:
    >>> x, v_x, dx
    (rect_0, e_rect_0, drect_0)
    - with provided names
    >>> rect = CoordSystem('rect', patch, ['x', 'y'])
    >>> rect.coord_function(0), rect.base_vector(0), rect.base_oneform(0)
    (x, e_x, dx)
    """
    # Contains a reference to the parent patch in order to be able to access
    # other coordinate system charts.
    def __new__(cls, name, patch, names=None):
        name = sympify(name)
        # names is not in args because it is related only to printing, not to
        # identifying the CoordSystem instance.
        if not names:
            # Default to auto-generated names like 'rect_0', 'rect_1', ...
            names = [f'{name}_{i:d}' for i in range(patch.dim)]
        if isinstance(names, Tuple):
            obj = Basic.__new__(cls, name, patch, names)
        else:
            names = Tuple(*symbols(names))
            obj = Basic.__new__(cls, name, patch, names)
        obj.name = name
        obj._names = [str(i) for i in names.args]
        obj.patch = patch
        # Register this chart with its parent patch.
        obj.patch.coord_systems.append(obj)
        obj.transforms = {}
        # All the coordinate transformation logic is in this dictionary in the
        # form of:
        #  key = other coordinate system
        #  value = tuple of  # TODO make these Lambda instances
        #          - list of `Dummy` coordinates in this coordinate system
        #          - list of expressions as a function of the Dummies giving
        #          the coordinates in another coordinate system
        obj._dummies = [Dummy(str(n)) for n in names]
        obj._dummy = Dummy()
        return obj
    @property
    def dim(self):
        # A chart inherits its dimension from the patch it is defined on.
        return self.patch.dim
    ##########################################################################
    # Coordinate transformations.
    ##########################################################################
    def connect_to(self, to_sys, from_coords, to_exprs, inverse=True, fill_in_gaps=False):
        """Register the transformation used to switch to another coordinate system.
        Parameters
        ==========
        to_sys
            another instance of ``CoordSystem``
        from_coords
            list of symbols in terms of which ``to_exprs`` is given
        to_exprs
            list of the expressions of the new coordinate tuple
        inverse
            try to deduce and register the inverse transformation
        fill_in_gaps
            try to deduce other transformation that are made
            possible by composing the present transformation with other already
            registered transformation
        """
        from_coords, to_exprs = dummyfy(from_coords, to_exprs)
        self.transforms[to_sys] = Matrix(from_coords), Matrix(to_exprs)
        if inverse:
            to_sys.transforms[self] = self._inv_transf(from_coords, to_exprs)
        if fill_in_gaps:
            # Composing registered transformations is not implemented yet.
            raise NotImplementedError
    @staticmethod
    def _inv_transf(from_coords, to_exprs):
        # Invert the forward map by solving to_exprs == new dummies for
        # the original coordinates.
        # TODO, check for results, get solve to return results in definite
        # format instead of wondering dict/tuple/whatever.
        # As it is at the moment this is an ugly hack for changing the format
        inv_from = [i.as_dummy() for i in from_coords]
        inv_to = solve([t[0] - t[1] for t in zip(inv_from, to_exprs)],
                       list(from_coords))
        inv_to = [inv_to[0][fc] for fc in from_coords]
        return Matrix(inv_from), Matrix(inv_to)
    def coord_tuple_transform_to(self, to_sys, coords):
        """Transform ``coords`` to coord system ``to_sys``.
        See Also
        ========
        CoordSystem
        """
        coords = Matrix(coords)
        if self != to_sys:
            # Substitute the registered dummy coordinates with the actual
            # coordinate values in the stored transformation expressions.
            transf = self.transforms[to_sys]
            coords = transf[1].subs(list(zip(transf[0], coords)))
        return coords
    def jacobian(self, to_sys, coords):
        """Return the jacobian matrix of a transformation."""
        with_dummies = self.coord_tuple_transform_to(
            to_sys, self._dummies).jacobian(self._dummies)
        return with_dummies.subs(list(zip(self._dummies, coords)))
    ##########################################################################
    # Base fields.
    ##########################################################################
    def coord_function(self, coord_index):
        """Return a ``BaseScalarField`` that takes a point and returns one of the coords.
        Takes a point and returns its coordinate in this coordinate system.
        See Also
        ========
        CoordSystem
        """
        return BaseScalarField(self, coord_index)
    def coord_functions(self):
        """Returns a list of all coordinate functions.
        For more details see the ``coord_function`` method of this class.
        """
        return [self.coord_function(i) for i in range(self.dim)]
    def base_vector(self, coord_index):
        """Return a basis vector field.
        The basis vector field for this coordinate system. It is also an
        operator on scalar fields.
        See Also
        ========
        CoordSystem
        """
        return BaseVectorField(self, coord_index)
    def base_vectors(self):
        """Returns a list of all base vectors.
        For more details see the ``base_vector`` method of this class.
        """
        return [self.base_vector(i) for i in range(self.dim)]
    def base_oneform(self, coord_index):
        """Return a basis 1-form field.
        The basis one-form field for this coordinate system. It is also an
        operator on vector fields.
        See Also
        ========
        CoordSystem
        """
        return Differential(self.coord_function(coord_index))
    def base_oneforms(self):
        """Returns a list of all base oneforms.
        For more details see the ``base_oneform`` method of this class.
        """
        return [self.base_oneform(i) for i in range(self.dim)]
    ##########################################################################
    # Points.
    ##########################################################################
    def point(self, coords):
        """Create a ``Point`` with coordinates given in this coord system.
        See Also
        ========
        CoordSystem
        """
        return Point(self, coords)
    def point_to_coords(self, point):
        """Calculate the coordinates of a point in this coord system.
        See Also
        ========
        CoordSystem
        """
        return point.coords(self)
    ##########################################################################
    # Printing.
    ##########################################################################
    def _latex(self, printer, *args):
        return r'\mathrm{%s}^{\mathrm{%s}}_{%s}' % (
            self.name, self.patch.name, self.patch.manifold._latex(printer, *args))
class Point(Basic):
    """Point in a Manifold object.
    To define a point you must supply coordinates and a coordinate system.
    The usage of this object after its definition is independent of the
    coordinate system that was used in order to define it, however due to
    limitations in the simplification routines you can arrive at complicated
    expressions if you use inappropriate coordinate systems.
    Examples
    ========
    Define the boilerplate Manifold, Patch and coordinate systems:
    >>> from diofant.diffgeom import Point
    >>> r, theta = symbols('r, theta')
    >>> m = Manifold('M', 2)
    >>> p = Patch('P', m)
    >>> rect = CoordSystem('rect', p)
    >>> polar = CoordSystem('polar', p)
    >>> polar.connect_to(rect, [r, theta], [r*cos(theta), r*sin(theta)])
    Define a point using coordinates from one of the coordinate systems:
    >>> p = Point(polar, [r, 3*pi/4])
    >>> p.coords()
    Matrix([
    [     r],
    [3*pi/4]])
    >>> p.coords(rect)
    Matrix([
    [-sqrt(2)*r/2],
    [ sqrt(2)*r/2]])
    """
    def __init__(self, coord_sys, coords):
        super().__init__()
        self._coord_sys = coord_sys
        # Store the coordinates as an immutable column matrix.
        self._coords = Matrix(coords).as_immutable()
        # Expose the defining data as the instance's args.
        self._args = self._coord_sys, self._coords
    def coords(self, to_sys=None):
        """Coordinates of the point in a given coordinate system.
        If ``to_sys`` is ``None`` it returns the coordinates in the system in
        which the point was defined.
        """
        if to_sys:
            return self._coord_sys.coord_tuple_transform_to(to_sys, self._coords)
        else:
            return self._coords
class BaseScalarField(Expr):
    """Base Scalar Field over a Manifold for a given Coordinate System.

    A scalar field takes a point as an argument and returns a scalar.

    A base scalar field of a coordinate system takes a point and returns one of
    the coordinates of that point in the coordinate system in question.

    To define a scalar field you need to choose the coordinate system and the
    index of the coordinate.

    The use of the scalar field after its definition is independent of the
    coordinate system in which it was defined, however due to limitations in
    the simplification routines you may arrive at more complicated
    expression if you use inappropriate coordinate systems.

    You can build complicated scalar fields by just building up Diofant
    expressions containing ``BaseScalarField`` instances.

    Examples
    ========

    Define boilerplate Manifold, Patch and coordinate systems:

    >>> r0, theta0 = symbols('r0, theta0')
    >>> m = Manifold('M', 2)
    >>> p = Patch('P', m)
    >>> rect = CoordSystem('rect', p)
    >>> polar = CoordSystem('polar', p)
    >>> polar.connect_to(rect, [r0, theta0], [r0*cos(theta0), r0*sin(theta0)])

    Point to be used as an argument for the field:

    >>> point = polar.point([r0, 0])

    Examples of fields:

    >>> fx = BaseScalarField(rect, 0)
    >>> fy = BaseScalarField(rect, 1)
    >>> (fx**2+fy**2).rcall(point)
    r0**2

    >>> g = Function('g')
    >>> ftheta = BaseScalarField(polar, 1)
    >>> fg = g(ftheta-pi)
    >>> fg.rcall(point)
    g(-pi)
    """

    is_commutative = True

    def __new__(cls, coord_sys, index):
        obj = Expr.__new__(cls, coord_sys, sympify(index))
        obj._coord_sys = coord_sys
        obj._index = index
        return obj

    def __call__(self, *args):
        """Evaluating the field at a point or doing nothing.

        If the argument is a ``Point`` instance, the field is evaluated at that
        point. The field is returned itself if the argument is any other
        object. It is so in order to have working recursive calling mechanics
        for all fields (check the ``__call__`` method of ``Expr``).
        """
        # Fix: check the arity *before* indexing ``args`` — previously a
        # zero-argument call raised IndexError instead of returning ``self``
        # as the documented "doing nothing" contract requires.
        if len(args) != 1 or not isinstance(args[0], Point):
            return self
        point = args[0]
        coords = point.coords(self._coord_sys)
        # XXX Calling doit is necessary with all the Subs expressions
        # XXX Calling simplify is necessary with all the trig expressions
        return simplify(coords[self._index]).doit()

    # XXX Workaround for limitations on the content of args
    free_symbols = set()

    def doit(self, **hints):
        # A base scalar field is already fully evaluated.
        return self
class BaseVectorField(Expr):
    r"""Vector Field over a Manifold.

    A vector field is an operator taking a scalar field and returning a
    directional derivative (which is also a scalar field).

    A base vector field is the same type of operator, however the derivation is
    specifically done with respect to a chosen coordinate.

    To define a base vector field you need to choose the coordinate system and
    the index of the coordinate.

    The use of the vector field after its definition is independent of the
    coordinate system in which it was defined, however due to limitations in the
    simplification routines you may arrive at more complicated expression if you
    use inappropriate coordinate systems.

    Examples
    ========

    Use the predefined R2 manifold, setup some boilerplate.

    >>> from diofant.diffgeom.rn import R2, R2_p, R2_r
    >>> x0, y0, r0, theta0 = symbols('x0, y0, r0, theta0')

    Points to be used as arguments for the field:

    >>> point_p = R2_p.point([r0, theta0])
    >>> point_r = R2_r.point([x0, y0])

    Scalar field to operate on:

    >>> g = Function('g')
    >>> s_field = g(R2.x, R2.y)
    >>> s_field.rcall(point_r)
    g(x0, y0)
    >>> s_field.rcall(point_p)
    g(r0*cos(theta0), r0*sin(theta0))

    Vector field:

    >>> v = BaseVectorField(R2_r, 1)
    >>> pprint(v(s_field), use_unicode=False)
    / d \|
    |-----(g(x, xi_2))||
    \dxi_2 /|xi_2=y
    >>> pprint(v(s_field).rcall(point_r).doit(), use_unicode=False)
    d
    ---(g(x0, y0))
    dy0
    >>> pprint(v(s_field).rcall(point_p).doit(), use_unicode=False)
    / d \|
    |-----(g(r0*cos(theta0), xi_2))||
    \dxi_2 /|xi_2=r0*sin(theta0)
    """

    # Vector fields do not commute (application order matters).
    is_commutative = False

    def __new__(cls, coord_sys, index):
        index = sympify(index)
        obj = Expr.__new__(cls, coord_sys, index)
        obj._coord_sys = coord_sys
        obj._index = index
        return obj

    def __call__(self, scalar_field):
        """Apply on a scalar field.

        The action of a vector field on a scalar field is a directional
        differentiation.

        If the argument is not a scalar field an error is raised.
        """
        if covariant_order(scalar_field) or contravariant_order(scalar_field):
            raise ValueError('Only scalar fields can be supplied as arguments to vector fields.')
        base_scalars = list(scalar_field.atoms(BaseScalarField))
        # First step: e_x(x+r**2) -> e_x(x) + 2*r*e_x(r)
        # Replace each base scalar by an undetermined function of a single
        # dummy variable, so the chain rule can be applied symbolically.
        d_var = self._coord_sys._dummy
        # TODO: you need a real dummy function for the next line
        d_funcs = [Function(f'_#_{i}')(d_var) for i,
                   b in enumerate(base_scalars)]
        d_result = scalar_field.subs(list(zip(base_scalars, d_funcs)))
        d_result = d_result.diff(d_var)
        # Second step: e_x(x) -> 1 and e_x(r) -> cos(atan2(x, y))
        # The derivative of each base scalar along this base vector is the
        # corresponding Jacobian entry between the two coordinate systems.
        coords = self._coord_sys._dummies
        d_funcs_deriv = [f.diff(d_var) for f in d_funcs]
        d_funcs_deriv_sub = []
        for b in base_scalars:
            jac = self._coord_sys.jacobian(b._coord_sys, coords)
            d_funcs_deriv_sub.append(jac[b._index, self._index])
        d_result = d_result.subs(list(zip(d_funcs_deriv, d_funcs_deriv_sub)))
        # Remove the dummies: substitute the base scalars and the coordinate
        # functions back in.
        result = d_result.subs(list(zip(d_funcs, base_scalars)))
        result = result.subs(list(zip(coords, self._coord_sys.coord_functions())))
        return result.doit()  # XXX doit for the Subs instances
class Commutator(Expr):
    r"""Commutator of two vector fields.

    The commutator of two vector fields `v_1` and `v_2` is defined as the
    vector field `[v_1, v_2]` that evaluated on each scalar field `f` is equal
    to `v_1(v_2(f)) - v_2(v_1(f))`.

    Examples
    ========

    Use the predefined R2 manifold, setup some boilerplate.

    >>> from diofant.diffgeom.rn import R2

    Vector fields:

    >>> e_x, e_y, e_r = R2.e_x, R2.e_y, R2.e_r
    >>> c_xy = Commutator(e_x, e_y)
    >>> c_xr = Commutator(e_x, e_r)
    >>> c_xy
    0

    Unfortunately, the current code is not able to compute everything:

    >>> c_xr
    Commutator(e_x, e_r)
    >>> simplify(c_xr(R2.y**2).doit())
    -2*cos(theta)*y**2/(x**2 + y**2)
    """

    def __new__(cls, v1, v2):
        # Both arguments must be pure vector fields: contravariant order 1
        # and covariant order 0.
        if (covariant_order(v1) or contravariant_order(v1) != 1
                or covariant_order(v2) or contravariant_order(v2) != 1):
            raise ValueError(
                'Only commutators of vector fields are supported.')
        if v1 == v2:
            # [v, v] = 0 for any vector field.
            return Zero()
        coord_sys = set().union(*[v.atoms(CoordSystem) for v in (v1, v2)])
        if len(coord_sys) == 1:
            # Only one coordinate systems is used, hence it is easy enough to
            # actually evaluate the commutator.
            if all(isinstance(v, BaseVectorField) for v in (v1, v2)):
                # Base vectors of a single coordinate system commute.
                return Zero()
            bases_1, bases_2 = [list(v.atoms(BaseVectorField))
                                for v in (v1, v2)]
            coeffs_1 = [v1.expand().coeff(b) for b in bases_1]
            coeffs_2 = [v2.expand().coeff(b) for b in bases_2]
            res = 0
            # Leibniz expansion: [c1*b1, c2*b2] = c1*b1(c2)*b2 - c2*b2(c1)*b1
            # (the base vectors themselves commute in one coordinate system).
            for c1, b1 in zip(coeffs_1, bases_1):
                for c2, b2 in zip(coeffs_2, bases_2):
                    res += c1*b1(c2)*b2 - c2*b2(c1)*b1
            return res
        else:
            # Mixed coordinate systems: leave the commutator unevaluated.
            return super().__new__(cls, v1, v2)

    def __init__(self, v1, v2):
        super().__init__()
        self._args = (v1, v2)
        self._v1 = v1
        self._v2 = v2

    def __call__(self, scalar_field):
        """Apply on a scalar field.

        If the argument is not a scalar field an error is raised.
        """
        return self._v1(self._v2(scalar_field)) - self._v2(self._v1(scalar_field))
class Differential(Expr):
    r"""Return the differential (exterior derivative) of a form field.

    The differential of a form (i.e. the exterior derivative) has a complicated
    definition in the general case.

    The differential `df` of the 0-form `f` is defined for any vector field `v`
    as `df(v) = v(f)`.

    Examples
    ========

    Use the predefined R2 manifold, setup some boilerplate.

    >>> from diofant.diffgeom.rn import R2

    Scalar field (0-forms):

    >>> g = Function('g')
    >>> s_field = g(R2.x, R2.y)

    Vector fields:

    >>> e_x, e_y, = R2.e_x, R2.e_y

    Differentials:

    >>> dg = Differential(s_field)
    >>> dg
    d(g(x, y))
    >>> pprint(dg(e_x), use_unicode=False)
    / d \|
    |-----(g(xi_1, y))||
    \dxi_1 /|xi_1=x
    >>> pprint(dg(e_y), use_unicode=False)
    / d \|
    |-----(g(x, xi_2))||
    \dxi_2 /|xi_2=y

    Applying the exterior derivative operator twice always results in:

    >>> Differential(dg)
    0
    """

    is_commutative = False

    def __new__(cls, form_field):
        if contravariant_order(form_field):
            raise ValueError(
                'A vector field was supplied as an argument to Differential.')
        if isinstance(form_field, Differential):
            # d(d(f)) = 0 for any form field f.
            return Zero()
        else:
            return super().__new__(cls, form_field)

    def __init__(self, form_field):
        super().__init__()
        self._form_field = form_field
        self._args = (self._form_field, )

    def __call__(self, *vector_fields):
        """Apply on a list of vector_fields.

        If the number of vector fields supplied is not equal to 1 + the order of
        the form field inside the differential the result is undefined.

        For 1-forms (i.e. differentials of scalar fields) the evaluation is
        done as `df(v)=v(f)`. However if `v` is ``None`` instead of a vector
        field, the differential is returned unchanged. This is done in order to
        permit partial contractions for higher forms.

        In the general case the evaluation is done by applying the form field
        inside the differential on a list with one less elements than the number
        of elements in the original list. Lowering the number of vector fields
        is achieved through replacing each pair of fields by their
        commutator.

        If the arguments are not vectors or ``None``s an error is raised.
        """
        # Every argument must be either a vector field or None (None marks a
        # slot deliberately left uncontracted).
        if any((contravariant_order(a) != 1 or covariant_order(a)) and a is not None
               for a in vector_fields):
            raise ValueError('The arguments supplied to Differential should be vector fields or Nones.')
        k = len(vector_fields)
        if k == 1:
            if vector_fields[0]:
                return vector_fields[0].rcall(self._form_field)
            return self
        else:
            # For higher form it is more complicated:
            # Invariant formula:
            # https://en.wikipedia.org/wiki/Exterior_derivative#Invariant_formula
            # df(v1, ... vn) = +/- vi(f(v1..no i..vn))
            #                +/- f([vi,vj],v1..no i, no j..vn)
            f = self._form_field
            v = vector_fields
            ret = 0
            for i in range(k):
                # First sum: apply v[i] to f evaluated on the rest.
                t = v[i].rcall(f.rcall(*v[:i] + v[i + 1:]))
                ret += (-1)**i*t
                for j in range(i + 1, k):
                    # Second sum: replace the pair (v[i], v[j]) by their
                    # commutator.
                    c = Commutator(v[i], v[j])
                    if c:  # TODO this is ugly - the Commutator can be Zero and
                        # this causes the next line to fail
                        t = f.rcall(*(c,) + v[:i] + v[i + 1:j] + v[j + 1:])
                        ret += (-1)**(i + j)*t
            return ret
class TensorProduct(Expr):
    """Tensor product of forms.

    The tensor product permits the creation of multilinear functionals (i.e.
    higher order tensors) out of lower order forms (e.g. 1-forms). However, the
    higher tensors thus created lack the interesting features provided by the
    other type of product, the wedge product, namely they are not antisymmetric
    and hence are not form fields.

    Examples
    ========

    Use the predefined R2 manifold, setup some boilerplate.

    >>> from diofant.diffgeom.rn import R2

    >>> TensorProduct(R2.dx, R2.dy)(R2.e_x, R2.e_y)
    1
    >>> TensorProduct(R2.dx, R2.dy)(R2.e_y, R2.e_x)
    0
    >>> TensorProduct(R2.dx, R2.x*R2.dy)(R2.x*R2.e_x, R2.e_y)
    x**2

    You can nest tensor products.

    >>> tp1 = TensorProduct(R2.dx, R2.dy)
    >>> TensorProduct(tp1, R2.dx)(R2.e_x, R2.e_y, R2.e_x)
    1

    You can make partial contraction for instance when 'raising an index'.
    Putting ``None`` in the second argument of ``rcall`` means that the
    respective position in the tensor product is left as it is.

    >>> TP = TensorProduct
    >>> metric = TP(R2.dx, R2.dx) + 3*TP(R2.dy, R2.dy)
    >>> metric.rcall(R2.e_y, None)
    3*dy

    Or automatically pad the args with ``None`` without specifying them.

    >>> metric.rcall(R2.e_y)
    3*dy
    """

    def __new__(cls, *args):
        if any(contravariant_order(a) for a in args):
            raise ValueError('A vector field was supplied as an argument to TensorProduct.')
        # Order-0 (scalar) factors are pulled out in front as an ordinary
        # product; only true form factors stay inside the TensorProduct.
        scalar = Mul(*[m for m in args if covariant_order(m) == 0])
        forms = [m for m in args if covariant_order(m)]
        if forms:
            if len(forms) == 1:
                # A single form factor is just that form, scaled.
                return scalar*forms[0]
            return scalar*super().__new__(cls, *forms)
        else:
            return scalar

    def __init__(self, *args):
        super().__init__()
        self._args = args

    def __call__(self, *v_fields):
        """Apply on a list of vector_fields.

        If the number of vector fields supplied is not equal to the order of
        the form field the list of arguments is padded with ``None``'s.

        The list of arguments is divided in sublists depending on the order of
        the forms inside the tensor product. The sublists are provided as
        arguments to these forms and the resulting expressions are given to the
        constructor of ``TensorProduct``.
        """
        tot_order = covariant_order(self)
        tot_args = len(v_fields)
        if tot_args != tot_order:
            # Pad so partial contraction leaves trailing slots untouched.
            v_fields = list(v_fields) + [None]*(tot_order - tot_args)
        orders = [covariant_order(f) for f in self.args]
        # Cut points partitioning v_fields into consecutive chunks, one chunk
        # per factor, each chunk sized by that factor's covariant order.
        indices = [sum(orders[:i + 1]) for i in range(len(orders) - 1)]
        v_fields = [v_fields[i:j] for i, j in zip([0] + indices, indices + [None])]
        multipliers = [t[0].rcall(*t[1]) for t in zip(self.args, v_fields)]
        return TensorProduct(*multipliers)

    def _latex(self, printer, *args):
        elements = [printer._print(a) for a in self.args]
        return r'\otimes'.join(elements)
class WedgeProduct(TensorProduct):
    """Wedge product of forms.

    In the context of integration only completely antisymmetric forms make
    sense. The wedge product permits the creation of such forms.

    Examples
    ========

    Use the predefined R2 manifold, setup some boilerplate.

    >>> from diofant.diffgeom.rn import R2

    >>> WedgeProduct(R2.dx, R2.dy)(R2.e_x, R2.e_y)
    1
    >>> WedgeProduct(R2.dx, R2.dy)(R2.e_y, R2.e_x)
    -1
    >>> WedgeProduct(R2.dx, R2.x*R2.dy)(R2.x*R2.e_x, R2.e_y)
    x**2

    You can nest wedge products.

    >>> wp1 = WedgeProduct(R2.dx, R2.dy)
    >>> WedgeProduct(wp1, R2.dx)(R2.e_x, R2.e_y, R2.e_x)
    0
    """

    # TODO the calculation of signatures is slow
    # TODO you do not need all these permutations (neither the prefactor)
    def __call__(self, *vector_fields):
        """Apply on a list of vector_fields.

        The wedge product is rewritten internally as the antisymmetrization
        of the corresponding tensor product: it is summed over all argument
        permutations weighted by the permutation signatures and normalized
        by the factorials of the factor orders.
        """
        prefactor = 1/Mul(*(factorial(covariant_order(e)) for e in self.args))
        signatures = [Permutation(p).signature()
                      for p in permutations(range(len(vector_fields)))]
        tp = TensorProduct(*self.args)
        summands = [tp(*perm)*sign
                    for perm, sign in zip(permutations(vector_fields), signatures)]
        return prefactor*Add(*summands)
class LieDerivative(Expr):
    """Lie derivative with respect to a vector field.

    The transport operator that defines the Lie derivative is the pushforward of
    the field to be derived along the integral curve of the field with respect
    to which one derives.

    Examples
    ========

    >>> from diofant.diffgeom.rn import R2

    >>> LieDerivative(R2.e_x, R2.y)
    0
    >>> LieDerivative(R2.e_x, R2.x)
    1
    >>> LieDerivative(R2.e_x, R2.e_x)
    0

    The Lie derivative of a tensor field by another tensor field is equal to
    their commutator:

    >>> LieDerivative(R2.e_x, R2.e_r)
    Commutator(e_x, e_r)
    >>> LieDerivative(R2.e_x + R2.e_y, R2.x)
    1

    >>> tp = TensorProduct(R2.dx, R2.dy)
    >>> LieDerivative(R2.e_x, tp)
    LieDerivative(e_x, TensorProduct(dx, dy))
    >>> LieDerivative(R2.e_x, tp).doit()
    LieDerivative(e_x, TensorProduct(dx, dy))
    """

    def __new__(cls, v_field, expr):
        expr_order = covariant_order(expr)
        if contravariant_order(v_field) != 1 or covariant_order(v_field):
            raise ValueError('Lie derivatives are defined only with respect to'
                             ' vector fields. The supplied argument was not a '
                             'vector field.')
        if expr_order > 0:
            # A genuine form field: leave the derivative unevaluated.
            return super().__new__(cls, v_field, expr)
        if expr.atoms(BaseVectorField):
            # Lie derivative of a vector field is the commutator.
            return Commutator(v_field, expr)
        # Scalar field: the Lie derivative is plain directional derivation.
        return v_field.rcall(expr)

    def __init__(self, v_field, expr):
        super().__init__()
        self._v_field = v_field
        self._expr = expr
        self._args = (self._v_field, self._expr)

    def __call__(self, *args):
        v = self._v_field
        form = self._expr
        # Leibniz rule for forms: (L_v w)(a1..an) = v(w(a1..an))
        # - sum_i w(a1.., [v, ai], ..an).
        lead_term = v(form(*args))
        corrections = []
        for i in range(len(args)):
            factors = args[:i] + (Commutator(v, args[i]),) + args[i + 1:]
            corrections.append(Mul(*factors))
        return lead_term - Add(*corrections)
class BaseCovarDerivativeOp(Expr):
    """Covariant derivative operator with respect to a base vector.

    Examples
    ========

    >>> from diofant.diffgeom.rn import R2, R2_r

    >>> TP = TensorProduct
    >>> ch = metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
    >>> ch
    [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
    >>> cvd = BaseCovarDerivativeOp(R2_r, 0, ch)
    >>> cvd(R2.x)
    1
    >>> cvd(R2.x*R2.e_x)
    e_x
    """

    def __init__(self, coord_sys, index, christoffel):
        super().__init__()
        self._coord_sys = coord_sys  # coordinate system of the base vector
        self._index = Integer(index)  # index of the base vector to derive along
        self._christoffel = ImmutableDenseNDimArray(christoffel)
        self._args = self._coord_sys, self._index, self._christoffel

    def __call__(self, field):
        """Apply on a scalar field.

        The action of a vector field on a scalar field is a directional
        differentiation.

        If the argument is not a scalar field the behaviour is undefined.
        """
        if covariant_order(field) != 0:
            raise NotImplementedError
        # Express every base vector in the field in this operator's basis.
        field = vectors_in_basis(field, self._coord_sys)
        wrt_vector = self._coord_sys.base_vector(self._index)
        wrt_scalar = self._coord_sys.coord_function(self._index)
        vectors = list(field.atoms(BaseVectorField))
        # First step: replace all vectors with something susceptible to
        # derivation and do the derivation
        # TODO: you need a real dummy function for the next line
        d_funcs = [Function(f'_#_{i}')(wrt_scalar) for i,
                   b in enumerate(vectors)]
        d_result = field.subs(list(zip(vectors, d_funcs)))
        d_result = wrt_vector(d_result)
        # Second step: backsubstitute the vectors in
        d_result = d_result.subs(list(zip(d_funcs, vectors)))
        # Third step: evaluate the derivatives of the vectors
        derivs = []
        for v in vectors:
            # Christoffel correction for differentiating the base vector v
            # along wrt_vector, summed over the basis index k.
            d = Add(*[(self._christoffel[k, wrt_vector._index, v._index]
                       * v._coord_sys.base_vector(k))
                      for k in range(v._coord_sys.dim)])
            derivs.append(d)
        to_subs = [wrt_vector(d) for d in d_funcs]
        result = d_result.subs(list(zip(to_subs, derivs)))
        return result  # TODO .doit() # XXX doit for the Subs instances
class CovarDerivativeOp(Expr):
    """Covariant derivative operator.

    Examples
    ========

    >>> from diofant.diffgeom.rn import R2

    >>> TP = TensorProduct
    >>> ch = metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
    >>> ch
    [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
    >>> cvd = CovarDerivativeOp(R2.x*R2.e_x, ch)
    >>> cvd(R2.x)
    x
    >>> cvd(R2.x*R2.e_x)
    x*e_x
    """

    def __init__(self, wrt, christoffel):
        super().__init__()
        coord_systems = {v._coord_sys for v in wrt.atoms(BaseVectorField)}
        if len(coord_systems) > 1:  # pragma: no cover
            raise NotImplementedError
        if contravariant_order(wrt) != 1 or covariant_order(wrt):
            raise ValueError('Covariant derivatives are defined only with '
                             'respect to vector fields. The supplied argument '
                             'was not a vector field.')
        self._wrt = wrt
        self._christoffel = ImmutableDenseNDimArray(christoffel)
        self._args = self._wrt, self._christoffel

    def __call__(self, field):
        # Replace each base vector in ``wrt`` by the corresponding base
        # covariant-derivative operator and apply the result on the field.
        base_vectors = list(self._wrt.atoms(BaseVectorField))
        replacements = [(v, BaseCovarDerivativeOp(v._coord_sys, v._index,
                                                  self._christoffel))
                        for v in base_vectors]
        return self._wrt.subs(replacements).rcall(field)

    def _latex(self, printer, *args):
        wrt_latex = printer._print(self._wrt)
        return r'\mathbb{\nabla}_{%s}' % wrt_latex
###############################################################################
# Integral curves on vector fields
###############################################################################
def intcurve_series(vector_field, param, start_point, n=6, coord_sys=None, coeffs=False):
    r"""Return the series expansion for an integral curve of the field.

    Integral curve is a function `\gamma` taking a parameter in `R` to a point
    in the manifold. It verifies the equation:

    `V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)`

    where the given ``vector_field`` is denoted as `V`. This holds for any
    value `t` for the parameter and any scalar field `f`.

    This function returns a series expansion of `\gamma(t)` in terms of the
    coordinate system ``coord_sys``. The equations and expansions are necessarily
    done in coordinate-system-dependent way as there is no other way to
    represent movement between points on the manifold (i.e. there is no such
    thing as a difference of points for a general manifold).

    See Also
    ========

    intcurve_diffequ

    Parameters
    ==========

    vector_field
        the vector field for which an integral curve will be given

    param
        the argument of the function `\gamma` from R to the curve

    start_point
        the point which corresponds to `\gamma(0)`

    n
        the order to which to expand

    coord_sys
        the coordinate system in which to expand
        coeffs (default False) - if True return a list of elements of the expansion

    Examples
    ========

    Use the predefined R2 manifold:

    >>> from diofant.diffgeom.rn import R2_p, R2_r

    Specify a starting point and a vector field:

    >>> start_point = R2_r.point([x, y])
    >>> vector_field = R2_r.e_x

    Calculate the series:

    >>> intcurve_series(vector_field, t, start_point, n=3)
    Matrix([
    [t + x],
    [    y]])

    Or get the elements of the expansion in a list:

    >>> series = intcurve_series(vector_field, t, start_point, n=3, coeffs=True)
    >>> series[0]
    Matrix([
    [x],
    [y]])
    >>> series[1]
    Matrix([
    [t],
    [0]])
    >>> series[2]
    Matrix([
    [0],
    [0]])

    The series in the polar coordinate system:

    >>> series = intcurve_series(vector_field, t, start_point,
    ...                          n=3, coord_sys=R2_p, coeffs=True)
    >>> series[0]
    Matrix([
    [sqrt(x**2 + y**2)],
    [      atan2(y, x)]])
    >>> series[1]
    Matrix([
    [t*x/sqrt(x**2 + y**2)],
    [  -t*y/(x**2 + y**2)]])
    >>> series[2]
    Matrix([
    [t**2*(-x**2/(x**2 + y**2)**(3/2) + 1/sqrt(x**2 + y**2))/2],
    [                                t**2*x*y/(x**2 + y**2)**2]])
    """
    if contravariant_order(vector_field) != 1 or covariant_order(vector_field):
        raise ValueError('The supplied field was not a vector field.')

    def apply_field(scalar_field, times):
        """Apply ``vector_field`` ``times`` times on ``scalar_field``."""
        result = scalar_field
        for _ in range(times):
            result = vector_field.rcall(result)
        return result

    def coord_taylor_terms(coord_function):
        """Taylor terms of the curve for a single coordinate function."""
        return [param**i*apply_field(coord_function, i).rcall(start_point)/factorial(i)
                for i in range(n)]

    if not coord_sys:
        coord_sys = start_point._coord_sys
    taylor_terms = [coord_taylor_terms(f) for f in coord_sys.coord_functions()]
    if coeffs:
        # One matrix per expansion order, each stacking all coordinates.
        return [Matrix(term) for term in zip(*taylor_terms)]
    return Matrix([sum(row) for row in taylor_terms])
def intcurve_diffequ(vector_field, param, start_point, coord_sys=None):
    r"""Return the differential equation for an integral curve of the field.

    Integral curve is a function `\gamma` taking a parameter in `R` to a point
    in the manifold. It verifies the equation:

    `V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)`

    where the given ``vector_field`` is denoted as `V`. This holds for any
    value `t` for the parameter and any scalar field `f`.

    This function returns the differential equation of `\gamma(t)` in terms of the
    coordinate system ``coord_sys``. The equations and expansions are necessarily
    done in coordinate-system-dependent way as there is no other way to
    represent movement between points on the manifold (i.e. there is no such
    thing as a difference of points for a general manifold).

    See Also
    ========

    intcurve_series

    Parameters
    ==========

    vector_field
        the vector field for which an integral curve will be given

    param
        the argument of the function `\gamma` from R to the curve

    start_point
        the point which corresponds to `\gamma(0)`

    coord_sys
        the coordinate system in which to give the equations

    Returns
    =======

    a tuple of (equations, initial conditions)

    Examples
    ========

    Use the predefined R2 manifold:

    >>> from diofant.diffgeom.rn import R2, R2_p, R2_r

    Specify a starting point and a vector field:

    >>> start_point = R2_r.point([0, 1])
    >>> vector_field = -R2.y*R2.e_x + R2.x*R2.e_y

    Get the equation:

    >>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point)
    >>> equations
    [f_1(t) + Derivative(f_0(t), t), -f_0(t) + Derivative(f_1(t), t)]
    >>> init_cond
    [f_0(0), f_1(0) - 1]

    The series in the polar coordinate system:

    >>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point, R2_p)
    >>> equations
    [Derivative(f_0(t), t), Derivative(f_1(t), t) - 1]
    >>> init_cond
    [f_0(0) - 1, f_1(0) - pi/2]
    """
    if contravariant_order(vector_field) != 1 or covariant_order(vector_field):
        raise ValueError('The supplied field was not a vector field.')
    if not coord_sys:
        coord_sys = start_point._coord_sys
    # One unknown function gamma_i(param) per coordinate of the curve.
    gammas = [Function(f'f_{i:d}')(param)
              for i in range(start_point._coord_sys.dim)]
    arbitrary_p = Point(coord_sys, gammas)
    equations = []
    init_cond = []
    for cf in coord_sys.coord_functions():
        on_curve = cf.rcall(arbitrary_p)
        # d/dt f_i(gamma(t)) = V(f_i)(gamma(t))
        equations.append(simplify(diff(on_curve, param)
                                  - vector_field.rcall(cf).rcall(arbitrary_p)))
        # gamma(0) = start_point, coordinate by coordinate.
        init_cond.append(simplify(on_curve.subs({param: 0})
                                  - cf.rcall(start_point)))
    return equations, init_cond
###############################################################################
# Helpers
###############################################################################
def dummyfy(args, exprs):
    """Replace the symbols of ``args`` by dummies, in ``args`` and ``exprs``."""
    # TODO Is this a good idea?
    d_args = Matrix([s.as_dummy() for s in args])
    replacements = list(zip(args, d_args))
    d_exprs = Matrix([sympify(expr).subs(replacements) for expr in exprs])
    return d_args, d_exprs
###############################################################################
# Helpers
###############################################################################
def contravariant_order(expr, _strict=False):
    """Return the contravariant order of an expression.

    Examples
    ========

    >>> from diofant.diffgeom.rn import R2

    >>> contravariant_order(a)
    0
    >>> contravariant_order(a*R2.x + 2)
    0
    >>> contravariant_order(a*R2.x*R2.e_y + R2.e_x)
    1
    """
    # TODO move some of this to class methods.
    # TODO rewrite using the .as_blah_blah methods
    if isinstance(expr, Add):
        # Every summand must carry the same order.
        orders = [contravariant_order(e) for e in expr.args]
        if len(set(orders)) != 1:
            raise ValueError('Misformed expression containing contravariant fields of varying order.')
        return orders[0]
    elif isinstance(expr, Mul):
        # At most one factor may be a vector; the rest must be scalars.
        orders = [contravariant_order(e) for e in expr.args]
        not_zero = [o for o in orders if o != 0]
        if len(not_zero) > 1:
            raise ValueError('Misformed expression containing multiplication between vectors.')
        return 0 if not not_zero else not_zero[0]
    elif isinstance(expr, Pow):
        # Fix: test the *contravariant* order of base and exponent.  The
        # previous code tested covariant_order here, so a power of a vector
        # field (e.g. ``e_x**2``) slipped through silently as order 0 instead
        # of raising, contradicting the error message below.
        if contravariant_order(expr.base) or contravariant_order(expr.exp):
            raise ValueError(
                'Misformed expression containing a power of a vector.')
        return 0
    elif isinstance(expr, BaseVectorField):
        return 1
    elif not _strict or expr.atoms(BaseScalarField):
        return 0
    else:  # If it does not contain anything related to the diffgeom module and it is _strict
        return -1
def covariant_order(expr, _strict=False):
    """Return the covariant order of an expression.

    Examples
    ========

    >>> from diofant.diffgeom.rn import R2

    >>> covariant_order(a)
    0
    >>> covariant_order(a*R2.x + 2)
    0
    >>> covariant_order(a*R2.x*R2.dy + R2.dx)
    1
    """
    # TODO move some of this to class methods.
    # TODO rewrite using the .as_blah_blah methods
    if isinstance(expr, Add):
        # Every summand must carry the same order.
        orders = {covariant_order(e) for e in expr.args}
        if len(orders) != 1:
            raise ValueError('Misformed expression containing form fields of varying order.')
        return orders.pop()
    elif isinstance(expr, Mul):
        # At most one factor may be a form; the rest must be scalars.
        nonzero = [o for o in (covariant_order(e) for e in expr.args) if o]
        if len(nonzero) > 1:
            raise ValueError('Misformed expression containing multiplication between forms.')
        return nonzero[0] if nonzero else 0
    elif isinstance(expr, Pow):
        if covariant_order(expr.base) or covariant_order(expr.exp):
            raise ValueError(
                'Misformed expression containing a power of a form.')
        return 0
    elif isinstance(expr, Differential):
        # The exterior derivative raises the order of its argument by one.
        return covariant_order(*expr.args) + 1
    elif isinstance(expr, TensorProduct):
        return sum(covariant_order(a) for a in expr.args)
    elif not _strict or expr.atoms(BaseScalarField):
        return 0
    else:
        # Nothing diffgeom-related in the expression and _strict is set.
        return -1
###############################################################################
# Coordinate transformation functions
###############################################################################
def vectors_in_basis(expr, to_sys):
    """Transform all base vectors in base vectors of a specified coord basis.

    While the new base vectors are in the new coordinate system basis, any
    coefficients are kept in the old system.

    Examples
    ========

    >>> from diofant.diffgeom.rn import R2_r, R2_p

    >>> vectors_in_basis(R2_r.e_x, R2_p)
    -y*e_theta/(x**2 + y**2) + x*e_r/sqrt(x**2 + y**2)
    >>> vectors_in_basis(R2_p.e_r, R2_r)
    sin(theta)*e_y + cos(theta)*e_x
    """
    substitutions = []
    for v in expr.atoms(BaseVectorField):
        cs = v._coord_sys
        # Jacobian between the vector's own system and the target system,
        # expressed in the old coordinate functions.
        jac = cs.jacobian(to_sys, cs.coord_functions())
        new_vector = (jac.T*Matrix(to_sys.base_vectors()))[v._index]
        substitutions.append((v, new_vector))
    return expr.subs(substitutions)
###############################################################################
# Coordinate-dependent functions
###############################################################################
def twoform_to_matrix(expr):
    """Return the matrix representing the twoform.

    For the twoform `w` return the matrix `M` such that `M[i,j]=w(e_i, e_j)`,
    where `e_i` is the i-th base vector field for the coordinate system in
    which the expression of `w` is given.

    Examples
    ========

    >>> from diofant.diffgeom.rn import R2

    >>> TP = TensorProduct
    >>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
    Matrix([
    [1, 0],
    [0, 1]])
    >>> twoform_to_matrix(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
    Matrix([
    [x, 0],
    [0, 1]])
    >>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy) - TP(R2.dx, R2.dy)/2)
    Matrix([
    [   1, 0],
    [-1/2, 1]])
    """
    if covariant_order(expr) != 2 or contravariant_order(expr):
        raise ValueError('The input expression is not a two-form.')
    coord_systems = expr.atoms(CoordSystem)
    if len(coord_systems) != 1:
        raise ValueError('The input expression concerns more than one '
                         'coordinate systems, hence there is no unambiguous '
                         'way to choose a coordinate system for the matrix.')
    coord_sys, = coord_systems
    vectors = coord_sys.base_vectors()
    expanded = expr.expand()
    return Matrix([[expanded.rcall(v1, v2) for v1 in vectors]
                   for v2 in vectors])
def metric_to_Christoffel_1st(expr):
    """Return the nested list of Christoffel symbols for the given metric.

    This returns the Christoffel symbol of first kind that represents the
    Levi-Civita connection for the given metric.

    Examples
    ========

    >>> from diofant.diffgeom.rn import R2

    >>> TP = TensorProduct
    >>> metric_to_Christoffel_1st(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
    [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
    >>> metric_to_Christoffel_1st(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
    [[[1/2, 0], [0, 0]], [[0, 0], [0, 0]]]
    """
    matrix = twoform_to_matrix(expr)
    if not matrix.is_symmetric():
        raise ValueError(
            'The two-form representing the metric is not symmetric.')
    coord_sys = expr.atoms(CoordSystem).pop()
    # Differentiate every metric component along each base vector; base
    # vector fields are callable, so they can be passed to applyfunc directly.
    derivs = [matrix.applyfunc(d) for d in coord_sys.base_vectors()]
    indices = range(coord_sys.dim)
    christoffel = [[[(derivs[k][i, j] + derivs[j][i, k] - derivs[i][j, k])/2
                     for k in indices]
                    for j in indices]
                   for i in indices]
    return ImmutableDenseNDimArray(christoffel)
def metric_to_Christoffel_2nd(expr):
    """Return the nested list of Christoffel symbols for the given metric.

    This returns the Christoffel symbol of second kind that represents the
    Levi-Civita connection for the given metric.

    Examples
    ========

    >>> from diofant.diffgeom.rn import R2

    >>> TP = TensorProduct
    >>> metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
    [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
    >>> metric_to_Christoffel_2nd(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
    [[[1/(2*x), 0], [0, 0]], [[0, 0], [0, 0]]]
    """
    ch_1st = metric_to_Christoffel_1st(expr)
    coord_sys = expr.atoms(CoordSystem).pop()
    indices = list(range(coord_sys.dim))
    # XXX workaround, inverting a matrix does not work if it contains non
    # symbols
    # matrix = twoform_to_matrix(expr).inv()
    matrix = twoform_to_matrix(expr)
    s_fields = set()
    for e in matrix:
        s_fields.update(e.atoms(BaseScalarField))
    s_fields = list(s_fields)
    dums = coord_sys._dummies
    # Temporarily swap the base scalar fields for plain dummy symbols so the
    # inversion routine can cope, then substitute the fields back afterwards.
    matrix = matrix.subs(list(zip(s_fields, dums))).inv().subs(list(zip(dums, s_fields)))
    # XXX end of workaround
    # Raise the first index with the inverse metric:
    # Gamma^i_{jk} = g^{il} * Gamma_{ljk}.
    christoffel = [[[Add(*[matrix[i, l]*ch_1st[l, j, k] for l in indices])
                     for k in indices]
                    for j in indices]
                   for i in indices]
    return ImmutableDenseNDimArray(christoffel)
def metric_to_Riemann_components(expr):
    """Return the components of the Riemann tensor expressed in a given basis.

    Given a metric it calculates the components of the Riemann tensor in the
    canonical basis of the coordinate system in which the metric expression is
    given.

    Examples
    ========

    >>> from diofant.diffgeom.rn import R2
    >>> TP = TensorProduct
    >>> metric_to_Riemann_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
    [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]
    >>> non_trivial_metric = (exp(2*R2.r)*TP(R2.dr, R2.dr) +
    ...                       R2.r**2*TP(R2.dtheta, R2.dtheta))
    >>> non_trivial_metric
    E**(2*r)*TensorProduct(dr, dr) + r**2*TensorProduct(dtheta, dtheta)
    >>> riemann = metric_to_Riemann_components(non_trivial_metric)
    >>> riemann[0, :, :, :]
    [[[0, 0], [0, 0]], [[0, E**(-2*r)*r], [-E**(-2*r)*r, 0]]]
    >>> riemann[1, :, :, :]
    [[[0, -1/r], [1/r, 0]], [[0, 0], [0, 0]]]
    """
    ch_2nd = metric_to_Christoffel_2nd(expr)
    coord_sys = expr.atoms(CoordSystem).pop()
    idx = list(range(coord_sys.dim))
    base_vectors = coord_sys.base_vectors()

    # dch[i][j][k][m] holds the derivative of Gamma^i_{jk} along the m-th
    # base vector of the coordinate system.
    dch = [[[[bv(ch_2nd[i, j, k])
              for bv in base_vectors]
             for k in idx]
            for j in idx]
           for i in idx]

    def component(rho, sig, mu, nu):
        # R^rho_{sig mu nu}: derivative part plus the quadratic
        # Gamma*Gamma part, exactly as in the coordinate formula.
        curv = dch[rho][sig][nu][mu] - dch[rho][sig][mu][nu]
        curv += Add(*[ch_2nd[rho, l, mu]*ch_2nd[l, sig, nu] -
                      ch_2nd[rho, l, nu]*ch_2nd[l, sig, mu]
                      for l in idx])
        return curv

    riemann = [[[[component(rho, sig, mu, nu)
                  for nu in idx]
                 for mu in idx]
                for sig in idx]
               for rho in idx]
    return ImmutableDenseNDimArray(riemann)
def metric_to_Ricci_components(expr):
    """Return the components of the Ricci tensor expressed in a given basis.

    Given a metric it calculates the components of the Ricci tensor in the
    canonical basis of the coordinate system in which the metric expression is
    given.

    Examples
    ========

    >>> from diofant.diffgeom.rn import R2
    >>> TP = TensorProduct
    >>> metric_to_Ricci_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
    [[0, 0], [0, 0]]
    >>> non_trivial_metric = (exp(2*R2.r)*TP(R2.dr, R2.dr) +
    ...                       R2.r**2*TP(R2.dtheta, R2.dtheta))
    >>> non_trivial_metric
    E**(2*r)*TensorProduct(dr, dr) + r**2*TensorProduct(dtheta, dtheta)
    >>> metric_to_Ricci_components(non_trivial_metric)
    [[1/r, 0], [0, E**(-2*r)*r]]
    """
    riemann = metric_to_Riemann_components(expr)
    dim = expr.atoms(CoordSystem).pop().dim
    rng = range(dim)
    # Ricci is the contraction of Riemann over the first and third indices:
    # Ric_{ij} = R^k_{i k j}
    ricci = [[Add(*(riemann[k, i, k, j] for k in rng))
              for j in rng]
             for i in rng]
    return ImmutableDenseNDimArray(ricci)
| {
"repo_name": "skirpichev/omg",
"path": "diofant/diffgeom/diffgeom.py",
"copies": "1",
"size": "54312",
"license": "bsd-3-clause",
"hash": -8605965319946006000,
"line_mean": 32.3611793612,
"line_max": 122,
"alpha_frac": 0.5766681396,
"autogenerated": false,
"ratio": 3.628298483532634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47049666231326337,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import permutations
from ..core import Add, Basic, Dummy, E, Eq, Integer, Mul, Wild, pi, sympify
from ..core.compatibility import ordered
from ..functions import (Ei, LambertW, Piecewise, acosh, asin, asinh, atan,
binomial, cos, cosh, cot, coth, erf, erfi, exp, li,
log, root, sin, sinh, sqrt, tan, tanh)
from ..logic import And
from ..polys import PolynomialError, cancel, factor, gcd, lcm, quo
from ..polys.constructor import construct_domain
from ..polys.monomials import itermonomials
from ..polys.polyroots import root_factors
from ..polys.solvers import solve_lin_sys
from ..utilities.iterables import uniq
def components(f, x):
    """
    Return the set of all functional components of the given expression:
    symbols, function applications and compositions, and non-integer powers.
    Fractional powers are collected with minimal, positive exponents.

    >>> components(sin(x)*cos(x)**2, x)
    {x, sin(x), cos(x)}

    See Also
    ========
    diofant.integrals.heurisch.heurisch
    """
    found = set()
    # Anything independent of x contributes no components.
    if x not in f.free_symbols:
        return found

    if f.is_Symbol:
        found.add(f)
    elif f.is_Function or f.is_Derivative:
        # Recurse into arguments, then record the application itself.
        for arg in f.args:
            found |= components(arg, x)
        found.add(f)
    elif f.is_Pow:
        found |= components(f.base, x)
        exponent = f.exp
        if not exponent.is_Integer:
            if exponent.is_Rational:
                # Collect the minimal positive fractional power of the base.
                found.add(root(f.base, exponent.denominator))
            else:
                # Symbolic exponent: keep both the exponent's components
                # and the whole power expression.
                found |= components(exponent, x) | {f}
    else:
        # Generic node (Add, Mul, ...): recurse into all arguments.
        for arg in f.args:
            found |= components(arg, x)

    return found
# name -> [] of symbols
_symbols_cache = {}


# NB @cacheit is not convenient here
def _symbols(name, n):
    """Get vector of symbols local to this module.

    Symbols are cached per ``name`` so repeated calls return the *same*
    Dummy objects, which is essential for the permutation machinery in
    :func:`heurisch`.
    """
    cached = _symbols_cache.setdefault(name, [])
    # Extend the cached vector lazily until it is long enough.
    while len(cached) < n:
        cached.append(Dummy(f'{name}{len(cached):d}'))
    return cached[:n]
def heurisch_wrapper(f, x, rewrite=False, hints=None, mappings=None, retries=3,
                     degree_offset=0, unnecessary_permutations=None):
    """
    A wrapper around the heurisch integration algorithm.

    This method takes the result from heurisch and checks for poles in the
    denominator. For each of these poles, the integral is reevaluated, and
    the final integration result is given in terms of a Piecewise.

    Parameters are forwarded verbatim to :func:`heurisch`.

    Examples
    ========

    >>> heurisch(cos(n*x), x)
    sin(n*x)/n
    >>> heurisch_wrapper(cos(n*x), x)
    Piecewise((x, Eq(n, 0)), (sin(n*x)/n, true))

    See Also
    ========
    diofant.integrals.heurisch.heurisch
    """
    from ..solvers.solvers import solve, denoms
    f = sympify(f)
    # Trivial case: f is constant with respect to x.
    if x not in f.free_symbols:
        return f*x
    res = heurisch(f, x, rewrite, hints, mappings, retries, degree_offset,
                   unnecessary_permutations)
    # heurisch may return None (failure); only expressions get pole analysis.
    if not isinstance(res, Basic):
        return res
    # We consider each denominator in the expression, and try to find
    # cases where one or more symbolic denominator might be zero. The
    # conditions for these cases are stored in the list slns.
    slns = []
    for d in denoms(res):
        ds = list(ordered(d.free_symbols - {x}))
        if ds:
            slns += solve(d, *ds)
    if not slns:
        return res
    slns = list(uniq(slns))
    # Remove the solutions corresponding to poles in the original expression.
    slns0 = []
    for d in denoms(f):
        ds = list(ordered(d.free_symbols - {x}))
        if ds:
            slns0 += solve(d, *ds)
    slns = [s for s in slns if s not in slns0]
    if not slns:
        return res
    if len(slns) > 1:
        # Combine individual pole conditions and also solve them jointly,
        # so overlapping degenerate cases are covered.
        eqs = []
        for sub_dict in slns:
            eqs.extend([Eq(key, value) for key, value in sub_dict.items()])
        slns = solve(eqs, *ordered(set().union(*[e.free_symbols
                                                 for e in eqs]) - {x})) + slns
    # For each case listed in the list slns, we reevaluate the integral.
    pairs = []
    for sub_dict in slns:
        expr = heurisch(f.subs(sub_dict), x, rewrite, hints, mappings, retries,
                        degree_offset, unnecessary_permutations)
        cond = And(*[Eq(key, value) for key, value in sub_dict.items()])
        pairs.append((expr, cond))
    # Generic (no-pole) branch comes last with condition True.
    pairs.append((heurisch(f, x, rewrite, hints, mappings, retries,
                           degree_offset, unnecessary_permutations), True))
    return Piecewise(*pairs)
def heurisch(f, x, rewrite=False, hints=None, mappings=None, retries=3,
             degree_offset=0, unnecessary_permutations=None):
    """
    Compute indefinite integral using heuristic Risch algorithm.

    This is a heuristic approach to indefinite integration in finite
    terms using the extended heuristic (parallel) Risch algorithm, based
    on Manuel Bronstein's "Poor Man's Integrator".

    The algorithm supports various classes of functions including
    transcendental elementary or special functions like Airy,
    Bessel, Whittaker and Lambert.

    Note that this algorithm is not a decision procedure. If it isn't
    able to compute the antiderivative for a given function, then this is
    not a proof that such a functions does not exist.  One should use
    recursive Risch algorithm in such case.  It's an open question if
    this algorithm can be made a full decision procedure.

    This is an internal integrator procedure. You should use toplevel
    'integrate' function in most cases, as this procedure needs some
    preprocessing steps and otherwise may fail.

    Parameters
    ==========

    f : Expr
        expression
    x : Symbol
        variable
    rewrite : Boolean, optional
        force rewrite 'f' in terms of 'tan' and 'tanh', default False.
    hints : None or list
        a list of functions that may appear in anti-derivate.  If
        None (default) - no suggestions at all, if empty list - try
        to figure out.
    mappings : None or iterable, optional
        candidate orderings of the term -> symbol substitution; computed
        automatically when None.
    retries : int, optional
        how many times to retry with the remaining mappings on failure.
    degree_offset : int, optional
        added to the degree bound of the polynomial ansatz.
    unnecessary_permutations : None or list, optional
        mapping entries that never need to be permuted (internal).

    Examples
    ========

    >>> heurisch(y*tan(x), x)
    y*log(tan(x)**2 + 1)/2

    References
    ==========

    * Manuel Bronstein's "Poor Man's Integrator",
      http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html

    See Also
    ========

    diofant.integrals.integrals.Integral.doit
    diofant.integrals.integrals.Integral
    diofant.integrals.heurisch.components
    """
    f = sympify(f)
    # Constant with respect to x: the antiderivative is just f*x.
    if x not in f.free_symbols:
        return f*x

    # Pull the x-independent factor out front (only useful for non-sums).
    if not f.is_Add:
        indep, f = f.as_independent(x)
    else:
        indep = Integer(1)

    rewritables = {
        (sin, cos, cot): tan,
        (sinh, cosh, coth): tanh,
    }

    if rewrite:
        for candidates, rule in rewritables.items():
            f = f.rewrite(candidates, rule)
    else:
        # If f contains none of the rewritable functions, mark rewrite as
        # already done so the failure path does not retry uselessly.
        for candidates in rewritables:
            if f.has(*candidates):
                break
        else:
            rewrite = True

    terms = components(f, x)

    if hints is not None:
        if not hints:
            # Empty hint list: try to guess useful special functions for
            # the ansatz by pattern matching on the components of f.
            a = Wild('a', exclude=[x])
            b = Wild('b', exclude=[x])
            c = Wild('c', exclude=[x])

            for g in set(terms):  # using copy of terms
                if g.is_Function:
                    if isinstance(g, li):
                        M = g.args[0].match(a*x**b)
                        if M is not None:
                            terms.add(x*(li(M[a]*x**M[b]) - (M[a]*x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])))
                elif g.is_Pow:
                    if g.base is E:
                        # Gaussian-like exponentials -> erf/erfi candidates.
                        M = g.exp.match(a*x**2)
                        if M is not None:
                            if M[a].is_positive:
                                terms.add(erfi(sqrt(M[a])*x))
                            else:  # M[a].is_negative or unknown
                                terms.add(erf(sqrt(-M[a])*x))
                        M = g.exp.match(a*x**2 + b*x + c)
                        if M is not None:
                            if M[a].is_positive:
                                terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a])) *
                                          erfi(sqrt(M[a])*x + M[b]/(2*sqrt(M[a]))))
                            elif M[a].is_negative:
                                terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a])) *
                                          erf(sqrt(-M[a])*x - M[b]/(2*sqrt(-M[a]))))
                        M = g.exp.match(a*log(x)**2)
                        if M is not None:
                            if M[a].is_positive:
                                terms.add(erfi(sqrt(M[a])*log(x) + 1/(2*sqrt(M[a]))))
                            if M[a].is_negative:
                                terms.add(erf(sqrt(-M[a])*log(x) - 1/(2*sqrt(-M[a]))))
                    elif g.exp.is_Rational and g.exp.denominator == 2:
                        # Square roots of quadratics -> inverse (hyperbolic)
                        # trigonometric candidates.
                        M = g.base.match(a*x**2 + b)
                        if M is not None and M[b].is_positive:
                            if M[a].is_positive:
                                terms.add(asinh(sqrt(M[a]/M[b])*x))
                            elif M[a].is_negative:
                                terms.add(asin(sqrt(-M[a]/M[b])*x))
                        M = g.base.match(a*x**2 - b)
                        if M is not None and M[b].is_positive:
                            if M[a].is_positive:
                                terms.add(acosh(sqrt(M[a]/M[b])*x))
                            elif M[a].is_negative:
                                terms.add((-M[b]/2*sqrt(-M[a]) *
                                           atan(sqrt(-M[a])*x/sqrt(M[a]*x**2 - M[b]))))
        else:
            terms |= set(hints)

    # Close the component set under differentiation.
    for g in set(terms):  # using copy of terms
        terms |= components(cancel(g.diff(x)), x)

    # TODO: caching is significant factor for why permutations work at all. Change this.
    V = _symbols('x', len(terms))

    # sort mapping expressions from largest to smallest (last is always x).
    mapping = list(reversed(list(zip(*ordered(
        [(a[0].as_independent(x)[1], a) for a in zip(terms, V)])))[1]))
    rev_mapping = {v: k for k, v in mapping}
    if mappings is None:
        # optimizing the number of permutations of mapping
        assert mapping[-1][0] == x  # if not, find it and correct this comment
        unnecessary_permutations = [mapping.pop(-1)]
        mappings = permutations(mapping)
    else:
        unnecessary_permutations = unnecessary_permutations or []

    def _substitute(expr):
        return expr.subs(mapping)

    # Search for a substitution order under which f and the derivatives of
    # all components become rational functions of V.
    for mapping in mappings:
        mapping = list(mapping)
        mapping = mapping + unnecessary_permutations
        diffs = [_substitute(cancel(g.diff(x))) for g in terms]
        denoms = [g.as_numer_denom()[1] for g in diffs]
        if all(h.is_polynomial(*V) for h in denoms) and _substitute(f).is_rational_function(*V):
            denom = reduce(lambda p, q: lcm(p, q, *V), denoms)
            break
    else:
        # No suitable mapping; as a last resort, rewrite trigonometric
        # functions in terms of tan/tanh and try once more.
        if not rewrite:
            result = heurisch(f, x, rewrite=True, hints=hints,
                              unnecessary_permutations=unnecessary_permutations)
            if result is not None:
                return indep*result
        return

    numers = [cancel(denom*g) for g in diffs]

    def _derivation(h):
        # Total derivative with respect to x, expressed in the V variables.
        return Add(*[d * h.diff(v) for d, v in zip(numers, V)])

    def _deflation(p):
        # Remove repeated factors that are constants under _derivation.
        for y in V:
            if not p.has(y):
                continue
            if _derivation(p) != 0:
                c, q = p.as_poly(y).primitive()
                return _deflation(c)*gcd(q, q.diff(y)).as_expr()
        return p

    def _splitter(p):
        # Split p into a "special" part (derivation-related factors) and
        # a remaining part; see Bronstein's pmint.
        for y in V:
            if not p.has(y):
                continue
            if _derivation(y) != 0:
                c, q = p.as_poly(y).primitive()
                q = q.as_expr()
                h = gcd(q, _derivation(q), y)
                s = quo(h, gcd(q, q.diff(y), y), y)
                c_split = _splitter(c)
                if s.as_poly(y).degree() == 0:
                    return c_split[0], q*c_split[1]
                q_split = _splitter(cancel(q / s))
                return c_split[0]*q_split[0]*s, c_split[1]*q_split[1]
        return Integer(1), p

    # Polynomials that must divide the candidate's denominator (value True
    # means the factor also multiplies s below).
    special = {}
    for term in terms:
        if term.is_Function:
            if isinstance(term, tan):
                special[1 + _substitute(term)**2] = False
            elif isinstance(term, tanh):
                special[1 + _substitute(term)] = False
                special[1 - _substitute(term)] = False
            elif isinstance(term, LambertW):
                special[_substitute(term)] = True

    F = _substitute(f)
    P, Q = F.as_numer_denom()

    u_split = _splitter(denom)
    v_split = _splitter(Q)

    polys = set(list(v_split) + [u_split[0]] + list(special))
    s = u_split[0] * Mul(*[k for k, v in special.items() if v])
    polified = [p.as_poly(*V) for p in [s, P, Q]]
    if None in polified:
        return

    # --- definitions for _integrate ---
    a, b, c = [p.total_degree() for p in polified]
    poly_denom = (s * v_split[0] * _deflation(v_split[1])).as_expr()

    def _exponent(g):
        # Degree bound contribution of fractional powers inside g.
        if g.is_Pow:
            if g.exp.is_Rational and g.exp.denominator != 1:
                if g.exp.numerator > 0:
                    return g.exp.numerator + g.exp.denominator - 1
                else:
                    return abs(g.exp.numerator + g.exp.denominator)
            else:
                return 1
        elif not g.is_Atom and g.args:
            return max(_exponent(h) for h in g.args)
        else:
            return 1

    A, B = _exponent(f), a + max(b, c)
    degree = A + B + degree_offset
    if A > 1 and B > 1:
        degree -= 1

    monoms = itermonomials(V, degree)
    poly_coeffs = _symbols('A', binomial(len(V) + degree, degree))
    poly_part = Add(*[poly_coeffs[i]*monomial
                      for i, monomial in enumerate(ordered(monoms))])

    reducibles = set()
    for poly in polys:
        if poly.has(*V):
            try:
                factorization = factor(poly, greedy=True)
            except PolynomialError:
                # NOTE(fix): the fallback to the unfactored polynomial now
                # happens only when factor() fails.  Previously the original
                # polynomial was unconditionally re-assigned after this
                # try/except, discarding the factorization and making the
                # factor() call dead code.
                factorization = poly
            if factorization.is_Mul:
                reducibles |= set(factorization.args)
            else:
                reducibles.add(factorization)

    def _integrate(field=None):
        # Build the candidate (rational part + logarithmic part), derive
        # it and solve the resulting linear system for the coefficients.
        irreducibles = set()
        for poly in reducibles:
            for z in poly.free_symbols:
                if z in V:
                    break  # should this be: `irreducibles |= \
            else:          # set(root_factors(poly, z, filter=field))`
                continue   # and the line below deleted?
                           # |
                           # V
            irreducibles |= set(root_factors(poly, z, filter=field))

        log_part = []
        B = _symbols('B', len(irreducibles))

        # Note: the ordering matters here
        for poly, b in reversed(list(ordered(zip(irreducibles, B)))):
            if poly.has(*V):
                poly_coeffs.append(b)
                log_part.append(b * log(poly))

        # TODO: Currently it's better to use symbolic expressions here instead
        # of rational functions, because it's simpler and FracElement doesn't
        # give big speed improvement yet. This is because cancellation is slow
        # due to slow polynomial GCD algorithms. If this gets improved then
        # revise this code.
        candidate = poly_part/poly_denom + Add(*log_part)
        h = F - _derivation(candidate) / denom
        raw_numer = h.as_numer_denom()[0]

        # Rewrite raw_numer as a polynomial in K[coeffs][V] where K is a field
        # that we have to determine. We can't use simply atoms() because log(3),
        # sqrt(y) and similar expressions can appear, leading to non-trivial
        # domains.
        syms = set(poly_coeffs) | set(V)
        non_syms = set()

        def find_non_syms(expr):
            if expr.is_Integer or expr.is_Rational:
                pass  # ignore trivial numbers
            elif expr in syms:
                pass  # ignore variables
            elif not expr.has(*syms):
                non_syms.add(expr)
            elif expr.is_Add or expr.is_Mul or expr.is_Pow:
                list(map(find_non_syms, expr.args))
            else:
                # TODO: Non-polynomial expression. This should have been
                # filtered out at an earlier stage.
                raise PolynomialError

        try:
            find_non_syms(raw_numer)
        except PolynomialError:
            return
        else:
            ground, _ = construct_domain(non_syms, field=True)

        coeff_ring = ground.poly_ring(*poly_coeffs)
        ring = coeff_ring.poly_ring(*V)

        try:
            numer = ring.from_expr(raw_numer)
        except ValueError:
            raise PolynomialError

        solution = solve_lin_sys(numer.coeffs(), coeff_ring)
        if solution is not None:
            solution = [(coeff_ring.symbols[coeff_ring.index(k)],
                         v.as_expr()) for k, v in solution.items()]
            return candidate.subs(solution).subs(
                list(zip(poly_coeffs, [Integer(0)]*len(poly_coeffs))))

    if not (F.free_symbols - set(V)):
        # Purely rational problem: try over Q first, then generically.
        solution = _integrate('Q')

        if solution is None:
            solution = _integrate()
    else:
        solution = _integrate()

    if solution is not None:
        antideriv = solution.subs(rev_mapping)
        antideriv = cancel(antideriv).expand(force=True)

        if antideriv.is_Add:
            antideriv = antideriv.as_independent(x)[1]

        return indep*antideriv
    else:
        # Failure: retry with the remaining permutations of the mapping.
        if retries >= 0:
            result = heurisch(f, x, mappings=mappings, rewrite=rewrite, hints=hints, retries=retries - 1, unnecessary_permutations=unnecessary_permutations)

            if result is not None:
                return indep*result
| {
"repo_name": "skirpichev/omg",
"path": "diofant/integrals/heurisch.py",
"copies": "1",
"size": "18281",
"license": "bsd-3-clause",
"hash": -7827721510920935000,
"line_mean": 32.7911275416,
"line_max": 156,
"alpha_frac": 0.527542257,
"autogenerated": false,
"ratio": 3.773168214654283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48007104716542826,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from itertools import product
from operator import mul
import numpy as np
from .core import Array
from .utils import meta_from_array
from ..base import tokenize
from ..core import flatten
from ..highlevelgraph import HighLevelGraph
from ..utils import M
def reshape_rechunk(inshape, outshape, inchunks):
    """Compute input and output chunk schemes for a reshape.

    Walks both shapes from the last axis to the first and, for every group
    of axes that is merged (``(4, 4, 4) -> (64,)``) or split
    (``(64,) -> (4, 4, 4)``), derives chunkings for the input and output
    arrays such that each output chunk maps onto whole input chunks.

    Raises ``ValueError`` when the shapes cannot be related by merging or
    splitting contiguous axis groups.
    """
    assert all(isinstance(c, tuple) for c in inchunks)
    ii = len(inshape) - 1   # index into inshape, walking right-to-left
    oi = len(outshape) - 1  # index into outshape, walking right-to-left
    result_inchunks = [None for i in range(len(inshape))]
    result_outchunks = [None for i in range(len(outshape))]

    while ii >= 0 or oi >= 0:
        if inshape[ii] == outshape[oi]:
            # Axes agree: reuse the existing chunking unchanged.
            result_inchunks[ii] = inchunks[ii]
            result_outchunks[oi] = inchunks[ii]
            ii -= 1
            oi -= 1
            continue
        din = inshape[ii]
        dout = outshape[oi]
        if din == 1:
            result_inchunks[ii] = (1,)
            ii -= 1
        elif dout == 1:
            result_outchunks[oi] = (1,)
            oi -= 1
        elif din < dout:  # (4, 4, 4) -> (64,)
            # Extend the group of input axes leftwards until their product
            # matches the single output axis.
            ileft = ii - 1
            while (
                ileft >= 0 and reduce(mul, inshape[ileft : ii + 1]) < dout
            ):  # 4 < 64, 4*4 < 64, 4*4*4 == 64
                ileft -= 1
            if reduce(mul, inshape[ileft : ii + 1]) != dout:
                raise ValueError("Shapes not compatible")

            for i in range(ileft + 1, ii + 1):  # need single-shape dimensions
                result_inchunks[i] = (inshape[i],)  # chunks[i] = (4,)

            # The leftmost axis of the group absorbs all the chunking; it is
            # split so the output chunk count is preserved.
            chunk_reduction = reduce(mul, map(len, inchunks[ileft + 1 : ii + 1]))
            result_inchunks[ileft] = expand_tuple(inchunks[ileft], chunk_reduction)

            prod = reduce(mul, inshape[ileft + 1 : ii + 1])  # 16
            result_outchunks[oi] = tuple(
                prod * c for c in result_inchunks[ileft]
            )  # (1, 1, 1, 1) .* 16

            oi -= 1
            ii = ileft - 1
        elif din > dout:  # (64,) -> (4, 4, 4)
            # Extend the group of output axes leftwards until their product
            # matches the single input axis.
            oleft = oi - 1
            while oleft >= 0 and reduce(mul, outshape[oleft : oi + 1]) < din:
                oleft -= 1
            if reduce(mul, outshape[oleft : oi + 1]) != din:
                raise ValueError("Shapes not compatible")

            # TODO: don't coalesce shapes unnecessarily
            cs = reduce(mul, outshape[oleft + 1 : oi + 1])

            result_inchunks[ii] = contract_tuple(inchunks[ii], cs)  # (16, 16, 16, 16)

            for i in range(oleft + 1, oi + 1):
                result_outchunks[i] = (outshape[i],)

            result_outchunks[oleft] = tuple(c // cs for c in result_inchunks[ii])

            oi = oleft - 1
            ii -= 1

    return tuple(result_inchunks), tuple(result_outchunks)
def expand_tuple(chunks, factor):
    """Split every chunk into roughly ``factor`` equal pieces.

    The total (``sum``) is preserved; each input chunk contributes pieces
    of size about ``chunk / factor`` (never less than 1).

    >>> expand_tuple((2, 4), 2)
    (1, 1, 2, 2)

    >>> expand_tuple((2, 4), 3)
    (1, 1, 1, 1, 2)

    >>> expand_tuple((3, 4), 2)
    (1, 2, 2, 2)

    >>> expand_tuple((7, 4), 3)
    (2, 2, 3, 1, 1, 2)
    """
    if factor == 1:
        return chunks

    pieces = []
    for chunk in chunks:
        remaining = chunk
        # Target size for each piece of this chunk, at least 1.
        target = max(chunk / factor, 1)
        # Emit pieces while at least two targets' worth remains, then
        # flush whatever is left as the final (possibly larger) piece.
        while remaining >= 2 * target:
            pieces.append(int(target))
            remaining -= int(target)
        if remaining:
            pieces.append(remaining)

    assert sum(pieces) == sum(chunks)
    return tuple(pieces)
def contract_tuple(chunks, factor):
    """ Return simple chunks tuple such that factor divides all elements

    Examples
    --------

    >>> contract_tuple((2, 2, 8, 4), 4)
    (4, 8, 4)
    """
    assert sum(chunks) % factor == 0

    out = []
    carry = 0
    for chunk in chunks:
        total = chunk + carry
        # Round down to the largest multiple of ``factor``; the remainder
        # is carried into the next chunk.
        carry = total % factor
        rounded = total - carry
        if rounded:
            out.append(rounded)
    return tuple(out)
def reshape(x, shape):
    """ Reshape array to new shape

    This is a parallelized version of the ``np.reshape`` function with the
    following limitations:

    1.  It assumes that the array is stored in `row-major order`_
    2.  It only allows for reshapings that collapse or merge dimensions like
        ``(1, 2, 3, 4) -> (1, 6, 4)`` or ``(64,) -> (4, 4, 4)``

    .. _`row-major order`: https://en.wikipedia.org/wiki/Row-_and_column-major_order

    When communication is necessary this algorithm depends on the logic within
    rechunk.  It endeavors to keep chunk sizes roughly the same when possible.

    See Also
    --------
    dask.array.rechunk
    numpy.reshape
    """
    # Sanitize inputs, look for -1 in shape
    from .slicing import sanitize_index

    shape = tuple(map(sanitize_index, shape))
    known_sizes = [s for s in shape if s != -1]
    if len(known_sizes) < len(shape):
        # FIX: the original comparison was reversed
        # (``len(known_sizes) - len(shape)``), which is always <= 0, so
        # multiple -1 entries were silently accepted instead of rejected.
        if len(shape) - len(known_sizes) > 1:
            raise ValueError("can only specify one unknown dimension")

        # Fastpath for x.reshape(-1) on 1D arrays, allows unknown shape in x
        # for this case only.
        if len(shape) == 1 and x.ndim == 1:
            return x

        # Infer the single unknown dimension from the total size.
        missing_size = sanitize_index(x.size / reduce(mul, known_sizes, 1))
        shape = tuple(missing_size if s == -1 else s for s in shape)

    if np.isnan(sum(x.shape)):
        raise ValueError(
            "Array chunk size or shape is unknown. shape: %s\n\n"
            "Possible solution with x.compute_chunk_sizes()" % x.shape
        )

    if reduce(mul, shape, 1) != x.size:
        raise ValueError("total size of new array must be unchanged")

    if x.shape == shape:
        return x

    meta = meta_from_array(x, len(shape))

    name = "reshape-" + tokenize(x, shape)

    if x.npartitions == 1:
        # Single partition: one task reshapes the whole array.
        key = next(flatten(x.__dask_keys__()))
        dsk = {(name,) + (0,) * len(shape): (M.reshape, key, shape)}
        chunks = tuple((d,) for d in shape)
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
        return Array(graph, name, chunks, meta=meta)

    # Logic for how to rechunk
    inchunks, outchunks = reshape_rechunk(x.shape, shape, x.chunks)
    x2 = x.rechunk(inchunks)

    # Construct graph: after rechunking, input and output chunks are in
    # one-to-one correspondence, so each task is a plain np.reshape.
    in_keys = list(product([x2.name], *[range(len(c)) for c in inchunks]))
    out_keys = list(product([name], *[range(len(c)) for c in outchunks]))
    shapes = list(product(*outchunks))
    dsk = {a: (M.reshape, b, shape) for a, b, shape in zip(out_keys, in_keys, shapes)}

    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x2])
    return Array(graph, name, outchunks, meta=meta)
| {
"repo_name": "ContinuumIO/dask",
"path": "dask/array/reshape.py",
"copies": "2",
"size": "6447",
"license": "bsd-3-clause",
"hash": -4772955976431810000,
"line_mean": 30.4487804878,
"line_max": 86,
"alpha_frac": 0.5521948193,
"autogenerated": false,
"ratio": 3.4329073482428116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4985102167542812,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from math import ceil
from typing import Union
from hwt.doc_markers import internal
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.hdlType import HdlType
from hwt.hdl.types.utils import walkFlattenFields
from hwt.hdl.value import HValue
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
class BitWidthErr(Exception):
    """
    Wrong bit width of signal/value

    Raised by :func:`fitTo_t` when the requested resize direction
    (extend/shrink) is disabled by the caller.
    """
def fitTo_t(what: Union[RtlSignalBase, HValue], where_t: HdlType,
            extend: bool=True, shrink: bool=True):
    """
    Slice signal "what" to fit in "where"
    or
    arithmetically (for signed by MSB / unsigned, vector with 0) extend
    "what" to same width as "where"

    little-endian impl.

    :param extend: allow increasing of the signal width
    :param shrink: allow shrinking of the signal width
    :raises BitWidthErr: if the required resize direction is disabled
    """
    src_width = what._dtype.bit_length()
    dst_width = where_t.bit_length()

    if dst_width == src_width:
        # already the right width, nothing to do
        return what

    if dst_width < src_width:
        # drop the upper bits
        if not shrink:
            raise BitWidthErr()
        return what[dst_width:]

    # need to grow the value
    if not extend:
        raise BitWidthErr()
    extra = dst_width - src_width
    if what._dtype.signed:
        # signed extension: replicate the MSB into the new bits
        msb = what[src_width - 1]
        pad = reduce(lambda a, b: a._concat(b), [msb for _ in range(extra)])
    else:
        # unsigned: pad with zeros
        pad = Bits(extra).from_py(0)
    return pad._concat(what)
def fitTo(what: Union[RtlSignalBase, HValue], where: Union[RtlSignalBase, HValue],
          extend: bool=True, shrink: bool=True):
    """Shortcut for :func:`fitTo_t` which takes the target type from *where*."""
    target_t = where._dtype
    return fitTo_t(what, target_t, extend=extend, shrink=shrink)
class NotEnoughtBitsErr(Exception):
    """
    More bits is required for such an operation

    Raised by :class:`BitWalker` when the underlying iterator is exhausted
    before the requested number of bits could be collected.
    """
class BitWalker():
    """
    Walker which can walk chunks of bits on signals/values of all types

    :ivar ~.sigOrVal: signal or value to iterate over
    :ivar ~.fillup: flag that means that if there is not enough bits
        for last item fill it up with invalid bits (otherwise raise)
    """

    def __init__(self, sigOrVal: Union[RtlSignalBase, HValue],
                 skipPadding: bool=True,
                 fillup: bool=False):
        """
        :param sigOrVal: signal or value to iterate over
        :param skipPadding: if true padding is skipped in dense types
        :param fillup: pad the last chunk with invalid bits instead of
            raising :class:`NotEnoughtBitsErr`
        """
        # iterator over the flattened fields of the signal/value
        self.it = walkFlattenFields(sigOrVal, skipPadding=skipPadding)
        self.fillup = fillup
        # number of not-yet-consumed bits accumulated in self.actual
        self.actuallyHave = 0
        # accumulated (concatenated) fields not yet fully consumed
        self.actual = None
        # number of bits already consumed from the low end of self.actual
        self.actualOffset = 0

    @internal
    def _get(self, numberOfBits: int, doCollect: bool):
        """
        Advance the walker by ``numberOfBits``.

        :param numberOfBits: number of bits to get from actual position
        :param doCollect: if False output is not collected just iterator moves
            in data structure
        :return: the collected chunk when ``doCollect`` is True, else None
        :raises NotEnoughtBitsErr: source exhausted and ``fillup`` disabled
        """
        if not isinstance(numberOfBits, int):
            numberOfBits = int(numberOfBits)
        while self.actuallyHave < numberOfBits:
            # accumulate while not has enough
            try:
                f = next(self.it)
            except StopIteration:
                if self.fillup and self.actual is not None:
                    # incomplete tail allowed; pad later
                    break
                else:
                    raise NotEnoughtBitsErr()

            thisFieldLen = f._dtype.bit_length()
            if self.actual is None:
                if not doCollect and thisFieldLen <= numberOfBits:
                    # skipping whole fields: just decrement the bit budget
                    numberOfBits -= thisFieldLen
                else:
                    self.actual = f
                    self.actuallyHave = thisFieldLen
            else:
                if not doCollect and self.actuallyHave < numberOfBits:
                    # skipping: drop the partially consumed accumulator
                    self.actuallyHave = thisFieldLen
                    self.actual = f
                else:
                    # collecting: concatenate new field above the accumulator
                    self.actuallyHave += thisFieldLen
                    self.actual = f._concat(self.actual)

        # slice out from actual
        actual = self.actual
        actualOffset = self.actualOffset
        if self.actuallyHave < numberOfBits:
            # only reachable when fillup is enabled (see assert)
            assert self.fillup
            if doCollect:
                # pad the high end with invalid (None) bits of matching type
                t = self.actual._dtype
                fillupW = numberOfBits - self.actuallyHave
                padding_t = Bits(fillupW, signed=t.signed, negated=t.negated)
                padding = padding_t.from_py(None)
                actual = padding._concat(actual)
            self.actuallyHave = 0
            self.actualOffset = 0
        else:
            # update about what was taken
            self.actuallyHave -= numberOfBits
            self.actualOffset += numberOfBits

        if self.actuallyHave == 0:
            # accumulator fully consumed; reset
            self.actual = None
            self.actualOffset = 0

        if doCollect:
            if numberOfBits == 1:
                # single bit -> index access
                return actual[actualOffset]
            else:
                # slice [high:low] relative to the consumed offset
                return actual[(actualOffset + numberOfBits):actualOffset]

    def get(self, numberOfBits: int) -> Union[RtlSignalBase, HValue]:
        """
        :param numberOfBits: number of bits to get from actual position
        :return: chunk of bits of specified size (instance of Value or RtlSignal)
        """
        return self._get(numberOfBits, True)

    def skip(self, numberOfBits: int) -> None:
        """
        Move this iterator without care about item

        :param numberOfBits: number of bits to get from actual position
        """
        self._get(numberOfBits, False)

    def assertIsOnEnd(self):
        """
        Assert there is nothing left in this iterator
        """
        try:
            next(self.it)
        except StopIteration:
            return
        raise AssertionError("there are still some items")
def iterBits(sigOrVal: Union[RtlSignalBase, HValue], bitsInOne: int=1,
             skipPadding: bool=True, fillup: bool=False):
    """
    Iterate over bits in vector

    :param sigOrVal: signal or value to iterate over
    :param bitsInOne: number of bits in one part
    :param skipPadding: if true padding is skipped in dense types
    :param fillup: flag that means that if there is not enough bits
        for last item fill it up with invalid bits (otherwise raise)
    """
    walker = BitWalker(sigOrVal, skipPadding, fillup)

    # total width may be unknown (types without a fixed bit_length)
    try:
        width = sigOrVal._dtype.bit_length()
    except TypeError:
        width = None

    if width is None:
        # unknown width: pull chunks until the walker runs out of bits
        while True:
            try:
                yield walker.get(bitsInOne)
            except NotEnoughtBitsErr:
                return
    else:
        # known width: emit exactly ceil(width / bitsInOne) chunks and
        # verify nothing is left behind
        for _ in range(ceil(width / bitsInOne)):
            yield walker.get(bitsInOne)
        walker.assertIsOnEnd()
| {
"repo_name": "Nic30/HWToolkit",
"path": "hwt/synthesizer/vectorUtils.py",
"copies": "1",
"size": "6524",
"license": "mit",
"hash": 8261053081077151000,
"line_mean": 30.2153110048,
"line_max": 82,
"alpha_frac": 0.5893623544,
"autogenerated": false,
"ratio": 4.230869001297017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5320231355697017,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.