| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 – 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | length 4 – 245 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 – 245 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 – 245 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 2 – 1.02M |
| avg_line_length | float64 | 1 – 417k |
| max_line_length | int64 | 1 – 987k |
| alphanum_fraction | float64 | 0 – 1 |
| content_no_comment | string | length 0 – 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |

(⌀ marks columns that contain nulls.)
1c4577df150e0de677cd366f5bf958d6cbeb0911
| 2,234
|
py
|
Python
|
src/the_tale/the_tale/game/heroes/conf.py
|
Alacrate/the-tale
|
43b211f3a99e93964e95abc20a8ed649a205ffcf
|
[
"BSD-3-Clause"
] | 85
|
2017-11-21T12:22:02.000Z
|
2022-03-27T23:07:17.000Z
|
src/the_tale/the_tale/game/heroes/conf.py
|
Alacrate/the-tale
|
43b211f3a99e93964e95abc20a8ed649a205ffcf
|
[
"BSD-3-Clause"
] | 545
|
2017-11-04T14:15:04.000Z
|
2022-03-27T14:19:27.000Z
|
src/the_tale/the_tale/game/heroes/conf.py
|
Alacrate/the-tale
|
43b211f3a99e93964e95abc20a8ed649a205ffcf
|
[
"BSD-3-Clause"
] | 45
|
2017-11-11T12:36:30.000Z
|
2022-02-25T06:10:44.000Z
|
import smart_imports
smart_imports.all()
NAME_REGEX = r'^[\-\ а-яА-Я«»\'ёЁ]+$' if not django_settings.TESTS_RUNNING else r'^[\-\ а-яА-Я«»\'\,ёЁ]+$'
settings = utils_app_settings.app_settings('HEROES',
USE_ABILITY_CHANCE=0.1,
MESSAGES_LOG_LENGTH=10,
DIARY_LOG_LENGTH=50,
MIN_PVP_BATTLES=25,
UI_CACHING_KEY='hero_ui_%d',
# not the cache lifetime, but the time window after ui_caching_started_at is set
# during which ui caching stays turned on
UI_CACHING_TIME=10 * 60,
# how long before caching ends that the next cache command is sent
UI_CACHING_CONTINUE_TIME=60,
# cache lifetime
UI_CACHING_TIMEOUT=60,
# whether to dump cached heroes to the database
DUMP_CACHED_HEROES=False,
START_ENERGY_BONUS=10,
MAX_HELPS_IN_TURN=10,
NAME_REGEX=NAME_REGEX,
NAME_SYMBOLS_DESCRITION='пробел, -, а-я, А-Я, «», \' ',
NAME_MIN_LENGHT=3,
ABILITIES_RESET_TIMEOUT=datetime.timedelta(days=30),
UNLOAD_TIMEOUT=c.TURN_DELTA * 3,
RARE_OPERATIONS_INTERVAL=1000,
INACTIVE_HERO_DELAY=int(10),  # slow down time N-fold for inactive heroes
TT_DIARY_ENTRY_POINT='http://localhost:10001/',
MAX_HERO_DESCRIPTION_LENGTH=10000,
REMOVE_HERO_DELAY=10*60)
| 44.68
| 120
| 0.389884
|
import smart_imports
smart_imports.all()
NAME_REGEX = r'^[\-\ а-яА-Я«»\'ёЁ]+$' if not django_settings.TESTS_RUNNING else r'^[\-\ а-яА-Я«»\'\,ёЁ]+$'
settings = utils_app_settings.app_settings('HEROES',
USE_ABILITY_CHANCE=0.1,
MESSAGES_LOG_LENGTH=10,
DIARY_LOG_LENGTH=50,
MIN_PVP_BATTLES=25,
UI_CACHING_KEY='hero_ui_%d',
UI_CACHING_TIME=10 * 60,
UI_CACHING_CONTINUE_TIME=60,
UI_CACHING_TIMEOUT=60,
DUMP_CACHED_HEROES=False,
START_ENERGY_BONUS=10,
MAX_HELPS_IN_TURN=10,
NAME_REGEX=NAME_REGEX,
NAME_SYMBOLS_DESCRITION='пробел, -, а-я, А-Я, «», \' ',
NAME_MIN_LENGHT=3,
ABILITIES_RESET_TIMEOUT=datetime.timedelta(days=30),
UNLOAD_TIMEOUT=c.TURN_DELTA * 3,
RARE_OPERATIONS_INTERVAL=1000,
INACTIVE_HERO_DELAY=int(10),  # slow down time N-fold for inactive heroes
TT_DIARY_ENTRY_POINT='http://localhost:10001/',
MAX_HERO_DESCRIPTION_LENGTH=10000,
REMOVE_HERO_DELAY=10*60)
| true
| true
|
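A minimal usage sketch for the settings object defined in this record; the attribute-style access is an assumption about how `utils_app_settings.app_settings` exposes its keyword arguments, since that helper is not shown here:

```python
# Hypothetical consumer of the HEROES settings above (attribute access assumed).
from the_tale.game.heroes import conf  # assumed import path, relative to src/

cache_key = conf.settings.UI_CACHING_KEY % 42   # -> 'hero_ui_42'
window = conf.settings.UI_CACHING_TIME          # caching window, in seconds
assert conf.settings.NAME_MIN_LENGHT == 3       # identifier kept as spelled in the source
```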
1c4579efb456751f3e85a187b14430807fac4cfc
| 1,051
|
py
|
Python
|
solutions/093.restore-ip-addresses/restore-ip-addresses.py
|
wangsongiam/leetcode
|
96ff21bca1871816ae51fccb1fa13587b378dc50
|
[
"MIT"
] | 3
|
2018-11-25T15:19:57.000Z
|
2019-09-28T03:01:11.000Z
|
solutions/093.restore-ip-addresses/restore-ip-addresses.py
|
casprwang/leetcode
|
96ff21bca1871816ae51fccb1fa13587b378dc50
|
[
"MIT"
] | null | null | null |
solutions/093.restore-ip-addresses/restore-ip-addresses.py
|
casprwang/leetcode
|
96ff21bca1871816ae51fccb1fa13587b378dc50
|
[
"MIT"
] | 3
|
2018-02-11T20:23:44.000Z
|
2020-06-05T15:39:56.000Z
|
class Solution:
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
ret = []
def traverse(pos, cnt, tmp):
nonlocal ret, s
if cnt == 0:
if not s[pos:]:
return
if len(s[pos:]) > 1 and s[pos:][0] == '0':
return
if int(s[pos:]) < 256 and int(s[pos:]) > -1:
ret.append(tmp + s[pos:])
return
if (cnt + 1) * 3 < len(s) - pos:
return
for i in range(1, 4): # 1 2 3
if pos + i >= len(s):
return
if len(s[pos:pos+i]) > 1 and s[pos:pos+i][0] == '0':
continue
if int(s[pos:pos+i]) < 0 or int(s[pos:pos+i]) > 255 and s[pos:pos+i][0] != '0':
continue
traverse(pos + i, cnt - 1, tmp + s[pos:pos+i] + '.')
traverse(0, 3, '')
return ret
| 26.948718
| 95
| 0.358706
|
class Solution:
def restoreIpAddresses(self, s):
ret = []
def traverse(pos, cnt, tmp):
nonlocal ret, s
if cnt == 0:
if not s[pos:]:
return
if len(s[pos:]) > 1 and s[pos:][0] == '0':
return
if int(s[pos:]) < 256 and int(s[pos:]) > -1:
ret.append(tmp + s[pos:])
return
if (cnt + 1) * 3 < len(s) - pos:
return
for i in range(1, 4):
if pos + i >= len(s):
return
if len(s[pos:pos+i]) > 1 and s[pos:pos+i][0] == '0':
continue
if int(s[pos:pos+i]) < 0 or int(s[pos:pos+i]) > 255 and s[pos:pos+i][0] != '0':
continue
traverse(pos + i, cnt - 1, tmp + s[pos:pos+i] + '.')
traverse(0, 3, '')
return ret
| true
| true
|
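A quick sanity check of the backtracking solution above, using the canonical LeetCode 93 example:

```python
sol = Solution()
print(sorted(sol.restoreIpAddresses("25525511135")))
# ['255.255.11.135', '255.255.111.35']
print(sol.restoreIpAddresses("0000"))
# ['0.0.0.0']  (multi-digit octets with a leading zero are rejected)
```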
1c457a9117673b494d492a9f4ab781bd3957996b
| 1,632
|
py
|
Python
|
Data Scientist Career Path/12. Foundations of Machine Learning Unsupervised Learning/2. KMeans++/1. intro.py
|
myarist/Codecademy
|
2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb
|
[
"MIT"
] | 23
|
2021-06-06T15:35:55.000Z
|
2022-03-21T06:53:42.000Z
|
Data Scientist Career Path/12. Foundations of Machine Learning Unsupervised Learning/2. KMeans++/1. intro.py
|
shivaniverma1/Data-Scientist
|
f82939a411484311171465591455880c8e354750
|
[
"MIT"
] | null | null | null |
Data Scientist Career Path/12. Foundations of Machine Learning Unsupervised Learning/2. KMeans++/1. intro.py
|
shivaniverma1/Data-Scientist
|
f82939a411484311171465591455880c8e354750
|
[
"MIT"
] | 9
|
2021-06-08T01:32:04.000Z
|
2022-03-18T15:38:09.000Z
|
import codecademylib3_seaborn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import random
import timeit
mu = 1
std = 0.5
np.random.seed(100)
xs = np.append(np.append(np.append(np.random.normal(0.25,std,100), np.random.normal(0.75,std,100)), np.random.normal(0.25,std,100)), np.random.normal(0.75,std,100))
ys = np.append(np.append(np.append(np.random.normal(0.25,std,100), np.random.normal(0.25,std,100)), np.random.normal(0.75,std,100)), np.random.normal(0.75,std,100))
values = list(zip(xs, ys))
model = KMeans(init='random', n_clusters=2)
results = model.fit_predict(values)
print("The inertia of model that randomly initialized centroids is " + str(model.inertia_))
colors = ['#6400e4', '#ffc740']
plt.subplot(211)
for i in range(2):
points = np.array([values[j] for j in range(len(values)) if results[j] == i])
plt.scatter(points[:, 0], points[:, 1], c=colors[i], alpha=0.6)
plt.title('Codecademy Mobile Feedback - Centroids Initialized Randomly')
plt.xlabel('Learn Python')
plt.ylabel('Learn SQL')
plt.subplot(212)
model = KMeans( n_clusters=2)
results = model.fit_predict(values)
print("The inertia of model that initialized the centroids using KMeans++ is " + str(model.inertia_))
colors = ['#6400e4', '#ffc740']
for i in range(2):
points = np.array([values[j] for j in range(len(values)) if results[j] == i])
plt.scatter(points[:, 0], points[:, 1], c=colors[i], alpha=0.6)
plt.title('Codecademy Mobile Feedback - Centroids Initialized Using KMeans++')
plt.xlabel('Learn Python')
plt.ylabel('Learn SQL')
plt.tight_layout()
plt.show()
| 27.2
| 164
| 0.712623
|
import codecademylib3_seaborn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import random
import timeit
mu = 1
std = 0.5
np.random.seed(100)
xs = np.append(np.append(np.append(np.random.normal(0.25,std,100), np.random.normal(0.75,std,100)), np.random.normal(0.25,std,100)), np.random.normal(0.75,std,100))
ys = np.append(np.append(np.append(np.random.normal(0.25,std,100), np.random.normal(0.25,std,100)), np.random.normal(0.75,std,100)), np.random.normal(0.75,std,100))
values = list(zip(xs, ys))
model = KMeans(init='random', n_clusters=2)
results = model.fit_predict(values)
print("The inertia of model that randomly initialized centroids is " + str(model.inertia_))
colors = ['#6400e4', '#ffc740']
plt.subplot(211)
for i in range(2):
points = np.array([values[j] for j in range(len(values)) if results[j] == i])
plt.scatter(points[:, 0], points[:, 1], c=colors[i], alpha=0.6)
plt.title('Codecademy Mobile Feedback - Centroids Initialized Randomly')
plt.xlabel('Learn Python')
plt.ylabel('Learn SQL')
plt.subplot(212)
model = KMeans( n_clusters=2)
results = model.fit_predict(values)
print("The inertia of model that initialized the centroids using KMeans++ is " + str(model.inertia_))
colors = ['#6400e4', '#ffc740']
for i in range(2):
points = np.array([values[j] for j in range(len(values)) if results[j] == i])
plt.scatter(points[:, 0], points[:, 1], c=colors[i], alpha=0.6)
plt.title('Codecademy Mobile Feedback - Centroids Initialized Using KMeans++')
plt.xlabel('Learn Python')
plt.ylabel('Learn SQL')
plt.tight_layout()
plt.show()
| true
| true
|
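For context, `KMeans(n_clusters=2)` in the second half of the script relies on scikit-learn's default `init='k-means++'`. A self-contained comparison of the two initializers on synthetic data (the inertia gap is illustrative, not guaranteed on every draw):

```python
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
# two well-separated 2-D blobs
X = np.vstack([rng.normal(0, 0.5, (100, 2)), rng.normal(3, 0.5, (100, 2))])

for init in ("random", "k-means++"):
    model = KMeans(n_clusters=2, init=init, n_init=10, random_state=0).fit(X)
    print(init, model.inertia_)  # k-means++ typically matches or beats random init
```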
1c457b258f46e8b97aa913da1acea83fba03eaed
| 944
|
py
|
Python
|
rrpython/tests/types/test_str.py
|
afoolsbag/rrPython
|
cb4d376b7c02e39d4e88163f272456ebb9eeafc9
|
[
"Unlicense"
] | null | null | null |
rrpython/tests/types/test_str.py
|
afoolsbag/rrPython
|
cb4d376b7c02e39d4e88163f272456ebb9eeafc9
|
[
"Unlicense"
] | null | null | null |
rrpython/tests/types/test_str.py
|
afoolsbag/rrPython
|
cb4d376b7c02e39d4e88163f272456ebb9eeafc9
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
r"""
The string type.
::
+-> Container: obj.__contains__(self, item) # item in obj
|
+-> Sized: obj.__len__(self) # len(obj)
|
+-> Iterable: obj.__iter__(self) # iter(obj)
|
+-> Collection
|
| +-> Iterable: obj.__iter__(self) # iter(obj)
| |
+-> Reversible: obj.__reversed__(self) # reversed(obj)
|
+-> Sequence: obj.__getitem__(self, index) # obj[index]
| obj.count(self, value)
| obj.index(self, value, start=0, stop=None)
|
str
Notes
-----
- `Text sequence type, str <https://docs.python.org/zh-cn/3/library/stdtypes.html#text-sequence-type-str>`_
"""
__version__ = '2020.09.27'
__since__ = '2020.09.24'
__author__ = 'zhengrr'
__license__ = 'UNLICENSE'
from typing import Sequence
def test_issubclass() -> None:
assert issubclass(str, Sequence)
| 23.02439
| 89
| 0.54661
|
__version__ = '2020.09.27'
__since__ = '2020.09.24'
__author__ = 'zhengrr'
__license__ = 'UNLICENSE'
from typing import Sequence
def test_issubclass() -> None:
assert issubclass(str, Sequence)
| true
| true
|
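The ABC chain drawn in the docstring can be exercised the same way; a hypothetical extension of the test file (these tests are not in the original):

```python
from typing import Container, Iterable, Reversible, Sequence, Sized

def test_str_abcs() -> None:
    # str satisfies every ABC on the path sketched in the diagram above
    for abc in (Container, Sized, Iterable, Reversible, Sequence):
        assert issubclass(str, abc)

def test_sequence_methods() -> None:
    # Sequence supplies count() and index() on top of __getitem__/__len__
    assert "banana".count("a") == 3
    assert "banana".index("n") == 2
```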
1c457bc8969abfb76d85c1df6226dd8f0956c564
| 13,447
|
py
|
Python
|
lime/optics.py
|
binggu56/lime
|
07f60c5105f0bedb11ac389fd671f4f1737a71fe
|
[
"MIT"
] | 4
|
2020-01-15T11:52:23.000Z
|
2021-01-05T19:40:36.000Z
|
lime/optics.py
|
binggu56/lime
|
07f60c5105f0bedb11ac389fd671f4f1737a71fe
|
[
"MIT"
] | null | null | null |
lime/optics.py
|
binggu56/lime
|
07f60c5105f0bedb11ac389fd671f4f1737a71fe
|
[
"MIT"
] | 3
|
2020-02-14T07:10:44.000Z
|
2021-04-14T17:49:45.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 17:26:02 2019
@author: binggu
"""
import numpy as np
from scipy.sparse import lil_matrix, csr_matrix, kron, identity, linalg
from numpy import sqrt, exp, pi
import matplotlib.pyplot as plt
from lime.units import au2k, au2ev
from lime.fft import fft2
from lime.phys import rect, sinc, dag, interval
from lime.style import set_style, imshow
from numba import jit
class Pulse:
def __init__(self, tau, omegac, delay=0., amplitude=0.001, cep=0., beta=0):
"""
(linearly chirped) Gaussian pulse
The positive frequency component reads
E = A/2 * exp(-(t-t0)^2/2/T^2) * exp[-i w (t-t0)(1 + beta (t-t0)/T)]
A: electric field amplitude
t0: time delay
T: pulse duration
"""
self.delay = delay
self.tau = tau
self.sigma = tau # for compatibility only
self.omegac = omegac # central frequency
self.unit = 'au'
self.amplitude = amplitude
self.cep = cep
self.bandwidth = 1./tau
self.duration = 2. * tau
self.beta = beta # linear chirping rate, dimensionless
self.ndim = 1
def envelop(self, t):
return np.exp(-(t-self.delay)**2/2./self.tau**2)
def spectrum(self, omega):
"""
Fourier transform of the Gaussian pulse
"""
omega0 = self.omegac
T = self.tau
A0 = self.amplitude
beta = self.beta
# if beta is None:
# return A0 * sigma * np.sqrt(2.*np.pi) * np.exp(-(omega-omega0)**2 * sigma**2/2.)
# else:
a = 0.5/T**2 + 1j * beta * omega0/T
return A0 * np.sqrt(np.pi/a) * np.exp(-(omega - omega0)**2/4./a)
def field(self, t):
'''
electric field
'''
return self.efield(t)
def efield(self, t):
"""
Parameters
----------
t : TYPE
DESCRIPTION.
Returns
-------
electric field at time t.
"""
omegac = self.omegac
t0 = self.delay
a = self.amplitude
tau = self.sigma
beta = self.beta
#
# if beta is None:
# return a * np.exp(-(t-delay)**2/2./sigma**2)*np.cos(omegac * (t-delay))
# else:
E = a * np.exp(-(t-t0)**2/2./tau**2)*np.exp(-1j * omegac * (t-t0))\
* np.exp(-1j * beta * omegac * (t-t0)**2/tau)
return E.real
def spectrogram(self, efield):
# from tftb.processing import WignerVilleDistribution
# wvd = WignerVilleDistribution(z)
# w, ts, fs = wvd.run()
return
# def heaviside(x):
# """
# Heaviside function defined in a grid.
# returns 0 if x<=0, and 1 if x>0
# """
# x = np.asarray(x)
# y = np.zeros(x.shape)
# y[x > 0] = 1.0
# return y
class Biphoton:
def __init__(self, omegap, bw, Te, p=None, q=None, phase_matching='sinc'):
"""
Class for entangled photon pair.
Parameters
----------
omegap: float
pump carrier frequency
bw: float
pump bandwidth
p: signal grid
q: idler grid
phase_matching: str
type of phase matching. Default is 'sinc'. A narrowband approximation is invoked.
"""
self.omegap = omegap
self.pump_bandwidth = bw
self.phase_matching = phase_matching
self.signal_center_frequency = omegap / 2.
self.idler_center_frequency = omegap / 2.
self.entanglement_time = Te
self.jsa = None
self.jta = None
self.p = p
self.q = q
if p is not None:
self.dp = interval(p)
self.dq = interval(q)
self.grid = [p, q]
def pump(self, bandwidth):
"""
pump pulse envelope
Parameters
----------
bandwidth
Returns
-------
"""
alpha = np.sqrt(1. / (np.sqrt(2. * np.pi) * bandwidth)) * \
np.exp(-(p + q) ** 2 / 4. / bandwidth ** 2)
return alpha
def set_grid(self, p, q):
self.p = p
self.q = q
return
def get_jsa(self):
"""
Returns
-------
jsa: array
joint spectral amplitude
"""
p = self.p
q = self.q
bw = self.pump_bandwidth
self.jsa = _jsa(p, q, bw, model=self.phase_matching,
Te=self.entanglement_time)
return self.jsa
def get_jta(self):
"""
Compute the joint temporal amplitude J(ts, ti) over a temporal meshgrid.
Returns
-------
ts: 1d array
signal time grid
ti: 1d array
idler temporal grid
jta: 2d array
joint temporal amplitude
"""
p = self.p
q = self.q
dp = p[1] - p[0]
dq = q[1] - q[0]
if self.jsa is not None:
ts, ti, jta = fft2(self.jsa, dp, dq)
self.jta = jta
return ts, ti, jta
else:
raise ValueError('jsa is None. Call get_jsa() first.')
def jta(self, ts, ti):
return
def detect(self):
"""
two-photon detection amplitude in a temporal grid defined by
the spectral grid.
Returns
-------
t1: 1d array
t2: 1d array
d: detection amplitude in the temporal grid (t1, t2)
"""
if self.jsa is None:
raise ValueError('Please call get_jsa() to compute the jsa first.')
bw = self.pump_bandwidth
omega_s = self.signal_center_frequency
omega_i = self.idler_center_frequency
p = self.p
q = self.q
dp = p[1] - p[0]
dq = q[1] - q[0]
return _detection_amplitude(self.jsa, omega_s, omega_i, dp, dq)
def detect_si(self):
pass
def detect_is(self):
pass
def g2(self):
pass
def bandwidth(self, which='signal'):
"""
Compute the bandwidth of the signal/idler mode
Parameters
----------
which : TYPE, optional
DESCRIPTION. The default is 'signal'.
Returns
-------
None.
"""
p, q = self.p, self.q
dp = interval(p)
dq = interval(q)
f = self.jsa
if which == 'signal':
rho = rdm(f, dq, which='x')
sigma = sqrt(rho.diagonal().dot(p**2) * dp)
elif which == 'idler':
rho = rdm(f, dp, which='y')
sigma = sqrt(rho.diagonal().dot(q**2) * dq)
return sigma
def plt_jsa(self, xlabel=None, ylabel=None, fname=None):
if self.jsa is None:
self.get_jsa()
plt, ax = imshow(self.p * au2ev, self.q * au2ev, np.abs(self.jsa))
if xlabel is not None: ax.set_xlabel(xlabel)
if ylabel is not None: ax.set_xlabel(ylabel)
if fname is not None:
plt.savefig(fname)
plt.show()
return ax
def rdm(self, which='signal'):
if which == 'signal':
return rdm(self.jsa, dy=self.dq, which='x')
def jta(t2, t1, omegap, sigmap, Te):
"""
Analytical form for the joint temporal amplitude for SPDC type-II
two-photon state.
Note that two single-photon electric field prefactors are neglected.
Parameters
----------
t2 : TYPE
DESCRIPTION.
t1 : TYPE
DESCRIPTION.
Returns
-------
None.
"""
omegas = omegap/2.
omegai = omegap/2.
tau = t2 - t1
amp = sqrt(sigmap/Te) * (2.*pi)**(3./4) * \
rect(tau/2./Te) * exp(-sigmap**2*(t1+t2)**2/4.) *\
exp(-1j * omegas * t1 - 1j*omegai * t2)
return amp
def rdm(f, dx=1, dy=1, which='x'):
'''
Compute the reduced density matrix by tracing out the other dof for a 2D wavefunction
Parameters
----------
f : 2D array
2D wavefunction
dx : float, optional
DESCRIPTION. The default is 1.
dy : float, optional
DESCRIPTION. The default is 1.
which: str
indicator which rdm is required. Default is 'x'.
Returns
-------
rho1 : TYPE
Reduced density matrix
'''
if which == 'x':
rho = f.dot(dag(f)) * dy
elif which == 'y':
rho = f.T.dot(np.conj(f)) * dx
else:
raise ValueError('The argument which can only be x or y.')
return rho
def _jsa(p, q, pump_bw, model='sinc', Te=None):
'''
Construct the joint spectral amplitude
Parameters
----------
p : 1d array
signal frequency (detuning from the center frequency)
q : 1d array
idler frequency
pump_bw : float
pump bandwidth
sm : float
1/entanglement time
Te : float
Entanglement time.
Returns
-------
jsa : TYPE
DESCRIPTION.
'''
P, Q = np.meshgrid(p, q)
sigma_plus = pump_bw
sigma_minus = 1. / Te
# pump envelope
alpha = np.sqrt(1. / (np.sqrt(2. * np.pi) * sigma_plus)) * \
np.exp(-(P + Q) ** 2 / 4. / sigma_plus ** 2)
# phase-matching function
if model == 'Gaussian':
beta = np.sqrt(1. / np.sqrt(2. * np.pi) / sigma_minus) * \
np.exp(-(P - Q) ** 2 / 4. / sigma_minus ** 2)
jsa = sqrt(2) * alpha * beta
elif model == 'sinc':
beta = sqrt(0.5 * Te / np.pi) * sinc(Te * (P - Q) / 4.)
# const = np.trace(dag(f).dot(f))*dq*dp
jsa = alpha * beta
return jsa
def hom(p, q, f, tau):
"""
HOM coincidence probability
Parameters
----------
p
q
f
tau
Returns
-------
prob: 1d array
coincidence probability
"""
dp = interval(p)
dq = interval(q)
P, Q = np.meshgrid(p, q)
prob = np.zeros(len(tau))
for j in range(len(tau)):
t = tau[j]
prob[j] = 0.5 - 0.5 * np.sum(f.conj() * f.T *
np.exp(1j * (P - Q) * t)).real * dq*dp
return prob
def hom_schmidt(p, q, f, tau, method='rdm', nmodes=5):
"""
HOM signal with Schmidt modes
Parameters
----------
p
q
f
tau
method
nmodes
Returns
-------
"""
dp = interval(p)
dq = interval(q)
# schmidt decompose the JSA
s, phi, chi = schmidt_decompose(f, dp, dq, method=method,
nmodes=nmodes)
prob = np.zeros(len(tau))
for j in range(len(tau)):
t = tau[j]
for a in range(nmodes):
for b in range(nmodes):
tmp1 = (phi[:,a].conj() * chi[:, b] * np.exp(1j * p * t)).sum() * dp
tmp2 = (phi[:,b] * chi[:, a].conj() * np.exp(-1j * q * t)).sum() * dq
prob[j] += -2. * np.real(s[a] * s[b] * tmp1 * tmp2)
prob = 0.5 + prob/4.
return prob
def schmidt_decompose(f, dp, dq, nmodes=5, method='rdm'):
"""
kernel method
f: 2D array,
input function to be decomposed
nmodes: int
number of modes to be kept
method: str
rdm or svd
"""
if method == 'rdm':
kernel1 = f.dot(dag(f)) * dq * dp
kernel2 = f.T.dot(f.conj()) * dp * dq
print('c: Schmidt coefficients')
s, phi = np.linalg.eig(kernel1)
s1, psi = np.linalg.eig(kernel2)
phi /= np.sqrt(dp)
psi /= np.sqrt(dq)
elif method == 'svd':
raise NotImplementedError
return np.sqrt(s[:nmodes]), phi[:, :nmodes], psi[:, :nmodes]
def _detection_amplitude(jsa, omega1, omega2, dp, dq):
'''
Detection amplitude <0|E(t)E(t')|Phi>
t, t' are defined on a 2D grid used in the FFT,
E(t) = Es(t) + Ei(t) is the total electric field operator.
This contains two amplitudes corresponding to two different
ordering of photon interactions
<0|T Ei(t)Es(t')|Phi> + <0|T Es(t)Ei(t')|Phi>
The t, t' are defined relative to t0, i.e, they are temporal durations from t0.
Parameters
----------
jsa : TYPE
DESCRIPTION.
m : TYPE
DESCRIPTION.
n : TYPE
DESCRIPTION.
omega1 : float
central frequency of signal beam
omega2 : float
central frequency of idler beam
Returns
-------
d : TYPE
DESCRIPTION.
'''
t1, t2, jta = fft2(jsa, dp, dq)
dt2 = t2[1] - t2[0]
T1, T2 = np.meshgrid(t1, t2)
# detection amplitude d(t1, t2) ~ JTA(t2, t1)
d = np.exp(-1j * omega2 * T1 - 1j * omega1 * T2) * \
np.sqrt(omega1 * omega2) * jta.T + \
np.exp(-1j * omega1 * T1 - 1j * omega2 * T2) * \
np.sqrt(omega1 * omega2) * jta
# amp = np.einsum('ij, ij -> i', d, heaviside(T1 - T2) * \
# np.exp(-1j * gap20 * (T1-T2))) * dt2
return t1, t2, d
if __name__ == '__main__':
from lime.units import au2ev, au2fs
p = np.linspace(-2, 2, 128) / au2ev
q = p
epp = Biphoton(omegap=3 / au2ev, bw=0.2 / au2ev, Te=10/au2fs,
p=p, q=q)
JSA = epp.get_jsa()
# epp.plt_jsa()
# t1, t2, d = epp.detect()
tau = np.linspace(-10, 10)/au2fs
prob = hom(p, q, JSA, tau)
fig, ax = plt.subplots()
ax.plot(tau, prob)
plt.show()
| 22.791525
| 93
| 0.512605
|
import numpy as np
from scipy.sparse import lil_matrix, csr_matrix, kron, identity, linalg
from numpy import sqrt, exp, pi
import matplotlib.pyplot as plt
from lime.units import au2k, au2ev
from lime.fft import fft2
from lime.phys import rect, sinc, dag, interval
from lime.style import set_style, imshow
from numba import jit
class Pulse:
def __init__(self, tau, omegac, delay=0., amplitude=0.001, cep=0., beta=0):
self.delay = delay
self.tau = tau
self.sigma = tau
self.omegac = omegac
self.unit = 'au'
self.amplitude = amplitude
self.cep = cep
self.bandwidth = 1./tau
self.duration = 2. * tau
self.beta = beta
self.ndim = 1
def envelop(self, t):
return np.exp(-(t-self.delay)**2/2./self.tau**2)
def spectrum(self, omega):
omega0 = self.omegac
T = self.tau
A0 = self.amplitude
beta = self.beta
a = 0.5/T**2 + 1j * beta * omega0/T
return A0 * np.sqrt(np.pi/a) * np.exp(-(omega - omega0)**2/4./a)
def field(self, t):
return self.efield(t)
def efield(self, t):
omegac = self.omegac
t0 = self.delay
a = self.amplitude
tau = self.sigma
beta = self.beta
E = a * np.exp(-(t-t0)**2/2./tau**2)*np.exp(-1j * omegac * (t-t0))\
* np.exp(-1j * beta * omegac * (t-t0)**2/tau)
return E.real
def spectrogram(self, efield):
return
class Biphoton:
def __init__(self, omegap, bw, Te, p=None, q=None, phase_matching='sinc'):
self.omegap = omegap
self.pump_bandwidth = bw
self.phase_matching = phase_matching
self.signal_center_frequency = omegap / 2.
self.idler_center_frequency = omegap / 2.
self.entanglement_time = Te
self.jsa = None
self.jta = None
self.p = p
self.q = q
if p is not None:
self.dp = interval(p)
self.dq = interval(q)
self.grid = [p, q]
def pump(self, bandwidth):
alpha = np.sqrt(1. / (np.sqrt(2. * np.pi) * bandwidth)) * \
np.exp(-(p + q) ** 2 / 4. / bandwidth ** 2)
return alpha
def set_grid(self, p, q):
self.p = p
self.q = q
return
def get_jsa(self):
p = self.p
q = self.q
bw = self.pump_bandwidth
self.jsa = _jsa(p, q, bw, model=self.phase_matching,
Te=self.entanglement_time)
return self.jsa
def get_jta(self):
p = self.p
q = self.q
dp = p[1] - p[0]
dq = q[1] - q[0]
if self.jsa is not None:
ts, ti, jta = fft2(self.jsa, dp, dq)
self.jta = jta
return ts, ti, jta
else:
raise ValueError('jsa is None. Call get_jsa() first.')
def jta(self, ts, ti):
return
def detect(self):
if self.jsa is None:
raise ValueError('Please call get_jsa() to compute the jsa first.')
bw = self.pump_bandwidth
omega_s = self.signal_center_frequency
omega_i = self.idler_center_frequency
p = self.p
q = self.q
dp = p[1] - p[0]
dq = q[1] - q[0]
return _detection_amplitude(self.jsa, omega_s, omega_i, dp, dq)
def detect_si(self):
pass
def detect_is(self):
pass
def g2(self):
pass
def bandwidth(self, which='signal'):
p, q = self.p, self.q
dp = interval(p)
dq = interval(q)
f = self.jsa
if which == 'signal':
rho = rdm(f, dq, which='x')
sigma = sqrt(rho.diagonal().dot(p**2) * dp)
elif which == 'idler':
rho = rdm(f, dp, which='y')
sigma = sqrt(rho.diagonal().dot(q**2) * dq)
return sigma
def plt_jsa(self, xlabel=None, ylabel=None, fname=None):
if self.jsa is None:
self.get_jsa()
plt, ax = imshow(self.p * au2ev, self.q * au2ev, np.abs(self.jsa))
if xlabel is not None: ax.set_xlabel(xlabel)
if ylabel is not None: ax.set_xlabel(ylabel)
if fname is not None:
plt.savefig(fname)
plt.show()
return ax
def rdm(self, which='signal'):
if which == 'signal':
return rdm(self.jsa, dy=self.dq, which='x')
def jta(t2, t1, omegap, sigmap, Te):
omegas = omegap/2.
omegai = omegap/2.
tau = t2 - t1
amp = sqrt(sigmap/Te) * (2.*pi)**(3./4) * \
rect(tau/2./Te) * exp(-sigmap**2*(t1+t2)**2/4.) *\
exp(-1j * omegas * t1 - 1j*omegai * t2)
return amp
def rdm(f, dx=1, dy=1, which='x'):
if which == 'x':
rho = f.dot(dag(f)) * dy
elif which == 'y':
rho = f.T.dot(np.conj(f)) * dx
else:
raise ValueError('The argument which can only be x or y.')
return rho
def _jsa(p, q, pump_bw, model='sinc', Te=None):
P, Q = np.meshgrid(p, q)
sigma_plus = pump_bw
sigma_minus = 1. / Te
alpha = np.sqrt(1. / (np.sqrt(2. * np.pi) * sigma_plus)) * \
np.exp(-(P + Q) ** 2 / 4. / sigma_plus ** 2)
if model == 'Gaussian':
beta = np.sqrt(1. / np.sqrt(2. * np.pi) / sigma_minus) * \
np.exp(-(P - Q) ** 2 / 4. / sigma_minus ** 2)
jsa = sqrt(2) * alpha * beta
elif model == 'sinc':
beta = sqrt(0.5 * Te / np.pi) * sinc(Te * (P - Q) / 4.)
jsa = alpha * beta
return jsa
def hom(p, q, f, tau):
dp = interval(p)
dq = interval(q)
P, Q = np.meshgrid(p, q)
prob = np.zeros(len(tau))
for j in range(len(tau)):
t = tau[j]
prob[j] = 0.5 - 0.5 * np.sum(f.conj() * f.T *
np.exp(1j * (P - Q) * t)).real * dq*dp
return prob
def hom_schmidt(p, q, f, tau, method='rdm', nmodes=5):
dp = interval(p)
dq = interval(q)
s, phi, chi = schmidt_decompose(f, dp, dq, method=method,
nmodes=nmodes)
prob = np.zeros(len(tau))
for j in range(len(tau)):
t = tau[j]
for a in range(nmodes):
for b in range(nmodes):
tmp1 = (phi[:,a].conj() * chi[:, b] * np.exp(1j * p * t)).sum() * dp
tmp2 = (phi[:,b] * chi[:, a].conj() * np.exp(-1j * q * t)).sum() * dq
prob[j] += -2. * np.real(s[a] * s[b] * tmp1 * tmp2)
prob = 0.5 + prob/4.
return prob
def schmidt_decompose(f, dp, dq, nmodes=5, method='rdm'):
if method == 'rdm':
kernel1 = f.dot(dag(f)) * dq * dp
kernel2 = f.T.dot(f.conj()) * dp * dq
print('c: Schmidt coefficients')
s, phi = np.linalg.eig(kernel1)
s1, psi = np.linalg.eig(kernel2)
phi /= np.sqrt(dp)
psi /= np.sqrt(dq)
elif method == 'svd':
raise NotImplementedError
return np.sqrt(s[:nmodes]), phi[:, :nmodes], psi[:, :nmodes]
def _detection_amplitude(jsa, omega1, omega2, dp, dq):
t1, t2, jta = fft2(jsa, dp, dq)
dt2 = t2[1] - t2[0]
T1, T2 = np.meshgrid(t1, t2)
d = np.exp(-1j * omega2 * T1 - 1j * omega1 * T2) * \
np.sqrt(omega1 * omega2) * jta.T + \
np.exp(-1j * omega1 * T1 - 1j * omega2 * T2) * \
np.sqrt(omega1 * omega2) * jta
return t1, t2, d
if __name__ == '__main__':
from lime.units import au2ev, au2fs
p = np.linspace(-2, 2, 128) / au2ev
q = p
epp = Biphoton(omegap=3 / au2ev, bw=0.2 / au2ev, Te=10/au2fs,
p=p, q=q)
JSA = epp.get_jsa()
tau = np.linspace(-10, 10)/au2fs
prob = hom(p, q, JSA, tau)
fig, ax = plt.subplots()
ax.plot(tau, prob)
plt.show()
| true
| true
|
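A short usage sketch for the `Pulse` class in this record, assuming the `lime` package is importable; the parameter values are illustrative only:

```python
# Hypothetical use of the chirped Gaussian pulse defined above.
import numpy as np
from lime.optics import Pulse
from lime.units import au2fs, au2ev

pulse = Pulse(tau=5 / au2fs, omegac=2 / au2ev, beta=0.1)
t = np.linspace(-20, 20, 256) / au2fs
E = pulse.efield(t)    # real chirped field E(t)
A = pulse.envelop(t)   # Gaussian envelope
```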
1c457ce30654b4e60fe6ac59186a1c9d26859b54
| 10,225
|
py
|
Python
|
glance_docker/glance/common/auth.py
|
tobegit3hub/dockerized-software
|
3781bc1145b6fbb8d5fa2e2eaeaa3aa138a69632
|
[
"Apache-2.0"
] | null | null | null |
glance_docker/glance/common/auth.py
|
tobegit3hub/dockerized-software
|
3781bc1145b6fbb8d5fa2e2eaeaa3aa138a69632
|
[
"Apache-2.0"
] | null | null | null |
glance_docker/glance/common/auth.py
|
tobegit3hub/dockerized-software
|
3781bc1145b6fbb8d5fa2e2eaeaa3aa138a69632
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This auth module is intended to allow OpenStack client-tools to select from a
variety of authentication strategies, including NoAuth (the default), and
Keystone (an identity management system).
> auth_plugin = AuthPlugin(creds)
> auth_plugin.authenticate()
> auth_plugin.auth_token
abcdefg
> auth_plugin.management_url
http://service_endpoint/
"""
import httplib2
from oslo_log import log as logging
from oslo_serialization import jsonutils
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
import six.moves.urllib.parse as urlparse
from glance.common import exception
from glance import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
class BaseStrategy(object):
def __init__(self):
self.auth_token = None
# TODO(sirp): Should expose selecting public/internal/admin URL.
self.management_url = None
def authenticate(self):
raise NotImplementedError
@property
def is_authenticated(self):
raise NotImplementedError
@property
def strategy(self):
raise NotImplementedError
class NoAuthStrategy(BaseStrategy):
def authenticate(self):
pass
@property
def is_authenticated(self):
return True
@property
def strategy(self):
return 'noauth'
class KeystoneStrategy(BaseStrategy):
MAX_REDIRECTS = 10
def __init__(self, creds, insecure=False, configure_via_auth=True):
self.creds = creds
self.insecure = insecure
self.configure_via_auth = configure_via_auth
super(KeystoneStrategy, self).__init__()
def check_auth_params(self):
# Ensure that supplied credential parameters are as required
for required in ('username', 'password', 'auth_url',
'strategy'):
if self.creds.get(required) is None:
raise exception.MissingCredentialError(required=required)
if self.creds['strategy'] != 'keystone':
raise exception.BadAuthStrategy(expected='keystone',
received=self.creds['strategy'])
# For v2.0 also check tenant is present
if self.creds['auth_url'].rstrip('/').endswith('v2.0'):
if self.creds.get("tenant") is None:
raise exception.MissingCredentialError(required='tenant')
def authenticate(self):
"""Authenticate with the Keystone service.
There are a few scenarios to consider here:
1. Which version of Keystone are we using? v1 which uses headers to
pass the credentials, or v2 which uses a JSON encoded request body?
2. Keystone may respond back with a redirection using a 305 status
code.
3. We may attempt a v1 auth when v2 is what's called for. In this
case, we rewrite the url to contain /v2.0/ and retry using the v2
protocol.
"""
def _authenticate(auth_url):
# If OS_AUTH_URL is missing a trailing slash add one
if not auth_url.endswith('/'):
auth_url += '/'
token_url = urlparse.urljoin(auth_url, "tokens")
# 1. Check Keystone version
is_v2 = auth_url.rstrip('/').endswith('v2.0')
if is_v2:
self._v2_auth(token_url)
else:
self._v1_auth(token_url)
self.check_auth_params()
auth_url = self.creds['auth_url']
for _ in range(self.MAX_REDIRECTS):
try:
_authenticate(auth_url)
except exception.AuthorizationRedirect as e:
# 2. Keystone may redirect us
auth_url = e.url
except exception.AuthorizationFailure:
# 3. In some configurations nova makes redirection to
# v2.0 keystone endpoint. Also, new location does not
# contain real endpoint, only hostname and port.
if 'v2.0' not in auth_url:
auth_url = urlparse.urljoin(auth_url, 'v2.0/')
else:
# If we successfully auth'd, then memorize the correct auth_url
# for future use.
self.creds['auth_url'] = auth_url
break
else:
# Guard against a redirection loop
raise exception.MaxRedirectsExceeded(redirects=self.MAX_REDIRECTS)
def _v1_auth(self, token_url):
creds = self.creds
headers = {
'X-Auth-User': creds['username'],
'X-Auth-Key': creds['password']
}
tenant = creds.get('tenant')
if tenant:
headers['X-Auth-Tenant'] = tenant
resp, resp_body = self._do_request(token_url, 'GET', headers=headers)
def _management_url(self, resp):
for url_header in ('x-image-management-url',
'x-server-management-url',
'x-glance'):
try:
return resp[url_header]
except KeyError as e:
not_found = e
raise not_found
if resp.status in (200, 204):
try:
if self.configure_via_auth:
self.management_url = _management_url(self, resp)
self.auth_token = resp['x-auth-token']
except KeyError:
raise exception.AuthorizationFailure()
elif resp.status == 305:
raise exception.AuthorizationRedirect(uri=resp['location'])
elif resp.status == 400:
raise exception.AuthBadRequest(url=token_url)
elif resp.status == 401:
raise exception.NotAuthenticated()
elif resp.status == 404:
raise exception.AuthUrlNotFound(url=token_url)
else:
raise Exception(_('Unexpected response: %s') % resp.status)
def _v2_auth(self, token_url):
creds = self.creds
creds = {
"auth": {
"tenantName": creds['tenant'],
"passwordCredentials": {
"username": creds['username'],
"password": creds['password']
}
}
}
headers = {'Content-Type': 'application/json'}
req_body = jsonutils.dumps(creds)
resp, resp_body = self._do_request(
token_url, 'POST', headers=headers, body=req_body)
if resp.status == 200:
resp_auth = jsonutils.loads(resp_body)['access']
creds_region = self.creds.get('region')
if self.configure_via_auth:
endpoint = get_endpoint(resp_auth['serviceCatalog'],
endpoint_region=creds_region)
self.management_url = endpoint
self.auth_token = resp_auth['token']['id']
elif resp.status == 305:
raise exception.RedirectException(resp['location'])
elif resp.status == 400:
raise exception.AuthBadRequest(url=token_url)
elif resp.status == 401:
raise exception.NotAuthenticated()
elif resp.status == 404:
raise exception.AuthUrlNotFound(url=token_url)
else:
raise Exception(_('Unexpected response: %s') % resp.status)
@property
def is_authenticated(self):
return self.auth_token is not None
@property
def strategy(self):
return 'keystone'
def _do_request(self, url, method, headers=None, body=None):
headers = headers or {}
conn = httplib2.Http()
conn.force_exception_to_status_code = True
conn.disable_ssl_certificate_validation = self.insecure
headers['User-Agent'] = 'glance-client'
resp, resp_body = conn.request(url, method, headers=headers, body=body)
return resp, resp_body
def get_plugin_from_strategy(strategy, creds=None, insecure=False,
configure_via_auth=True):
if strategy == 'noauth':
return NoAuthStrategy()
elif strategy == 'keystone':
return KeystoneStrategy(creds, insecure,
configure_via_auth=configure_via_auth)
else:
raise Exception(_("Unknown auth strategy '%s'") % strategy)
def get_endpoint(service_catalog, service_type='image', endpoint_region=None,
endpoint_type='publicURL'):
"""
Select an endpoint from the service catalog
We search the full service catalog for services
matching both type and region. If the client
supplied no region then any 'image' endpoint
is considered a match. There must be one -- and
only one -- successful match in the catalog,
otherwise we will raise an exception.
"""
endpoint = None
for service in service_catalog:
s_type = None
try:
s_type = service['type']
except KeyError:
msg = _('Encountered service with no "type": %s') % s_type
LOG.warn(msg)
continue
if s_type == service_type:
for ep in service['endpoints']:
if endpoint_region is None or endpoint_region == ep['region']:
if endpoint is not None:
# This is a second match, abort
raise exception.RegionAmbiguity(region=endpoint_region)
endpoint = ep
if endpoint and endpoint.get(endpoint_type):
return endpoint[endpoint_type]
else:
raise exception.NoServiceEndpoint()
| 34.897611
| 79
| 0.603619
|
import httplib2
from oslo_log import log as logging
from oslo_serialization import jsonutils
from six.moves import range
import six.moves.urllib.parse as urlparse
from glance.common import exception
from glance import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
class BaseStrategy(object):
def __init__(self):
self.auth_token = None
self.management_url = None
def authenticate(self):
raise NotImplementedError
@property
def is_authenticated(self):
raise NotImplementedError
@property
def strategy(self):
raise NotImplementedError
class NoAuthStrategy(BaseStrategy):
def authenticate(self):
pass
@property
def is_authenticated(self):
return True
@property
def strategy(self):
return 'noauth'
class KeystoneStrategy(BaseStrategy):
MAX_REDIRECTS = 10
def __init__(self, creds, insecure=False, configure_via_auth=True):
self.creds = creds
self.insecure = insecure
self.configure_via_auth = configure_via_auth
super(KeystoneStrategy, self).__init__()
def check_auth_params(self):
for required in ('username', 'password', 'auth_url',
'strategy'):
if self.creds.get(required) is None:
raise exception.MissingCredentialError(required=required)
if self.creds['strategy'] != 'keystone':
raise exception.BadAuthStrategy(expected='keystone',
received=self.creds['strategy'])
if self.creds['auth_url'].rstrip('/').endswith('v2.0'):
if self.creds.get("tenant") is None:
raise exception.MissingCredentialError(required='tenant')
def authenticate(self):
def _authenticate(auth_url):
if not auth_url.endswith('/'):
auth_url += '/'
token_url = urlparse.urljoin(auth_url, "tokens")
is_v2 = auth_url.rstrip('/').endswith('v2.0')
if is_v2:
self._v2_auth(token_url)
else:
self._v1_auth(token_url)
self.check_auth_params()
auth_url = self.creds['auth_url']
for _ in range(self.MAX_REDIRECTS):
try:
_authenticate(auth_url)
except exception.AuthorizationRedirect as e:
auth_url = e.url
except exception.AuthorizationFailure:
if 'v2.0' not in auth_url:
auth_url = urlparse.urljoin(auth_url, 'v2.0/')
else:
# for future use.
self.creds['auth_url'] = auth_url
break
else:
# Guard against a redirection loop
raise exception.MaxRedirectsExceeded(redirects=self.MAX_REDIRECTS)
def _v1_auth(self, token_url):
creds = self.creds
headers = {
'X-Auth-User': creds['username'],
'X-Auth-Key': creds['password']
}
tenant = creds.get('tenant')
if tenant:
headers['X-Auth-Tenant'] = tenant
resp, resp_body = self._do_request(token_url, 'GET', headers=headers)
def _management_url(self, resp):
for url_header in ('x-image-management-url',
'x-server-management-url',
'x-glance'):
try:
return resp[url_header]
except KeyError as e:
not_found = e
raise not_found
if resp.status in (200, 204):
try:
if self.configure_via_auth:
self.management_url = _management_url(self, resp)
self.auth_token = resp['x-auth-token']
except KeyError:
raise exception.AuthorizationFailure()
elif resp.status == 305:
raise exception.AuthorizationRedirect(uri=resp['location'])
elif resp.status == 400:
raise exception.AuthBadRequest(url=token_url)
elif resp.status == 401:
raise exception.NotAuthenticated()
elif resp.status == 404:
raise exception.AuthUrlNotFound(url=token_url)
else:
raise Exception(_('Unexpected response: %s') % resp.status)
def _v2_auth(self, token_url):
creds = self.creds
creds = {
"auth": {
"tenantName": creds['tenant'],
"passwordCredentials": {
"username": creds['username'],
"password": creds['password']
}
}
}
headers = {'Content-Type': 'application/json'}
req_body = jsonutils.dumps(creds)
resp, resp_body = self._do_request(
token_url, 'POST', headers=headers, body=req_body)
if resp.status == 200:
resp_auth = jsonutils.loads(resp_body)['access']
creds_region = self.creds.get('region')
if self.configure_via_auth:
endpoint = get_endpoint(resp_auth['serviceCatalog'],
endpoint_region=creds_region)
self.management_url = endpoint
self.auth_token = resp_auth['token']['id']
elif resp.status == 305:
raise exception.RedirectException(resp['location'])
elif resp.status == 400:
raise exception.AuthBadRequest(url=token_url)
elif resp.status == 401:
raise exception.NotAuthenticated()
elif resp.status == 404:
raise exception.AuthUrlNotFound(url=token_url)
else:
raise Exception(_('Unexpected response: %s') % resp.status)
@property
def is_authenticated(self):
return self.auth_token is not None
@property
def strategy(self):
return 'keystone'
def _do_request(self, url, method, headers=None, body=None):
headers = headers or {}
conn = httplib2.Http()
conn.force_exception_to_status_code = True
conn.disable_ssl_certificate_validation = self.insecure
headers['User-Agent'] = 'glance-client'
resp, resp_body = conn.request(url, method, headers=headers, body=body)
return resp, resp_body
def get_plugin_from_strategy(strategy, creds=None, insecure=False,
configure_via_auth=True):
if strategy == 'noauth':
return NoAuthStrategy()
elif strategy == 'keystone':
return KeystoneStrategy(creds, insecure,
configure_via_auth=configure_via_auth)
else:
raise Exception(_("Unknown auth strategy '%s'") % strategy)
def get_endpoint(service_catalog, service_type='image', endpoint_region=None,
endpoint_type='publicURL'):
endpoint = None
for service in service_catalog:
s_type = None
try:
s_type = service['type']
except KeyError:
msg = _('Encountered service with no "type": %s') % s_type
LOG.warn(msg)
continue
if s_type == service_type:
for ep in service['endpoints']:
if endpoint_region is None or endpoint_region == ep['region']:
if endpoint is not None:
# This is a second match, abort
raise exception.RegionAmbiguity(region=endpoint_region)
endpoint = ep
if endpoint and endpoint.get(endpoint_type):
return endpoint[endpoint_type]
else:
raise exception.NoServiceEndpoint()
| true
| true
|
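Following the module docstring, a sketch of the intended client flow; the endpoint URL and credentials are placeholders, and `authenticate()` requires a reachable Keystone:

```python
from glance.common import auth

creds = {
    'strategy': 'keystone',
    'username': 'demo',
    'password': 'secret',
    'tenant': 'demo',            # required for v2.0 auth URLs
    'auth_url': 'http://keystone:5000/v2.0',
}
plugin = auth.get_plugin_from_strategy('keystone', creds=creds)
plugin.authenticate()
print(plugin.auth_token, plugin.management_url)
```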
1c457cf430666778cca067fee9e66d2b156178b1
| 2,193
|
py
|
Python
|
ropgenerator/exploit/syscall/SyscallLinuxX86.py
|
avltree9798/ropgenerator
|
c63c81f03e8653dc3911e21300c00003a4224f6a
|
[
"MIT"
] | 1
|
2021-01-07T13:16:19.000Z
|
2021-01-07T13:16:19.000Z
|
ropgenerator/exploit/syscall/SyscallLinuxX86.py
|
avltree9798/ropgenerator
|
c63c81f03e8653dc3911e21300c00003a4224f6a
|
[
"MIT"
] | null | null | null |
ropgenerator/exploit/syscall/SyscallLinuxX86.py
|
avltree9798/ropgenerator
|
c63c81f03e8653dc3911e21300c00003a4224f6a
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
# SyscallLinuxX86 module: build syscalls for Linux on x86
from ropgenerator.exploit.syscall.SyscallGeneric import Syscall, ArgType
from ropgenerator.core.Architecture import *
mprotect = Syscall('int', 'mprotect', \
[('void*', 'addr'),('size_t','len'),('int','prot')], [ArgType.INT, ArgType.INT, ArgType.INT],\
[RegX86.EBX, RegX86.ECX, RegX86.EDX], [(RegX86.EAX, 0x7d)])
execve = Syscall('int', 'execve', \
[('char*', 'cmd'),('char**','argv'),('char**', 'envp')], [ArgType.STRING, ArgType.INT,ArgType.INT],\
[RegX86.EBX,RegX86.ECX,RegX86.EDX], [(RegX86.EAX, 11)] )
read = Syscall('int', 'read', \
[('unsigned int','fd'),('char*','buf'),('size_t','count')], [ArgType.INT, ArgType.INT_OR_STRING, ArgType.INT], \
[RegX86.EBX,RegX86.ECX,RegX86.EDX], [(RegX86.EAX, 3)])
write = Syscall('int', 'write', \
[('unsigned int','fd'),('const char*','buf'),('size_t','count')], [ArgType.INT, ArgType.INT_OR_STRING, ArgType.INT], \
[RegX86.EBX,RegX86.ECX,RegX86.EDX], [(RegX86.EAX, 4)])
chmod = Syscall('int', 'chmod', \
[('const char*','filename'),('mode_t','mode')], [ArgType.INT_OR_STRING, ArgType.INT], \
[RegX86.EBX, RegX86.ECX], [(RegX86.EAX,15)])
setuid = Syscall('int', 'setuid', \
[('uid_t', 'uid')], [ArgType.INT], \
[RegX86.EBX], [(RegX86.EAX,23)])
# MMAP with flags = MAP_ANONYMOUS (no fd and offset)
mmap_anon = Syscall('void*', 'mmap_anon', \
[('unsigned long','addr'),('unsigned long','len'),('unsigned long','prot')],\
[ArgType.INT, ArgType.INT, ArgType.INT], \
[RegX86.EBX,RegX86.ECX,RegX86.EDX], [(RegX86.EAX,9), ('esi', 0x20)], function="mmap")
syscalls_list = [mprotect, execve, read, write, chmod, setuid]
## All available syscalls
available = dict()
for syscall_object in syscalls_list:
available[syscall_object.name()] = syscall_object
####################
# Useful functions #
####################
def is_supported(syscall_name):
return (syscall_name in available)
def get_syscall(syscall_name):
if( not syscall_name in available ):
return None
return available[syscall_name]
def available_syscalls():
global available
return available
| 37.810345
| 122
| 0.632011
|
from ropgenerator.exploit.syscall.SyscallGeneric import Syscall, ArgType
from ropgenerator.core.Architecture import *
mprotect = Syscall('int', 'mprotect', \
[('void*', 'addr'),('size_t','len'),('int','prot')], [ArgType.INT, ArgType.INT, ArgType.INT],\
[RegX86.EBX, RegX86.ECX, RegX86.EDX], [(RegX86.EAX, 0x7d)])
execve = Syscall('int', 'execve', \
[('char*', 'cmd'),('char**','argv'),('char**', 'envp')], [ArgType.STRING, ArgType.INT,ArgType.INT],\
[RegX86.EBX,RegX86.ECX,RegX86.EDX], [(RegX86.EAX, 11)] )
read = Syscall('int', 'read', \
[('unsigned int','fd'),('char*','buf'),('size_t','count')], [ArgType.INT, ArgType.INT_OR_STRING, ArgType.INT], \
[RegX86.EBX,RegX86.ECX,RegX86.EDX], [(RegX86.EAX, 3)])
write = Syscall('int', 'write', \
[('unsigned int','fd'),('const char*','buf'),('size_t','count')], [ArgType.INT, ArgType.INT_OR_STRING, ArgType.INT], \
[RegX86.EBX,RegX86.ECX,RegX86.EDX], [(RegX86.EAX, 4)])
chmod = Syscall('int', 'chmod', \
[('const char*','filename'),('mode_t','mode')], [ArgType.INT_OR_STRING, ArgType.INT], \
[RegX86.EBX, RegX86.ECX], [(RegX86.EAX,15)])
setuid = Syscall('int', 'setuid', \
[('uid_t', 'uid')], [ArgType.INT], \
[RegX86.EBX], [(RegX86.EAX,23)])
mmap_anon = Syscall('void*', 'mmap_anon', \
[('unsigned long','addr'),('unsigned long','len'),('unsigned long','prot')],\
[ArgType.INT, ArgType.INT, ArgType.INT], \
[RegX86.EBX,RegX86.ECX,RegX86.EDX], [(RegX86.EAX,9), ('esi', 0x20)], function="mmap")
syscalls_list = [mprotect, execve, read, write, chmod, setuid]
available = dict()
for syscall_object in syscalls_list:
available[syscall_object.name()] = syscall_object
| true
| true
|
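A hypothetical lookup against the registry this module builds:

```python
from ropgenerator.exploit.syscall import SyscallLinuxX86

if SyscallLinuxX86.is_supported('execve'):
    execve = SyscallLinuxX86.get_syscall('execve')
    print(execve.name())  # arguments go in EBX/ECX/EDX with EAX = 11
```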
1c457d19c80113b1224bc3ece869c3003a166dee
| 690
|
py
|
Python
|
molecule/default/tests/test_default.py
|
dhs-ncats/ansible-role-htop
|
a7848a00693e9e841e3546d879968704228b47a4
|
[
"CC0-1.0"
] | null | null | null |
molecule/default/tests/test_default.py
|
dhs-ncats/ansible-role-htop
|
a7848a00693e9e841e3546d879968704228b47a4
|
[
"CC0-1.0"
] | null | null | null |
molecule/default/tests/test_default.py
|
dhs-ncats/ansible-role-htop
|
a7848a00693e9e841e3546d879968704228b47a4
|
[
"CC0-1.0"
] | null | null | null |
"""Module containing the tests for the default scenario."""
# Standard Python Libraries
import os
# Third-Party Libraries
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("all")
@pytest.mark.parametrize("pkg", ["htop"])
def test_packages(host, pkg):
"""Test that the appropriate packages were installed."""
package = host.package(pkg)
assert package.is_installed
@pytest.mark.parametrize("file", ["/etc/htoprc"])
def test_files(host, file):
"""Test that config files were copied over as expected."""
f = host.file(file)
assert f.exists
| 23.793103
| 63
| 0.731884
|
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("all")
@pytest.mark.parametrize("pkg", ["htop"])
def test_packages(host, pkg):
package = host.package(pkg)
assert package.is_installed
@pytest.mark.parametrize("file", ["/etc/htoprc"])
def test_files(host, file):
f = host.file(file)
assert f.exists
| true
| true
|
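The same testinfra pattern extends naturally to runtime checks; a hypothetical additional test, not in the original file:

```python
@pytest.mark.parametrize("cmd", ["htop --version"])
def test_commands(host, cmd):
    """Check that the installed binary actually runs."""
    assert host.run(cmd).rc == 0
```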
1c457edb6c9ecbc1d978023d080823ab44d6d1d2
| 560
|
py
|
Python
|
integration/emulator/test.py
|
cvlabmiet/master-programming-example
|
8a4a231ba2b72a93ae14da2c04e17b2ae3fc6651
|
[
"MIT"
] | null | null | null |
integration/emulator/test.py
|
cvlabmiet/master-programming-example
|
8a4a231ba2b72a93ae14da2c04e17b2ae3fc6651
|
[
"MIT"
] | null | null | null |
integration/emulator/test.py
|
cvlabmiet/master-programming-example
|
8a4a231ba2b72a93ae14da2c04e17b2ae3fc6651
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys, random, array
sys.dont_write_bytecode = True
from device import Pram, Lram
test_vector = list(range(0, 200))
random.shuffle(test_vector)
pram = Pram()
lram = Lram()
lram[:] = bytes(test_vector)
pram[:] = b'[u16:200:400]add(u8:0, u8:100)'
pram.run(lram)
view = memoryview(lram)[200:400].cast('H')
error_count = 0
for x in range(len(view)):
if view[x] != test_vector[x] + test_vector[x + 100]:
print("Error:", x, view[x], test_vector[x], test_vector[x + 100])
error_count += 1
sys.exit(error_count)
| 20.740741
| 73
| 0.664286
|
import sys, random, array
sys.dont_write_bytecode = True
from device import Pram, Lram
test_vector = list(range(0, 200))
random.shuffle(test_vector)
pram = Pram()
lram = Lram()
lram[:] = bytes(test_vector)
pram[:] = b'[u16:200:400]add(u8:0, u8:100)'
pram.run(lram)
view = memoryview(lram)[200:400].cast('H')
error_count = 0
for x in range(len(view)):
if view[x] != test_vector[x] + test_vector[x + 100]:
print("Error:", x, view[x], test_vector[x], test_vector[x + 100])
error_count += 1
sys.exit(error_count)
| true
| true
|
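The program string `[u16:200:400]add(u8:0, u8:100)` appears to ask the device to add the u8 values at offsets 0 and 100 element-wise and write u16 results into lram[200:400]; that reading is an assumption inferred from the verification loop. A plain-numpy reference model of the expected result:

```python
# Reference computation, no Pram/Lram devices involved.
import numpy as np
a = np.array(test_vector[:100], dtype=np.uint16)
b = np.array(test_vector[100:200], dtype=np.uint16)
expected = a + b  # what view[x] should hold for x in 0..99
assert list(expected) == [test_vector[i] + test_vector[i + 100] for i in range(100)]
```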
1c457f19469eb820eb88da2d97435a799d4d316b
| 1,354
|
py
|
Python
|
crslab/model/__init__.py
|
Xiaolong-Qi/CRSLab
|
d507378c86f4996727bf062482e1f224486d4533
|
[
"MIT"
] | 1
|
2021-01-06T10:39:10.000Z
|
2021-01-06T10:39:10.000Z
|
crslab/model/__init__.py
|
Xiaolong-Qi/CRSLab
|
d507378c86f4996727bf062482e1f224486d4533
|
[
"MIT"
] | null | null | null |
crslab/model/__init__.py
|
Xiaolong-Qi/CRSLab
|
d507378c86f4996727bf062482e1f224486d4533
|
[
"MIT"
] | null | null | null |
# @Time : 2020/11/22
# @Author : Kun Zhou
# @Email : francis_kun_zhou@163.com
# UPDATE:
# @Time : 2020/11/24, 2020/12/24
# @Author : Kun Zhou, Xiaolei Wang
# @Email : francis_kun_zhou@163.com, wxl1999@foxmail.com
from loguru import logger
from .conversation import *
from .kbrd import *
from .kgsf import *
from .policy import *
from .recommendation import *
from .redial import *
from .tgredial import *
Model_register_table = {
'KGSF': KGSFModel,
'KBRD': KBRDModel,
'TGRec': TGRecModel,
'TGConv': TGConvModel,
'TGPolicy': TGPolicyModel,
'ReDialRec': ReDialRecModel,
'ReDialConv': ReDialConvModel,
'GPT2': GPT2Model,
'Transformer': TransformerModel,
'ConvBERT': ConvBERTModel,
'ProfileBERT': ProfileBERTModel,
'TopicBERT': TopicBERTModel,
'PMI': PMIModel,
'MGCG': MGCGModel,
'BERT': BERTModel,
'SASREC': SASRECModel,
'GRU4REC': GRU4RECModel,
'Popularity': PopularityModel,
'TextCNN': TextCNNModel
}
def get_model(config, model_name, device, vocab, side_data=None):
if model_name in Model_register_table:
model = Model_register_table[model_name](config, device, vocab, side_data)
logger.info(f'[Build model {model_name}]')
return model
else:
raise NotImplementedError('Model [{}] has not been implemented'.format(model_name))
| 27.08
| 91
| 0.6839
|
from loguru import logger
from .conversation import *
from .kbrd import *
from .kgsf import *
from .policy import *
from .recommendation import *
from .redial import *
from .tgredial import *
Model_register_table = {
'KGSF': KGSFModel,
'KBRD': KBRDModel,
'TGRec': TGRecModel,
'TGConv': TGConvModel,
'TGPolicy': TGPolicyModel,
'ReDialRec': ReDialRecModel,
'ReDialConv': ReDialConvModel,
'GPT2': GPT2Model,
'Transformer': TransformerModel,
'ConvBERT': ConvBERTModel,
'ProfileBERT': ProfileBERTModel,
'TopicBERT': TopicBERTModel,
'PMI': PMIModel,
'MGCG': MGCGModel,
'BERT': BERTModel,
'SASREC': SASRECModel,
'GRU4REC': GRU4RECModel,
'Popularity': PopularityModel,
'TextCNN': TextCNNModel
}
def get_model(config, model_name, device, vocab, side_data=None):
if model_name in Model_register_table:
model = Model_register_table[model_name](config, device, vocab, side_data)
logger.info(f'[Build model {model_name}]')
return model
else:
raise NotImplementedError('Model [{}] has not been implemented'.format(model_name))
| true
| true
|
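The registry dispatch above fails loudly for unknown names; a minimal check of that path (assumes `crslab` and `torch` are importable; real `config`/`vocab` objects come from the data pipeline):

```python
import torch
from crslab.model import get_model

try:
    get_model(config={}, model_name='NoSuchModel', device=torch.device('cpu'),
              vocab={}, side_data=None)
except NotImplementedError as err:
    print(err)  # Model [NoSuchModel] has not been implemented
```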
1c457faa9ac5bd092b0c88919dffda9a035f0f60
| 8,729
|
py
|
Python
|
ckan_cloud_operator/providers/storage/minio/manager.py
|
mickeyrouash/ckan-cloud-operator
|
10e38f13964af30fe57b07e8d8a3b7521ed69cc2
|
[
"MIT"
] | null | null | null |
ckan_cloud_operator/providers/storage/minio/manager.py
|
mickeyrouash/ckan-cloud-operator
|
10e38f13964af30fe57b07e8d8a3b7521ed69cc2
|
[
"MIT"
] | null | null | null |
ckan_cloud_operator/providers/storage/minio/manager.py
|
mickeyrouash/ckan-cloud-operator
|
10e38f13964af30fe57b07e8d8a3b7521ed69cc2
|
[
"MIT"
] | null | null | null |
#### standard provider code ####
# import the correct PROVIDER_SUBMODULE and PROVIDER_ID constants for your provider
from .constants import PROVIDER_ID
from ..constants import PROVIDER_SUBMODULE
# define common provider functions based on the constants
from ckan_cloud_operator.providers import manager as providers_manager
def _get_resource_name(suffix=None): return providers_manager.get_resource_name(PROVIDER_SUBMODULE, PROVIDER_ID, suffix=suffix)
def _get_resource_labels(for_deployment=False, suffix=None): return providers_manager.get_resource_labels(PROVIDER_SUBMODULE, PROVIDER_ID, for_deployment=for_deployment, suffix=suffix)
def _get_resource_annotations(suffix=None): return providers_manager.get_resource_annotations(PROVIDER_SUBMODULE, PROVIDER_ID, suffix=suffix)
def _set_provider(): providers_manager.set_provider(PROVIDER_SUBMODULE, PROVIDER_ID)
def _config_set(key=None, value=None, values=None, namespace=None, is_secret=False, suffix=None): providers_manager.config_set(PROVIDER_SUBMODULE, PROVIDER_ID, key=key, value=value, values=values, namespace=namespace, is_secret=is_secret, suffix=suffix)
def _config_get(key=None, default=None, required=False, namespace=None, is_secret=False, suffix=None): return providers_manager.config_get(PROVIDER_SUBMODULE, PROVIDER_ID, key=key, default=default, required=required, namespace=namespace, is_secret=is_secret, suffix=suffix)
def _config_interactive_set(default_values, namespace=None, is_secret=False, suffix=None, from_file=False, interactive=False): providers_manager.config_interactive_set(PROVIDER_SUBMODULE, PROVIDER_ID, default_values, namespace, is_secret, suffix, from_file, interactive)
################################
# custom provider code starts here
#
import os
import binascii
import yaml
import json
from ckan_cloud_operator import kubectl
from ckan_cloud_operator import logs
from ckan_cloud_operator.routers import manager as routers_manager
def initialize(interactive=False, storage_suffix=None, use_existing_disk_name=None, dry_run=False):
_config_interactive_set({
'disk-size-gb': None,
**({} if storage_suffix else {'router-name': routers_manager.get_default_infra_router_name()})
}, interactive=interactive, suffix=storage_suffix)
_apply_secret(storage_suffix=storage_suffix)
_apply_deployment(
_get_or_create_volume(
storage_suffix=storage_suffix,
use_existing_disk_name=use_existing_disk_name
),
storage_suffix=storage_suffix,
dry_run=dry_run
)
_apply_service(storage_suffix=storage_suffix, dry_run=dry_run)
if not storage_suffix:
_update_route(storage_suffix=storage_suffix, dry_run=dry_run)
_set_provider()
def print_credentials(raw=False, storage_suffix=None):
hostname, access_key, secret_key = get_credentials(storage_suffix=storage_suffix)
if raw:
print(f'https://{hostname} {access_key} {secret_key}')
else:
print('Minio admin credentials:')
print('External Domain: ' + hostname)
print('Access Key: ' + access_key)
print('Secret Key: ' + secret_key)
print('\nto use with minio-client, run the following command:')
print(f'mc config host add my-storage https://{hostname} {access_key} {secret_key}')
def get_credentials(storage_suffix=None):
return [_get_frontend_hostname(storage_suffix=storage_suffix)] + [
_config_get(key, required=True, is_secret=True, suffix=storage_suffix)
for key in ['MINIO_ACCESS_KEY', 'MINIO_SECRET_KEY']
]
def _generate_password(l):
return binascii.hexlify(os.urandom(l)).decode()
def _apply_secret(storage_suffix=None):
access_key = _config_get('MINIO_ACCESS_KEY', required=False, is_secret=True, suffix=storage_suffix) or _generate_password(8)
secret_key = _config_get('MINIO_SECRET_KEY', required=False, is_secret=True, suffix=storage_suffix) or _generate_password(12)
_config_set(values={'MINIO_ACCESS_KEY': access_key, 'MINIO_SECRET_KEY': secret_key}, is_secret=True, suffix=storage_suffix)
def _apply_deployment(volume_spec, storage_suffix=None, dry_run=False):
node_selector = volume_spec.pop('nodeSelector', None)
if node_selector:
pod_scheduling = {'nodeSelector': node_selector}
else:
pod_scheduling = {}
container_spec_overrides = _config_get('container-spec-overrides', required=False, default=None, suffix=storage_suffix)
kubectl.apply(kubectl.get_deployment(
_get_resource_name(suffix=storage_suffix),
_get_resource_labels(for_deployment=True, suffix=storage_suffix),
{
'replicas': 1,
'revisionHistoryLimit': 10,
'strategy': {'type': 'Recreate', },
'template': {
'metadata': {
'labels': _get_resource_labels(for_deployment=True, suffix=storage_suffix),
'annotations': _get_resource_annotations(suffix=storage_suffix)
},
'spec': {
**pod_scheduling,
'containers': [
{
'name': 'minio',
'image': 'minio/minio',
'args': ['server', '/export'],
'envFrom': [{'secretRef': {'name': _get_resource_name(suffix=storage_suffix)}}],
'ports': [{'containerPort': 9000}],
'volumeMounts': [
{
'name': 'minio-data',
'mountPath': '/export',
}
],
**(json.loads(container_spec_overrides) if container_spec_overrides else {})
}
],
'volumes': [
dict(volume_spec, name='minio-data')
]
}
}
}
), dry_run=dry_run)
def _apply_service(storage_suffix=None, dry_run=False):
kubectl.apply(kubectl.get_resource(
'v1', 'Service',
_get_resource_name(suffix=storage_suffix),
_get_resource_labels(suffix=storage_suffix),
spec={
'ports': [
{'name': '9000', 'port': 9000}
],
'selector': {
'app': _get_resource_labels(for_deployment=True, suffix=storage_suffix)['app']
}
}
), dry_run=dry_run)
def _get_or_create_volume(storage_suffix=None, use_existing_disk_name=None):
disk_size_gb = _config_get('disk-size-gb', required=True, suffix=storage_suffix)
volume_spec = _config_get('volume-spec', required=False, suffix=storage_suffix)
if volume_spec:
        volume_spec = yaml.safe_load(volume_spec)
else:
from ckan_cloud_operator.providers.cluster import manager as cluster_manager
volume_spec = cluster_manager.create_volume(
disk_size_gb,
_get_resource_labels(suffix=storage_suffix),
use_existing_disk_name=use_existing_disk_name
)
_config_set('volume-spec', yaml.dump(volume_spec, default_flow_style=False), suffix=storage_suffix)
return volume_spec
def _update_route(storage_suffix=None, dry_run=False):
backend_url_target_id = _get_backend_url_target_id(storage_suffix=storage_suffix)
router_name = _config_get('router-name', required=True, suffix=storage_suffix)
if not routers_manager.get_backend_url_routes(backend_url_target_id):
deployment_name = _get_resource_name(suffix=storage_suffix)
namespace = _get_namespace()
subdomain_route = {
'target-type': 'backend-url',
'target-resource-id': backend_url_target_id,
'backend-url': f'http://{deployment_name}.{namespace}:9000',
}
if dry_run:
logs.info('create_subdomain_route', router_name, subdomain_route)
else:
routers_manager.create_subdomain_route(router_name, subdomain_route)
if not dry_run:
routers_manager.update(router_name, wait_ready=True)
def _get_namespace():
return 'ckan-cloud'
def _get_frontend_hostname(storage_suffix=None):
backend_url_target_id = _get_backend_url_target_id(storage_suffix=storage_suffix)
routes = routers_manager.get_backend_url_routes(backend_url_target_id)
assert storage_suffix or len(routes) == 1
if len(routes) < 1:
return 'localhost:9000'
else:
return routers_manager.get_route_frontend_hostname(routes[0])
def _get_backend_url_target_id(storage_suffix=None):
return f'minio-{storage_suffix}' if storage_suffix else 'minio'
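# Minimal usage sketch (hypothetical session; assumes cluster access is already
# configured for ckan-cloud-operator, and 'extra' is a made-up suffix):
#   initialize(interactive=True)        # provision the default minio instance
#   print_credentials()                 # print endpoint and access/secret keys
#   initialize(storage_suffix='extra')  # provision an additional, suffixed instance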
| 44.764103
| 273
| 0.680719
|
from .constants import PROVIDER_ID
from ..constants import PROVIDER_SUBMODULE
from ckan_cloud_operator.providers import manager as providers_manager
def _get_resource_name(suffix=None): return providers_manager.get_resource_name(PROVIDER_SUBMODULE, PROVIDER_ID, suffix=suffix)
def _get_resource_labels(for_deployment=False, suffix=None): return providers_manager.get_resource_labels(PROVIDER_SUBMODULE, PROVIDER_ID, for_deployment=for_deployment, suffix=suffix)
def _get_resource_annotations(suffix=None): return providers_manager.get_resource_annotations(PROVIDER_SUBMODULE, PROVIDER_ID, suffix=suffix)
def _set_provider(): providers_manager.set_provider(PROVIDER_SUBMODULE, PROVIDER_ID)
def _config_set(key=None, value=None, values=None, namespace=None, is_secret=False, suffix=None): providers_manager.config_set(PROVIDER_SUBMODULE, PROVIDER_ID, key=key, value=value, values=values, namespace=namespace, is_secret=is_secret, suffix=suffix)
def _config_get(key=None, default=None, required=False, namespace=None, is_secret=False, suffix=None): return providers_manager.config_get(PROVIDER_SUBMODULE, PROVIDER_ID, key=key, default=default, required=required, namespace=namespace, is_secret=is_secret, suffix=suffix)
def _config_interactive_set(default_values, namespace=None, is_secret=False, suffix=None, from_file=False, interactive=False): providers_manager.config_interactive_set(PROVIDER_SUBMODULE, PROVIDER_ID, default_values, namespace, is_secret, suffix, from_file, interactive)
import os
import binascii
import yaml
import json
from ckan_cloud_operator import kubectl
from ckan_cloud_operator import logs
from ckan_cloud_operator.routers import manager as routers_manager
def initialize(interactive=False, storage_suffix=None, use_existing_disk_name=None, dry_run=False):
    _config_interactive_set({
        'disk-size-gb': None,
        **({} if storage_suffix else {'router-name': routers_manager.get_default_infra_router_name()})
    }, interactive=interactive, suffix=storage_suffix)
_apply_secret(storage_suffix=storage_suffix)
_apply_deployment(
_get_or_create_volume(
storage_suffix=storage_suffix,
use_existing_disk_name=use_existing_disk_name
),
storage_suffix=storage_suffix,
dry_run=dry_run
)
_apply_service(storage_suffix=storage_suffix, dry_run=dry_run)
if not storage_suffix:
_update_route(storage_suffix=storage_suffix, dry_run=dry_run)
_set_provider()
def print_credentials(raw=False, storage_suffix=None):
hostname, access_key, secret_key = get_credentials(storage_suffix=storage_suffix)
if raw:
print(f'https://{hostname} {access_key} {secret_key}')
else:
print('Minio admin credentials:')
print('External Domain: ' + hostname)
print('Access Key: ' + access_key)
print('Secret Key: ' + secret_key)
print('\nto use with minio-client, run the following command:')
print(f'mc config host add my-storage https://{hostname} {access_key} {secret_key}')
def get_credentials(storage_suffix=None):
return [_get_frontend_hostname(storage_suffix=storage_suffix)] + [
_config_get(key, required=True, is_secret=True, suffix=storage_suffix)
for key in ['MINIO_ACCESS_KEY', 'MINIO_SECRET_KEY']
]
def _generate_password(l):
return binascii.hexlify(os.urandom(l)).decode()
def _apply_secret(storage_suffix=None):
access_key = _config_get('MINIO_ACCESS_KEY', required=False, is_secret=True, suffix=storage_suffix) or _generate_password(8)
secret_key = _config_get('MINIO_SECRET_KEY', required=False, is_secret=True, suffix=storage_suffix) or _generate_password(12)
_config_set(values={'MINIO_ACCESS_KEY': access_key, 'MINIO_SECRET_KEY': secret_key}, is_secret=True, suffix=storage_suffix)
def _apply_deployment(volume_spec, storage_suffix=None, dry_run=False):
node_selector = volume_spec.pop('nodeSelector', None)
if node_selector:
pod_scheduling = {'nodeSelector': node_selector}
else:
pod_scheduling = {}
container_spec_overrides = _config_get('container-spec-overrides', required=False, default=None, suffix=storage_suffix)
kubectl.apply(kubectl.get_deployment(
_get_resource_name(suffix=storage_suffix),
_get_resource_labels(for_deployment=True, suffix=storage_suffix),
{
'replicas': 1,
'revisionHistoryLimit': 10,
'strategy': {'type': 'Recreate', },
'template': {
'metadata': {
'labels': _get_resource_labels(for_deployment=True, suffix=storage_suffix),
'annotations': _get_resource_annotations(suffix=storage_suffix)
},
'spec': {
**pod_scheduling,
'containers': [
{
'name': 'minio',
'image': 'minio/minio',
'args': ['server', '/export'],
'envFrom': [{'secretRef': {'name': _get_resource_name(suffix=storage_suffix)}}],
'ports': [{'containerPort': 9000}],
'volumeMounts': [
{
'name': 'minio-data',
'mountPath': '/export',
}
],
**(json.loads(container_spec_overrides) if container_spec_overrides else {})
}
],
'volumes': [
dict(volume_spec, name='minio-data')
]
}
}
}
), dry_run=dry_run)
def _apply_service(storage_suffix=None, dry_run=False):
kubectl.apply(kubectl.get_resource(
'v1', 'Service',
_get_resource_name(suffix=storage_suffix),
_get_resource_labels(suffix=storage_suffix),
spec={
'ports': [
{'name': '9000', 'port': 9000}
],
'selector': {
'app': _get_resource_labels(for_deployment=True, suffix=storage_suffix)['app']
}
}
), dry_run=dry_run)
def _get_or_create_volume(storage_suffix=None, use_existing_disk_name=None):
disk_size_gb = _config_get('disk-size-gb', required=True, suffix=storage_suffix)
volume_spec = _config_get('volume-spec', required=False, suffix=storage_suffix)
if volume_spec:
        volume_spec = yaml.safe_load(volume_spec)
else:
from ckan_cloud_operator.providers.cluster import manager as cluster_manager
volume_spec = cluster_manager.create_volume(
disk_size_gb,
_get_resource_labels(suffix=storage_suffix),
use_existing_disk_name=use_existing_disk_name
)
_config_set('volume-spec', yaml.dump(volume_spec, default_flow_style=False), suffix=storage_suffix)
return volume_spec
def _update_route(storage_suffix=None, dry_run=False):
backend_url_target_id = _get_backend_url_target_id(storage_suffix=storage_suffix)
router_name = _config_get('router-name', required=True, suffix=storage_suffix)
if not routers_manager.get_backend_url_routes(backend_url_target_id):
deployment_name = _get_resource_name(suffix=storage_suffix)
namespace = _get_namespace()
subdomain_route = {
'target-type': 'backend-url',
'target-resource-id': backend_url_target_id,
'backend-url': f'http://{deployment_name}.{namespace}:9000',
}
if dry_run:
logs.info('create_subdomain_route', router_name, subdomain_route)
else:
routers_manager.create_subdomain_route(router_name, subdomain_route)
if not dry_run:
routers_manager.update(router_name, wait_ready=True)
def _get_namespace():
return 'ckan-cloud'
def _get_frontend_hostname(storage_suffix=None):
backend_url_target_id = _get_backend_url_target_id(storage_suffix=storage_suffix)
routes = routers_manager.get_backend_url_routes(backend_url_target_id)
assert storage_suffix or len(routes) == 1
if len(routes) < 1:
return 'localhost:9000'
else:
return routers_manager.get_route_frontend_hostname(routes[0])
def _get_backend_url_target_id(storage_suffix=None):
return f'minio-{storage_suffix}' if storage_suffix else 'minio'
| true
| true
|
1c4580a46e7319d59ea9439c79f77deb41aaa8c2
| 5,708
|
py
|
Python
|
luigi/rpc.py
|
miku/luigi
|
889ef2af64e2aa7d0cc65caef69a241ac91e5ff9
|
[
"Apache-2.0"
] | 4
|
2017-03-21T20:01:19.000Z
|
2022-03-29T16:31:41.000Z
|
luigi/rpc.py
|
miku/luigi
|
889ef2af64e2aa7d0cc65caef69a241ac91e5ff9
|
[
"Apache-2.0"
] | 9
|
2017-03-22T23:38:48.000Z
|
2019-01-28T21:13:06.000Z
|
luigi/rpc.py
|
miku/luigi
|
889ef2af64e2aa7d0cc65caef69a241ac91e5ff9
|
[
"Apache-2.0"
] | 2
|
2015-05-04T22:46:20.000Z
|
2016-07-14T17:58:57.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implementation of the REST interface between the workers and the server.
rpc.py implements the client side of it, server.py implements the server side.
See :doc:`/central_scheduler` for more info.
"""
import os
import json
import logging
import socket
import time
from luigi.six.moves.urllib.parse import urljoin, urlencode, urlparse
from luigi.six.moves.urllib.request import urlopen
from luigi.six.moves.urllib.error import URLError
from luigi import configuration
from luigi.scheduler import RPC_METHODS
HAS_UNIX_SOCKET = True
HAS_REQUESTS = True
try:
import requests_unixsocket as requests
except ImportError:
HAS_UNIX_SOCKET = False
try:
import requests
except ImportError:
HAS_REQUESTS = False
logger = logging.getLogger('luigi-interface') # TODO: 'interface'?
def _urljoin(base, url):
"""
Join relative URLs to base URLs like urllib.parse.urljoin but support
arbitrary URIs (esp. 'http+unix://').
"""
parsed = urlparse(base)
scheme = parsed.scheme
return urlparse(
urljoin(parsed._replace(scheme='http').geturl(), url)
)._replace(scheme=scheme).geturl()
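# For example (illustrative values only):
#   _urljoin('http+unix://%2Fvar%2Frun%2Fluigi.sock', '/api/ping')
#   -> 'http+unix://%2Fvar%2Frun%2Fluigi.sock/api/ping'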
class RPCError(Exception):
def __init__(self, message, sub_exception=None):
super(RPCError, self).__init__(message)
self.sub_exception = sub_exception
class URLLibFetcher(object):
raises = (URLError, socket.timeout)
def fetch(self, full_url, body, timeout):
body = urlencode(body).encode('utf-8')
return urlopen(full_url, body, timeout).read().decode('utf-8')
class RequestsFetcher(object):
def __init__(self, session):
from requests import exceptions as requests_exceptions
self.raises = requests_exceptions.RequestException
self.session = session
self.process_id = os.getpid()
def check_pid(self):
        # if the process id changed from when the session was created,
        # a new session needs to be set up since requests isn't multiprocess-safe.
if os.getpid() != self.process_id:
self.session = requests.Session()
self.process_id = os.getpid()
def fetch(self, full_url, body, timeout):
self.check_pid()
resp = self.session.get(full_url, data=body, timeout=timeout)
resp.raise_for_status()
return resp.text
class RemoteScheduler(object):
"""
Scheduler proxy object. Talks to a RemoteSchedulerResponder.
"""
def __init__(self, url='http://localhost:8082/', connect_timeout=None):
assert not url.startswith('http+unix://') or HAS_UNIX_SOCKET, (
'You need to install requests-unixsocket for Unix socket support.'
)
self._url = url.rstrip('/')
config = configuration.get_config()
if connect_timeout is None:
connect_timeout = config.getfloat('core', 'rpc-connect-timeout', 10.0)
self._connect_timeout = connect_timeout
self._rpc_retry_attempts = config.getint('core', 'rpc-retry-attempts', 3)
self._rpc_retry_wait = config.getint('core', 'rpc-retry-wait', 30)
self._rpc_log_retries = config.getboolean('core', 'rpc-log-retries', True)
if HAS_REQUESTS:
self._fetcher = RequestsFetcher(requests.Session())
else:
self._fetcher = URLLibFetcher()
def _wait(self):
if self._rpc_log_retries:
logger.info("Wait for %d seconds" % self._rpc_retry_wait)
time.sleep(self._rpc_retry_wait)
def _fetch(self, url_suffix, body):
full_url = _urljoin(self._url, url_suffix)
last_exception = None
attempt = 0
while attempt < self._rpc_retry_attempts:
attempt += 1
if last_exception:
if self._rpc_log_retries:
logger.info("Retrying attempt %r of %r (max)" % (attempt, self._rpc_retry_attempts))
self._wait() # wait for a bit and retry
try:
response = self._fetcher.fetch(full_url, body, self._connect_timeout)
break
except self._fetcher.raises as e:
last_exception = e
if self._rpc_log_retries:
logger.warning("Failed connecting to remote scheduler %r", self._url,
exc_info=True)
continue
else:
raise RPCError(
"Errors (%d attempts) when connecting to remote scheduler %r" %
(self._rpc_retry_attempts, self._url),
last_exception
)
return response
def _request(self, url, data, attempts=3, allow_null=True):
body = {'data': json.dumps(data)}
for _ in range(attempts):
page = self._fetch(url, body)
response = json.loads(page)["response"]
if allow_null or response is not None:
return response
raise RPCError("Received null response from remote scheduler %r" % self._url)
for method_name, method in RPC_METHODS.items():
setattr(RemoteScheduler, method_name, method)
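# Minimal usage sketch; assumes a central scheduler is reachable at the default
# URL and that 'ping' is among the RPC_METHODS attached above:
#   scheduler = RemoteScheduler('http://localhost:8082')
#   scheduler.ping(worker='example-worker')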
| 33.380117
| 104
| 0.653644
|
import os
import json
import logging
import socket
import time
from luigi.six.moves.urllib.parse import urljoin, urlencode, urlparse
from luigi.six.moves.urllib.request import urlopen
from luigi.six.moves.urllib.error import URLError
from luigi import configuration
from luigi.scheduler import RPC_METHODS
HAS_UNIX_SOCKET = True
HAS_REQUESTS = True
try:
import requests_unixsocket as requests
except ImportError:
HAS_UNIX_SOCKET = False
try:
import requests
except ImportError:
HAS_REQUESTS = False
logger = logging.getLogger('luigi-interface')
def _urljoin(base, url):
parsed = urlparse(base)
scheme = parsed.scheme
return urlparse(
urljoin(parsed._replace(scheme='http').geturl(), url)
)._replace(scheme=scheme).geturl()
class RPCError(Exception):
def __init__(self, message, sub_exception=None):
super(RPCError, self).__init__(message)
self.sub_exception = sub_exception
class URLLibFetcher(object):
raises = (URLError, socket.timeout)
def fetch(self, full_url, body, timeout):
body = urlencode(body).encode('utf-8')
return urlopen(full_url, body, timeout).read().decode('utf-8')
class RequestsFetcher(object):
def __init__(self, session):
from requests import exceptions as requests_exceptions
self.raises = requests_exceptions.RequestException
self.session = session
self.process_id = os.getpid()
def check_pid(self):
if os.getpid() != self.process_id:
self.session = requests.Session()
self.process_id = os.getpid()
def fetch(self, full_url, body, timeout):
self.check_pid()
resp = self.session.get(full_url, data=body, timeout=timeout)
resp.raise_for_status()
return resp.text
class RemoteScheduler(object):
def __init__(self, url='http://localhost:8082/', connect_timeout=None):
assert not url.startswith('http+unix://') or HAS_UNIX_SOCKET, (
'You need to install requests-unixsocket for Unix socket support.'
)
self._url = url.rstrip('/')
config = configuration.get_config()
if connect_timeout is None:
connect_timeout = config.getfloat('core', 'rpc-connect-timeout', 10.0)
self._connect_timeout = connect_timeout
self._rpc_retry_attempts = config.getint('core', 'rpc-retry-attempts', 3)
self._rpc_retry_wait = config.getint('core', 'rpc-retry-wait', 30)
self._rpc_log_retries = config.getboolean('core', 'rpc-log-retries', True)
if HAS_REQUESTS:
self._fetcher = RequestsFetcher(requests.Session())
else:
self._fetcher = URLLibFetcher()
def _wait(self):
if self._rpc_log_retries:
logger.info("Wait for %d seconds" % self._rpc_retry_wait)
time.sleep(self._rpc_retry_wait)
def _fetch(self, url_suffix, body):
full_url = _urljoin(self._url, url_suffix)
last_exception = None
attempt = 0
while attempt < self._rpc_retry_attempts:
attempt += 1
if last_exception:
if self._rpc_log_retries:
logger.info("Retrying attempt %r of %r (max)" % (attempt, self._rpc_retry_attempts))
self._wait() # wait for a bit and retry
try:
response = self._fetcher.fetch(full_url, body, self._connect_timeout)
break
except self._fetcher.raises as e:
last_exception = e
if self._rpc_log_retries:
logger.warning("Failed connecting to remote scheduler %r", self._url,
exc_info=True)
continue
else:
raise RPCError(
"Errors (%d attempts) when connecting to remote scheduler %r" %
(self._rpc_retry_attempts, self._url),
last_exception
)
return response
def _request(self, url, data, attempts=3, allow_null=True):
body = {'data': json.dumps(data)}
for _ in range(attempts):
page = self._fetch(url, body)
response = json.loads(page)["response"]
if allow_null or response is not None:
return response
raise RPCError("Received null response from remote scheduler %r" % self._url)
for method_name, method in RPC_METHODS.items():
setattr(RemoteScheduler, method_name, method)
| true
| true
|
1c4581505fbb614f1ce2848ca80ed21dafdc2751
| 1,094
|
py
|
Python
|
quick_start.py
|
willin007/kucoin_sdk
|
a4967c9f684aa4917a4b9e668d43520307eb9d30
|
[
"MIT"
] | null | null | null |
quick_start.py
|
willin007/kucoin_sdk
|
a4967c9f684aa4917a4b9e668d43520307eb9d30
|
[
"MIT"
] | null | null | null |
quick_start.py
|
willin007/kucoin_sdk
|
a4967c9f684aa4917a4b9e668d43520307eb9d30
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/11/4 10:50 AM
# @Author : william.li
# @File : quick_start.py
# @Software: PyCharm
# MarketData
import asyncio
from kucoin.client import WsToken
from kucoin.ws_client import KucoinWsClient
async def main():
async def deal_msg(msg):
if msg['topic'] == '/spotMarket/level2Depth5:BTC-USDT':
print(msg["data"])
elif msg['topic'] == '/spotMarket/level2Depth5:KCS-USDT':
            print(f'Get KCS level2Depth5: {msg["data"]}')
# is public
client = WsToken()
    # is private
# client = WsToken(key='', secret='', passphrase='', is_sandbox=False, url='')
# is sandbox
# client = WsToken(is_sandbox=True)
ws_client = await KucoinWsClient.create(None, client, deal_msg, private=False)
# await ws_client.subscribe('/market/ticker:BTC-USDT,ETH-USDT')
await ws_client.subscribe('/spotMarket/level2Depth5:KCS-USDT')
while True:
        await asyncio.sleep(60)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
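# Further usage sketch; assumes KucoinWsClient also exposes an unsubscribe()
# counterpart to subscribe() (hypothetical here):
#     await ws_client.subscribe('/market/ticker:BTC-USDT,ETH-USDT')
#     await ws_client.unsubscribe('/spotMarket/level2Depth5:KCS-USDT')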
| 29.567568
| 82
| 0.652651
|
import asyncio
from kucoin.client import WsToken
from kucoin.ws_client import KucoinWsClient
async def main():
async def deal_msg(msg):
if msg['topic'] == '/spotMarket/level2Depth5:BTC-USDT':
print(msg["data"])
elif msg['topic'] == '/spotMarket/level2Depth5:KCS-USDT':
            print(f'Get KCS level2Depth5: {msg["data"]}')
client = WsToken()
ws_client = await KucoinWsClient.create(None, client, deal_msg, private=False)
await ws_client.subscribe('/spotMarket/level2Depth5:KCS-USDT')
while True:
        await asyncio.sleep(60)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| true
| true
|
1c45828c2da100de725a4b389922ca6abe3ce11d
| 1,901
|
py
|
Python
|
setup.py
|
cajfisher/vasppy
|
a460db14163b7db3bce54d754dd476c45a3ed85b
|
[
"MIT"
] | 28
|
2017-02-16T13:22:34.000Z
|
2021-04-29T06:10:10.000Z
|
setup.py
|
cajfisher/vasppy
|
a460db14163b7db3bce54d754dd476c45a3ed85b
|
[
"MIT"
] | 15
|
2016-05-09T13:08:42.000Z
|
2021-08-09T10:59:58.000Z
|
setup.py
|
cajfisher/vasppy
|
a460db14163b7db3bce54d754dd476c45a3ed85b
|
[
"MIT"
] | 25
|
2015-10-12T11:29:22.000Z
|
2021-08-20T17:33:27.000Z
|
"""
vasppy: Python utilities for working with VASP inputs and outputs.
"""
from setuptools import setup, find_packages
from vasppy.version import __version__ as VERSION
readme = 'README.md'
long_description = open(readme).read()
scripts = ['check_species',
'murnfit',
'vasp_summary',
'poscar_to_cif',
'potcar_spec',
'effective_mass',
'fat_bands',
'pimaim_to_poscar',
'pimaim_to_xtl',
'poscar_sort',
'poscar_to_pimaim',
'poscar_to_xtl',
'proc_poscar',
'rotate_poscar',
'spacegroup',
'vasp_grid',
'xdatcar_to_disp',
'xdatcar_to_poscart',
'xdatcar_to_rdf']
setup(
name='vasppy',
version=VERSION,
description='Python utilities for working with VASP inputs and outputs',
long_description=long_description,
long_description_content_type="text/markdown",
author='Benjamin J. Morgan',
author_email='bjm42@bath.ac.uk',
url='https://github.com/bjmorgan/vasppy',
download_url='https://github.com/bjmorgan/vasppy/archive/{}.tar.gz'.format(VERSION),
keywords=['vasp'], # keywords
packages=find_packages(exclude=['docs', 'tests*']),
package_data={'vasppy': ['data/*.yaml']},
entry_points={'console_scripts':[
'{} = vasppy.scripts.{}:main'.format(s, s) for s in scripts]},
license='MIT',
install_requires=['monty',
'numpy>=1.16.2',
'pandas',
'pymatgen>=2022.0.0',
'PyYAML',
'coverage==4.3.4',
'codeclimate-test-reporter',
'fortranformat',
'scipy>=1.4.1',
'tqdm',
'lxml'],
python_requires='>=3.7'
)
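# For illustration: the console_scripts comprehension above expands to entries
# of the form 'murnfit = vasppy.scripts.murnfit:main', so each name listed in
# `scripts` must correspond to a module exposing main() under vasppy/scripts/.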
| 31.683333
| 88
| 0.538138
|
from setuptools import setup, find_packages
from vasppy.version import __version__ as VERSION
readme = 'README.md'
long_description = open(readme).read()
scripts = ['check_species',
'murnfit',
'vasp_summary',
'poscar_to_cif',
'potcar_spec',
'effective_mass',
'fat_bands',
'pimaim_to_poscar',
'pimaim_to_xtl',
'poscar_sort',
'poscar_to_pimaim',
'poscar_to_xtl',
'proc_poscar',
'rotate_poscar',
'spacegroup',
'vasp_grid',
'xdatcar_to_disp',
'xdatcar_to_poscart',
'xdatcar_to_rdf']
setup(
name='vasppy',
version=VERSION,
description='Python utilities for working with VASP inputs and outputs',
long_description=long_description,
long_description_content_type="text/markdown",
author='Benjamin J. Morgan',
author_email='bjm42@bath.ac.uk',
url='https://github.com/bjmorgan/vasppy',
download_url='https://github.com/bjmorgan/vasppy/archive/{}.tar.gz'.format(VERSION),
keywords=['vasp'],
packages=find_packages(exclude=['docs', 'tests*']),
package_data={'vasppy': ['data/*.yaml']},
entry_points={'console_scripts':[
'{} = vasppy.scripts.{}:main'.format(s, s) for s in scripts]},
license='MIT',
install_requires=['monty',
'numpy>=1.16.2',
'pandas',
'pymatgen>=2022.0.0',
'PyYAML',
'coverage==4.3.4',
'codeclimate-test-reporter',
'fortranformat',
'scipy>=1.4.1',
'tqdm',
'lxml'],
python_requires='>=3.7'
)
| true
| true
|
1c4582bb37d8bf82a9eadb8ac9e0bbddd1dde76a
| 7,194
|
py
|
Python
|
hack/boilerplate/boilerplate.py
|
moelsayed/kubeone
|
bec424b09d2d0cb5d97347469c947ab66c5c1d91
|
[
"Apache-2.0"
] | 1
|
2020-02-13T17:46:28.000Z
|
2020-02-13T17:46:28.000Z
|
hack/boilerplate/boilerplate.py
|
moelsayed/kubeone
|
bec424b09d2d0cb5d97347469c947ab66c5c1d91
|
[
"Apache-2.0"
] | null | null | null |
hack/boilerplate/boilerplate.py
|
moelsayed/kubeone
|
bec424b09d2d0cb5d97347469c947ab66c5c1d91
|
[
"Apache-2.0"
] | 1
|
2020-05-06T15:33:38.000Z
|
2020-05-06T15:33:38.000Z
|
#!/usr/bin/env python
# Copyright 2019 The KubeOne Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import datetime
import difflib
import glob
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
"--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
"--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
"-v", "--verbose",
help="give verbose output regarding why a file does not pass",
action="store_true")
args = parser.parse_args()
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def is_generated_file(filename, data, regexs):
for d in skipped_ungenerated_files:
if d in filename:
return False
p = regexs["generated"]
return p.search(data)
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except Exception as exc:
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
return False
data = f.read()
f.close()
# determine if the file is automatically generated
generated = is_generated_file(filename, data, regexs)
basename = os.path.basename(filename)
if generated:
extension = "generatego"
else:
extension = file_extension(filename)
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove extra content from the top of files
if extension == "go" or extension == "generatego":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
elif extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
print('File %s smaller than reference (%d < %d)' %
(filename, len(data), len(ref)),
file=verbose_out)
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
if generated:
                print('File %s has the YEAR field, but it should not be in a generated file' % filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but is missing the actual year' % filename, file=verbose_out)
return False
if not generated:
# Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
if args.verbose:
print(file=verbose_out)
for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
print(line, file=verbose_out)
print(file=verbose_out)
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = [
'bin',
'Godeps',
'.git',
'vendor',
'hack/boilerplate/test',
'pkg/apis/kubeadm/v1beta1',
'pkg/apis/kubeadm/v1beta2',
]
# list all the files contain 'DO NOT EDIT', but are not generated
skipped_ungenerated_files = ['hack/boilerplate/boilerplate.py']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_dates():
years = datetime.datetime.now().year
return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))
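# For example, run in 2019, get_dates() returns '(2014|2015|2016|2017|2018|2019)'.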
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile('YEAR')
    # get_dates returns the years from 2014 up to the current year as a regex like "(2014|2015|2016|2017|2018)";
# company holder names can be anything
regexs["date"] = re.compile(get_dates())
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(
r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
# Search for generated files
regexs["generated"] = re.compile('DO NOT EDIT')
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
return 0
if __name__ == "__main__":
sys.exit(main())
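# Example invocation, a minimal sketch assuming it is run from the repository
# root with the default hack/boilerplate directory present:
#   python hack/boilerplate/boilerplate.py -v
# Every filename printed to stdout failed the license header check.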
| 29.604938
| 124
| 0.630108
|
from __future__ import print_function
import argparse
import datetime
import difflib
import glob
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
"--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
"--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
"-v", "--verbose",
help="give verbose output regarding why a file does not pass",
action="store_true")
args = parser.parse_args()
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def is_generated_file(filename, data, regexs):
for d in skipped_ungenerated_files:
if d in filename:
return False
p = regexs["generated"]
return p.search(data)
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except Exception as exc:
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
return False
data = f.read()
f.close()
generated = is_generated_file(filename, data, regexs)
basename = os.path.basename(filename)
if generated:
extension = "generatego"
else:
extension = file_extension(filename)
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
if extension == "go" or extension == "generatego":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
elif extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
if len(ref) > len(data):
print('File %s smaller than reference (%d < %d)' %
(filename, len(data), len(ref)),
file=verbose_out)
return False
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
if generated:
                print('File %s has the YEAR field, but it should not be in a generated file' % filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but is missing the actual year' % filename, file=verbose_out)
return False
if not generated:
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
if ref != data:
print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
if args.verbose:
print(file=verbose_out)
for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
print(line, file=verbose_out)
print(file=verbose_out)
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = [
'bin',
'Godeps',
'.git',
'vendor',
'hack/boilerplate/test',
'pkg/apis/kubeadm/v1beta1',
'pkg/apis/kubeadm/v1beta2',
]
# list all the files contain 'DO NOT EDIT', but are not generated
skipped_ungenerated_files = ['hack/boilerplate/boilerplate.py']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_dates():
years = datetime.datetime.now().year
return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))
def get_regexs():
regexs = {}
regexs["year"] = re.compile('YEAR')
    # get_dates returns the years from 2014 up to the current year as a regex like "(2014|2015|2016|2017|2018)";
# company holder names can be anything
regexs["date"] = re.compile(get_dates())
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(
r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
# Search for generated files
regexs["generated"] = re.compile('DO NOT EDIT')
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
return 0
if __name__ == "__main__":
sys.exit(main())
| true
| true
|
1c45839120b9c193c462707af258c3c9bfffdfa3
| 568
|
py
|
Python
|
tests/test_level1/test_visited.py
|
kianmeng/soupsieve
|
a8640aad6ae0476e6b62f4f15e12ad4efc7605c4
|
[
"MIT"
] | 130
|
2018-12-27T06:00:32.000Z
|
2022-03-29T05:47:18.000Z
|
tests/test_level1/test_visited.py
|
kianmeng/soupsieve
|
a8640aad6ae0476e6b62f4f15e12ad4efc7605c4
|
[
"MIT"
] | 157
|
2018-12-07T07:44:15.000Z
|
2022-02-05T16:20:08.000Z
|
tests/test_level1/test_visited.py
|
kianmeng/soupsieve
|
a8640aad6ae0476e6b62f4f15e12ad4efc7605c4
|
[
"MIT"
] | 32
|
2018-12-31T03:11:55.000Z
|
2022-03-06T09:06:43.000Z
|
"""Test visited selectors."""
from .. import util
class TestVisited(util.TestCase):
"""Test visited selectors."""
def test_visited(self):
"""Test visited."""
markup = """
<div>
<p>Some text <span id="1" class="foo:bar:foobar"> in a paragraph</span>.
<a id="2" class="bar" href="http://google.com">Link</a>
<a id="3">Placeholder text.</a>
</p>
</div>
"""
self.assert_selector(
markup,
"a:visited",
[],
flags=util.HTML
)
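        # Note: soupsieve evaluates markup statically, outside a browser, so
        # `:visited` is implemented to never match; the empty result list above
        # is the expected outcome rather than a missing feature.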
| 21.846154
| 80
| 0.482394
|
from .. import util
class TestVisited(util.TestCase):
def test_visited(self):
markup = """
<div>
<p>Some text <span id="1" class="foo:bar:foobar"> in a paragraph</span>.
<a id="2" class="bar" href="http://google.com">Link</a>
<a id="3">Placeholder text.</a>
</p>
</div>
"""
self.assert_selector(
markup,
"a:visited",
[],
flags=util.HTML
)
| true
| true
|
1c4584ac1bc01ab917fbb00db92b230e45196a27
| 5,228
|
py
|
Python
|
export.py
|
OleksandrBlack/safecoinnodes
|
0021edc8e72e078fcd7bedb465292c96caeeb148
|
[
"MIT"
] | null | null | null |
export.py
|
OleksandrBlack/safecoinnodes
|
0021edc8e72e078fcd7bedb465292c96caeeb148
|
[
"MIT"
] | null | null | null |
export.py
|
OleksandrBlack/safecoinnodes
|
0021edc8e72e078fcd7bedb465292c96caeeb148
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# export.py - Exports enumerated data for reachable nodes into a JSON file.
#
# Copyright (c) Addy Yeow Chin Heng <ayeowch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Exports enumerated data for reachable nodes into a JSON file.
"""
import json
import logging
import os
import sys
import time
from binascii import hexlify, unhexlify
from ConfigParser import ConfigParser
from utils import new_redis_conn
REDIS_CONN = None
CONF = {}
def get_row(node):
"""
Returns enumerated row data from Redis for the specified node.
"""
# address, port, version, user_agent, timestamp, services
node = eval(node)
address = node[0]
port = node[1]
services = node[-1]
height = REDIS_CONN.get('height:{}-{}-{}'.format(address, port, services))
if height is None:
height = (0,)
else:
height = (int(height),)
hostname = REDIS_CONN.hget('resolve:{}'.format(address), 'hostname')
hostname = (hostname,)
geoip = REDIS_CONN.hget('resolve:{}'.format(address), 'geoip')
if geoip is None:
# city, country, latitude, longitude, timezone, asn, org
geoip = (None, None, 0.0, 0.0, None, None, None)
else:
geoip = eval(geoip)
return node + height + hostname + geoip
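# Each row returned above concatenates the tuple fields noted in the comments:
# (address, port, version, user_agent, timestamp, services, height, hostname,
#  city, country, latitude, longitude, timezone, asn, org).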
MAX_DUMPED_SNAPSHOTS = 500
def export_nodes(nodes, timestamp):
"""
Merges enumerated data for the specified nodes and exports them into
timestamp-prefixed JSON file.
"""
rows = []
start = time.time()
for node in nodes:
row = get_row(node)
rows.append(row)
end = time.time()
elapsed = end - start
logging.info("Elapsed: %d", elapsed)
dump = os.path.join(CONF['export_dir'], "{}.json".format(timestamp))
    with open(dump, 'w') as fh:
        fh.write(json.dumps(rows, encoding="latin-1"))
REDIS_CONN.lpush('dumped_snapshots', timestamp)
REDIS_CONN.ltrim('dumped_snapshots', 0, MAX_DUMPED_SNAPSHOTS)
logging.info("Wrote %s", dump)
def init_conf(argv):
"""
Populates CONF with key-value pairs from configuration file.
"""
conf = ConfigParser()
conf.read(argv[1])
CONF['logfile'] = conf.get('export', 'logfile')
CONF['magic_number'] = unhexlify(conf.get('export', 'magic_number'))
CONF['db'] = conf.getint('export', 'db')
CONF['debug'] = conf.getboolean('export', 'debug')
CONF['export_dir'] = conf.get('export', 'export_dir')
if not os.path.exists(CONF['export_dir']):
os.makedirs(CONF['export_dir'])
def main(argv):
if len(argv) < 2 or not os.path.exists(argv[1]):
print("Usage: export.py [config]")
return 1
# Initialize global conf
init_conf(argv)
# Initialize logger
loglevel = logging.INFO
if CONF['debug']:
loglevel = logging.DEBUG
logformat = ("%(asctime)s,%(msecs)05.1f %(levelname)s (%(funcName)s) "
"%(message)s")
logging.basicConfig(level=loglevel,
format=logformat,
filename=CONF['logfile'],
filemode='w')
print("Log: {}, press CTRL+C to terminate..".format(CONF['logfile']))
global REDIS_CONN
REDIS_CONN = new_redis_conn(db=CONF['db'])
subscribe_key = 'resolve:{}'.format(hexlify(CONF['magic_number']))
publish_key = 'export:{}'.format(hexlify(CONF['magic_number']))
pubsub = REDIS_CONN.pubsub()
pubsub.subscribe(subscribe_key)
while True:
msg = pubsub.get_message()
if msg is None:
time.sleep(0.001) # 1 ms artificial intrinsic latency.
continue
# 'resolve' message is published by resolve.py after resolving hostname
# and GeoIP data for all reachable nodes.
if msg['channel'] == subscribe_key and msg['type'] == 'message':
timestamp = int(msg['data']) # From ping.py's 'snapshot' message
logging.info("Timestamp: %d", timestamp)
nodes = REDIS_CONN.smembers('opendata')
logging.info("Nodes: %d", len(nodes))
export_nodes(nodes, timestamp)
REDIS_CONN.publish(publish_key, timestamp)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
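# Illustrative config passed as argv[1]; key names match init_conf() above,
# values are made-up examples:
#   [export]
#   logfile = log/export.log
#   magic_number = f9beb4d9
#   db = 0
#   debug = True
#   export_dir = data/export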
| 32.271605
| 79
| 0.655318
|
import json
import logging
import os
import sys
import time
from binascii import hexlify, unhexlify
from ConfigParser import ConfigParser
from utils import new_redis_conn
REDIS_CONN = None
CONF = {}
def get_row(node):
node = eval(node)
address = node[0]
port = node[1]
services = node[-1]
height = REDIS_CONN.get('height:{}-{}-{}'.format(address, port, services))
if height is None:
height = (0,)
else:
height = (int(height),)
hostname = REDIS_CONN.hget('resolve:{}'.format(address), 'hostname')
hostname = (hostname,)
geoip = REDIS_CONN.hget('resolve:{}'.format(address), 'geoip')
if geoip is None:
geoip = (None, None, 0.0, 0.0, None, None, None)
else:
geoip = eval(geoip)
return node + height + hostname + geoip
MAX_DUMPED_SNAPSHOTS = 500
def export_nodes(nodes, timestamp):
rows = []
start = time.time()
for node in nodes:
row = get_row(node)
rows.append(row)
end = time.time()
elapsed = end - start
logging.info("Elapsed: %d", elapsed)
dump = os.path.join(CONF['export_dir'], "{}.json".format(timestamp))
    with open(dump, 'w') as fh:
        fh.write(json.dumps(rows, encoding="latin-1"))
REDIS_CONN.lpush('dumped_snapshots', timestamp)
REDIS_CONN.ltrim('dumped_snapshots', 0, MAX_DUMPED_SNAPSHOTS)
logging.info("Wrote %s", dump)
def init_conf(argv):
conf = ConfigParser()
conf.read(argv[1])
CONF['logfile'] = conf.get('export', 'logfile')
CONF['magic_number'] = unhexlify(conf.get('export', 'magic_number'))
CONF['db'] = conf.getint('export', 'db')
CONF['debug'] = conf.getboolean('export', 'debug')
CONF['export_dir'] = conf.get('export', 'export_dir')
if not os.path.exists(CONF['export_dir']):
os.makedirs(CONF['export_dir'])
def main(argv):
if len(argv) < 2 or not os.path.exists(argv[1]):
print("Usage: export.py [config]")
return 1
init_conf(argv)
loglevel = logging.INFO
if CONF['debug']:
loglevel = logging.DEBUG
logformat = ("%(asctime)s,%(msecs)05.1f %(levelname)s (%(funcName)s) "
"%(message)s")
logging.basicConfig(level=loglevel,
format=logformat,
filename=CONF['logfile'],
filemode='w')
print("Log: {}, press CTRL+C to terminate..".format(CONF['logfile']))
global REDIS_CONN
REDIS_CONN = new_redis_conn(db=CONF['db'])
subscribe_key = 'resolve:{}'.format(hexlify(CONF['magic_number']))
publish_key = 'export:{}'.format(hexlify(CONF['magic_number']))
pubsub = REDIS_CONN.pubsub()
pubsub.subscribe(subscribe_key)
while True:
msg = pubsub.get_message()
if msg is None:
time.sleep(0.001)
continue
if msg['channel'] == subscribe_key and msg['type'] == 'message':
timestamp = int(msg['data'])
logging.info("Timestamp: %d", timestamp)
nodes = REDIS_CONN.smembers('opendata')
logging.info("Nodes: %d", len(nodes))
export_nodes(nodes, timestamp)
REDIS_CONN.publish(publish_key, timestamp)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| true
| true
|
1c4584cb83547e7b831785c85e43413291a71a8c
| 2,135
|
py
|
Python
|
Stopwatch.py
|
arapawa/stopwatch-game
|
5ee64e04a8dc15ead2dcd8a661105ae1c9087317
|
[
"MIT"
] | null | null | null |
Stopwatch.py
|
arapawa/stopwatch-game
|
5ee64e04a8dc15ead2dcd8a661105ae1c9087317
|
[
"MIT"
] | 1
|
2016-12-30T06:59:12.000Z
|
2016-12-30T06:59:12.000Z
|
Stopwatch.py
|
arapawa/stopwatch-game
|
5ee64e04a8dc15ead2dcd8a661105ae1c9087317
|
[
"MIT"
] | null | null | null |
# "Stopwatch: The Game"
# tenth of a second between every tick
# every time timer ticks, it will update a global variable by one
import simplegui
# define global variables
time = 0
success = 0
attempts = 0
counter = 0
# variable to ensure score can only be increased after stopwatch was running
stopwatch_running = False
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(t):
    a_time = t // 600
    b_time = ((t // 10) % 60) // 10
    c_time = ((t // 10) % 60) % 10
    d_time = t % 10
    return str(a_time) + ":" + str(b_time) + str(c_time) + "." + str(d_time)
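# Worked example: t = 625 ticks is 62.5 seconds; 625 // 600 = 1, (625 // 10) % 60 = 2,
# so b_time = 0 and c_time = 2, and 625 % 10 = 5, giving format(625) == "1:02.5".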
# define event handlers for buttons; "Start", "Stop", "Reset"
def button_start():
    # mark the stopwatch as running, then start ticking
    global stopwatch_running
    stopwatch_running = True
    timer.start()
def button_stop():
timer.stop()
global success, attempts, stopwatch_running
if stopwatch_running == True:
if (time % 10) == 0:
success += 1
attempts += 1
else:
attempts += 1
else:
return
stopwatch_running = False
def button_reset():
global time, success, attempts
time = 0
success = 0
attempts = 0
return time, success, attempts
# define event handler for timer with 0.1 sec interval
# stopwatch timer event handler
def stopwatch_timer():
    # timer tick handler: advance elapsed time by one tenth of a second
    global time
    time += 1
# define draw handler
def draw_handler(canvas):
# stopwatch display on canvas
canvas.draw_text(format(time), [90, 140], 50, "White")
# score display
canvas.draw_text(str(success) + "/" + str(attempts), [220, 50], 25, "Red")
# create frame
frame = simplegui.create_frame("Stopwatch: The Game", 300, 200)
# register event handlers
timer = simplegui.create_timer(100, stopwatch_timer)
start = frame.add_button("Start", button_start, 100)
stop = frame.add_button("Stop", button_stop, 100)
reset = frame.add_button("Reset", button_reset, 100)
frame.set_draw_handler(draw_handler)
# start frame
frame.start()
| 26.036585
| 79
| 0.640281
|
import simplegui
time = 0
success = 0
attempts = 0
counter = 0
stopwatch_running = False
def format(t):
    a_time = t // 600
    b_time = ((t // 10) % 60) // 10
    c_time = ((t // 10) % 60) % 10
    d_time = t % 10
    return str(a_time) + ":" + str(b_time) + str(c_time) + "." + str(d_time)
def button_start():
    global stopwatch_running
    stopwatch_running = True
    timer.start()
def button_stop():
timer.stop()
global success, attempts, stopwatch_running
if stopwatch_running == True:
if (time % 10) == 0:
success += 1
attempts += 1
else:
attempts += 1
else:
return
stopwatch_running = False
def button_reset():
global time, success, attempts
time = 0
success = 0
attempts = 0
return time, success, attempts
def stopwatch_timer():
    global time
    time += 1
def draw_handler(canvas):
canvas.draw_text(format(time), [90, 140], 50, "White")
canvas.draw_text(str(success) + "/" + str(attempts), [220, 50], 25, "Red")
frame = simplegui.create_frame("Stopwatch: The Game", 300, 200)
timer = simplegui.create_timer(100, stopwatch_timer)
start = frame.add_button("Start", button_start, 100)
stop = frame.add_button("Stop", button_stop, 100)
reset = frame.add_button("Reset", button_reset, 100)
frame.set_draw_handler(draw_handler)
frame.start()
| true
| true
|
1c458641abbee4ca565c0de49e6620d72012ccb6
| 20,836
|
py
|
Python
|
cripts/relationships/handlers.py
|
lakiw/cripts
|
43f62891a3724e1ec60629887d97c421fb302163
|
[
"MIT"
] | 2
|
2017-04-06T12:26:11.000Z
|
2018-11-05T19:17:15.000Z
|
cripts/relationships/handlers.py
|
lakiw/cripts
|
43f62891a3724e1ec60629887d97c421fb302163
|
[
"MIT"
] | 9
|
2016-09-28T10:19:10.000Z
|
2017-02-24T17:58:43.000Z
|
cripts/relationships/handlers.py
|
lakiw/cripts
|
43f62891a3724e1ec60629887d97c421fb302163
|
[
"MIT"
] | null | null | null |
import datetime
from dateutil.parser import parse
from cripts.core.class_mapper import class_from_id
def get_relationships(obj=None, type_=None, id_=None, analyst=None):
"""
Get relationships for a top-level object.
:param obj: The top-level object to get relationships for.
:type obj: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param type_: The top-level object type to get relationships for.
:type type_: str
:param id_: The ObjectId of the top-level object.
:type id_: str
:param analyst: The user requesting the relationships.
:type analyst: str
:returns: dict
"""
if obj:
return obj.sort_relationships("%s" % analyst, meta=True)
elif type_ and id_:
obj = class_from_id(type_, id_)
if not obj:
return {}
return obj.sort_relationships("%s" % analyst, meta=True)
else:
return {}
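# Usage sketch; 'Actor' stands in for any valid top-level object type string,
# and object_id for a real ObjectId (both hypothetical):
#   rels = get_relationships(type_='Actor', id_=object_id, analyst='analyst1')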
def forge_relationship(type_=None, id_=None,
class_=None, right_type=None,
right_id=None, right_class=None,
rel_type=None, rel_date=None,
user=None, rel_reason="",
rel_confidence='unknown', get_rels=False, **kwargs):
"""
Forge a relationship between two top-level objects.
:param type_: The type of first top-level object to relate to.
:type type_: str
:param id_: The ObjectId of the first top-level object.
:type id_: str
:param class_: The first top-level object to relate to.
:type class_: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param right_type: The type of second top-level object to relate to.
:type right_type: str
:param right_id: The ObjectId of the second top-level object.
:type right_id: str
:param right_class: The second top-level object to relate to.
:type right_class: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
:param user: The user forging this relationship.
:type user: str
:param rel_reason: The reason for the relationship.
:type rel_reason: str
:param rel_confidence: The confidence of the relationship.
:type rel_confidence: str
:param get_rels: Return the relationships after forging.
:type get_rels: boolean
:returns: dict with keys:
"success" (boolean)
"message" (str if fail, EmbeddedObject if success)
"relationships" (dict)
"""
if rel_date == 'None':
rel_date = None
elif isinstance(rel_date, basestring) and rel_date != '':
rel_date = parse(rel_date, fuzzy=True)
elif not isinstance(rel_date, datetime.datetime):
rel_date = None
if not class_:
if type_ and id_:
class_ = class_from_id(type_, id_)
if not class_:
return {'success': False, 'message': "Failed to get left TLO"}
if not right_class:
if right_type and right_id:
print ("right type:" + str(right_type))
print ("right id:" + str(right_id))
right_class = class_from_id(right_type, right_id)
if not right_class:
return {'success': False, 'message': "Failed to get right TLO"}
try:
# forge relationship
results = class_.add_relationship(right_class, rel_type, rel_date,
user, rel_confidence, rel_reason)
except Exception as e:
return {'success': False, 'message': e}
if results['success']:
class_.update(add_to_set__relationships=results['message'])
if get_rels:
results['relationships'] = class_.sort_relationships("%s" % user,
meta=True)
return results
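# Usage sketch; type names and ids are hypothetical placeholders:
#   result = forge_relationship(type_='Actor', id_=left_id,
#                               right_type='Email', right_id=right_id,
#                               rel_type='Related_To', user='analyst1',
#                               rel_confidence='medium', get_rels=True)
#   if result['success']:
#       relationships = result['relationships']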
def delete_all_relationships(left_class=None, left_type=None,
left_id=None, analyst=None):
"""
Delete all relationships for this top-level object.
:param left_class: The top-level object to delete relationships for.
:type left_class: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param left_type: The type of the top-level object.
:type left_type: str
:param left_id: The ObjectId of the top-level object.
:type left_id: str
:param analyst: The user deleting these relationships.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
if not left_class:
if left_type and left_id:
left_class = class_from_id(left_type, left_id)
if not left_class:
return {'success': False,
'message': "Unable to get object."}
else:
return {'success': False,
'message': "Need a valid left type and id"}
return left_class.delete_all_relationships()
def delete_relationship(left_class=None, right_class=None,
left_type=None, left_id=None,
right_type=None, right_id=None,
rel_type=None, rel_date=None,
analyst=None, get_rels=True):
"""
Delete a relationship between two top-level objects.
:param left_class: The first top-level object.
:type left_class: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param right_class: The second top-level object.
:type right_class: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param left_type: The type of first top-level object.
:type left_type: str
:param left_id: The ObjectId of the first top-level object.
:type left_id: str
:param right_type: The type of second top-level object.
:type right_type: str
:param right_id: The ObjectId of the second top-level object.
:type right_id: str
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
:param analyst: The user deleting this relationship.
:type analyst: str
:param get_rels: Return the relationships after forging.
:type get_rels: boolean
:returns: dict with keys "success" (boolean) and "message" (str if
failed, dict if successful)
"""
if rel_date is None or rel_date == 'None':
rel_date = None
elif isinstance(rel_date, basestring) and rel_date != '':
rel_date = parse(rel_date, fuzzy=True)
elif not isinstance(rel_date, datetime.datetime):
rel_date = None
if not left_class:
if left_type and left_id:
left_class = class_from_id(left_type, left_id)
if not left_class:
return {'success': False,
'message': "Unable to get object."}
else:
return {'success': False,
'message': "Need a valid left type and id"}
# delete relationship
if right_class:
results = left_class.delete_relationship(rel_item=right_class,
rel_type=rel_type,
rel_date=rel_date,
analyst=analyst)
else:
if right_type and right_id:
results = left_class.delete_relationship(type_=right_type,
rel_id=right_id,
rel_type=rel_type,
rel_date=rel_date,
analyst=analyst)
else:
return {'success': False,
'message': "Need a valid right type and id"}
if results['success']:
left_class.save(username=analyst)
if get_rels:
results['relationships'] = left_class.sort_relationships("%s" % analyst, meta=True)
return results
def update_relationship_types(left_class=None, right_class=None,
left_type=None, left_id=None,
right_type=None, right_id=None,
rel_type=None, rel_date=None,
new_type=None,analyst=None):
"""
Update the relationship type between two top-level objects.
:param left_class: The first top-level object.
:type left_class: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param right_class: The second top-level object.
:type right_class: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param left_type: The type of first top-level object.
:type left_type: str
:param left_id: The ObjectId of the first top-level object.
:type left_id: str
:param right_type: The type of second top-level object.
:type right_type: str
:param right_id: The ObjectId of the second top-level object.
:type right_id: str
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
:param new_type: The new type of relationship.
:type new_type: str
:param analyst: The user updating this relationship.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
if rel_date is None or rel_date == 'None':
rel_date = None
elif isinstance(rel_date, basestring) and rel_date != '':
rel_date = parse(rel_date, fuzzy=True)
elif not isinstance(rel_date, datetime.datetime):
rel_date = None
if not left_class:
if left_type and left_id:
left_class = class_from_id(left_type, left_id)
if not left_class:
return {'success': False,
'message': "Unable to get object."}
else:
return {'success': False,
'message': "Need a valid left type and id"}
# update relationship
if right_class:
results = left_class.edit_relationship_type(rel_item=right_class,
rel_type=rel_type,
rel_date=rel_date,
new_type=new_type,
analyst=analyst)
left_class.save(username=analyst)
right_class.save(username=analyst)
else:
if right_type and right_id:
results = left_class.edit_relationship_type(type_=right_type,
rel_id=right_id,
rel_type=rel_type,
rel_date=rel_date,
new_type=new_type,
analyst=analyst)
left_class.save(username=analyst)
else:
return {'success': False,
'message': "Need a valid right type and id"}
return results
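# --- Editor's note (illustrative sketch, not part of the original module) ---
# Changing the type of an existing relationship: rel_type (plus the optional
# rel_date) identifies the stored relationship, new_type is its replacement.
# All values below are hypothetical placeholders.
#
#   results = update_relationship_types(left_type="Sample",
#                                       left_id="5d41402abc4b2a76b9719d91",
#                                       right_type="Domain",
#                                       right_id="5d41402abc4b2a76b9719d92",
#                                       rel_type="Related_To",
#                                       new_type="Connected_To",
#                                       analyst="analyst1")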
def update_relationship_confidences(left_class=None, right_class=None,
left_type=None, left_id=None,
right_type=None, right_id=None,
rel_type=None, rel_date=None,
new_type=None,analyst=None,
new_confidence='unknown'):
"""
    Update the relationship confidence between two top-level objects.
:param left_class: The first top-level object.
:type left_class: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param right_class: The second top-level object.
:type right_class: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param left_type: The type of first top-level object.
:type left_type: str
:param left_id: The ObjectId of the first top-level object.
:type left_id: str
:param right_type: The type of second top-level object.
:type right_type: str
:param right_id: The ObjectId of the second top-level object.
:type right_id: str
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
:param analyst: The user updating this relationship.
:type analyst: str
:param new_confidence: The new confidence level.
:type new_confidence: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
if rel_date is None or rel_date == 'None':
rel_date = None
elif isinstance(rel_date, basestring) and rel_date != '':
rel_date = parse(rel_date, fuzzy=True)
elif not isinstance(rel_date, datetime.datetime):
rel_date = None
if not left_class:
if left_type and left_id:
left_class = class_from_id(left_type, left_id)
else:
return {'success': False,
'message': "Need a valid left type and id"}
# update relationship
if right_class:
results = left_class.edit_relationship_confidence(rel_item=right_class,
rel_type=rel_type,
rel_date=rel_date,
new_confidence=new_confidence,
analyst=analyst)
left_class.save(username=analyst)
right_class.save(username=analyst)
else:
if right_type and right_id:
results = left_class.edit_relationship_confidence(type_=right_type,
rel_id=right_id,
rel_type=rel_type,
rel_date=rel_date,
new_confidence=new_confidence,
analyst=analyst)
left_class.save(username=analyst)
else:
return {'success': False,
'message': "Need a valid right type and id"}
return results
def update_relationship_reasons(left_class=None, right_class=None,
left_type=None, left_id=None,
right_type=None, right_id=None,
rel_type=None, rel_date=None,
new_type=None,analyst=None, new_reason="N/A"):
"""
    Update the relationship reason between two top-level objects.
:param left_class: The first top-level object.
:type left_class: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param right_class: The second top-level object.
:type right_class: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param left_type: The type of first top-level object.
:type left_type: str
:param left_id: The ObjectId of the first top-level object.
:type left_id: str
:param right_type: The type of second top-level object.
:type right_type: str
:param right_id: The ObjectId of the second top-level object.
:type right_id: str
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
    :param analyst: The user updating this relationship.
    :type analyst: str
    :param new_reason: The new reason for the relationship.
    :type new_reason: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
if rel_date is None or rel_date == 'None':
rel_date = None
elif isinstance(rel_date, basestring) and rel_date != '':
rel_date = parse(rel_date, fuzzy=True)
elif not isinstance(rel_date, datetime.datetime):
rel_date = None
if not left_class:
if left_type and left_id:
left_class = class_from_id(left_type, left_id)
else:
return {'success': False,
'message': "Need a valid left type and id"}
# update relationship
if right_class:
results = left_class.edit_relationship_reason(rel_item=right_class,
rel_type=rel_type,
rel_date=rel_date,
new_reason=new_reason,
analyst=analyst)
left_class.save(username=analyst)
right_class.save(username=analyst)
else:
if right_type and right_id:
results = left_class.edit_relationship_reason(type_=right_type,
rel_id=right_id,
rel_type=rel_type,
rel_date=rel_date,
new_reason=new_reason,
analyst=analyst)
left_class.save(username=analyst)
else:
return {'success': False,
'message': "Need a valid right type and id"}
return results
def update_relationship_dates(left_class=None, right_class=None,
left_type=None, left_id=None,
right_type=None, right_id=None,
rel_type=None, rel_date=None,
new_date=None,analyst=None):
"""
Update the relationship date between two top-level objects.
:param left_class: The first top-level object.
:type left_class: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param right_class: The second top-level object.
:type right_class: :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param left_type: The type of first top-level object.
:type left_type: str
:param left_id: The ObjectId of the first top-level object.
:type left_id: str
:param right_type: The type of second top-level object.
:type right_type: str
:param right_id: The ObjectId of the second top-level object.
:type right_id: str
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
:param new_date: The new date of the relationship.
:type new_date: str
:param analyst: The user updating this relationship.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
if rel_date is None or rel_date == 'None':
rel_date = None
elif isinstance(rel_date, basestring) and rel_date != '':
rel_date = parse(rel_date, fuzzy=True)
elif not isinstance(rel_date, datetime.datetime):
rel_date = None
if new_date is None or new_date == 'None':
new_date = None
elif isinstance(new_date, basestring) and new_date != '':
new_date = parse(new_date, fuzzy=True)
elif not isinstance(new_date, datetime.datetime):
new_date = None
if not left_class:
if left_type and left_id:
left_class = class_from_id(left_type, left_id)
if not left_class:
return {'success': False,
'message': "Unable to get object."}
else:
return {'success': False,
'message': "Need a valid left type and id"}
# update relationship
if right_class:
results = left_class.edit_relationship_date(rel_item=right_class,
rel_type=rel_type,
rel_date=rel_date,
new_date=new_date,
analyst=analyst)
left_class.save(username=analyst)
right_class.save(username=analyst)
else:
if right_type and right_id:
results = left_class.edit_relationship_date(type_=right_type,
rel_id=right_id,
rel_type=rel_type,
rel_date=rel_date,
new_date=new_date,
analyst=analyst)
left_class.save(username=analyst)
else:
return {'success': False,
'message': "Need a valid right type and id"}
return results
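# --- Editor's note (illustrative sketch, not part of the original module) ---
# update_relationship_confidences(), update_relationship_reasons() and
# update_relationship_dates() share the calling convention shown for
# update_relationship_types() above; only the "new_*" keyword changes
# (new_confidence, new_reason, new_date). Hypothetical example; string dates
# are parsed with dateutil's fuzzy parser, as in the functions themselves.
#
#   results = update_relationship_dates(left_type="Sample",
#                                       left_id="5d41402abc4b2a76b9719d91",
#                                       right_type="Domain",
#                                       right_id="5d41402abc4b2a76b9719d92",
#                                       rel_type="Related_To",
#                                       rel_date="2015-01-01 00:00:00",
#                                       new_date="2015-02-01 00:00:00",
#                                       analyst="analyst1")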
| 42.522449
| 95
| 0.578038
|
import datetime
from dateutil.parser import parse
from cripts.core.class_mapper import class_from_id
def get_relationships(obj=None, type_=None, id_=None, analyst=None):
if obj:
return obj.sort_relationships("%s" % analyst, meta=True)
elif type_ and id_:
obj = class_from_id(type_, id_)
if not obj:
return {}
return obj.sort_relationships("%s" % analyst, meta=True)
else:
return {}
def forge_relationship(type_=None, id_=None,
class_=None, right_type=None,
right_id=None, right_class=None,
rel_type=None, rel_date=None,
user=None, rel_reason="",
rel_confidence='unknown', get_rels=False, **kwargs):
if rel_date == 'None':
rel_date = None
elif isinstance(rel_date, basestring) and rel_date != '':
rel_date = parse(rel_date, fuzzy=True)
elif not isinstance(rel_date, datetime.datetime):
rel_date = None
if not class_:
if type_ and id_:
class_ = class_from_id(type_, id_)
if not class_:
return {'success': False, 'message': "Failed to get left TLO"}
if not right_class:
if right_type and right_id:
print ("right type:" + str(right_type))
print ("right id:" + str(right_id))
right_class = class_from_id(right_type, right_id)
if not right_class:
return {'success': False, 'message': "Failed to get right TLO"}
try:
results = class_.add_relationship(right_class, rel_type, rel_date,
user, rel_confidence, rel_reason)
except Exception as e:
return {'success': False, 'message': e}
if results['success']:
class_.update(add_to_set__relationships=results['message'])
if get_rels:
results['relationships'] = class_.sort_relationships("%s" % user,
meta=True)
return results
def delete_all_relationships(left_class=None, left_type=None,
left_id=None, analyst=None):
if not left_class:
if left_type and left_id:
left_class = class_from_id(left_type, left_id)
if not left_class:
return {'success': False,
'message': "Unable to get object."}
else:
return {'success': False,
'message': "Need a valid left type and id"}
return left_class.delete_all_relationships()
def delete_relationship(left_class=None, right_class=None,
left_type=None, left_id=None,
right_type=None, right_id=None,
rel_type=None, rel_date=None,
analyst=None, get_rels=True):
if rel_date is None or rel_date == 'None':
rel_date = None
elif isinstance(rel_date, basestring) and rel_date != '':
rel_date = parse(rel_date, fuzzy=True)
elif not isinstance(rel_date, datetime.datetime):
rel_date = None
if not left_class:
if left_type and left_id:
left_class = class_from_id(left_type, left_id)
if not left_class:
return {'success': False,
'message': "Unable to get object."}
else:
return {'success': False,
'message': "Need a valid left type and id"}
if right_class:
results = left_class.delete_relationship(rel_item=right_class,
rel_type=rel_type,
rel_date=rel_date,
analyst=analyst)
else:
if right_type and right_id:
results = left_class.delete_relationship(type_=right_type,
rel_id=right_id,
rel_type=rel_type,
rel_date=rel_date,
analyst=analyst)
else:
return {'success': False,
'message': "Need a valid right type and id"}
if results['success']:
left_class.save(username=analyst)
if get_rels:
results['relationships'] = left_class.sort_relationships("%s" % analyst, meta=True)
return results
def update_relationship_types(left_class=None, right_class=None,
left_type=None, left_id=None,
right_type=None, right_id=None,
rel_type=None, rel_date=None,
new_type=None,analyst=None):
if rel_date is None or rel_date == 'None':
rel_date = None
elif isinstance(rel_date, basestring) and rel_date != '':
rel_date = parse(rel_date, fuzzy=True)
elif not isinstance(rel_date, datetime.datetime):
rel_date = None
if not left_class:
if left_type and left_id:
left_class = class_from_id(left_type, left_id)
if not left_class:
return {'success': False,
'message': "Unable to get object."}
else:
return {'success': False,
'message': "Need a valid left type and id"}
if right_class:
results = left_class.edit_relationship_type(rel_item=right_class,
rel_type=rel_type,
rel_date=rel_date,
new_type=new_type,
analyst=analyst)
left_class.save(username=analyst)
right_class.save(username=analyst)
else:
if right_type and right_id:
results = left_class.edit_relationship_type(type_=right_type,
rel_id=right_id,
rel_type=rel_type,
rel_date=rel_date,
new_type=new_type,
analyst=analyst)
left_class.save(username=analyst)
else:
return {'success': False,
'message': "Need a valid right type and id"}
return results
def update_relationship_confidences(left_class=None, right_class=None,
left_type=None, left_id=None,
right_type=None, right_id=None,
rel_type=None, rel_date=None,
new_type=None,analyst=None,
new_confidence='unknown'):
if rel_date is None or rel_date == 'None':
rel_date = None
elif isinstance(rel_date, basestring) and rel_date != '':
rel_date = parse(rel_date, fuzzy=True)
elif not isinstance(rel_date, datetime.datetime):
rel_date = None
if not left_class:
if left_type and left_id:
left_class = class_from_id(left_type, left_id)
else:
return {'success': False,
'message': "Need a valid left type and id"}
if right_class:
results = left_class.edit_relationship_confidence(rel_item=right_class,
rel_type=rel_type,
rel_date=rel_date,
new_confidence=new_confidence,
analyst=analyst)
left_class.save(username=analyst)
right_class.save(username=analyst)
else:
if right_type and right_id:
results = left_class.edit_relationship_confidence(type_=right_type,
rel_id=right_id,
rel_type=rel_type,
rel_date=rel_date,
new_confidence=new_confidence,
analyst=analyst)
left_class.save(username=analyst)
else:
return {'success': False,
'message': "Need a valid right type and id"}
return results
def update_relationship_reasons(left_class=None, right_class=None,
left_type=None, left_id=None,
right_type=None, right_id=None,
rel_type=None, rel_date=None,
new_type=None,analyst=None, new_reason="N/A"):
if rel_date is None or rel_date == 'None':
rel_date = None
elif isinstance(rel_date, basestring) and rel_date != '':
rel_date = parse(rel_date, fuzzy=True)
elif not isinstance(rel_date, datetime.datetime):
rel_date = None
if not left_class:
if left_type and left_id:
left_class = class_from_id(left_type, left_id)
else:
return {'success': False,
'message': "Need a valid left type and id"}
if right_class:
results = left_class.edit_relationship_reason(rel_item=right_class,
rel_type=rel_type,
rel_date=rel_date,
new_reason=new_reason,
analyst=analyst)
left_class.save(username=analyst)
right_class.save(username=analyst)
else:
if right_type and right_id:
results = left_class.edit_relationship_reason(type_=right_type,
rel_id=right_id,
rel_type=rel_type,
rel_date=rel_date,
new_reason=new_reason,
analyst=analyst)
left_class.save(username=analyst)
else:
return {'success': False,
'message': "Need a valid right type and id"}
return results
def update_relationship_dates(left_class=None, right_class=None,
left_type=None, left_id=None,
right_type=None, right_id=None,
rel_type=None, rel_date=None,
new_date=None,analyst=None):
if rel_date is None or rel_date == 'None':
rel_date = None
elif isinstance(rel_date, basestring) and rel_date != '':
rel_date = parse(rel_date, fuzzy=True)
elif not isinstance(rel_date, datetime.datetime):
rel_date = None
if new_date is None or new_date == 'None':
new_date = None
elif isinstance(new_date, basestring) and new_date != '':
new_date = parse(new_date, fuzzy=True)
elif not isinstance(new_date, datetime.datetime):
new_date = None
if not left_class:
if left_type and left_id:
left_class = class_from_id(left_type, left_id)
if not left_class:
return {'success': False,
'message': "Unable to get object."}
else:
return {'success': False,
'message': "Need a valid left type and id"}
if right_class:
results = left_class.edit_relationship_date(rel_item=right_class,
rel_type=rel_type,
rel_date=rel_date,
new_date=new_date,
analyst=analyst)
left_class.save(username=analyst)
right_class.save(username=analyst)
else:
if right_type and right_id:
results = left_class.edit_relationship_date(type_=right_type,
rel_id=right_id,
rel_type=rel_type,
rel_date=rel_date,
new_date=new_date,
analyst=analyst)
left_class.save(username=analyst)
else:
return {'success': False,
'message': "Need a valid right type and id"}
return results
| true
| true
|
1c45866e5a644fc50a8ed3659b45f9a0dee3b769
| 1,731
|
py
|
Python
|
pytorch/skin_lesion_classification/plots.py
|
deephealthproject/use-case-pipelines
|
ea9c8aedfbc9084e1a5350f6f73def2578258c77
|
[
"MIT"
] | 1
|
2020-05-20T16:57:11.000Z
|
2020-05-20T16:57:11.000Z
|
pytorch/skin_lesion_classification/plots.py
|
deephealthproject/use-case-pipelines
|
ea9c8aedfbc9084e1a5350f6f73def2578258c77
|
[
"MIT"
] | 5
|
2021-03-26T16:01:51.000Z
|
2021-09-20T13:53:22.000Z
|
pytorch/skin_lesion_classification/plots.py
|
deephealthproject/use-case-pipelines
|
ea9c8aedfbc9084e1a5350f6f73def2578258c77
|
[
"MIT"
] | 5
|
2020-05-18T09:44:03.000Z
|
2020-11-29T12:58:28.000Z
|
import itertools
import matplotlib.pyplot as plt
import numpy as np
def plot_sequence(filename, sequences, legend=None):
"""Plots one or more sequences of values into a file
:param filename: output filename
:param sequences: (M x N) array-like structure containing M sequences of N values
:param legend: (M) array-like legend
:return:
"""
fig = plt.figure()
for sequence in sequences:
plt.plot(range(len(sequence)), sequence)
if legend:
plt.legend(legend)
plt.savefig(filename)
plt.close(fig)
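# --- Editor's note (illustrative sketch, not part of the original file) -----
# plot_sequence() accepts any (M x N) array-like, e.g. per-epoch metrics:
#
#   plot_sequence("loss.png", [[0.9, 0.5, 0.3], [1.0, 0.7, 0.4]],
#                 legend=["train", "validation"])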
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
plt.ylim(-0.5, len(classes) - 0.5)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
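if __name__ == "__main__":
    # Editor's demo (not in the original file): exercise plot_confusion_matrix
    # with a hypothetical 3x3 matrix and made-up class labels, then save the
    # current figure to disk.
    demo_cm = np.array([[5, 1, 0],
                        [2, 7, 1],
                        [0, 1, 9]])
    plot_confusion_matrix(demo_cm, classes=["mel", "nv", "bcc"], normalize=True)
    plt.savefig("demo_confusion_matrix.png")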
| 29.844828
| 85
| 0.617562
|
import itertools
import matplotlib.pyplot as plt
import numpy as np
def plot_sequence(filename, sequences, legend=None):
fig = plt.figure()
for sequence in sequences:
plt.plot(range(len(sequence)), sequence)
if legend:
plt.legend(legend)
plt.savefig(filename)
plt.close(fig)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
plt.ylim(-0.5, len(classes) - 0.5)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
| true
| true
|
1c4586bd66611dfec7cf8c2a805839086f354af2
| 232
|
py
|
Python
|
lib/JumpScale/baselib/dnsman/dnsFactory.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | null | null | null |
lib/JumpScale/baselib/dnsman/dnsFactory.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | 4
|
2016-08-25T12:08:39.000Z
|
2018-04-12T12:36:01.000Z
|
lib/JumpScale/baselib/dnsman/dnsFactory.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | 3
|
2016-03-08T07:49:34.000Z
|
2018-10-19T13:56:43.000Z
|
from bind import BindDNS
class DNSFactory(object):
def __init__(self):
self.bindObj = None
@property
def bind(self):
if not self.bindObj:
self.bindObj = BindDNS()
return self.bindObj
| 21.090909
| 36
| 0.612069
|
from bind import BindDNS
class DNSFactory(object):
def __init__(self):
self.bindObj = None
@property
def bind(self):
if not self.bindObj:
self.bindObj = BindDNS()
return self.bindObj
| true
| true
|
1c45875f0c9405efffecbefbf3c272cc94cee782
| 7,536
|
py
|
Python
|
main/cloudfoundry_client/v2/entities.py
|
subhash12/cf-python-client
|
c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0
|
[
"Apache-2.0"
] | null | null | null |
main/cloudfoundry_client/v2/entities.py
|
subhash12/cf-python-client
|
c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0
|
[
"Apache-2.0"
] | null | null | null |
main/cloudfoundry_client/v2/entities.py
|
subhash12/cf-python-client
|
c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0
|
[
"Apache-2.0"
] | null | null | null |
from functools import partial, reduce
from typing import Callable, List, Tuple, Any, Optional, Generator, TYPE_CHECKING
from urllib.parse import quote
from requests import Response
from cloudfoundry_client.errors import InvalidEntity
from cloudfoundry_client.json_object import JsonObject
from cloudfoundry_client.request_object import Request
if TYPE_CHECKING:
from cloudfoundry_client.client import CloudFoundryClient
class Entity(JsonObject):
def __init__(self, target_endpoint: str, client: "CloudFoundryClient", *args, **kwargs):
super(Entity, self).__init__(*args, **kwargs)
self.target_endpoint = target_endpoint
self.client = client
try:
if not (isinstance(self.get("entity"), dict)):
raise InvalidEntity(**self)
for attribute, value in list(self["entity"].items()):
domain_name, suffix = attribute.rpartition("_")[::2]
if suffix == "url":
manager_name = domain_name if domain_name.endswith("s") else "%ss" % domain_name
try:
other_manager = getattr(client.v2, manager_name)
except AttributeError:
# generic manager
other_manager = EntityManager(target_endpoint, client, "")
if domain_name.endswith("s"):
new_method = partial(other_manager._list, value)
else:
new_method = partial(other_manager._get, value)
new_method.__name__ = domain_name
setattr(self, domain_name, new_method)
except KeyError:
raise InvalidEntity(**self)
EntityBuilder = Callable[[List[Tuple[str, Any]]], Entity]
PaginateEntities = Generator[Entity, None, None]
class EntityManager(object):
list_query_parameters = ["page", "results-per-page", "order-direction"]
list_multi_parameters = ["order-by"]
timestamp_parameters = ["timestamp"]
def __init__(
self, target_endpoint: str, client: "CloudFoundryClient", entity_uri: str, entity_builder: Optional[EntityBuilder] = None
):
self.target_endpoint = target_endpoint
self.entity_uri = entity_uri
self.client = client
self.entity_builder = (
entity_builder if entity_builder is not None else lambda pairs: Entity(target_endpoint, client, pairs)
)
def _list(self, requested_path: str, entity_builder: Optional[EntityBuilder] = None, **kwargs) -> PaginateEntities:
url_requested = self._get_url_filtered("%s%s" % (self.target_endpoint, requested_path), **kwargs)
response = self.client.get(url_requested)
entity_builder = self._get_entity_builder(entity_builder)
while True:
response_json = self._read_response(response, JsonObject)
for resource in response_json["resources"]:
yield entity_builder(list(resource.items()))
if response_json["next_url"] is None:
break
else:
url_requested = "%s%s" % (self.target_endpoint, response_json["next_url"])
response = self.client.get(url_requested)
def _create(self, data: dict, **kwargs) -> Entity:
url = "%s%s" % (self.target_endpoint, self.entity_uri)
return self._post(url, data, **kwargs)
def _update(self, resource_id: str, data: dict, **kwargs):
url = "%s%s/%s" % (self.target_endpoint, self.entity_uri, resource_id)
return self._put(url, data, **kwargs)
def _remove(self, resource_id: str, **kwargs):
url = "%s%s/%s" % (self.target_endpoint, self.entity_uri, resource_id)
self._delete(url, **kwargs)
def _get(self, requested_path: str, entity_builder: Optional[EntityBuilder] = None) -> Entity:
url = "%s%s" % (self.target_endpoint, requested_path)
response = self.client.get(url)
return self._read_response(response, entity_builder)
def _post(self, url: str, data: Optional[dict] = None, **kwargs):
response = self.client.post(url, json=data, **kwargs)
return self._read_response(response)
def _put(self, url: str, data: Optional[dict] = None, **kwargs):
response = self.client.put(url, json=data, **kwargs)
return self._read_response(response)
def _delete(self, url: str, **kwargs):
self.client.delete(url, **kwargs)
def __iter__(self) -> PaginateEntities:
return self.list()
def __getitem__(self, entity_guid) -> Entity:
return self.get(entity_guid)
def list(self, **kwargs) -> PaginateEntities:
return self._list(self.entity_uri, **kwargs)
def get_first(self, **kwargs) -> Optional[Entity]:
kwargs.setdefault("results-per-page", 1)
for entity in self._list(self.entity_uri, **kwargs):
return entity
return None
def get(self, entity_id: str, *extra_paths) -> Entity:
if len(extra_paths) == 0:
requested_path = "%s/%s" % (self.entity_uri, entity_id)
else:
requested_path = "%s/%s/%s" % (self.entity_uri, entity_id, "/".join(extra_paths))
return self._get(requested_path)
def _read_response(self, response: Response, other_entity_builder: Optional[EntityBuilder] = None):
entity_builder = self._get_entity_builder(other_entity_builder)
result = response.json(object_pairs_hook=JsonObject)
return entity_builder(list(result.items()))
@staticmethod
def _request(**mandatory_parameters) -> Request:
return Request(**mandatory_parameters)
def _get_entity_builder(self, entity_builder: Optional[EntityBuilder]) -> EntityBuilder:
if entity_builder is None:
return self.entity_builder
else:
return entity_builder
def _get_url_filtered(self, url: str, **kwargs) -> str:
def _append_encoded_parameter(parameters: List[str], args: Tuple[str, Any]) -> List[str]:
parameter_name, parameter_value = args[0], args[1]
if parameter_name in self.list_query_parameters:
parameters.append("%s=%s" % (parameter_name, str(parameter_value)))
elif parameter_name in self.list_multi_parameters:
value_list = parameter_value
if not isinstance(value_list, (list, tuple)):
value_list = [value_list]
for value in value_list:
parameters.append("%s=%s" % (parameter_name, str(value)))
elif parameter_name in self.timestamp_parameters:
if isinstance(args[1], dict):
operator_list = args[1].keys()
for operator in operator_list:
parameters.append("q=%s" % quote("%s%s%s" % (parameter_name, operator, args[1][operator])))
else:
parameters.append("q=%s" % quote("%s:%s" % (parameter_name, str(parameter_value))))
elif isinstance(parameter_value, (list, tuple)):
parameters.append("q=%s" % quote("%s IN %s" % (parameter_name, ",".join(parameter_value))))
else:
parameters.append("q=%s" % quote("%s:%s" % (parameter_name, str(parameter_value))))
return parameters
if len(kwargs) > 0:
return "%s?%s" % (url, "&".join(reduce(_append_encoded_parameter, sorted(list(kwargs.items())), [])))
else:
return url
| 43.813953
| 129
| 0.625398
|
from functools import partial, reduce
from typing import Callable, List, Tuple, Any, Optional, Generator, TYPE_CHECKING
from urllib.parse import quote
from requests import Response
from cloudfoundry_client.errors import InvalidEntity
from cloudfoundry_client.json_object import JsonObject
from cloudfoundry_client.request_object import Request
if TYPE_CHECKING:
from cloudfoundry_client.client import CloudFoundryClient
class Entity(JsonObject):
def __init__(self, target_endpoint: str, client: "CloudFoundryClient", *args, **kwargs):
super(Entity, self).__init__(*args, **kwargs)
self.target_endpoint = target_endpoint
self.client = client
try:
if not (isinstance(self.get("entity"), dict)):
raise InvalidEntity(**self)
for attribute, value in list(self["entity"].items()):
domain_name, suffix = attribute.rpartition("_")[::2]
if suffix == "url":
manager_name = domain_name if domain_name.endswith("s") else "%ss" % domain_name
try:
other_manager = getattr(client.v2, manager_name)
except AttributeError:
other_manager = EntityManager(target_endpoint, client, "")
if domain_name.endswith("s"):
new_method = partial(other_manager._list, value)
else:
new_method = partial(other_manager._get, value)
new_method.__name__ = domain_name
setattr(self, domain_name, new_method)
except KeyError:
raise InvalidEntity(**self)
EntityBuilder = Callable[[List[Tuple[str, Any]]], Entity]
PaginateEntities = Generator[Entity, None, None]
class EntityManager(object):
list_query_parameters = ["page", "results-per-page", "order-direction"]
list_multi_parameters = ["order-by"]
timestamp_parameters = ["timestamp"]
def __init__(
self, target_endpoint: str, client: "CloudFoundryClient", entity_uri: str, entity_builder: Optional[EntityBuilder] = None
):
self.target_endpoint = target_endpoint
self.entity_uri = entity_uri
self.client = client
self.entity_builder = (
entity_builder if entity_builder is not None else lambda pairs: Entity(target_endpoint, client, pairs)
)
def _list(self, requested_path: str, entity_builder: Optional[EntityBuilder] = None, **kwargs) -> PaginateEntities:
url_requested = self._get_url_filtered("%s%s" % (self.target_endpoint, requested_path), **kwargs)
response = self.client.get(url_requested)
entity_builder = self._get_entity_builder(entity_builder)
while True:
response_json = self._read_response(response, JsonObject)
for resource in response_json["resources"]:
yield entity_builder(list(resource.items()))
if response_json["next_url"] is None:
break
else:
url_requested = "%s%s" % (self.target_endpoint, response_json["next_url"])
response = self.client.get(url_requested)
def _create(self, data: dict, **kwargs) -> Entity:
url = "%s%s" % (self.target_endpoint, self.entity_uri)
return self._post(url, data, **kwargs)
def _update(self, resource_id: str, data: dict, **kwargs):
url = "%s%s/%s" % (self.target_endpoint, self.entity_uri, resource_id)
return self._put(url, data, **kwargs)
def _remove(self, resource_id: str, **kwargs):
url = "%s%s/%s" % (self.target_endpoint, self.entity_uri, resource_id)
self._delete(url, **kwargs)
def _get(self, requested_path: str, entity_builder: Optional[EntityBuilder] = None) -> Entity:
url = "%s%s" % (self.target_endpoint, requested_path)
response = self.client.get(url)
return self._read_response(response, entity_builder)
def _post(self, url: str, data: Optional[dict] = None, **kwargs):
response = self.client.post(url, json=data, **kwargs)
return self._read_response(response)
def _put(self, url: str, data: Optional[dict] = None, **kwargs):
response = self.client.put(url, json=data, **kwargs)
return self._read_response(response)
def _delete(self, url: str, **kwargs):
self.client.delete(url, **kwargs)
def __iter__(self) -> PaginateEntities:
return self.list()
def __getitem__(self, entity_guid) -> Entity:
return self.get(entity_guid)
def list(self, **kwargs) -> PaginateEntities:
return self._list(self.entity_uri, **kwargs)
def get_first(self, **kwargs) -> Optional[Entity]:
kwargs.setdefault("results-per-page", 1)
for entity in self._list(self.entity_uri, **kwargs):
return entity
return None
def get(self, entity_id: str, *extra_paths) -> Entity:
if len(extra_paths) == 0:
requested_path = "%s/%s" % (self.entity_uri, entity_id)
else:
requested_path = "%s/%s/%s" % (self.entity_uri, entity_id, "/".join(extra_paths))
return self._get(requested_path)
def _read_response(self, response: Response, other_entity_builder: Optional[EntityBuilder] = None):
entity_builder = self._get_entity_builder(other_entity_builder)
result = response.json(object_pairs_hook=JsonObject)
return entity_builder(list(result.items()))
@staticmethod
def _request(**mandatory_parameters) -> Request:
return Request(**mandatory_parameters)
def _get_entity_builder(self, entity_builder: Optional[EntityBuilder]) -> EntityBuilder:
if entity_builder is None:
return self.entity_builder
else:
return entity_builder
def _get_url_filtered(self, url: str, **kwargs) -> str:
def _append_encoded_parameter(parameters: List[str], args: Tuple[str, Any]) -> List[str]:
parameter_name, parameter_value = args[0], args[1]
if parameter_name in self.list_query_parameters:
parameters.append("%s=%s" % (parameter_name, str(parameter_value)))
elif parameter_name in self.list_multi_parameters:
value_list = parameter_value
if not isinstance(value_list, (list, tuple)):
value_list = [value_list]
for value in value_list:
parameters.append("%s=%s" % (parameter_name, str(value)))
elif parameter_name in self.timestamp_parameters:
if isinstance(args[1], dict):
operator_list = args[1].keys()
for operator in operator_list:
parameters.append("q=%s" % quote("%s%s%s" % (parameter_name, operator, args[1][operator])))
else:
parameters.append("q=%s" % quote("%s:%s" % (parameter_name, str(parameter_value))))
elif isinstance(parameter_value, (list, tuple)):
parameters.append("q=%s" % quote("%s IN %s" % (parameter_name, ",".join(parameter_value))))
else:
parameters.append("q=%s" % quote("%s:%s" % (parameter_name, str(parameter_value))))
return parameters
if len(kwargs) > 0:
return "%s?%s" % (url, "&".join(reduce(_append_encoded_parameter, sorted(list(kwargs.items())), [])))
else:
return url
| true
| true
|
1c4587d7f261fcbda3642a50322883ae48f591a2
| 8,013
|
py
|
Python
|
karton/config_extractor/config_extractor.py
|
kscieslinski/karton-config-extractor
|
c0eb0bddeed2b217abe517ca1b8a20e679506dba
|
[
"BSD-3-Clause"
] | null | null | null |
karton/config_extractor/config_extractor.py
|
kscieslinski/karton-config-extractor
|
c0eb0bddeed2b217abe517ca1b8a20e679506dba
|
[
"BSD-3-Clause"
] | null | null | null |
karton/config_extractor/config_extractor.py
|
kscieslinski/karton-config-extractor
|
c0eb0bddeed2b217abe517ca1b8a20e679506dba
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python3
import gc
import hashlib
import json
import os
import re
from karton.core import Config, Karton, Resource, Task
from karton.core.resource import ResourceBase
from malduck.extractor import ExtractManager, ExtractorModules
from .__version__ import __version__
class AnalysisExtractManager(ExtractManager):
"""
Patched version of original ExtractManager, providing current karton interface
"""
def __init__(self, karton: "ConfigExtractor") -> None:
super(AnalysisExtractManager, self).__init__(karton.modules)
self.karton = karton
def create_extractor(karton: "ConfigExtractor") -> AnalysisExtractManager:
return AnalysisExtractManager(karton)
class ConfigExtractor(Karton):
"""
Extracts configuration from samples and Drakvuf Sandbox analyses
"""
identity = "karton.config-extractor"
version = __version__
persistent = True
filters = [
{
"type": "sample",
"stage": "recognized",
"kind": "runnable",
"platform": "win32",
},
{
"type": "sample",
"stage": "recognized",
"kind": "runnable",
"platform": "win64",
},
{
"type": "sample",
"stage": "recognized",
"kind": "runnable",
"platform": "linux",
},
{"type": "analysis", "kind": "drakrun-prod"},
{"type": "analysis", "kind": "drakrun"},
]
@classmethod
def args_parser(cls):
parser = super().args_parser()
parser.add_argument(
"--modules",
help="Malduck extractor modules directory",
default="extractor/modules",
)
return parser
@classmethod
def main(cls):
parser = cls.args_parser()
args = parser.parse_args()
config = Config(args.config_file)
service = ConfigExtractor(config, modules=args.modules)
service.loop()
def __init__(self, config: Config, modules: str) -> None:
super().__init__(config)
self.modules = ExtractorModules(modules)
def report_config(self, config, sample, parent=None):
legacy_config = dict(config)
legacy_config["type"] = config["family"]
del legacy_config["family"]
# This allows us to spawn karton tasks for special config handling
if "store-in-karton" in legacy_config:
self.log.info("Karton tasks found in config, sending")
for karton_task in legacy_config["store-in-karton"]:
task_data = karton_task["task"]
payload_data = karton_task["payload"]
payload_data["parent"] = parent or sample
task = Task(headers=task_data, payload=payload_data)
self.send_task(task)
self.log.info("Sending ripped task %s", task.uid)
del legacy_config["store-in-karton"]
if len(legacy_config.items()) == 1:
self.log.info("Final config is empty, not sending it to the reporter")
return
task = Task(
{
"type": "config",
"kind": "static",
"family": config["family"],
"quality": self.current_task.headers.get("quality", "high"),
},
payload={
"config": legacy_config,
"sample": sample,
"parent": parent or sample,
},
)
self.send_task(task)
# analyze a standard, non-dump sample
def analyze_sample(self, sample: ResourceBase) -> None:
extractor = create_extractor(self)
with sample.download_temporary_file() as temp: # type: ignore
extractor.push_file(temp.name)
configs = extractor.config
if configs:
config = configs[0]
self.log.info("Got config: {}".format(json.dumps(config)))
self.report_config(config, sample)
else:
self.log.info("Failed to get config")
# analyze a drakrun analysis
def analyze_drakrun(self, sample, path):
extractor = create_extractor(self)
dumps_path = os.path.join(path, "dumps")
dump_candidates = {}
results = {
"analysed": 0,
"crashed": 0,
}
analysis_dumps = sorted(os.listdir(dumps_path))
for i, dump in enumerate(analysis_dumps):
# catch only dumps
if re.match(r"^[a-f0-9]{4,16}_[a-f0-9]{16}$", dump):
results["analysed"] += 1
self.log.debug(
"Analyzing dump %d/%d %s", i, len(analysis_dumps), str(dump)
)
dump_path = os.path.join(dumps_path, dump)
with open(dump_path, "rb") as f:
dump_data = f.read()
if not dump_data:
self.log.warning("Dump {} is empty".format(dump))
continue
base = int(dump.split("_")[0], 16)
try:
family = extractor.push_file(dump_path, base=base)
if family:
self.log.info("Found better %s config in %s", family, dump)
dump_candidates[family] = (dump, dump_data)
except Exception:
self.log.exception("Error while extracting from {}".format(dump))
results["crashed"] += 1
self.log.debug("Finished analysing dump no. %d", i)
self.log.info("Merging and reporting extracted configs")
for family, config in extractor.configs.items():
dump, dump_data = dump_candidates[family]
self.log.info("* (%s) %s => %s", family, dump, json.dumps(config))
parent = Resource(name=dump, content=dump_data)
task = Task(
{
"type": "sample",
"stage": "analyzed",
"kind": "dump",
"platform": "win32",
"extension": "exe",
},
payload={
"sample": parent,
"parent": sample,
"tags": ["dump:win32:exe"],
},
)
self.send_task(task)
self.report_config(config, sample, parent=parent)
self.log.info("done analysing, results: {}".format(json.dumps(results)))
def process(self, task: Task) -> None: # type: ignore
sample = task.get_resource("sample")
headers = task.headers
if headers["type"] == "sample":
self.log.info("Analyzing original binary")
self.analyze_sample(sample)
elif headers["type"] == "analysis" and headers["kind"] == "drakrun-prod":
analysis = task.get_resource("analysis")
if analysis.size > 1024 * 1024 * 128:
self.log.info("Analysis is too large, aborting")
return
with analysis.extract_temporary() as fpath: # type: ignore
with open(os.path.join(fpath, "sample.txt"), "r") as f:
sample_hash = f.read()
self.log.info(
"Processing drakmon analysis, sample: {}".format(sample_hash)
)
self.analyze_drakrun(sample, fpath)
elif headers["type"] == "analysis" and headers["kind"] == "drakrun":
# DRAKVUF Sandbox (codename: drakmon OSS)
sample_hash = hashlib.sha256(sample.content or b"").hexdigest()
self.log.info(
"Processing drakmon OSS analysis, sample: {}".format(sample_hash)
)
dumps = task.get_resource("dumps.zip")
with dumps.extract_temporary() as tmpdir: # type: ignore
self.analyze_drakrun(sample, tmpdir)
self.log.debug("Printing gc stats")
self.log.debug(gc.get_stats())
| 34.097872
| 85
| 0.539998
|
import gc
import hashlib
import json
import os
import re
from karton.core import Config, Karton, Resource, Task
from karton.core.resource import ResourceBase
from malduck.extractor import ExtractManager, ExtractorModules
from .__version__ import __version__
class AnalysisExtractManager(ExtractManager):
def __init__(self, karton: "ConfigExtractor") -> None:
super(AnalysisExtractManager, self).__init__(karton.modules)
self.karton = karton
def create_extractor(karton: "ConfigExtractor") -> AnalysisExtractManager:
return AnalysisExtractManager(karton)
class ConfigExtractor(Karton):
identity = "karton.config-extractor"
version = __version__
persistent = True
filters = [
{
"type": "sample",
"stage": "recognized",
"kind": "runnable",
"platform": "win32",
},
{
"type": "sample",
"stage": "recognized",
"kind": "runnable",
"platform": "win64",
},
{
"type": "sample",
"stage": "recognized",
"kind": "runnable",
"platform": "linux",
},
{"type": "analysis", "kind": "drakrun-prod"},
{"type": "analysis", "kind": "drakrun"},
]
@classmethod
def args_parser(cls):
parser = super().args_parser()
parser.add_argument(
"--modules",
help="Malduck extractor modules directory",
default="extractor/modules",
)
return parser
@classmethod
def main(cls):
parser = cls.args_parser()
args = parser.parse_args()
config = Config(args.config_file)
service = ConfigExtractor(config, modules=args.modules)
service.loop()
def __init__(self, config: Config, modules: str) -> None:
super().__init__(config)
self.modules = ExtractorModules(modules)
def report_config(self, config, sample, parent=None):
legacy_config = dict(config)
legacy_config["type"] = config["family"]
del legacy_config["family"]
if "store-in-karton" in legacy_config:
self.log.info("Karton tasks found in config, sending")
for karton_task in legacy_config["store-in-karton"]:
task_data = karton_task["task"]
payload_data = karton_task["payload"]
payload_data["parent"] = parent or sample
task = Task(headers=task_data, payload=payload_data)
self.send_task(task)
self.log.info("Sending ripped task %s", task.uid)
del legacy_config["store-in-karton"]
if len(legacy_config.items()) == 1:
self.log.info("Final config is empty, not sending it to the reporter")
return
task = Task(
{
"type": "config",
"kind": "static",
"family": config["family"],
"quality": self.current_task.headers.get("quality", "high"),
},
payload={
"config": legacy_config,
"sample": sample,
"parent": parent or sample,
},
)
self.send_task(task)
def analyze_sample(self, sample: ResourceBase) -> None:
extractor = create_extractor(self)
with sample.download_temporary_file() as temp:
extractor.push_file(temp.name)
configs = extractor.config
if configs:
config = configs[0]
self.log.info("Got config: {}".format(json.dumps(config)))
self.report_config(config, sample)
else:
self.log.info("Failed to get config")
def analyze_drakrun(self, sample, path):
extractor = create_extractor(self)
dumps_path = os.path.join(path, "dumps")
dump_candidates = {}
results = {
"analysed": 0,
"crashed": 0,
}
analysis_dumps = sorted(os.listdir(dumps_path))
for i, dump in enumerate(analysis_dumps):
if re.match(r"^[a-f0-9]{4,16}_[a-f0-9]{16}$", dump):
results["analysed"] += 1
self.log.debug(
"Analyzing dump %d/%d %s", i, len(analysis_dumps), str(dump)
)
dump_path = os.path.join(dumps_path, dump)
with open(dump_path, "rb") as f:
dump_data = f.read()
if not dump_data:
self.log.warning("Dump {} is empty".format(dump))
continue
base = int(dump.split("_")[0], 16)
try:
family = extractor.push_file(dump_path, base=base)
if family:
self.log.info("Found better %s config in %s", family, dump)
dump_candidates[family] = (dump, dump_data)
except Exception:
self.log.exception("Error while extracting from {}".format(dump))
results["crashed"] += 1
self.log.debug("Finished analysing dump no. %d", i)
self.log.info("Merging and reporting extracted configs")
for family, config in extractor.configs.items():
dump, dump_data = dump_candidates[family]
self.log.info("* (%s) %s => %s", family, dump, json.dumps(config))
parent = Resource(name=dump, content=dump_data)
task = Task(
{
"type": "sample",
"stage": "analyzed",
"kind": "dump",
"platform": "win32",
"extension": "exe",
},
payload={
"sample": parent,
"parent": sample,
"tags": ["dump:win32:exe"],
},
)
self.send_task(task)
self.report_config(config, sample, parent=parent)
self.log.info("done analysing, results: {}".format(json.dumps(results)))
def process(self, task: Task) -> None:
sample = task.get_resource("sample")
headers = task.headers
if headers["type"] == "sample":
self.log.info("Analyzing original binary")
self.analyze_sample(sample)
elif headers["type"] == "analysis" and headers["kind"] == "drakrun-prod":
analysis = task.get_resource("analysis")
if analysis.size > 1024 * 1024 * 128:
self.log.info("Analysis is too large, aborting")
return
with analysis.extract_temporary() as fpath:
with open(os.path.join(fpath, "sample.txt"), "r") as f:
sample_hash = f.read()
self.log.info(
"Processing drakmon analysis, sample: {}".format(sample_hash)
)
self.analyze_drakrun(sample, fpath)
elif headers["type"] == "analysis" and headers["kind"] == "drakrun":
sample_hash = hashlib.sha256(sample.content or b"").hexdigest()
self.log.info(
"Processing drakmon OSS analysis, sample: {}".format(sample_hash)
)
dumps = task.get_resource("dumps.zip")
with dumps.extract_temporary() as tmpdir:
self.analyze_drakrun(sample, tmpdir)
self.log.debug("Printing gc stats")
self.log.debug(gc.get_stats())
| true
| true
|
1c458914cb33dd348d349ab2d97c4bf9208ef056
| 6,011
|
py
|
Python
|
Code/PrepareTables/SelectedROICorrs_positionVar.py
|
cirmuw/functional-twin-analysis
|
b6730f09f2143d5372f1a90d5fac47e3385e54fb
|
[
"Apache-2.0"
] | null | null | null |
Code/PrepareTables/SelectedROICorrs_positionVar.py
|
cirmuw/functional-twin-analysis
|
b6730f09f2143d5372f1a90d5fac47e3385e54fb
|
[
"Apache-2.0"
] | null | null | null |
Code/PrepareTables/SelectedROICorrs_positionVar.py
|
cirmuw/functional-twin-analysis
|
b6730f09f2143d5372f1a90d5fac47e3385e54fb
|
[
"Apache-2.0"
] | null | null | null |
#script to create tables containing x, y and z coordinates of functionally corresponding vertices (position variability) for each twin, one table per vertex
#input: ids of functionally corresponding vertices of each twin relative to the reference
#output: tables with vertex position in each subject, one table per vertex
import numpy as np
import nibabel as nib
import pandas as pd
from glob import glob
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import settings as s
import pickle
#paths to subject data, ids of vertices without signal, surface file, parcellation, chosen rois
infile =s.HCP_information_sheet_path #\
subjectpath1=s.HCProot+'HCP_3T_RESTA_fmri/'# used to obtain subject ids
subjectpath2=s.HCProot+'HCP_3T_RESTB_fmri/'#/
source_dir=s.projectfolder+'7NETS_vertex/5_7nets_corresponding/' # path containing ids of functionally corresponding vertices of each twin relative to the reference
target_dir=s.projectfolder+'/7NETS_vertex/10_PositionVar_cosine/'# output tables with vertex position in each subject
if not os.path.exists(target_dir):
os.mkdir(target_dir)
zerovertexlh=np.load('../../Deliveries/0verticeslh.npy')#ids of vertices without signal
zerovertexrh=np.load('../../Deliveries/0verticesrh.npy')
surfacedirlh='../../Deliveries/fsaverage4/lh.inflated' # surface on which vertex coordinates are based
surfacedirrh='../../Deliveries/fsaverage4/rh.inflated'
lhsurf=nib.freesurfer.io.read_geometry(surfacedirlh)
rhsurf=nib.freesurfer.io.read_geometry(surfacedirrh)
lhsurf=lhsurf[0]
lhsurf=np.delete(lhsurf,zerovertexlh,0)
rhsurf=rhsurf[0]
rhsurf=np.delete(rhsurf,zerovertexrh,0)
surf=np.concatenate([lhsurf,rhsurf],axis=0)
lhparpath='../../Deliveries/lh.Schaefer2018_600Parcels_7Networks_order.annot'
rhparpath='../../Deliveries/rh.Schaefer2018_600Parcels_7Networks_order.annot'
lhannot=nib.freesurfer.io.read_annot(lhparpath)
lhlabels=lhannot[0]
rhannot=nib.freesurfer.io.read_annot(rhparpath)
rhlabels=rhannot[0]
labelslh=np.delete(lhlabels,zerovertexlh,0)
labelsrh=np.delete(rhlabels,zerovertexrh,0)
lhrois=list(np.load('../../Deliveries/chosenroislh.npy'))#load ids of chosen rois
rhrois=list(np.load('../../Deliveries/chosenroisrh.npy'))
lhrois=lhrois[1:]
rhrois=rhrois[1:]
nameslhrois=['l_'+str(s) for s in lhrois]
namesrhrois=['r_'+str(s) for s in rhrois]
#get assignment of parcels to Yeo networks based on color table
lhnetwork=np.zeros((9))
rhnetwork=np.zeros((9))
lhnetwork[8]=301
rhnetwork[8]=301
c1=1
c2=1
for i in range(1,301):
if abs(lhannot[1][i][0]-lhannot[1][i-1][0])>5:
lhnetwork[c1]=int(i)
c1=c1+1
if abs(rhannot[1][i][0]-rhannot[1][i-1][0])>5:
rhnetwork[c2]=int(i)
c2=c2+1
#Get paths to mgh-files of available subjects
xl=pd.ExcelFile(infile)
dataframe1=xl.parse('Sheet1')
isNotTwin=dataframe1['Twin_Stat']=='NotTwin'
isNotTwin=np.where(isNotTwin)[0]
dataframe2=dataframe1.drop(isNotTwin,0)
Subjects=dataframe2['Subject'].values
path1=[]
path2=[]
for i in range(Subjects.shape[0]):
path1.append(subjectpath1+str(Subjects[i]))
path2.append(subjectpath2+str(Subjects[i]))
truesubjects=[]
for i in range(Subjects.shape[0]):
if os.path.isdir(path1[i])==True:
truesubjects.append(Subjects[i])
if os.path.isdir(path2[i])==True:
truesubjects.append(Subjects[i])
name=['Subject','Zygosity','Mother_ID']
nonvertexdat=np.zeros((len(truesubjects),3),dtype=object)
for j in range(len(labelslh)):
if labelslh[j]!=0:
positionvar=[]
for i in range(len(truesubjects)):
functional=pickle.load(open(source_dir+'lh_'+str(j+1)+'correspondingvertices.p','rb'))
index=np.where(functional[1]==-1)[0]
index=functional[0][i][index]
index=index[0]
coords=surf[index]
positionframe=pd.DataFrame(coords)
positionframe.columns=['x','y','z']
positionvar.append(positionframe)
if j==0:
index=dataframe2[dataframe2['Subject']==truesubjects[i]].index.tolist()
tmp1=np.array([str(truesubjects[i]),dataframe2['Zygosity'][index].values[0], str(dataframe2['Mother_ID'][index].values[0])])
nonvertexdat[i,:]=tmp1
nonvertextable=pd.DataFrame(data=nonvertexdat)
nonvertextable.columns=name
positionframe=pd.concat(positionvar,axis=0,ignore_index=True)
table=pd.concat([nonvertextable,positionframe],axis=1)
table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0, ascending=[True,True])
table.reset_index(inplace=True)
table=table.drop('index',axis=1)
writefile=target_dir+'lh_'+str(j+1)+'_mean_position.csv.gz'
table.to_csv(writefile, compression='gzip')
for j in range(len(labelsrh)):
if labelsrh[j]!=0:
positionvar=[]
for i in range(len(truesubjects)):
functional=pickle.load(open(source_dir+'rh_'+str(j+1)+'correspondingvertices.p','rb'))
index=np.where(functional[1]==-1)[0]
index=functional[0][i][index]
index=index[0]
coords=surf[index]
positionframe=pd.DataFrame(coords)
positionframe.columns=['x','y','z']
positionvar.append(positionframe)
nonvertextable=pd.DataFrame(data=nonvertexdat)
nonvertextable.columns=name
positionframe=pd.concat(positionvar,axis=0,ignore_index=True)
table=pd.concat([nonvertextable,positionframe],axis=1)
table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0, ascending=[True,True])
table.reset_index(inplace=True)
table=table.drop('index',axis=1)
writefile=target_dir+'rh_'+str(j+1)+'_mean_position.csv.gz'
table.to_csv(writefile, compression='gzip')
print('Finished')
| 38.044304
| 155
| 0.683081
|
import numpy as np
import nibabel as nib
import pandas as pd
from glob import glob
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import settings as s
import pickle
infile =s.HCP_information_sheet_path
subjectpath1=s.HCProot+'HCP_3T_RESTA_fmri/'
subjectpath2=s.HCProot+'HCP_3T_RESTB_fmri/'
source_dir=s.projectfolder+'7NETS_vertex/5_7nets_corresponding/'
target_dir=s.projectfolder+'/7NETS_vertex/10_PositionVar_cosine/'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
zerovertexlh=np.load('../../Deliveries/0verticeslh.npy')
zerovertexrh=np.load('../../Deliveries/0verticesrh.npy')
surfacedirlh='../../Deliveries/fsaverage4/lh.inflated'
surfacedirrh='../../Deliveries/fsaverage4/rh.inflated'
lhsurf=nib.freesurfer.io.read_geometry(surfacedirlh)
rhsurf=nib.freesurfer.io.read_geometry(surfacedirrh)
lhsurf=lhsurf[0]
lhsurf=np.delete(lhsurf,zerovertexlh,0)
rhsurf=rhsurf[0]
rhsurf=np.delete(rhsurf,zerovertexrh,0)
surf=np.concatenate([lhsurf,rhsurf],axis=0)
lhparpath='../../Deliveries/lh.Schaefer2018_600Parcels_7Networks_order.annot'
rhparpath='../../Deliveries/rh.Schaefer2018_600Parcels_7Networks_order.annot'
lhannot=nib.freesurfer.io.read_annot(lhparpath)
lhlabels=lhannot[0]
rhannot=nib.freesurfer.io.read_annot(rhparpath)
rhlabels=rhannot[0]
labelslh=np.delete(lhlabels,zerovertexlh,0)
labelsrh=np.delete(rhlabels,zerovertexrh,0)
lhrois=list(np.load('../../Deliveries/chosenroislh.npy'))
rhrois=list(np.load('../../Deliveries/chosenroisrh.npy'))
lhrois=lhrois[1:]
rhrois=rhrois[1:]
nameslhrois=['l_'+str(s) for s in lhrois]
namesrhrois=['r_'+str(s) for s in rhrois]
lhnetwork=np.zeros((9))
rhnetwork=np.zeros((9))
lhnetwork[8]=301
rhnetwork[8]=301
c1=1
c2=1
for i in range(1,301):
if abs(lhannot[1][i][0]-lhannot[1][i-1][0])>5:
lhnetwork[c1]=int(i)
c1=c1+1
if abs(rhannot[1][i][0]-rhannot[1][i-1][0])>5:
rhnetwork[c2]=int(i)
c2=c2+1
xl=pd.ExcelFile(infile)
dataframe1=xl.parse('Sheet1')
isNotTwin=dataframe1['Twin_Stat']=='NotTwin'
isNotTwin=np.where(isNotTwin)[0]
dataframe2=dataframe1.drop(isNotTwin,0)
Subjects=dataframe2['Subject'].values
path1=[]
path2=[]
for i in range(Subjects.shape[0]):
path1.append(subjectpath1+str(Subjects[i]))
path2.append(subjectpath2+str(Subjects[i]))
truesubjects=[]
for i in range(Subjects.shape[0]):
if os.path.isdir(path1[i])==True:
truesubjects.append(Subjects[i])
if os.path.isdir(path2[i])==True:
truesubjects.append(Subjects[i])
name=['Subject','Zygosity','Mother_ID']
nonvertexdat=np.zeros((len(truesubjects),3),dtype=object)
for j in range(len(labelslh)):
if labelslh[j]!=0:
positionvar=[]
for i in range(len(truesubjects)):
functional=pickle.load(open(source_dir+'lh_'+str(j+1)+'correspondingvertices.p','rb'))
index=np.where(functional[1]==-1)[0]
index=functional[0][i][index]
index=index[0]
coords=surf[index]
positionframe=pd.DataFrame(coords)
positionframe.columns=['x','y','z']
positionvar.append(positionframe)
if j==0:
index=dataframe2[dataframe2['Subject']==truesubjects[i]].index.tolist()
tmp1=np.array([str(truesubjects[i]),dataframe2['Zygosity'][index].values[0], str(dataframe2['Mother_ID'][index].values[0])])
nonvertexdat[i,:]=tmp1
nonvertextable=pd.DataFrame(data=nonvertexdat)
nonvertextable.columns=name
positionframe=pd.concat(positionvar,axis=0,ignore_index=True)
table=pd.concat([nonvertextable,positionframe],axis=1)
table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0, ascending=[True,True])
table.reset_index(inplace=True)
table=table.drop('index',axis=1)
writefile=target_dir+'lh_'+str(j+1)+'_mean_position.csv.gz'
table.to_csv(writefile, compression='gzip')
for j in range(len(labelsrh)):
if labelsrh[j]!=0:
positionvar=[]
for i in range(len(truesubjects)):
functional=pickle.load(open(source_dir+'rh_'+str(j+1)+'correspondingvertices.p','rb'))
index=np.where(functional[1]==-1)[0]
index=functional[0][i][index]
index=index[0]
coords=surf[index]
positionframe=pd.DataFrame(coords)
positionframe.columns=['x','y','z']
positionvar.append(positionframe)
nonvertextable=pd.DataFrame(data=nonvertexdat)
nonvertextable.columns=name
positionframe=pd.concat(positionvar,axis=0,ignore_index=True)
table=pd.concat([nonvertextable,positionframe],axis=1)
table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0, ascending=[True,True])
table.reset_index(inplace=True)
table=table.drop('index',axis=1)
writefile=target_dir+'rh_'+str(j+1)+'_mean_position.csv.gz'
table.to_csv(writefile, compression='gzip')
print('Finished')
| true
| true
|
1c458bedfb80717a0139eb3f7187e74d5601bb56
| 477
|
py
|
Python
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/basic/templatetags/form_tags.py
|
cubicuboctahedron/cookiecutter-django-wagtail
|
d7f668ce09ba2c4a3f98045ab8a6fcd286d36553
|
[
"Apache-2.0"
] | null | null | null |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/basic/templatetags/form_tags.py
|
cubicuboctahedron/cookiecutter-django-wagtail
|
d7f668ce09ba2c4a3f98045ab8a6fcd286d36553
|
[
"Apache-2.0"
] | null | null | null |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/basic/templatetags/form_tags.py
|
cubicuboctahedron/cookiecutter-django-wagtail
|
d7f668ce09ba2c4a3f98045ab8a6fcd286d36553
|
[
"Apache-2.0"
] | 1
|
2020-04-07T10:07:07.000Z
|
2020-04-07T10:07:07.000Z
|
from django import template
register = template.Library()
@register.filter(name='addcss')
def addcss(field, css):
return field.as_widget(attrs={"class":css})
@register.filter(name='add_attributes')
def add_attributes(field, css):
attrs = {}
definition = css.split(',')
for d in definition:
if ':' not in d:
attrs['class'] = d
else:
t, v = d.split(':')
attrs[t] = v
return field.as_widget(attrs=attrs)
| 21.681818
| 46
| 0.597484
|
from django import template
register = template.Library()
@register.filter(name='addcss')
def addcss(field, css):
return field.as_widget(attrs={"class":css})
@register.filter(name='add_attributes')
def add_attributes(field, css):
attrs = {}
definition = css.split(',')
for d in definition:
if ':' not in d:
attrs['class'] = d
else:
            t, v = d.split(':', 1)
attrs[t] = v
return field.as_widget(attrs=attrs)
| true
| true
|
1c458c303d4a0d97db1662628a538701eb8cf2dd
| 1,049
|
py
|
Python
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/BatchTaskCreateInDTO.py
|
yuanyi-thu/AIOT-
|
27f67d98324593c4c6c66bbd5e2a4aa7b9a4ac1e
|
[
"BSD-3-Clause"
] | 128
|
2018-10-29T04:11:47.000Z
|
2022-03-07T02:19:14.000Z
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/BatchTaskCreateInDTO.py
|
yuanyi-thu/AIOT-
|
27f67d98324593c4c6c66bbd5e2a4aa7b9a4ac1e
|
[
"BSD-3-Clause"
] | 40
|
2018-11-02T00:40:48.000Z
|
2021-12-07T09:33:56.000Z
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/BatchTaskCreateInDTO.py
|
yuanyi-thu/AIOT-
|
27f67d98324593c4c6c66bbd5e2a4aa7b9a4ac1e
|
[
"BSD-3-Clause"
] | 118
|
2018-10-29T08:43:57.000Z
|
2022-01-07T06:49:25.000Z
|
from com.huawei.iotplatform.client.dto.ObjectNode import ObjectNode
from com.huawei.iotplatform.client.dto.TagDTO2 import TagDTO2
class BatchTaskCreateInDTO(object):
tags = TagDTO2
param = ObjectNode
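    # note (assumption from reading this generated client): tags/param are
    # class-level references to the related DTO types; instances assign real
    # values through the setters below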
def __init__(self):
self.appId = None
self.taskName = None
self.taskType = None
self.timeout = None
def getAppId(self):
return self.appId
def setAppId(self, appId):
self.appId = appId
def getTaskName(self):
return self.taskName
def setTaskName(self, taskName):
self.taskName = taskName
def getTaskType(self):
return self.taskType
def setTaskType(self, taskType):
self.taskType = taskType
def getTimeout(self):
return self.timeout
def setTimeout(self, timeout):
self.timeout = timeout
def getTags(self):
return self.tags
def setTags(self, tags):
self.tags = tags
def getParam(self):
return self.param
def setParam(self, param):
self.param = param
| 20.98
| 67
| 0.638704
|
from com.huawei.iotplatform.client.dto.ObjectNode import ObjectNode
from com.huawei.iotplatform.client.dto.TagDTO2 import TagDTO2
class BatchTaskCreateInDTO(object):
tags = TagDTO2
param = ObjectNode
def __init__(self):
self.appId = None
self.taskName = None
self.taskType = None
self.timeout = None
def getAppId(self):
return self.appId
def setAppId(self, appId):
self.appId = appId
def getTaskName(self):
return self.taskName
def setTaskName(self, taskName):
self.taskName = taskName
def getTaskType(self):
return self.taskType
def setTaskType(self, taskType):
self.taskType = taskType
def getTimeout(self):
return self.timeout
def setTimeout(self, timeout):
self.timeout = timeout
def getTags(self):
return self.tags
def setTags(self, tags):
self.tags = tags
def getParam(self):
return self.param
def setParam(self, param):
self.param = param
| true
| true
|
1c458e127c7a31bedae9e99bb85864dbcdac3092
| 20,718
|
py
|
Python
|
nova/api/openstack/compute/legacy_v2/contrib/security_groups.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | 5
|
2016-04-28T16:20:38.000Z
|
2021-04-25T11:19:03.000Z
|
nova/api/openstack/compute/legacy_v2/contrib/security_groups.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/legacy_v2/contrib/security_groups.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | 5
|
2020-04-08T20:24:45.000Z
|
2020-10-05T19:02:13.000Z
|
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
import contextlib
from xml.dom import minidom
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova.network.security_group import openstack_driver
from nova.virt import netutils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
def _authorize_context(req):
context = req.environ['nova.context']
authorize(context)
return context
@contextlib.contextmanager
def translate_exceptions():
"""Translate nova exceptions to http exceptions."""
try:
yield
except exception.Invalid as exp:
msg = exp.format_message()
raise exc.HTTPBadRequest(explanation=msg)
except exception.SecurityGroupNotFound as exp:
msg = exp.format_message()
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceNotFound as exp:
msg = exp.format_message()
raise exc.HTTPNotFound(explanation=msg)
except exception.SecurityGroupLimitExceeded as exp:
msg = exp.format_message()
raise exc.HTTPForbidden(explanation=msg)
except exception.NoUniqueMatch as exp:
msg = exp.format_message()
raise exc.HTTPConflict(explanation=msg)
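# Illustrative use (hypothetical caller code): wrap security-group API calls so
# nova exceptions surface as the matching HTTP errors:
#   with translate_exceptions():
#       group = self.security_group_api.get(context, id=group_id)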
class SecurityGroupControllerBase(object):
"""Base class for Security Group controllers."""
def __init__(self):
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _format_security_group_rule(self, context, rule, group_rule_data=None):
"""Return a security group rule in desired API response format.
        If group_rule_data is passed in, it is used rather than querying
        for it.
"""
sg_rule = {}
sg_rule['id'] = rule['id']
sg_rule['parent_group_id'] = rule['parent_group_id']
sg_rule['ip_protocol'] = rule['protocol']
sg_rule['from_port'] = rule['from_port']
sg_rule['to_port'] = rule['to_port']
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
if rule['group_id']:
with translate_exceptions():
try:
source_group = self.security_group_api.get(
context, id=rule['group_id'])
except exception.SecurityGroupNotFound:
# NOTE(arosen): There is a possible race condition that can
# occur here if two api calls occur concurrently: one that
# lists the security groups and another one that deletes a
# security group rule that has a group_id before the
# group_id is fetched. To handle this if
# SecurityGroupNotFound is raised we return None instead
# of the rule and the caller should ignore the rule.
LOG.debug("Security Group ID %s does not exist",
rule['group_id'])
return
sg_rule['group'] = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
elif group_rule_data:
sg_rule['group'] = group_rule_data
else:
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
def _format_security_group(self, context, group):
security_group = {}
security_group['id'] = group['id']
security_group['description'] = group['description']
security_group['name'] = group['name']
security_group['tenant_id'] = group['project_id']
security_group['rules'] = []
for rule in group['rules']:
formatted_rule = self._format_security_group_rule(context, rule)
if formatted_rule:
security_group['rules'] += [formatted_rule]
return security_group
def _from_body(self, body, key):
if not body:
raise exc.HTTPBadRequest(
explanation=_("The request body can't be empty"))
value = body.get(key, None)
if value is None:
raise exc.HTTPBadRequest(
explanation=_("Missing parameter %s") % key)
return value
class SecurityGroupController(SecurityGroupControllerBase):
"""The Security group API controller for the OpenStack API."""
def show(self, req, id):
"""Return data about the given security group."""
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
return {'security_group': self._format_security_group(context,
security_group)}
def delete(self, req, id):
"""Delete a security group."""
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
self.security_group_api.destroy(context, security_group)
return webob.Response(status_int=202)
def index(self, req):
"""Returns a list of security groups."""
context = _authorize_context(req)
search_opts = {}
search_opts.update(req.GET)
with translate_exceptions():
project_id = context.project_id
raw_groups = self.security_group_api.list(context,
project=project_id,
search_opts=search_opts)
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
for group in limited_list]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
def create(self, req, body):
"""Creates a new security group."""
context = _authorize_context(req)
security_group = self._from_body(body, 'security_group')
group_name = security_group.get('name', None)
group_description = security_group.get('description', None)
with translate_exceptions():
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
def update(self, req, id, body):
"""Update a security group."""
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
security_group_data = self._from_body(body, 'security_group')
group_name = security_group_data.get('name', None)
group_description = security_group_data.get('description', None)
with translate_exceptions():
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.update_security_group(
context, security_group, group_name, group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
def create(self, req, body):
context = _authorize_context(req)
sg_rule = self._from_body(body, 'security_group_rule')
with translate_exceptions():
parent_group_id = self.security_group_api.validate_id(
sg_rule.get('parent_group_id', None))
security_group = self.security_group_api.get(context, None,
parent_group_id,
map_exception=True)
try:
new_rule = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'),
group_id=sg_rule.get('group_id'))
except exception.SecurityGroupNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except Exception as exp:
raise exc.HTTPBadRequest(explanation=six.text_type(exp))
if new_rule is None:
msg = _("Not enough parameters to build a valid rule.")
raise exc.HTTPBadRequest(explanation=msg)
new_rule['parent_group_id'] = security_group['id']
if 'cidr' in new_rule:
net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
if net not in ('0.0.0.0', '::') and prefixlen == '0':
msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
raise exc.HTTPBadRequest(explanation=msg)
group_rule_data = None
with translate_exceptions():
if sg_rule.get('group_id'):
source_group = self.security_group_api.get(
context, id=sg_rule['group_id'])
group_rule_data = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
security_group_rule = (
self.security_group_api.create_security_group_rule(
context, security_group, new_rule))
formatted_rule = self._format_security_group_rule(context,
security_group_rule,
group_rule_data)
return {"security_group_rule": formatted_rule}
def _rule_args_to_dict(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr=None, group_id=None):
if group_id is not None:
group_id = self.security_group_api.validate_id(group_id)
# check if groupId exists
self.security_group_api.get(context, id=group_id)
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def delete(self, req, id):
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
rule = self.security_group_api.get_rule(context, id)
group_id = rule['parent_group_id']
security_group = self.security_group_api.get(context, None,
group_id,
map_exception=True)
self.security_group_api.remove_rules(context, security_group,
[rule['id']])
return webob.Response(status_int=202)
class ServerSecurityGroupController(SecurityGroupControllerBase):
def index(self, req, server_id):
"""Returns a list of security groups for the given instance."""
context = _authorize_context(req)
self.security_group_api.ensure_default(context)
with translate_exceptions():
instance = common.get_instance(self.compute_api, context,
server_id)
groups = self.security_group_api.get_instance_security_groups(
context, instance, True)
result = [self._format_security_group(context, group)
for group in groups]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
class SecurityGroupActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupActionController, self).__init__(*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _parse(self, body, action):
try:
body = body[action]
group_name = body['name']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Security group not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
if not group_name or group_name.strip() == '':
msg = _("Security group name cannot be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
return group_name
def _invoke(self, method, context, id, group_name):
with translate_exceptions():
instance = common.get_instance(self.compute_api, context, id)
method(context, instance, group_name)
return webob.Response(status_int=202)
@wsgi.action('addSecurityGroup')
def _addSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'addSecurityGroup')
return self._invoke(self.security_group_api.add_to_instance,
context, id, group_name)
@wsgi.action('removeSecurityGroup')
def _removeSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'removeSecurityGroup')
return self._invoke(self.security_group_api.remove_from_instance,
context, id, group_name)
class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
def _extend_servers(self, req, servers):
# TODO(arosen) this function should be refactored to reduce duplicate
# code and use get_instance_security_groups instead of get_db_instance.
if not len(servers):
return
key = "security_groups"
context = _authorize_context(req)
if not openstack_driver.is_neutron_security_groups():
for server in servers:
instance = req.get_db_instance(server['id'])
groups = instance.get(key)
if groups:
server[key] = [{"name": group["name"]} for group in groups]
else:
# If method is a POST we get the security groups intended for an
# instance from the request. The reason for this is if using
# neutron security groups the requested security groups for the
# instance are not in the db and have not been sent to neutron yet.
if req.method != 'POST':
sg_instance_bindings = (
self.security_group_api
.get_instances_security_groups_bindings(context,
servers))
for server in servers:
groups = sg_instance_bindings.get(server['id'])
if groups:
server[key] = groups
# In this section of code len(servers) == 1 as you can only POST
# one server in an API request.
else:
try:
# try converting to json
req_obj = jsonutils.loads(req.body)
# Add security group to server, if no security group was in
# request add default since that is the group it is part of
servers[0][key] = req_obj['server'].get(
key, [{'name': 'default'}])
except ValueError:
root = minidom.parseString(req.body)
sg_root = root.getElementsByTagName(key)
groups = []
if sg_root:
security_groups = sg_root[0].getElementsByTagName(
'security_group')
for security_group in security_groups:
groups.append(
{'name': security_group.getAttribute('name')})
if not groups:
groups = [{'name': 'default'}]
servers[0][key] = groups
def _show(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
if 'server' in resp_obj.obj:
self._extend_servers(req, [resp_obj.obj['server']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
self._extend_servers(req, list(resp_obj.obj['servers']))
class Security_groups(extensions.ExtensionDescriptor):
"""Security group support."""
name = "SecurityGroups"
alias = "os-security-groups"
namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
updated = "2013-05-28T00:00:00Z"
def get_controller_extensions(self):
controller = SecurityGroupActionController()
actions = extensions.ControllerExtension(self, 'servers', controller)
controller = SecurityGroupsOutputController()
output = extensions.ControllerExtension(self, 'servers', controller)
return [actions, output]
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-security-groups',
controller=SecurityGroupController())
resources.append(res)
res = extensions.ResourceExtension('os-security-group-rules',
controller=SecurityGroupRulesController())
resources.append(res)
res = extensions.ResourceExtension(
'os-security-groups',
controller=ServerSecurityGroupController(),
parent=dict(member_name='server', collection_name='servers'))
resources.append(res)
return resources
| 40.863905
| 79
| 0.592383
|
import contextlib
from xml.dom import minidom
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova.network.security_group import openstack_driver
from nova.virt import netutils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
def _authorize_context(req):
context = req.environ['nova.context']
authorize(context)
return context
@contextlib.contextmanager
def translate_exceptions():
try:
yield
except exception.Invalid as exp:
msg = exp.format_message()
raise exc.HTTPBadRequest(explanation=msg)
except exception.SecurityGroupNotFound as exp:
msg = exp.format_message()
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceNotFound as exp:
msg = exp.format_message()
raise exc.HTTPNotFound(explanation=msg)
except exception.SecurityGroupLimitExceeded as exp:
msg = exp.format_message()
raise exc.HTTPForbidden(explanation=msg)
except exception.NoUniqueMatch as exp:
msg = exp.format_message()
raise exc.HTTPConflict(explanation=msg)
class SecurityGroupControllerBase(object):
def __init__(self):
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _format_security_group_rule(self, context, rule, group_rule_data=None):
sg_rule = {}
sg_rule['id'] = rule['id']
sg_rule['parent_group_id'] = rule['parent_group_id']
sg_rule['ip_protocol'] = rule['protocol']
sg_rule['from_port'] = rule['from_port']
sg_rule['to_port'] = rule['to_port']
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
if rule['group_id']:
with translate_exceptions():
try:
source_group = self.security_group_api.get(
context, id=rule['group_id'])
except exception.SecurityGroupNotFound:
LOG.debug("Security Group ID %s does not exist",
rule['group_id'])
return
sg_rule['group'] = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
elif group_rule_data:
sg_rule['group'] = group_rule_data
else:
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
def _format_security_group(self, context, group):
security_group = {}
security_group['id'] = group['id']
security_group['description'] = group['description']
security_group['name'] = group['name']
security_group['tenant_id'] = group['project_id']
security_group['rules'] = []
for rule in group['rules']:
formatted_rule = self._format_security_group_rule(context, rule)
if formatted_rule:
security_group['rules'] += [formatted_rule]
return security_group
def _from_body(self, body, key):
if not body:
raise exc.HTTPBadRequest(
explanation=_("The request body can't be empty"))
value = body.get(key, None)
if value is None:
raise exc.HTTPBadRequest(
explanation=_("Missing parameter %s") % key)
return value
class SecurityGroupController(SecurityGroupControllerBase):
def show(self, req, id):
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
return {'security_group': self._format_security_group(context,
security_group)}
def delete(self, req, id):
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
self.security_group_api.destroy(context, security_group)
return webob.Response(status_int=202)
def index(self, req):
context = _authorize_context(req)
search_opts = {}
search_opts.update(req.GET)
with translate_exceptions():
project_id = context.project_id
raw_groups = self.security_group_api.list(context,
project=project_id,
search_opts=search_opts)
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
for group in limited_list]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
def create(self, req, body):
context = _authorize_context(req)
security_group = self._from_body(body, 'security_group')
group_name = security_group.get('name', None)
group_description = security_group.get('description', None)
with translate_exceptions():
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
def update(self, req, id, body):
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
security_group_data = self._from_body(body, 'security_group')
group_name = security_group_data.get('name', None)
group_description = security_group_data.get('description', None)
with translate_exceptions():
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.update_security_group(
context, security_group, group_name, group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
def create(self, req, body):
context = _authorize_context(req)
sg_rule = self._from_body(body, 'security_group_rule')
with translate_exceptions():
parent_group_id = self.security_group_api.validate_id(
sg_rule.get('parent_group_id', None))
security_group = self.security_group_api.get(context, None,
parent_group_id,
map_exception=True)
try:
new_rule = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'),
group_id=sg_rule.get('group_id'))
except exception.SecurityGroupNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except Exception as exp:
raise exc.HTTPBadRequest(explanation=six.text_type(exp))
if new_rule is None:
msg = _("Not enough parameters to build a valid rule.")
raise exc.HTTPBadRequest(explanation=msg)
new_rule['parent_group_id'] = security_group['id']
if 'cidr' in new_rule:
net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
if net not in ('0.0.0.0', '::') and prefixlen == '0':
msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
raise exc.HTTPBadRequest(explanation=msg)
group_rule_data = None
with translate_exceptions():
if sg_rule.get('group_id'):
source_group = self.security_group_api.get(
context, id=sg_rule['group_id'])
group_rule_data = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
security_group_rule = (
self.security_group_api.create_security_group_rule(
context, security_group, new_rule))
formatted_rule = self._format_security_group_rule(context,
security_group_rule,
group_rule_data)
return {"security_group_rule": formatted_rule}
def _rule_args_to_dict(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr=None, group_id=None):
if group_id is not None:
group_id = self.security_group_api.validate_id(group_id)
# check if groupId exists
self.security_group_api.get(context, id=group_id)
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def delete(self, req, id):
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
rule = self.security_group_api.get_rule(context, id)
group_id = rule['parent_group_id']
security_group = self.security_group_api.get(context, None,
group_id,
map_exception=True)
self.security_group_api.remove_rules(context, security_group,
[rule['id']])
return webob.Response(status_int=202)
class ServerSecurityGroupController(SecurityGroupControllerBase):
def index(self, req, server_id):
context = _authorize_context(req)
self.security_group_api.ensure_default(context)
with translate_exceptions():
instance = common.get_instance(self.compute_api, context,
server_id)
groups = self.security_group_api.get_instance_security_groups(
context, instance, True)
result = [self._format_security_group(context, group)
for group in groups]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
class SecurityGroupActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupActionController, self).__init__(*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _parse(self, body, action):
try:
body = body[action]
group_name = body['name']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Security group not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
if not group_name or group_name.strip() == '':
msg = _("Security group name cannot be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
return group_name
def _invoke(self, method, context, id, group_name):
with translate_exceptions():
instance = common.get_instance(self.compute_api, context, id)
method(context, instance, group_name)
return webob.Response(status_int=202)
@wsgi.action('addSecurityGroup')
def _addSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'addSecurityGroup')
return self._invoke(self.security_group_api.add_to_instance,
context, id, group_name)
@wsgi.action('removeSecurityGroup')
def _removeSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'removeSecurityGroup')
return self._invoke(self.security_group_api.remove_from_instance,
context, id, group_name)
class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
def _extend_servers(self, req, servers):
# TODO(arosen) this function should be refactored to reduce duplicate
# code and use get_instance_security_groups instead of get_db_instance.
if not len(servers):
return
key = "security_groups"
context = _authorize_context(req)
if not openstack_driver.is_neutron_security_groups():
for server in servers:
instance = req.get_db_instance(server['id'])
groups = instance.get(key)
if groups:
server[key] = [{"name": group["name"]} for group in groups]
else:
# If method is a POST we get the security groups intended for an
# instance from the request. The reason for this is if using
# neutron security groups the requested security groups for the
# instance are not in the db and have not been sent to neutron yet.
if req.method != 'POST':
sg_instance_bindings = (
self.security_group_api
.get_instances_security_groups_bindings(context,
servers))
for server in servers:
groups = sg_instance_bindings.get(server['id'])
if groups:
server[key] = groups
# In this section of code len(servers) == 1 as you can only POST
# one server in an API request.
else:
try:
# try converting to json
req_obj = jsonutils.loads(req.body)
# Add security group to server, if no security group was in
# request add default since that is the group it is part of
servers[0][key] = req_obj['server'].get(
key, [{'name': 'default'}])
except ValueError:
root = minidom.parseString(req.body)
sg_root = root.getElementsByTagName(key)
groups = []
if sg_root:
security_groups = sg_root[0].getElementsByTagName(
'security_group')
for security_group in security_groups:
groups.append(
{'name': security_group.getAttribute('name')})
if not groups:
groups = [{'name': 'default'}]
servers[0][key] = groups
def _show(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
if 'server' in resp_obj.obj:
self._extend_servers(req, [resp_obj.obj['server']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
self._extend_servers(req, list(resp_obj.obj['servers']))
class Security_groups(extensions.ExtensionDescriptor):
name = "SecurityGroups"
alias = "os-security-groups"
namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
updated = "2013-05-28T00:00:00Z"
def get_controller_extensions(self):
controller = SecurityGroupActionController()
actions = extensions.ControllerExtension(self, 'servers', controller)
controller = SecurityGroupsOutputController()
output = extensions.ControllerExtension(self, 'servers', controller)
return [actions, output]
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-security-groups',
controller=SecurityGroupController())
resources.append(res)
res = extensions.ResourceExtension('os-security-group-rules',
controller=SecurityGroupRulesController())
resources.append(res)
res = extensions.ResourceExtension(
'os-security-groups',
controller=ServerSecurityGroupController(),
parent=dict(member_name='server', collection_name='servers'))
resources.append(res)
return resources
| true
| true
|
1c458edcdd1b7f3f78cef784442634fe79c4c946
| 78
|
py
|
Python
|
ass_17.py
|
Divyanshi0409/Python-Programs
|
7fb8ab2159cc69de7168bf19f91325b9c7a908c7
|
[
"MIT"
] | null | null | null |
ass_17.py
|
Divyanshi0409/Python-Programs
|
7fb8ab2159cc69de7168bf19f91325b9c7a908c7
|
[
"MIT"
] | null | null | null |
ass_17.py
|
Divyanshi0409/Python-Programs
|
7fb8ab2159cc69de7168bf19f91325b9c7a908c7
|
[
"MIT"
] | null | null | null |
# print every even number from 50 to 80
for i in range(50,81):
    if i%2==0:
        print(i)
| 15.6
| 22
| 0.448718
|
for i in range(50,81):
    if i%2==0:
        print(i)
| true
| true
|
1c458f5175cf9bf35887e6e17a55a96733dcd698
| 2,954
|
py
|
Python
|
pint/testing.py
|
fernandezc/pint
|
37a61ede6fbd628c7dc160eb36278cf41c96484c
|
[
"BSD-3-Clause"
] | null | null | null |
pint/testing.py
|
fernandezc/pint
|
37a61ede6fbd628c7dc160eb36278cf41c96484c
|
[
"BSD-3-Clause"
] | null | null | null |
pint/testing.py
|
fernandezc/pint
|
37a61ede6fbd628c7dc160eb36278cf41c96484c
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import annotations
import math
import warnings
from numbers import Number
from . import Quantity
from .compat import ndarray
try:
import numpy as np
except ImportError:
np = None
def _get_comparable_magnitudes(first, second, msg):
if isinstance(first, Quantity) and isinstance(second, Quantity):
ctx = first._REGISTRY._active_ctx.contexts
if first.is_compatible_with(second, *ctx):
second = second.to(first)
assert first.units == second.units, msg + " Units are not equal."
m1, m2 = first.magnitude, second.magnitude
elif isinstance(first, Quantity):
assert first.dimensionless, msg + " The first is not dimensionless."
first = first.to("")
m1, m2 = first.magnitude, second
elif isinstance(second, Quantity):
assert second.dimensionless, msg + " The second is not dimensionless."
second = second.to("")
m1, m2 = first, second.magnitude
else:
m1, m2 = first, second
return m1, m2
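# Illustrative usage (assuming a registry ``ureg = pint.UnitRegistry()``):
#   assert_equal(1 * ureg.km, 1000 * ureg.m)                  # exact match
#   assert_allclose(1 * ureg.km, 1000.1 * ureg.m, rtol=1e-3)  # within tolerance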
def assert_equal(first, second, msg=None):
if msg is None:
msg = "Comparing %r and %r. " % (first, second)
m1, m2 = _get_comparable_magnitudes(first, second, msg)
msg += " (Converted to %r and %r): Magnitudes are not equal" % (m1, m2)
if isinstance(m1, ndarray) or isinstance(m2, ndarray):
np.testing.assert_array_equal(m1, m2, err_msg=msg)
elif not isinstance(m1, Number):
warnings.warn(RuntimeWarning)
return
elif not isinstance(m2, Number):
warnings.warn(RuntimeWarning)
return
elif math.isnan(m1):
assert math.isnan(m2), msg
elif math.isnan(m2):
assert math.isnan(m1), msg
else:
assert m1 == m2, msg
def assert_allclose(first, second, rtol=1e-07, atol=0, msg=None):
if msg is None:
try:
msg = "Comparing %r and %r. " % (first, second)
except TypeError:
try:
msg = "Comparing %s and %s. " % (first, second)
except Exception:
msg = "Comparing"
m1, m2 = _get_comparable_magnitudes(first, second, msg)
msg += " (Converted to %r and %r)" % (m1, m2)
if isinstance(m1, ndarray) or isinstance(m2, ndarray):
np.testing.assert_allclose(m1, m2, rtol=rtol, atol=atol, err_msg=msg)
elif not isinstance(m1, Number):
warnings.warn(RuntimeWarning)
return
elif not isinstance(m2, Number):
warnings.warn(RuntimeWarning)
return
elif math.isnan(m1):
assert math.isnan(m2), msg
elif math.isnan(m2):
assert math.isnan(m1), msg
elif math.isinf(m1):
assert math.isinf(m2), msg
elif math.isinf(m2):
assert math.isinf(m1), msg
else:
        # NumPy-style bound (not used here because it is not symmetric in m1 and m2)
# assert abs(m1 - m2) <= atol + rtol * abs(m2), msg
assert abs(m1 - m2) <= max(rtol * max(abs(m1), abs(m2)), atol), msg
| 31.763441
| 78
| 0.618822
|
from __future__ import annotations
import math
import warnings
from numbers import Number
from . import Quantity
from .compat import ndarray
try:
import numpy as np
except ImportError:
np = None
def _get_comparable_magnitudes(first, second, msg):
if isinstance(first, Quantity) and isinstance(second, Quantity):
ctx = first._REGISTRY._active_ctx.contexts
if first.is_compatible_with(second, *ctx):
second = second.to(first)
assert first.units == second.units, msg + " Units are not equal."
m1, m2 = first.magnitude, second.magnitude
elif isinstance(first, Quantity):
assert first.dimensionless, msg + " The first is not dimensionless."
first = first.to("")
m1, m2 = first.magnitude, second
elif isinstance(second, Quantity):
assert second.dimensionless, msg + " The second is not dimensionless."
second = second.to("")
m1, m2 = first, second.magnitude
else:
m1, m2 = first, second
return m1, m2
def assert_equal(first, second, msg=None):
if msg is None:
msg = "Comparing %r and %r. " % (first, second)
m1, m2 = _get_comparable_magnitudes(first, second, msg)
msg += " (Converted to %r and %r): Magnitudes are not equal" % (m1, m2)
if isinstance(m1, ndarray) or isinstance(m2, ndarray):
np.testing.assert_array_equal(m1, m2, err_msg=msg)
elif not isinstance(m1, Number):
warnings.warn(RuntimeWarning)
return
elif not isinstance(m2, Number):
warnings.warn(RuntimeWarning)
return
elif math.isnan(m1):
assert math.isnan(m2), msg
elif math.isnan(m2):
assert math.isnan(m1), msg
else:
assert m1 == m2, msg
def assert_allclose(first, second, rtol=1e-07, atol=0, msg=None):
if msg is None:
try:
msg = "Comparing %r and %r. " % (first, second)
except TypeError:
try:
msg = "Comparing %s and %s. " % (first, second)
except Exception:
msg = "Comparing"
m1, m2 = _get_comparable_magnitudes(first, second, msg)
msg += " (Converted to %r and %r)" % (m1, m2)
if isinstance(m1, ndarray) or isinstance(m2, ndarray):
np.testing.assert_allclose(m1, m2, rtol=rtol, atol=atol, err_msg=msg)
elif not isinstance(m1, Number):
warnings.warn(RuntimeWarning)
return
elif not isinstance(m2, Number):
warnings.warn(RuntimeWarning)
return
elif math.isnan(m1):
assert math.isnan(m2), msg
elif math.isnan(m2):
assert math.isnan(m1), msg
elif math.isinf(m1):
assert math.isinf(m2), msg
elif math.isinf(m2):
assert math.isinf(m1), msg
else:
# assert abs(m1 - m2) <= atol + rtol * abs(m2), msg
assert abs(m1 - m2) <= max(rtol * max(abs(m1), abs(m2)), atol), msg
| true
| true
|
1c458f9ed188a3d53e4a024d3cb10478bdd12173
| 4,733
|
py
|
Python
|
sudoku/sudoku/gensudoku.py
|
PoojithRachakada/sudoku-django
|
723de992821e54b63259c00fb949fdfa1e05ac04
|
[
"MIT"
] | null | null | null |
sudoku/sudoku/gensudoku.py
|
PoojithRachakada/sudoku-django
|
723de992821e54b63259c00fb949fdfa1e05ac04
|
[
"MIT"
] | 5
|
2020-12-31T09:42:57.000Z
|
2021-01-05T13:59:14.000Z
|
sudoku/sudoku/gensudoku.py
|
PoojithRachakada/sudoku-django
|
723de992821e54b63259c00fb949fdfa1e05ac04
|
[
"MIT"
] | null | null | null |
# pylint: disable=unused-variable
import os
import sys
from io import BytesIO, IOBase
import math
import itertools as ITER
from collections import defaultdict as D
from collections import Counter as CO
from collections import deque as Q
import threading
from functools import lru_cache, reduce
from functools import cmp_to_key as CMP
from bisect import bisect_left as BL
from bisect import bisect_right as BR
import random as RA
import cmath, time
# ? Variables
MOD = (10 ** 9) + 7
MA = float("inf")
MI = float("-inf")
# * gui will be here
# * backend code for sudoku
start_time = time.time()
class Sudoku:
def check_row(self, i, board):
values = set()
for k in range(0, 9):
p = board[i][k]
if p == 0:
continue
if p in values:
return False
values.add(p)
return True
def check_col(self, j, board):
values = set()
for k in range(0, 9):
p = board[k][j]
if p == 0:
continue
if p in values:
return False
values.add(p)
return True
    def check_sgrid(self, i, j, board):
        # top-left corner of the 3x3 sub-grid that contains cell (i, j)
        x, y = (i // 3) * 3, (j // 3) * 3
        has = set()
        for di in range(3):
            for dj in range(3):
                ele = board[x + di][y + dj]
                if ele == 0:
                    continue
                if ele in has:
                    return False
                has.add(ele)
        return True
def IsValidSudoku(self, board):
def check_sub_grid(i, j):
values = set()
for m in range(i, i + 3):
for n in range(j, j + 3):
p = board[n][m]
if m == n:
if not self.check_row(m, board):
return False
if not self.check_col(n, board):
return False
if p == 0:
continue
if p in values:
return False
values.add(p)
return True
for i in range(0, 9, 3):
for j in range(0, 9, 3):
if not check_sub_grid(i, j):
return False
return True
# * this is the sudoku generator
def Sudoku_generator(self, board):
def next_pos(grid, store):
for i in range(9):
for j in range(9):
if grid[i][j] == 0:
store[0] = i
store[1] = j
return True
return False
def create(grid, row, col):
for i in range(9):
for j in range(9):
w = grid[i][j]
if w != 0:
row[i].add(w)
col[j].add(w)
def is_valid(i, j, key, row, col, grid):
if key in row[i]:
return False
if key in col[j]:
return False
p = (i // 3) * 3
q = (j // 3) * 3
for x in range(3):
for y in range(3):
if grid[x + p][y + q] == key:
return False
return True
arr = [1, 2, 3, 4, 5, 6, 7, 8, 9]
RA.shuffle(arr)
def sudoku_solver(row, col, grid):
store = [0, 0]
if not next_pos(grid, store):
return True
r = store[0]
c = store[1]
for i in arr:
if is_valid(r, c, i, row, col, grid):
grid[r][c] = i
row[r].add(i)
col[c].add(i)
if sudoku_solver(row, col, grid):
return True
grid[r][c] = 0
row[r].remove(i)
col[c].remove(i)
return False
row = D(set)
col = D(set)
create(board, row, col)
sudoku_solver(row, col, board)
return board
def question(board):
hint = RA.randint(18, 30)
totalpos = [(i, j) for i in range(9) for j in range(9)]
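    # RA.choices samples with replacement, so repeated positions can leave the
    # puzzle with fewer than `hint` distinct clues (RA.sample would give
    # exactly `hint`)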
wanted = RA.choices(totalpos, k=hint)
qs = [[0] * 9 for i in range(9)]
for i, j in wanted:
qs[i][j] = board[i][j]
return qs
def valid(arr):
sudokuobj = Sudoku()
return sudokuobj.IsValidSudoku(arr)
def all():
board = [[0] * 9 for i in range(9)]
sudokuobj = Sudoku()
ans = sudokuobj.Sudoku_generator((board))
return question(ans), ans
| 27.358382
| 60
| 0.427002
|
import os
import sys
from io import BytesIO, IOBase
import math
import itertools as ITER
from collections import defaultdict as D
from collections import Counter as CO
from collections import deque as Q
import threading
from functools import lru_cache, reduce
from functools import cmp_to_key as CMP
from bisect import bisect_left as BL
from bisect import bisect_right as BR
import random as RA
import cmath, time
MOD = (10 ** 9) + 7
MA = float("inf")
MI = float("-inf")
start_time = time.time()
class Sudoku:
def check_row(self, i, board):
values = set()
for k in range(0, 9):
p = board[i][k]
if p == 0:
continue
if p in values:
return False
values.add(p)
return True
def check_col(self, j, board):
values = set()
for k in range(0, 9):
p = board[k][j]
if p == 0:
continue
if p in values:
return False
values.add(p)
return True
    def check_sgrid(self, i, j, board):
        x, y = (i // 3) * 3, (j // 3) * 3
        has = set()
        for di in range(3):
            for dj in range(3):
                ele = board[x + di][y + dj]
                if ele == 0:
                    continue
                if ele in has:
                    return False
                has.add(ele)
        return True
def IsValidSudoku(self, board):
def check_sub_grid(i, j):
values = set()
for m in range(i, i + 3):
for n in range(j, j + 3):
p = board[n][m]
if m == n:
if not self.check_row(m, board):
return False
if not self.check_col(n, board):
return False
if p == 0:
continue
if p in values:
return False
values.add(p)
return True
for i in range(0, 9, 3):
for j in range(0, 9, 3):
if not check_sub_grid(i, j):
return False
return True
def Sudoku_generator(self, board):
def next_pos(grid, store):
for i in range(9):
for j in range(9):
if grid[i][j] == 0:
store[0] = i
store[1] = j
return True
return False
def create(grid, row, col):
for i in range(9):
for j in range(9):
w = grid[i][j]
if w != 0:
row[i].add(w)
col[j].add(w)
def is_valid(i, j, key, row, col, grid):
if key in row[i]:
return False
if key in col[j]:
return False
p = (i // 3) * 3
q = (j // 3) * 3
for x in range(3):
for y in range(3):
if grid[x + p][y + q] == key:
return False
return True
arr = [1, 2, 3, 4, 5, 6, 7, 8, 9]
RA.shuffle(arr)
def sudoku_solver(row, col, grid):
store = [0, 0]
if not next_pos(grid, store):
return True
r = store[0]
c = store[1]
for i in arr:
if is_valid(r, c, i, row, col, grid):
grid[r][c] = i
row[r].add(i)
col[c].add(i)
if sudoku_solver(row, col, grid):
return True
grid[r][c] = 0
row[r].remove(i)
col[c].remove(i)
return False
row = D(set)
col = D(set)
create(board, row, col)
sudoku_solver(row, col, board)
return board
def question(board):
hint = RA.randint(18, 30)
totalpos = [(i, j) for i in range(9) for j in range(9)]
wanted = RA.choices(totalpos, k=hint)
qs = [[0] * 9 for i in range(9)]
for i, j in wanted:
qs[i][j] = board[i][j]
return qs
def valid(arr):
sudokuobj = Sudoku()
return sudokuobj.IsValidSudoku(arr)
def all():
board = [[0] * 9 for i in range(9)]
sudokuobj = Sudoku()
ans = sudokuobj.Sudoku_generator((board))
return question(ans), ans
| true
| true
|
1c4590d51df3d7bf9eea558bb224c176d93b580d
| 4,832
|
py
|
Python
|
fastmot/utils/visualization.py
|
rafcy/FastMOT
|
9aee101b1ac83a5fea8cece1f8cfda8030adb743
|
[
"MIT"
] | null | null | null |
fastmot/utils/visualization.py
|
rafcy/FastMOT
|
9aee101b1ac83a5fea8cece1f8cfda8030adb743
|
[
"MIT"
] | null | null | null |
fastmot/utils/visualization.py
|
rafcy/FastMOT
|
9aee101b1ac83a5fea8cece1f8cfda8030adb743
|
[
"MIT"
] | null | null | null |
import colorsys
import numpy as np
import cv2
GOLDEN_RATIO = 0.618033988749895
def draw_tracks(frame, tracks, show_flow=False, show_cov=False):
for track in tracks:
draw_bbox(frame, track.tlbr, get_color(track.trk_id), 2, str(track.trk_id))
if show_flow:
draw_feature_match(frame, track.prev_keypoints, track.keypoints, (0, 255, 255))
if show_cov:
draw_covariance(frame, track.tlbr, track.state[1])
def draw_detections(frame, detections, color=(255, 255, 255), show_conf=False):
for det in detections:
text = f'{det.label}: {det.conf:.2f}' if show_conf else None
draw_bbox(frame, det.tlbr, color, 1, text)
def draw_klt_bboxes(frame, klt_bboxes, color=(0, 0, 0)):
for tlbr in klt_bboxes:
draw_bbox(frame, tlbr, color, 1)
def draw_tiles(frame, tiles, scale_factor, color=(0, 0, 0)):
for tile in tiles:
tlbr = np.rint(tile * np.tile(scale_factor, 2))
draw_bbox(frame, tlbr, color, 1)
def draw_background_flow(frame, prev_bg_keypoints, bg_keypoints, color=(0, 0, 255)):
draw_feature_match(frame, prev_bg_keypoints, bg_keypoints, color)
def get_color(idx, s=0.8, vmin=0.7):
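    # stepping the hue (and slightly the value) by the golden ratio gives a
    # low-discrepancy sequence, so consecutive track ids get distinct colors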
h = np.fmod(idx * GOLDEN_RATIO, 1.)
v = 1. - np.fmod(idx * GOLDEN_RATIO, 1. - vmin)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
return int(255 * b), int(255 * g), int(255 * r)
def draw_bbox(frame, tlbr, color, thickness, text=None):
tlbr = tlbr.astype(int)
tl, br = tuple(tlbr[:2]), tuple(tlbr[2:])
cv2.rectangle(frame, tl, br, color, thickness)
if text is not None:
(text_width, text_height), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_DUPLEX, 0.5, 1)
cv2.rectangle(frame, tl, (tl[0] + text_width - 1, tl[1] + text_height - 1),
color, cv2.FILLED)
cv2.putText(frame, text, (tl[0], tl[1] + text_height - 1), cv2.FONT_HERSHEY_DUPLEX,
0.5, 0, 1, cv2.LINE_AA)
def draw_feature_match(frame, prev_pts, cur_pts, color):
if len(cur_pts) > 0:
cur_pts = np.rint(cur_pts).astype(np.int32)
for pt in cur_pts:
cv2.circle(frame, tuple(pt), 1, color, cv2.FILLED)
if len(prev_pts) > 0:
prev_pts = np.rint(prev_pts).astype(np.int32)
for pt1, pt2 in zip(prev_pts, cur_pts):
cv2.line(frame, tuple(pt1), tuple(pt2), color, 1, cv2.LINE_AA)
def draw_covariance(frame, tlbr, covariance):
tlbr = tlbr.astype(int)
tl, br = tuple(tlbr[:2]), tuple(tlbr[2:])
def ellipse(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
# 95% confidence ellipse
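        # 5.9915 is the chi-square 95th-percentile quantile for 2 degrees of
        # freedom, so the ellipse axes cover ~95% of the position distribution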
vals, vecs = np.sqrt(vals[order] * 5.9915), vecs[:, order]
axes = int(vals[0] + 0.5), int(vals[1] + 0.5)
angle = np.degrees(np.arctan2(vecs[1, 0], vecs[0, 0]))
return axes, angle
axes, angle = ellipse(covariance[:2, :2])
cv2.ellipse(frame, tl, axes, angle, 0, 360, (255, 255, 255), 1, cv2.LINE_AA)
axes, angle = ellipse(covariance[2:4, 2:4])
cv2.ellipse(frame, br, axes, angle, 0, 360, (255, 255, 255), 1, cv2.LINE_AA)
class Visualizer:
def __init__(self,
draw_detections=False,
draw_confidence=False,
draw_covariance=False,
draw_klt=False,
draw_obj_flow=False,
draw_bg_flow=False):
"""Class for visualization.
Parameters
----------
draw_detections : bool, optional
Enable drawing detections.
draw_confidence : bool, optional
Enable drawing detection confidence, ignored if `draw_detections` is disabled.
draw_covariance : bool, optional
Enable drawing Kalman filter position covariance.
draw_klt : bool, optional
Enable drawing KLT bounding boxes.
draw_obj_flow : bool, optional
Enable drawing object flow matches.
draw_bg_flow : bool, optional
Enable drawing background flow matches.
"""
self.draw_detections = draw_detections
self.draw_confidence = draw_confidence
self.draw_covariance = draw_covariance
self.draw_klt = draw_klt
self.draw_obj_flow = draw_obj_flow
self.draw_bg_flow = draw_bg_flow
def render(self, frame, tracks, detections, klt_bboxes, prev_bg_keypoints, bg_keypoints):
"""Render visualizations onto the frame."""
draw_tracks(frame, tracks, show_flow=self.draw_obj_flow, show_cov=self.draw_covariance)
if self.draw_detections:
draw_detections(frame, detections, show_conf=self.draw_confidence)
if self.draw_klt:
draw_klt_bboxes(frame, klt_bboxes)
if self.draw_bg_flow:
draw_background_flow(frame, prev_bg_keypoints, bg_keypoints)
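# Illustrative usage (hypothetical tracker loop):
#   vis = Visualizer(draw_detections=True, draw_confidence=True)
#   vis.render(frame, tracks, detections, klt_bboxes, prev_pts, cur_pts)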
| 37.457364
| 95
| 0.627276
|
import colorsys
import numpy as np
import cv2
GOLDEN_RATIO = 0.618033988749895
def draw_tracks(frame, tracks, show_flow=False, show_cov=False):
for track in tracks:
draw_bbox(frame, track.tlbr, get_color(track.trk_id), 2, str(track.trk_id))
if show_flow:
draw_feature_match(frame, track.prev_keypoints, track.keypoints, (0, 255, 255))
if show_cov:
draw_covariance(frame, track.tlbr, track.state[1])
def draw_detections(frame, detections, color=(255, 255, 255), show_conf=False):
for det in detections:
text = f'{det.label}: {det.conf:.2f}' if show_conf else None
draw_bbox(frame, det.tlbr, color, 1, text)
def draw_klt_bboxes(frame, klt_bboxes, color=(0, 0, 0)):
for tlbr in klt_bboxes:
draw_bbox(frame, tlbr, color, 1)
def draw_tiles(frame, tiles, scale_factor, color=(0, 0, 0)):
for tile in tiles:
tlbr = np.rint(tile * np.tile(scale_factor, 2))
draw_bbox(frame, tlbr, color, 1)
def draw_background_flow(frame, prev_bg_keypoints, bg_keypoints, color=(0, 0, 255)):
draw_feature_match(frame, prev_bg_keypoints, bg_keypoints, color)
def get_color(idx, s=0.8, vmin=0.7):
h = np.fmod(idx * GOLDEN_RATIO, 1.)
v = 1. - np.fmod(idx * GOLDEN_RATIO, 1. - vmin)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
return int(255 * b), int(255 * g), int(255 * r)
def draw_bbox(frame, tlbr, color, thickness, text=None):
tlbr = tlbr.astype(int)
tl, br = tuple(tlbr[:2]), tuple(tlbr[2:])
cv2.rectangle(frame, tl, br, color, thickness)
if text is not None:
(text_width, text_height), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_DUPLEX, 0.5, 1)
cv2.rectangle(frame, tl, (tl[0] + text_width - 1, tl[1] + text_height - 1),
color, cv2.FILLED)
cv2.putText(frame, text, (tl[0], tl[1] + text_height - 1), cv2.FONT_HERSHEY_DUPLEX,
0.5, 0, 1, cv2.LINE_AA)
def draw_feature_match(frame, prev_pts, cur_pts, color):
if len(cur_pts) > 0:
cur_pts = np.rint(cur_pts).astype(np.int32)
for pt in cur_pts:
cv2.circle(frame, tuple(pt), 1, color, cv2.FILLED)
if len(prev_pts) > 0:
prev_pts = np.rint(prev_pts).astype(np.int32)
for pt1, pt2 in zip(prev_pts, cur_pts):
cv2.line(frame, tuple(pt1), tuple(pt2), color, 1, cv2.LINE_AA)
def draw_covariance(frame, tlbr, covariance):
tlbr = tlbr.astype(int)
tl, br = tuple(tlbr[:2]), tuple(tlbr[2:])
def ellipse(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
vals, vecs = np.sqrt(vals[order] * 5.9915), vecs[:, order]
axes = int(vals[0] + 0.5), int(vals[1] + 0.5)
angle = np.degrees(np.arctan2(vecs[1, 0], vecs[0, 0]))
return axes, angle
axes, angle = ellipse(covariance[:2, :2])
cv2.ellipse(frame, tl, axes, angle, 0, 360, (255, 255, 255), 1, cv2.LINE_AA)
axes, angle = ellipse(covariance[2:4, 2:4])
cv2.ellipse(frame, br, axes, angle, 0, 360, (255, 255, 255), 1, cv2.LINE_AA)
class Visualizer:
def __init__(self,
draw_detections=False,
draw_confidence=False,
draw_covariance=False,
draw_klt=False,
draw_obj_flow=False,
draw_bg_flow=False):
self.draw_detections = draw_detections
self.draw_confidence = draw_confidence
self.draw_covariance = draw_covariance
self.draw_klt = draw_klt
self.draw_obj_flow = draw_obj_flow
self.draw_bg_flow = draw_bg_flow
def render(self, frame, tracks, detections, klt_bboxes, prev_bg_keypoints, bg_keypoints):
draw_tracks(frame, tracks, show_flow=self.draw_obj_flow, show_cov=self.draw_covariance)
if self.draw_detections:
draw_detections(frame, detections, show_conf=self.draw_confidence)
if self.draw_klt:
draw_klt_bboxes(frame, klt_bboxes)
if self.draw_bg_flow:
draw_background_flow(frame, prev_bg_keypoints, bg_keypoints)
| true
| true
|
1c4591a6e22722c8a1760289f625d852a5960577
| 2,354
|
py
|
Python
|
tests/io/simple_process.py
|
rajgiriUW/pyUSID
|
064dcd81d9c42f4eb4782f0a41fd437b3f56f50c
|
[
"MIT"
] | 25
|
2018-07-11T21:43:56.000Z
|
2021-11-17T11:40:00.000Z
|
tests/io/simple_process.py
|
rajgiriUW/pyUSID
|
064dcd81d9c42f4eb4782f0a41fd437b3f56f50c
|
[
"MIT"
] | 62
|
2018-07-05T20:28:52.000Z
|
2021-12-14T09:49:35.000Z
|
tests/io/simple_process.py
|
rajgiriUW/pyUSID
|
064dcd81d9c42f4eb4782f0a41fd437b3f56f50c
|
[
"MIT"
] | 15
|
2019-03-27T22:28:47.000Z
|
2021-01-03T20:23:42.000Z
|
"""
Simple process class for purpose of testing.
Created on: Jul 19, 2019
Author: Emily Costa
"""
import h5py
from pyUSID.processing.process import Process
import numpy as np
from pyUSID import hdf_utils
import matplotlib.pyplot as plt
class SimpleProcess(Process):
def __init__(self, h5_main, verbose=True, **kwargs):
super(SimpleProcess, self).__init__(h5_main, verbose, **kwargs)
self.data = None
self.test_data = None
self.results = None
self.chunk_amount = 0
self.process_name = 'Simple_Process'
if self.verbose: print('Done with initializing book-keepings')
def test(self):
if self.mpi_rank > 0:
return
ran_ind = np.random.randint(0, high=self.h5_main.shape[0])
self.test_data = np.fft.fftshift(np.fft.fft(self.h5_main[ran_ind]))
def _create_results_datasets(self):
self.h5_results_grp = hdf_utils.create_results_group(self.h5_main, self.process_name)
assert isinstance(self.h5_results_grp, h5py.Group)
if self.verbose: print('Results group created.')
self.results = hdf_utils.create_empty_dataset(self.h5_main, self.h5_main.dtype, 'Filtered_Data',
h5_group=self.h5_results_grp)
#self.results = hdf_utils.write_main_dataset(self.h5_results_grp, (self.h5_main.shape[0], 1), "Results", "Results", "Units", None,
#usid.io.write_utils.Dimension('arb', '', [1]), h5_pos_inds=self.h5_main.h5_pos_inds, h5_pos_vals=self.h5_main.h5_pos_vals, dtype=np.float32)
if self.verbose: print('Empty main dataset for results written')
def _write_results_chunk(self):
pos_in_batch = self._get_pixels_in_current_batch()
        if self.verbose:
            print(type(self.data), type(self.results))
        self.results[pos_in_batch, :] = self.data
#self.results = self.h5_results_grp['Simple_Data']
self.chunk_amount = self.chunk_amount + 1
if self.verbose: print('Chunk {} written.'.format(self.chunk_amount))
def _unit_computation(self):
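        # chunk-wise computation: FFT each position's spectrum along axis 1 and
        # shift the zero-frequency bin to the center (mirrors test() above)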
self.data = np.fft.fftshift(np.fft.fft(self.data, axis=1), axes=1)
def plot_test(self):
fig, axis = plt.subplots()
axis.plot(self.test_data)
plt.savefig('test_partial.png')
if self.verbose: print('Test image created.')
| 39.898305
| 149
| 0.666525
|
import h5py
from pyUSID.processing.process import Process
import numpy as np
from pyUSID import hdf_utils
import matplotlib.pyplot as plt
class SimpleProcess(Process):
def __init__(self, h5_main, verbose=True, **kwargs):
super(SimpleProcess, self).__init__(h5_main, verbose, **kwargs)
self.data = None
self.test_data = None
self.results = None
self.chunk_amount = 0
self.process_name = 'Simple_Process'
if self.verbose: print('Done with initializing book-keepings')
def test(self):
if self.mpi_rank > 0:
return
ran_ind = np.random.randint(0, high=self.h5_main.shape[0])
self.test_data = np.fft.fftshift(np.fft.fft(self.h5_main[ran_ind]))
def _create_results_datasets(self):
self.h5_results_grp = hdf_utils.create_results_group(self.h5_main, self.process_name)
assert isinstance(self.h5_results_grp, h5py.Group)
if self.verbose: print('Results group created.')
self.results = hdf_utils.create_empty_dataset(self.h5_main, self.h5_main.dtype, 'Filtered_Data',
h5_group=self.h5_results_grp)
if self.verbose: print('Empty main dataset for results written')
def _write_results_chunk(self):
pos_in_batch = self._get_pixels_in_current_batch()
self.results[pos_in_batch, :] = self.data
self.chunk_amount = self.chunk_amount + 1
if self.verbose: print('Chunk {} written.'.format(self.chunk_amount))
def _unit_computation(self):
self.data = np.fft.fftshift(np.fft.fft(self.data, axis=1), axes=1)
def plot_test(self):
fig, axis = plt.subplots()
axis.plot(self.test_data)
plt.savefig('test_partial.png')
if self.verbose: print('Test image created.')
| true
| true
|
1c4591b85ef0cb783c72ba1b6a6beb97dbfb0aa3
| 2,482
|
py
|
Python
|
pysnmp/CISCO-SCTP-CAPABILITY.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/CISCO-SCTP-CAPABILITY.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/CISCO-SCTP-CAPABILITY.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module CISCO-SCTP-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-SCTP-CAPABILITY
# Produced by pysmi-0.3.4 at Mon Apr 29 17:54:50 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
AgentCapabilities, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "AgentCapabilities", "NotificationGroup", "ModuleCompliance")
Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, ModuleIdentity, Bits, MibIdentifier, Gauge32, TimeTicks, NotificationType, iso, IpAddress, Unsigned32, Counter32, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "ModuleIdentity", "Bits", "MibIdentifier", "Gauge32", "TimeTicks", "NotificationType", "iso", "IpAddress", "Unsigned32", "Counter32", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ceSctpCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 190))
ceSctpCapability.setRevisions(('2001-06-05 00:00',))
if mibBuilder.loadTexts: ceSctpCapability.setLastUpdated('200106050000Z')
if mibBuilder.loadTexts: ceSctpCapability.setOrganization('Cisco Systems, Inc.')
ceSctpCapabilityV12R021MB1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 190, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ceSctpCapabilityV12R021MB1 = ceSctpCapabilityV12R021MB1.setProductRelease('Cisco IOS 12.2(1)MB1')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ceSctpCapabilityV12R021MB1 = ceSctpCapabilityV12R021MB1.setStatus('current')
mibBuilder.exportSymbols("CISCO-SCTP-CAPABILITY", ceSctpCapability=ceSctpCapability, ceSctpCapabilityV12R021MB1=ceSctpCapabilityV12R021MB1, PYSNMP_MODULE_ID=ceSctpCapability)
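For reference, a hedged sketch of loading this generated module through pysnmp's MIB machinery, which supplies the `mibBuilder` global the file references (the source directory is hypothetical):
from pysnmp.smi import builder

mib_builder = builder.MibBuilder()
mib_builder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))
mib_builder.loadModules('CISCO-SCTP-CAPABILITY')
ceSctpCapability, = mib_builder.importSymbols(
    'CISCO-SCTP-CAPABILITY', 'ceSctpCapability')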
| 99.28
| 477
| 0.787671
|
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
AgentCapabilities, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "AgentCapabilities", "NotificationGroup", "ModuleCompliance")
Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, ModuleIdentity, Bits, MibIdentifier, Gauge32, TimeTicks, NotificationType, iso, IpAddress, Unsigned32, Counter32, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "ModuleIdentity", "Bits", "MibIdentifier", "Gauge32", "TimeTicks", "NotificationType", "iso", "IpAddress", "Unsigned32", "Counter32", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ceSctpCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 190))
ceSctpCapability.setRevisions(('2001-06-05 00:00',))
if mibBuilder.loadTexts: ceSctpCapability.setLastUpdated('200106050000Z')
if mibBuilder.loadTexts: ceSctpCapability.setOrganization('Cisco Systems, Inc.')
ceSctpCapabilityV12R021MB1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 190, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ceSctpCapabilityV12R021MB1 = ceSctpCapabilityV12R021MB1.setProductRelease('Cisco IOS 12.2(1)MB1')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ceSctpCapabilityV12R021MB1 = ceSctpCapabilityV12R021MB1.setStatus('current')
mibBuilder.exportSymbols("CISCO-SCTP-CAPABILITY", ceSctpCapability=ceSctpCapability, ceSctpCapabilityV12R021MB1=ceSctpCapabilityV12R021MB1, PYSNMP_MODULE_ID=ceSctpCapability)
| true
| true
|
1c45922460b3274c214c472b39912156f5a9ae77
| 1,632
|
py
|
Python
|
game/startMk2.py
|
Penniling/launchpad-dont-choose-the-wrong
|
490e814a531168ae3b4cbbd0db89a9887b5d0bb3
|
[
"MIT"
] | null | null | null |
game/startMk2.py
|
Penniling/launchpad-dont-choose-the-wrong
|
490e814a531168ae3b4cbbd0db89a9887b5d0bb3
|
[
"MIT"
] | null | null | null |
game/startMk2.py
|
Penniling/launchpad-dont-choose-the-wrong
|
490e814a531168ae3b4cbbd0db89a9887b5d0bb3
|
[
"MIT"
] | null | null | null |
import LaunchpadMk2
import atexit
import os
import random
def on_exit():
os.system(f"python {os.getcwd()}/startMk2.py")
class Game:
def __init__(self):
self.n = int(input("Please choose a number of wrong pads: "))
self.lp = LaunchpadMk2.LaunchpadMk2()
self.lp.Reset()
self.lp.register_on_button_press(on_button=self.on_button_press)
self.wrong = []
self.pres = []
self.isDead = False
self.start_game()
def start_game(self):
self.lp.LedAllOn(colorcode=self.lp.COLORS["green"])
for i in range(self.n):
x = (random.randint(0, 7), random.randint(1, 8))
while x in self.wrong:
x = (random.randint(0, 7), random.randint(1, 8))
self.wrong.append(x)
        # Win once every safe pad has been pressed; with self.n wrong pads
        # on the 8x8 grid, only 64 - self.n pads can be pressed safely.
        while len(self.pres) < 64 - self.n:
            pass
self.on_win()
def on_button_press(self, x, y, pres):
        # Ignore releases and repeated presses of an already-cleared pad
        # (self.pres is a list of tuples, so membership is the right test).
        if pres > 0 and (x, y) not in self.pres:
if (x, y) in self.wrong:
self.on_death()
else:
self.pres.append((x, y))
self.lp.LedCtrlXY(x, y, 0, 0, 0)
def on_win(self):
self.lp.Reset()
self.lp.LedCtrlString("Win", 0, 255, 0, direction=self.lp.SCROLL_LEFT, waitms=50)
self.lp.continue_listener = False
self.lp.Close()
exit()
def on_death(self):
self.lp.Reset()
for i in self.wrong:
self.lp.LedCtrlXY(i[0], i[1], 255, 0, 0)
self.lp.continue_listener = False
self.lp.Close()
exit()
if __name__ == "__main__":
atexit.register(on_exit)
Game()
| 26.754098
| 89
| 0.550858
|
import LaunchpadMk2
import atexit
import os
import random
def on_exit():
os.system(f"python {os.getcwd()}/startMk2.py")
class Game:
def __init__(self):
self.n = int(input("Please choose a number of wrong pads: "))
self.lp = LaunchpadMk2.LaunchpadMk2()
self.lp.Reset()
self.lp.register_on_button_press(on_button=self.on_button_press)
self.wrong = []
self.pres = []
self.isDead = False
self.start_game()
def start_game(self):
self.lp.LedAllOn(colorcode=self.lp.COLORS["green"])
for i in range(self.n):
x = (random.randint(0, 7), random.randint(1, 8))
while x in self.wrong:
x = (random.randint(0, 7), random.randint(1, 8))
self.wrong.append(x)
        while len(self.pres) < 64 - self.n:
pass
self.on_win()
def on_button_press(self, x, y, pres):
        if pres > 0 and (x, y) not in self.pres:
if (x, y) in self.wrong:
self.on_death()
else:
self.pres.append((x, y))
self.lp.LedCtrlXY(x, y, 0, 0, 0)
def on_win(self):
self.lp.Reset()
self.lp.LedCtrlString("Win", 0, 255, 0, direction=self.lp.SCROLL_LEFT, waitms=50)
self.lp.continue_listener = False
self.lp.Close()
exit()
def on_death(self):
self.lp.Reset()
for i in self.wrong:
self.lp.LedCtrlXY(i[0], i[1], 255, 0, 0)
self.lp.continue_listener = False
self.lp.Close()
exit()
if __name__ == "__main__":
atexit.register(on_exit)
Game()
| true
| true
|
1c4592dbfd3957588d06fd935ce4c485dc1377a0
| 7,268
|
py
|
Python
|
pennylane/interfaces/batch/tensorflow.py
|
ral9000/pennylane
|
0afbd155d044730af546c6d90cef9d01f931632d
|
[
"Apache-2.0"
] | 712
|
2020-07-29T03:46:52.000Z
|
2022-03-27T11:21:51.000Z
|
pennylane/interfaces/batch/tensorflow.py
|
ral9000/pennylane
|
0afbd155d044730af546c6d90cef9d01f931632d
|
[
"Apache-2.0"
] | 1,627
|
2020-07-28T13:07:58.000Z
|
2022-03-31T21:47:29.000Z
|
pennylane/interfaces/batch/tensorflow.py
|
ral9000/pennylane
|
0afbd155d044730af546c6d90cef9d01f931632d
|
[
"Apache-2.0"
] | 249
|
2020-07-29T03:26:18.000Z
|
2022-03-31T19:59:48.000Z
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains functions for adding the TensorFlow interface
to a PennyLane Device class.
"""
# pylint: disable=too-many-arguments,too-many-branches
import numpy as np
import tensorflow as tf
from tensorflow.python.eager import context
import pennylane as qml
def _compute_vjp(dy, jacs):
# compute the vector-Jacobian product dy @ jac
# for a list of dy's and Jacobian matrices.
vjps = []
for d, jac in zip(dy, jacs):
vjp = qml.gradients.compute_vjp(d, jac)
if not context.executing_eagerly():
vjp = qml.math.unstack(vjp)
vjps.extend(vjp)
return vjps
def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=2):
"""Execute a batch of tapes with TensorFlow parameters on a device.
Args:
tapes (Sequence[.QuantumTape]): batch of tapes to execute
device (.Device): Device to use to execute the batch of tapes.
If the device does not provide a ``batch_execute`` method,
by default the tapes will be executed in serial.
execute_fn (callable): The execution function used to execute the tapes
during the forward pass. This function must return a tuple ``(results, jacobians)``.
If ``jacobians`` is an empty list, then ``gradient_fn`` is used to
compute the gradients during the backwards pass.
gradient_kwargs (dict): dictionary of keyword arguments to pass when
determining the gradients of tapes
gradient_fn (callable): the gradient function to use to compute quantum gradients
_n (int): a positive integer used to track nesting of derivatives, for example
if the nth-order derivative is requested.
max_diff (int): If ``gradient_fn`` is a gradient transform, this option specifies
the maximum number of derivatives to support. Increasing this value allows
for higher order derivatives to be extracted, at the cost of additional
(classical) computational overhead during the backwards pass.
Returns:
list[list[tf.Tensor]]: A nested list of tape results. Each element in
the returned list corresponds in order to the provided tapes.
"""
parameters = []
params_unwrapped = []
for i, tape in enumerate(tapes):
# store the trainable parameters
params = tape.get_parameters(trainable_only=False)
tape.trainable_params = qml.math.get_trainable_indices(params)
parameters += [p for i, p in enumerate(params) if i in tape.trainable_params]
# store all unwrapped parameters
params_unwrapped.append(
[i.numpy() if isinstance(i, (tf.Variable, tf.Tensor)) else i for i in params]
)
with qml.tape.Unwrap(*tapes, set_trainable=False):
# Forward pass: execute the tapes
res, jacs = execute_fn(tapes, **gradient_kwargs)
for i, tape in enumerate(tapes):
# convert output to TensorFlow tensors
r = np.hstack(res[i]) if res[i].dtype == np.dtype("object") else res[i]
res[i] = tf.convert_to_tensor(r)
@tf.custom_gradient
def _execute(*parameters): # pylint:disable=unused-argument
def grad_fn(*dy, **tfkwargs):
"""Returns the vector-Jacobian product with given
parameter values and output gradient dy"""
dy = [qml.math.T(d) for d in dy]
if jacs:
# Jacobians were computed on the forward pass (mode="forward")
# No additional quantum evaluations needed; simply compute the VJPs directly.
vjps = _compute_vjp(dy, jacs)
else:
# Need to compute the Jacobians on the backward pass (accumulation="backward")
if isinstance(gradient_fn, qml.gradients.gradient_transform):
# Gradient function is a gradient transform.
# Generate and execute the required gradient tapes
if _n == max_diff or not context.executing_eagerly():
with qml.tape.Unwrap(*tapes, params=params_unwrapped, set_trainable=False):
vjp_tapes, processing_fn = qml.gradients.batch_vjp(
tapes,
dy,
gradient_fn,
reduction=lambda vjps, x: vjps.extend(qml.math.unstack(x)),
gradient_kwargs=gradient_kwargs,
)
vjps = processing_fn(execute_fn(vjp_tapes)[0])
else:
vjp_tapes, processing_fn = qml.gradients.batch_vjp(
tapes,
dy,
gradient_fn,
reduction="extend",
gradient_kwargs=gradient_kwargs,
)
# This is where the magic happens. Note that we call ``execute``.
# This recursion, coupled with the fact that the gradient transforms
# are differentiable, allows for arbitrary order differentiation.
vjps = processing_fn(
execute(
vjp_tapes,
device,
execute_fn,
gradient_fn,
gradient_kwargs,
_n=_n + 1,
max_diff=max_diff,
)
)
else:
# Gradient function is not a gradient transform
# (e.g., it might be a device method).
# Note that unlike the previous branch:
#
# - there is no recursion here
# - gradient_fn is not differentiable
#
# so we cannot support higher-order derivatives.
with qml.tape.Unwrap(*tapes, params=params_unwrapped, set_trainable=False):
vjps = _compute_vjp(dy, gradient_fn(tapes, **gradient_kwargs))
variables = tfkwargs.get("variables", None)
return (vjps, variables) if variables is not None else vjps
return res, grad_fn
return _execute(*parameters)
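For context, a minimal sketch of how this interface is exercised from user code: a TensorFlow-interface QNode evaluated under tf.GradientTape, so the backward pass lands in the grad_fn defined above (the device and circuit are illustrative):
import pennylane as qml
import tensorflow as tf

dev = qml.device("default.qubit", wires=1)

@qml.qnode(dev, interface="tf")
def circuit(x):
    qml.RX(x, wires=0)
    return qml.expval(qml.PauliZ(0))

x = tf.Variable(0.3)
with tf.GradientTape() as tape:
    y = circuit(x)
print(tape.gradient(y, x))  # vector-Jacobian product routed through grad_fn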
| 43.261905
| 100
| 0.569895
|
import numpy as np
import tensorflow as tf
from tensorflow.python.eager import context
import pennylane as qml
def _compute_vjp(dy, jacs):
vjps = []
for d, jac in zip(dy, jacs):
vjp = qml.gradients.compute_vjp(d, jac)
if not context.executing_eagerly():
vjp = qml.math.unstack(vjp)
vjps.extend(vjp)
return vjps
def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=2):
parameters = []
params_unwrapped = []
for i, tape in enumerate(tapes):
# store the trainable parameters
params = tape.get_parameters(trainable_only=False)
tape.trainable_params = qml.math.get_trainable_indices(params)
parameters += [p for i, p in enumerate(params) if i in tape.trainable_params]
# store all unwrapped parameters
params_unwrapped.append(
[i.numpy() if isinstance(i, (tf.Variable, tf.Tensor)) else i for i in params]
)
with qml.tape.Unwrap(*tapes, set_trainable=False):
# Forward pass: execute the tapes
res, jacs = execute_fn(tapes, **gradient_kwargs)
for i, tape in enumerate(tapes):
# convert output to TensorFlow tensors
r = np.hstack(res[i]) if res[i].dtype == np.dtype("object") else res[i]
res[i] = tf.convert_to_tensor(r)
@tf.custom_gradient
def _execute(*parameters): # pylint:disable=unused-argument
def grad_fn(*dy, **tfkwargs):
dy = [qml.math.T(d) for d in dy]
if jacs:
# Jacobians were computed on the forward pass (mode="forward")
# No additional quantum evaluations needed; simply compute the VJPs directly.
vjps = _compute_vjp(dy, jacs)
else:
# Need to compute the Jacobians on the backward pass (accumulation="backward")
if isinstance(gradient_fn, qml.gradients.gradient_transform):
# Gradient function is a gradient transform.
# Generate and execute the required gradient tapes
if _n == max_diff or not context.executing_eagerly():
with qml.tape.Unwrap(*tapes, params=params_unwrapped, set_trainable=False):
vjp_tapes, processing_fn = qml.gradients.batch_vjp(
tapes,
dy,
gradient_fn,
reduction=lambda vjps, x: vjps.extend(qml.math.unstack(x)),
gradient_kwargs=gradient_kwargs,
)
vjps = processing_fn(execute_fn(vjp_tapes)[0])
else:
vjp_tapes, processing_fn = qml.gradients.batch_vjp(
tapes,
dy,
gradient_fn,
reduction="extend",
gradient_kwargs=gradient_kwargs,
)
# This is where the magic happens. Note that we call ``execute``.
# This recursion, coupled with the fact that the gradient transforms
# are differentiable, allows for arbitrary order differentiation.
vjps = processing_fn(
execute(
vjp_tapes,
device,
execute_fn,
gradient_fn,
gradient_kwargs,
_n=_n + 1,
max_diff=max_diff,
)
)
else:
# Gradient function is not a gradient transform
# (e.g., it might be a device method).
# Note that unlike the previous branch:
#
# - there is no recursion here
# - gradient_fn is not differentiable
#
# so we cannot support higher-order derivatives.
with qml.tape.Unwrap(*tapes, params=params_unwrapped, set_trainable=False):
vjps = _compute_vjp(dy, gradient_fn(tapes, **gradient_kwargs))
variables = tfkwargs.get("variables", None)
return (vjps, variables) if variables is not None else vjps
return res, grad_fn
return _execute(*parameters)
| true
| true
|
1c459309ba1a81398fc095a2ca8f6f6f4053e120
| 990
|
py
|
Python
|
linkv_sdk/config/bindings/ffi.py
|
linkv-io/python2-sdk
|
45699372ffcf6e3e745d870cfca004fc885ee15f
|
[
"Apache-2.0"
] | null | null | null |
linkv_sdk/config/bindings/ffi.py
|
linkv-io/python2-sdk
|
45699372ffcf6e3e745d870cfca004fc885ee15f
|
[
"Apache-2.0"
] | null | null | null |
linkv_sdk/config/bindings/ffi.py
|
linkv-io/python2-sdk
|
45699372ffcf6e3e745d870cfca004fc885ee15f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: UTF-8 -*-
import platform
import os
from requests import get
from tempfile import gettempdir
from ctypes import CDLL
def _platform_file(name):
ext = ''
if platform.uname()[0] == "Linux":
ext = 'so'
elif platform.uname()[0] == "Darwin":
ext = 'dylib'
elif platform.uname()[0] == "Windows":
ext = 'dll'
return "lib{}.{}".format(name, ext)
def dlopen_platform_specific(name, path):
return CDLL('{}/{}'.format(gettempdir() if path == "" else path, _platform_file(name)))
DownloadURL = 'http://dl.linkv.fun/static/server'
def download(name, path, version):
filepath = '{}/{}'.format(gettempdir() if path == "" else path, _platform_file(name))
if os.path.exists(filepath):
return True
r = get('{}/{}/{}'.format(DownloadURL, version, _platform_file(name)))
if r.status_code != 200:
return False
with open(filepath, 'wb') as f:
f.write(r.content)
r.close()
return True
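A minimal usage sketch (the library name and version string below are hypothetical): download() fetches the platform-specific shared object into the temp directory, then dlopen_platform_specific() loads it via ctypes.
if download('lmi', '', 'v1.0.0'):
    lib = dlopen_platform_specific('lmi', '')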
| 22
| 91
| 0.611111
|
import platform
import os
from requests import get
from tempfile import gettempdir
from ctypes import CDLL
def _platform_file(name):
ext = ''
if platform.uname()[0] == "Linux":
ext = 'so'
elif platform.uname()[0] == "Darwin":
ext = 'dylib'
elif platform.uname()[0] == "Windows":
ext = 'dll'
return "lib{}.{}".format(name, ext)
def dlopen_platform_specific(name, path):
return CDLL('{}/{}'.format(gettempdir() if path == "" else path, _platform_file(name)))
DownloadURL = 'http://dl.linkv.fun/static/server'
def download(name, path, version):
filepath = '{}/{}'.format(gettempdir() if path == "" else path, _platform_file(name))
if os.path.exists(filepath):
return True
r = get('{}/{}/{}'.format(DownloadURL, version, _platform_file(name)))
if r.status_code != 200:
return False
with open(filepath, 'wb') as f:
f.write(r.content)
r.close()
return True
| true
| true
|
1c4595dae899b6160a00fb35d2139755cf007c2b
| 2,254
|
py
|
Python
|
backend/pyrogram/raw/functions/messages/get_attached_stickers.py
|
appheap/social-media-analyzer
|
0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c
|
[
"Apache-2.0"
] | 5
|
2021-09-11T22:01:15.000Z
|
2022-03-16T21:33:42.000Z
|
backend/pyrogram/raw/functions/messages/get_attached_stickers.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | null | null | null |
backend/pyrogram/raw/functions/messages/get_attached_stickers.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | 3
|
2022-01-18T11:06:22.000Z
|
2022-02-26T13:39:28.000Z
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class GetAttachedStickers(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``123``
- ID: ``0xcc5b67cc``
Parameters:
media: :obj:`InputStickeredMedia <pyrogram.raw.base.InputStickeredMedia>`
Returns:
List of :obj:`StickerSetCovered <pyrogram.raw.base.StickerSetCovered>`
"""
__slots__: List[str] = ["media"]
ID = 0xcc5b67cc
QUALNAME = "functions.messages.GetAttachedStickers"
def __init__(self, *, media: "raw.base.InputStickeredMedia") -> None:
self.media = media # InputStickeredMedia
@staticmethod
def read(data: BytesIO, *args: Any) -> "GetAttachedStickers":
# No flags
media = TLObject.read(data)
return GetAttachedStickers(media=media)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(self.media.write())
return data.getvalue()
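A hedged usage sketch for this generated method (Pyrogram 1.x style, matching layer 123; later versions renamed Client.send to Client.invoke, and the media value is illustrative):
from pyrogram import Client, raw

async def attached_stickers(app: Client, media):
    # `media` must be a raw.base.InputStickeredMedia instance
    return await app.send(
        raw.functions.messages.GetAttachedStickers(media=media))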
| 31.305556
| 103
| 0.645519
|
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
| true
| true
|
1c459686e0c6196509dccaf4fcbecf5fdc393fc7
| 41,713
|
py
|
Python
|
xform/models.py
|
alisonamerico/Django-XForm
|
ad2e96455307b57ef3c485a006db478fe4352a36
|
[
"MIT"
] | 3
|
2019-07-25T14:46:14.000Z
|
2020-12-14T22:43:46.000Z
|
xform/models.py
|
alisonamerico/Django-XForm
|
ad2e96455307b57ef3c485a006db478fe4352a36
|
[
"MIT"
] | 4
|
2019-09-04T17:39:04.000Z
|
2021-11-05T23:14:58.000Z
|
xform/models.py
|
alisonamerico/Django-XForm
|
ad2e96455307b57ef3c485a006db478fe4352a36
|
[
"MIT"
] | 1
|
2021-11-05T23:05:48.000Z
|
2021-11-05T23:05:48.000Z
|
import csv
import json
import mimetypes
import os
import random
import re
import requests
import xlrd
from contextlib import closing
from hashlib import md5
from io import BytesIO
from io import StringIO
from pyxform import SurveyElementBuilder
from pyxform.builder import create_survey_element_from_dict
from pyxform.utils import has_external_choices
from pyxform.xform2json import create_survey_element_from_xml
from pyxform.xls2json import parse_file_to_json
from xml.dom import Node
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.contrib.gis.db import models
from django.contrib.gis.geos import GeometryCollection, Point
from django.core.exceptions import ValidationError
from django.core.files.temp import NamedTemporaryFile
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.validators import URLValidator
from django.db.models.signals import post_save
from django.utils import timezone
from .tags import (
UUID, ID, ATTACHMENTS, STATUS, NOTES, VERSION, DURATION, XFORM_ID_STRING,
XFORM_ID, GEOLOCATION, SUBMITTED_BY, SUBMISSION_TIME, TOTAL_MEDIA,
MEDIA_COUNT, MEDIA_ALL_RECEIVED, EDITED, LAST_EDITED, KNOWN_MEDIA_TYPES,
START, END
)
from .utils import (
get_values_matching_key, get_uuid_from_xml, set_uuid, XFormInstanceParser,
clean_and_parse_xml, get_numeric_fields, numeric_checker,
_get_tag_or_element_type_xpath, calculate_duration
)
if 'postg' in settings.DATABASES['default']['ENGINE']:
from django.contrib.postgres.fields import JSONField
else:
from jsonfield import JSONField
CHUNK_SIZE = 1024
XFORM_TITLE_LENGTH = 255
title_pattern = re.compile(r"<h:title>(.*?)</h:title>")
def contains_xml_invalid_char(text, invalids=['&', '>', '<']):
    """Check whether 'text' contains ANY invalid xml chars"""
    return any(c in text for c in invalids)
def convert_to_serializable_date(date):
if hasattr(date, 'isoformat'):
return date.isoformat()
return date
def _get_attachments_from_instance(instance):
attachments = []
for a in instance.attachments.all():
attachment = dict()
attachment['download_url'] = a.media_file.url
attachment['small_download_url'] = a.media_file.url
attachment['medium_download_url'] = a.media_file.url
attachment['mimetype'] = a.mimetype
attachment['filename'] = a.media_file.name
attachment['name'] = a.name
attachment['instance'] = a.instance.pk
attachment['xform'] = instance.xform.id
attachment['id'] = a.id
attachments.append(attachment)
return attachments
def get_default_content_type():
content_object, created = ContentType.objects.get_or_create(
app_label="xform", model='xform')
return content_object.id
def upload_to(instance, filename):
try:
return os.path.join(
instance.user.username, 'xls',
os.path.split(filename)[1])
except Exception:
folder = "{}_{}".format(instance.instance.xform.id,
instance.instance.xform.id_string)
return os.path.join(
instance.instance.xform.user.username, 'attachments', folder,
os.path.split(filename)[1])
class XLSFormError(Exception):
pass
class FormInactiveError(Exception):
pass
class XForm(models.Model):
dynamic_choices = True
xls = models.FileField(upload_to=upload_to, null=True)
json = models.TextField(default=u'')
description = models.TextField(default=u'', null=True, blank=True)
xml = models.TextField()
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='xforms', null=True, on_delete=models.CASCADE)
id_string = models.SlugField(
editable=False,
verbose_name="ID",
max_length=100)
title = models.CharField(editable=False, max_length=255)
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
last_submission_time = models.DateTimeField(blank=True, null=True)
has_start_time = models.BooleanField(default=False)
uuid = models.CharField(max_length=36, default=u'')
uuid_regex = re.compile(r'(<instance>.*?id="[^"]+">)(.*</instance>)(.*)',
re.DOTALL)
instance_id_regex = re.compile(r'<instance>.*?id="([^"]+)".*</instance>',
re.DOTALL)
instances_with_geopoints = models.BooleanField(default=False)
num_of_submissions = models.IntegerField(default=0)
version = models.CharField(
max_length=255, null=True, blank=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE)
metadata_set = GenericRelation(
'MetaData',
content_type_field='content_type_id',
object_id_field="object_id")
has_hxl_support = models.BooleanField(default=False)
last_updated_at = models.DateTimeField(auto_now=True)
hash = models.CharField("Hash", max_length=36, blank=True, null=True,
default=None)
class Meta:
unique_together = ("user", "id_string",)
verbose_name = "XForm"
verbose_name_plural = "XForms"
ordering = ("pk", )
def get_osm_survey_xpaths(self):
"""
Returns abbreviated_xpath for OSM question types in the survey.
"""
return [
elem.get_abbreviated_xpath()
for elem in self.get_survey_elements_of_type('osm')]
def get_media_survey_xpaths(self):
return [
e.get_abbreviated_xpath()
for e in sum([
self.get_survey_elements_of_type(m) for m in KNOWN_MEDIA_TYPES
], [])
]
def file_name(self):
return self.id_string + ".xml"
def get_survey_elements_of_type(self, element_type):
return [
e for e in self.get_survey_elements() if e.type == element_type
]
def _set_uuid_in_xml(self, file_name=None):
"""
Add bind to automatically set UUID node in XML.
"""
if not file_name:
file_name = self.file_name()
file_name, file_ext = os.path.splitext(file_name)
doc = clean_and_parse_xml(self.xml)
model_nodes = doc.getElementsByTagName("model")
if len(model_nodes) != 1:
raise Exception(u"xml contains multiple model nodes")
model_node = model_nodes[0]
instance_nodes = [
node for node in model_node.childNodes
if node.nodeType == Node.ELEMENT_NODE and
node.tagName.lower() == "instance" and not node.hasAttribute("id")
]
if len(instance_nodes) != 1:
raise Exception("Multiple instance nodes without the id "
"attribute, can't tell which is the main one")
instance_node = instance_nodes[0]
# get the first child whose id attribute matches our id_string
survey_nodes = [
node for node in instance_node.childNodes
if node.nodeType == Node.ELEMENT_NODE and
(node.tagName == file_name or node.attributes.get('id'))
]
if len(survey_nodes) != 1:
raise Exception(
"Multiple survey nodes with the id '%s'" % self.id_string)
survey_node = survey_nodes[0]
formhub_nodes = [
n for n in survey_node.childNodes
if n.nodeType == Node.ELEMENT_NODE and n.tagName == "formhub"
]
if len(formhub_nodes) > 1:
raise Exception(
"Multiple formhub nodes within main instance node")
elif len(formhub_nodes) == 1:
formhub_node = formhub_nodes[0]
else:
formhub_node = survey_node.insertBefore(
doc.createElement("formhub"), survey_node.firstChild)
uuid_nodes = [
node for node in formhub_node.childNodes
if node.nodeType == Node.ELEMENT_NODE and node.tagName == "uuid"
]
if len(uuid_nodes) == 0:
formhub_node.appendChild(doc.createElement("uuid"))
if len(formhub_nodes) == 0:
# append the calculate bind node
calculate_node = doc.createElement("bind")
calculate_node.setAttribute(
"nodeset", "/%s/formhub/uuid" % survey_node.tagName)
calculate_node.setAttribute("type", "string")
calculate_node.setAttribute("calculate", "'%s'" % self.uuid)
model_node.appendChild(calculate_node)
self.xml = doc.toprettyxml(indent=" ", encoding='utf-8')
# hack
# http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\
# and-silly-whitespace/
        text_re = re.compile(r'(>)\n\s*(\s[^<>\s].*?)\n\s*(\s</)', re.DOTALL)
        output_re = re.compile(r'\n.*(<output.*>)\n( )*')
        pretty_xml = text_re.sub(lambda m: ''.join(m.group(1, 2, 3)),
                                 self.xml.decode('utf-8'))
        inline_output = output_re.sub(r'\g<1>', pretty_xml)
        inline_output = re.compile(r'<label>\s*\n*\s*\n*\s*</label>').sub(
            '<label></label>', inline_output)
self.xml = inline_output
def _mark_start_time_boolean(self):
starttime_substring = 'jr:preloadParams="start"'
if self.xml.find(starttime_substring) != -1:
self.has_start_time = True
else:
self.has_start_time = False
def _id_string_already_exists_in_account(self, id_string):
try:
XForm.objects.get(id_string__iexact=id_string)
except XForm.DoesNotExist:
return False
return True
def get_unique_id_string(self, id_string, count=0):
# used to generate a new id_string for new data_dictionary object if
# id_string already existed
if self._id_string_already_exists_in_account(id_string):
if count != 0:
if re.match(r'\w+_\d+$', id_string):
a = id_string.split('_')
id_string = "_".join(a[:-1])
count += 1
id_string = "{}_{}".format(id_string, count)
return self.get_unique_id_string(id_string, count)
return id_string
def _set_title(self):
xml = re.sub(r"\s+", " ", self.xml)
matches = title_pattern.findall(xml)
if len(matches) != 1:
raise XLSFormError(("There should be a single title."), matches)
if matches:
title_xml = matches[0][:XFORM_TITLE_LENGTH]
else:
title_xml = self.title[:XFORM_TITLE_LENGTH] if self.title else ''
if self.title and title_xml != self.title:
title_xml = self.title[:XFORM_TITLE_LENGTH]
if isinstance(self.xml, bytes):
self.xml = self.xml.decode('utf-8')
self.xml = title_pattern.sub(u"<h:title>%s</h:title>" % title_xml,
self.xml)
self._set_hash()
if contains_xml_invalid_char(title_xml):
raise XLSFormError(
"Title shouldn't have any invalid xml "
"characters ('>' '&' '<')"
)
self.title = title_xml
def get_hash(self):
return u'md5:%s' % md5(self.xml.encode('utf8')).hexdigest()
def get_random_hash(self):
return u'md5:%s' % md5(
("%s-%s" % (
self.xml,
random.randint(0, 25101991)
)).encode('utf8')
).hexdigest()
@property
def random_hash(self):
return self.get_random_hash()
def _set_hash(self):
self.hash = self.get_hash()
def _set_id_string(self):
matches = self.instance_id_regex.findall(self.xml)
if len(matches) != 1:
raise XLSFormError("There should be a single id string.")
self.id_string = matches[0]
def save(self, *args, **kwargs):
update_fields = kwargs.get('update_fields')
if update_fields:
kwargs['update_fields'] = list(
set(list(update_fields) + ['date_modified']))
if update_fields is None or 'title' in update_fields:
self._set_title()
if self.pk is None:
self._set_hash()
if update_fields is None or 'id_string' in update_fields:
old_id_string = self.id_string
self._set_id_string()
# check if we have an existing id_string,
# if so, the one must match but only if xform is NOT new
if self.pk and old_id_string and old_id_string != self.id_string \
and self.num_of_submissions > 0:
raise XLSFormError(
"Your updated form's id_string '%(new_id)s' must match "
"the existing forms' id_string '%(old_id)s'." % {
'new_id': self.id_string,
'old_id': old_id_string
})
if getattr(settings, 'STRICT', True) and \
not re.search(r"^[\w-]+$", self.id_string):
raise XLSFormError(
'In strict mode, the XForm ID must be a '
'valid slug and contain no spaces.')
if 'skip_xls_read' in kwargs:
del kwargs['skip_xls_read']
super(XForm, self).save(*args, **kwargs)
def get_survey(self):
if not hasattr(self, "_survey"):
try:
builder = SurveyElementBuilder()
self._survey = \
builder.create_survey_element_from_json(self.json)
except ValueError:
xml = bytes(bytearray(self.xml, encoding='utf-8'))
self._survey = create_survey_element_from_xml(xml)
return self._survey
survey = property(get_survey)
def get_survey_elements(self):
return self.survey.iter_descendants()
def geopoint_xpaths(self):
survey_elements = self.get_survey_elements()
return [
e.get_abbreviated_xpath() for e in survey_elements
if e.bind.get(u'type') == u'geopoint'
]
def __str__(self):
return self.id_string
def type_for_form(content_object, data_type):
content_type = ContentType.objects.get_for_model(content_object)
return MetaData.objects.filter(object_id=content_object.id,
content_type=content_type,
data_type=data_type)
def is_valid_url(uri):
    try:
        # URLValidator must be instantiated and then called on the value;
        # URLValidator(uri) only builds a validator and never validates.
        URLValidator()(uri)
    except ValidationError:
        return False
    return True
def create_media(media):
"""Download media link"""
if is_valid_url(media.data_value):
filename = media.data_value.split('/')[-1]
data_file = NamedTemporaryFile()
content_type = mimetypes.guess_type(filename)
with closing(requests.get(media.data_value, stream=True)) as r:
for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
if chunk:
data_file.write(chunk)
        data_file.seek(0, os.SEEK_END)
        size = os.path.getsize(data_file.name)
        data_file.seek(0)
media.data_value = filename
media.data_file = InMemoryUploadedFile(
data_file, 'data_file', filename, content_type,
size, charset=None)
return media
return None
def media_resources(media_list, download=False):
"""List of MetaData objects of type media
@param media_list - list of MetaData objects of type `media`
@param download - boolean, when True downloads media files when
media.data_value is a valid url
return a list of MetaData objects
"""
data = []
for media in media_list:
if media.data_file.name == '' and download:
media = create_media(media)
if media:
data.append(media)
else:
data.append(media)
return data
def meta_data_upload_to(instance, filename):
username = None
if instance.content_object.user is None and \
instance.content_type.model == 'instance':
username = instance.content_object.xform.user.username
else:
username = instance.content_object.user.username
if instance.data_type == 'media':
return os.path.join(username, 'formid-media', filename)
return os.path.join(username, 'docs', filename)
class MetaData(models.Model):
data_type = models.CharField(max_length=255)
data_value = models.CharField(max_length=255)
data_file = models.FileField(
upload_to=meta_data_upload_to, blank=True, null=True)
data_file_type = models.CharField(max_length=255, blank=True, null=True)
file_hash = models.CharField(max_length=50, blank=True, null=True)
date_created = models.DateTimeField(null=True, auto_now_add=True)
date_modified = models.DateTimeField(null=True, auto_now=True)
deleted_at = models.DateTimeField(null=True, default=None)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE,
default=get_default_content_type)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = GenericForeignKey('content_type', 'object_id')
objects = models.Manager()
class Meta:
unique_together = ('object_id', 'data_type', 'data_value',
'content_type')
def __str__(self):
return self.data_value
def file(self, username=None):
if hasattr(self, '_file'):
return self._file
url = requests.Request(
'GET', self.data_value, params={
'username': username
}
).prepare().url
self._file = MetaData.get_file(url)
return self._file
@staticmethod
def media_upload(content_object, data_file=None, download=False):
data_type = 'media'
if data_file:
allowed_types = settings.XFORM_SUPPORTED_MEDIA_UPLOAD_TYPES
data_content_type = data_file.content_type \
if data_file.content_type in allowed_types else \
mimetypes.guess_type(data_file.name)[0]
if data_content_type in allowed_types:
content_type = ContentType.objects.get_for_model(
content_object)
media, created = MetaData.objects.update_or_create(
data_type=data_type,
content_type=content_type,
object_id=content_object.id,
data_value=data_file.name,
defaults={
'data_file': data_file,
'data_file_type': data_content_type
})
return media_resources(
type_for_form(content_object, data_type), download)
@staticmethod
def get_md5(data_file):
hash_md5 = md5()
for chunk in iter(lambda: data_file.read(4096), b""):
hash_md5.update(chunk)
return 'md5:%s' % hash_md5.hexdigest()
@staticmethod
def get_file(url):
data_file = None
output = BytesIO()
def getsize(f):
f.seek(0)
f.read()
s = f.tell()
f.seek(0)
return s
r = requests.get(url, allow_redirects=True)
d = r.headers['content-disposition']
fname = re.findall("filename=\"(.+)\"", d)[0]
content_type = r.headers.get('content-type')
output.write(r.content)
size = getsize(output)
data_file = InMemoryUploadedFile(
file=output, name=fname,
field_name=None,
content_type=content_type,
charset='utf-8', size=size
)
return data_file
@staticmethod
def add_url(content_object, url=None, download=False):
data_type = 'url'
try:
data_file = MetaData.get_file(url)
except Exception:
return None
allowed_types = settings.XFORM_SUPPORTED_MEDIA_UPLOAD_TYPES
data_content_type = data_file.content_type \
if data_file.content_type in allowed_types else \
mimetypes.guess_type(data_file.name)[0]
if data_content_type in allowed_types:
content_type = ContentType.objects.get_for_model(
content_object)
media, created = MetaData.objects.update_or_create(
data_type=data_type,
content_type=content_type,
object_id=content_object.id,
data_value=url,
defaults={
'data_file': None,
'data_file_type': data_content_type
})
return media_resources(
type_for_form(content_object, data_type), download)
def save(self, *args, **kwargs):
self._set_hash()
super(MetaData, self).save(*args, **kwargs)
@property
def hash(self):
if self.file_hash is not None and self.file_hash != '':
return self.file_hash
else:
return self._set_hash()
def _set_hash(self):
if not self.data_file:
return None
file_exists = self.data_file.storage.exists(self.data_file.name)
if (file_exists and self.data_file.name != '') \
or (not file_exists and self.data_file):
try:
self.data_file.seek(os.SEEK_SET)
except IOError:
return ''
else:
self.file_hash = 'md5:%s' % md5(
self.data_file.read()).hexdigest()
return self.file_hash
return ''
class Instance(models.Model):
"""
Model representing a single submission to an XForm
"""
json = JSONField(default=dict, null=False)
xml = models.TextField()
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='instances', null=True, on_delete=models.CASCADE)
xform = models.ForeignKey('xform.XForm', null=False,
related_name='instances', on_delete=models.CASCADE)
# shows when we first received this instance
date_created = models.DateTimeField(auto_now_add=True)
# this will end up representing "date last parsed"
date_modified = models.DateTimeField(auto_now=True)
# this will be edited when we need to create a new InstanceHistory object
last_edited = models.DateTimeField(null=True, default=None)
# ODK keeps track of three statuses for an instance:
# incomplete, submitted, complete
# we add a fourth status: submitted_via_web
status = models.CharField(max_length=20,
default=u'submitted_via_web')
uuid = models.CharField(max_length=249, default=u'', db_index=True)
version = models.CharField(max_length=255, null=True)
# store a geographic objects associated with this instance
geom = models.GeometryCollectionField(null=True)
# Keep track of whether all media attachments have been received
media_all_received = models.NullBooleanField(
"Received All Media Attachemts",
null=True,
default=True)
total_media = models.PositiveIntegerField("Total Media Attachments",
null=True,
default=0)
media_count = models.PositiveIntegerField("Received Media Attachments",
null=True,
default=0)
checksum = models.CharField(max_length=64, null=True, blank=True,
db_index=True)
class Meta:
unique_together = ('xform', 'uuid')
def __str__(self):
return "Status: %s" % self.status
@property
def point(self):
gc = self.geom
if gc and len(gc):
return gc[0]
def get_duration(self):
data = self.get_dict()
# pylint: disable=no-member
start_name = _get_tag_or_element_type_xpath(self.xform, START)
end_name = _get_tag_or_element_type_xpath(self.xform, END)
start_time, end_time = data.get(start_name), data.get(end_name)
return calculate_duration(start_time, end_time)
@property
def num_of_media(self):
"""
Returns number of media attachments expected in the submission.
"""
if not hasattr(self, '_num_of_media'):
# pylint: disable=attribute-defined-outside-init
self._num_of_media = len(self.get_expected_media())
return self._num_of_media
@property
def attachments_count(self):
return len(set(self.attachments.filter(
name__in=self.get_expected_media()
).values_list('name', flat=True)))
def get_expected_media(self):
"""
Returns a list of expected media files from the submission data.
"""
if not hasattr(self, '_expected_media'):
# pylint: disable=no-member
data = self.get_dict()
media_list = []
if 'encryptedXmlFile' in data and self.xform.encrypted:
media_list.append(data['encryptedXmlFile'])
if 'media' in data:
# pylint: disable=no-member
media_list.extend([i['media/file'] for i in data['media']])
else:
media_xpaths = (self.xform.get_media_survey_xpaths() +
self.xform.get_osm_survey_xpaths())
for media_xpath in media_xpaths:
media_list.extend(
get_values_matching_key(data, media_xpath))
# pylint: disable=attribute-defined-outside-init
self._expected_media = list(set(media_list))
return self._expected_media
def numeric_converter(self, json_dict, numeric_fields=None):
if numeric_fields is None:
# pylint: disable=no-member
numeric_fields = get_numeric_fields(self.xform)
for key, value in json_dict.items():
if isinstance(value, (str, bytes)) and key in numeric_fields:
converted_value = numeric_checker(value)
if converted_value:
json_dict[key] = converted_value
elif isinstance(value, dict):
json_dict[key] = self.numeric_converter(
value, numeric_fields)
elif isinstance(value, list):
for k, v in enumerate(value):
if isinstance(v, (str, bytes)) and key in numeric_fields:
converted_value = numeric_checker(v)
if converted_value:
json_dict[key] = converted_value
elif isinstance(v, dict):
value[k] = self.numeric_converter(
v, numeric_fields)
return json_dict
def _set_geom(self):
# pylint: disable=no-member
xform = self.xform
geo_xpaths = xform.geopoint_xpaths()
doc = self.get_dict()
points = []
if geo_xpaths:
for xpath in geo_xpaths:
for gps in get_values_matching_key(doc, xpath):
try:
geometry = [float(s) for s in gps.split()]
lat, lng = geometry[0:2]
points.append(Point(lng, lat))
except ValueError:
return
if not xform.instances_with_geopoints and len(points):
xform.instances_with_geopoints = True
xform.save()
self.geom = GeometryCollection(points)
def _check_active(self, force):
"""Check that form is active and raise exception if not.
:param force: Ignore restrictions on saving.
"""
# pylint: disable=no-member
# if not force and self.xform and not self.xform.downloadable:
# raise FormInactiveError()
pass
def _set_json(self):
self.json = self.get_full_dict()
def get_full_dict(self, load_existing=True):
doc = self.json or {} if load_existing else {}
# Get latest dict
doc = self.get_dict()
# pylint: disable=no-member
if self.id:
doc.update({
UUID: self.uuid,
ID: self.id,
# BAMBOO_DATASET_ID: self.xform.bamboo_dataset,
ATTACHMENTS: _get_attachments_from_instance(self),
STATUS: self.status,
# TAGS: list(self.tags.names()),
NOTES: [],
VERSION: self.version,
DURATION: self.get_duration(),
XFORM_ID_STRING: self._parser.get_xform_id_string(),
XFORM_ID: self.xform.pk,
GEOLOCATION: [self.point.y, self.point.x] if self.point
else [None, None],
SUBMITTED_BY: self.user.username if self.user else None
})
# for osm in self.osm_data.all():
# doc.update(osm.get_tags_with_prefix())
if not self.date_created:
self.date_created = timezone.now()
doc[SUBMISSION_TIME] = self.date_created.strftime(
'%Y-%m-%dT%H:%M:%S')
doc[TOTAL_MEDIA] = self.total_media
doc[MEDIA_COUNT] = self.media_count
doc[MEDIA_ALL_RECEIVED] = self.media_all_received
edited = False
if hasattr(self, 'last_edited'):
edited = self.last_edited is not None
doc[EDITED] = edited
edited and doc.update({
LAST_EDITED: convert_to_serializable_date(self.last_edited)
})
return doc
def get_dict(self, force_new=False, flat=True):
"""Return a python object representation of this instance's XML."""
self._set_parser()
instance_dict = self._parser.get_flat_dict_with_attributes() if flat \
else self._parser.to_dict()
return self.numeric_converter(instance_dict)
def _set_survey_type(self):
self.survey_type = self.get_root_node_name()
def _set_parser(self):
if not hasattr(self, "_parser"):
# pylint: disable=no-member
self._parser = XFormInstanceParser(self.xml, self.xform)
def get_root_node_name(self):
self._set_parser()
return self._parser.get_root_node_name()
def _set_uuid(self):
# pylint: disable=no-member, attribute-defined-outside-init
if self.xml and not self.uuid:
# pylint: disable=no-member
uuid = get_uuid_from_xml(self.xml)
if uuid is not None:
self.uuid = uuid
set_uuid(self)
def save(self, *args, **kwargs):
force = kwargs.get('force')
if force:
del kwargs['force']
# self._check_is_merged_dataset()
self._check_active(force)
self._set_geom()
self._set_json()
self._set_survey_type()
self._set_uuid()
# pylint: disable=no-member
self.version = self.json.get(VERSION, self.xform.version)
super(Instance, self).save(*args, **kwargs)
class Attachment(models.Model):
OSM = 'osm'
instance = models.ForeignKey(
Instance, related_name="attachments", on_delete=models.CASCADE)
media_file = models.FileField(
max_length=255, upload_to=upload_to)
mimetype = models.CharField(
max_length=100, null=False, blank=True, default='')
extension = models.CharField(
max_length=10, null=False, blank=False, default=u"non", db_index=True)
date_created = models.DateTimeField(null=True, auto_now_add=True)
date_modified = models.DateTimeField(null=True, auto_now=True)
file_size = models.PositiveIntegerField(default=0)
name = models.CharField(max_length=100, null=True, blank=True)
class Meta:
ordering = ("pk", )
def save(self, *args, **kwargs):
if self.media_file and self.mimetype == '':
# guess mimetype
mimetype, encoding = mimetypes.guess_type(self.media_file.name)
if mimetype:
self.mimetype = mimetype
if self.media_file and len(self.media_file.name) > 255:
raise ValueError(
"Length of the media file should be less or equal to 255")
try:
f_size = self.media_file.size
if f_size:
self.file_size = f_size
except (OSError, AttributeError):
pass
try:
self.name = self.filename
self.extension = self.name.rsplit('.', 1)[1]
except Exception:
pass
super(Attachment, self).save(*args, **kwargs)
@property
def file_hash(self):
if self.media_file.storage.exists(self.media_file.name):
return u'%s' % md5(self.media_file.read()).hexdigest()
return u''
@property
def filename(self):
if self.media_file:
return os.path.basename(self.media_file.name)
def is_newline_error(e):
"""
    Return True if e is a newline error based on the error text.
Otherwise return False.
"""
newline_error = u'new-line character seen in unquoted field - do you need'\
u' to open the file in universal-newline mode?'
return newline_error == str(e)
def process_xlsform(xls, default_name):
"""
Process XLSForm file and return the survey dictionary for the XLSForm.
"""
# FLOW Results package is a JSON file.
file_object = None
if xls.name.endswith('csv'):
# a csv file gets closed in pyxform, make a copy
xls.seek(0)
file_object = BytesIO()
file_object.write(xls.read())
file_object.seek(0)
xls.seek(0)
try:
return parse_file_to_json(xls.name, file_object=file_object or xls)
except csv.Error as e:
if is_newline_error(e):
xls.seek(0)
file_object = StringIO(
u'\n'.join(xls.read().splitlines()))
return parse_file_to_json(
xls.name, default_name=default_name, file_object=file_object)
raise e
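# Illustrative usage (hypothetical upload object), kept as comments so the
# module body is unchanged at import time:
#   survey_dict = process_xlsform(uploaded_xls, default_name='survey')
#   survey = create_survey_element_from_dict(survey_dict)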
def get_columns_with_hxl(survey_elements):
'''
Returns a dictionary whose keys are xform field names and values are
`instance::hxl` values set on the xform
:param survey_elements - survey elements of an xform
return dictionary or None
'''
return survey_elements and {
se.get('name'): val.get('hxl')
for se in survey_elements
for key, val in se.items()
if key == 'instance' and val and 'hxl' in val
}
def check_version_set(survey):
"""
Checks if the version has been set in the xls file and if not adds
the default version in this datetime (yyyymmddhhmm) format.
"""
# get the json and check for the version key
survey_json = json.loads(survey.to_json())
if not survey_json.get("version"):
# set utc time as the default version
survey_json['version'] = \
timezone.now().strftime("%Y%m%d%H%M")
builder = SurveyElementBuilder()
survey = builder.create_survey_element_from_json(
json.dumps(survey_json))
return survey
class DataDictionary(XForm): # pylint: disable=too-many-instance-attributes
"""
DataDictionary model class.
"""
def __init__(self, *args, **kwargs):
self.instances_for_export = lambda d: d.instances.all()
self.has_external_choices = False
self._id_string_changed = False
super(DataDictionary, self).__init__(*args, **kwargs)
def __str__(self):
return getattr(self, "id_string", "")
def save(self, *args, **kwargs):
skip_xls_read = kwargs.get('skip_xls_read')
if self.xls and not skip_xls_read:
default_name = None \
if not self.pk else self.survey.xml_instance().tagName
survey_dict = process_xlsform(self.xls, default_name)
if has_external_choices(survey_dict):
self.has_external_choices = True
survey = create_survey_element_from_dict(survey_dict)
survey = check_version_set(survey)
if get_columns_with_hxl(survey.get('children')):
self.has_hxl_support = True
# if form is being replaced, don't check for id_string uniqueness
if self.pk is None:
new_id_string = self.get_unique_id_string(
survey.get('id_string'))
self._id_string_changed = \
new_id_string != survey.get('id_string')
survey['id_string'] = new_id_string
# For flow results packages use the user defined id/uuid
elif self.id_string != survey.get('id_string'):
raise XLSFormError(
("Your updated form's id_string '%(new_id)s' must match "
"the existing forms' id_string '%(old_id)s'." % {
'new_id': survey.get('id_string'),
'old_id': self.id_string}))
elif default_name and default_name != survey.get('name'):
survey['name'] = default_name
else:
survey['id_string'] = self.id_string
self.json = survey.to_json()
self.xml = survey.to_xml()
self.version = survey.get('version')
self.last_updated_at = timezone.now()
self.title = survey.get('title')
self._mark_start_time_boolean()
set_uuid(self)
self._set_uuid_in_xml()
self._set_hash()
if 'skip_xls_read' in kwargs:
del kwargs['skip_xls_read']
super(DataDictionary, self).save(*args, **kwargs)
def file_name(self):
return os.path.split(self.xls.name)[-1]
def sheet_to_csv(xls_content, sheet_name):
"""Writes a csv file of a specified sheet from a an excel file
:param xls_content: Excel file contents
:param sheet_name: the name of the excel sheet to generate the csv file
:returns: a (StrionIO) csv file object
"""
workbook = xlrd.open_workbook(file_contents=xls_content)
sheet = workbook.sheet_by_name(sheet_name)
if not sheet or sheet.nrows < 2:
raise Exception("Sheet <'%(sheet_name)s'> has no data." % {
'sheet_name': sheet_name})
    # The stdlib csv module writes text and has no ``encoding`` kwarg (that
    # belongs to the third-party ``unicodecsv``), so write into a StringIO.
    csv_file = StringIO()
    writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
mask = [v and len(v.strip()) > 0 for v in sheet.row_values(0)]
header = [v for v, m in zip(sheet.row_values(0), mask) if m]
writer.writerow(header)
name_column = None
try:
name_column = header.index('name')
except ValueError:
pass
integer_fields = False
date_fields = False
    # ``name_column`` may legitimately be 0, so test against None explicitly.
    if name_column is not None:
name_column_values = sheet.col_values(name_column)
for index in range(len(name_column_values)):
if sheet.cell_type(index, name_column) == xlrd.XL_CELL_NUMBER:
integer_fields = True
elif sheet.cell_type(index, name_column) == xlrd.XL_CELL_DATE:
date_fields = True
for row in range(1, sheet.nrows):
if integer_fields or date_fields:
# convert integers to string/datetime if name has numbers/dates
row_values = []
for index, val in enumerate(sheet.row_values(row)):
if sheet.cell_type(row, index) == xlrd.XL_CELL_NUMBER:
try:
val = str(
float(val) if (
float(val) > int(val)
) else int(val)
)
except ValueError:
pass
elif sheet.cell_type(row, index) == xlrd.XL_CELL_DATE:
val = xlrd.xldate_as_datetime(
val, workbook.datemode).isoformat()
row_values.append(val)
writer.writerow([v for v, m in zip(row_values, mask) if m])
else:
writer.writerow(
[v for v, m in zip(sheet.row_values(row), mask) if m])
return csv_file
def set_object_permissions(sender, instance=None, created=False, **kwargs):
"""
Apply the relevant object permissions for the form to all users who should
have access to it.
"""
# seems the super is not called, have to get xform from here
xform = XForm.objects.get(pk=instance.pk)
if hasattr(instance, 'has_external_choices') \
and instance.has_external_choices:
instance.xls.seek(0)
f = sheet_to_csv(instance.xls.read(), 'external_choices')
f.seek(0, os.SEEK_END)
size = f.tell()
f.seek(0)
data_file = InMemoryUploadedFile(
file=f,
field_name='data_file',
name='itemsets.csv',
content_type='text/csv',
size=size,
charset=None
)
MetaData.media_upload(xform, data_file)
post_save.connect(set_object_permissions, sender=DataDictionary,
dispatch_uid='xform_object_permissions')
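To tie the pieces together, a hedged sketch of publishing a form with this model; the user object and uploaded XLSForm file are hypothetical. save() performs the parsing seen above (setting id_string, title, uuid and hash), and the post_save hook uploads itemsets.csv when external choices are present:
dd = DataDictionary(user=some_user, xls=uploaded_xlsform)
dd.save()
print(dd.id_string, dd.version, dd.hash)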
| 34.818865
| 96
| 0.601995
|
import csv
import json
import mimetypes
import os
import random
import re
import requests
import xlrd
from contextlib import closing
from hashlib import md5
from io import BytesIO
from io import StringIO
from pyxform import SurveyElementBuilder
from pyxform.builder import create_survey_element_from_dict
from pyxform.utils import has_external_choices
from pyxform.xform2json import create_survey_element_from_xml
from pyxform.xls2json import parse_file_to_json
from xml.dom import Node
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.contrib.gis.db import models
from django.contrib.gis.geos import GeometryCollection, Point
from django.core.exceptions import ValidationError
from django.core.files.temp import NamedTemporaryFile
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.validators import URLValidator
from django.db.models.signals import post_save
from django.utils import timezone
from .tags import (
UUID, ID, ATTACHMENTS, STATUS, NOTES, VERSION, DURATION, XFORM_ID_STRING,
XFORM_ID, GEOLOCATION, SUBMITTED_BY, SUBMISSION_TIME, TOTAL_MEDIA,
MEDIA_COUNT, MEDIA_ALL_RECEIVED, EDITED, LAST_EDITED, KNOWN_MEDIA_TYPES,
START, END
)
from .utils import (
get_values_matching_key, get_uuid_from_xml, set_uuid, XFormInstanceParser,
clean_and_parse_xml, get_numeric_fields, numeric_checker,
_get_tag_or_element_type_xpath, calculate_duration
)
if 'postg' in settings.DATABASES['default']['ENGINE']:
from django.contrib.postgres.fields import JSONField
else:
from jsonfield import JSONField
CHUNK_SIZE = 1024
XFORM_TITLE_LENGTH = 255
title_pattern = re.compile(r"<h:title>(.*?)</h:title>")
def contains_xml_invalid_char(text, invalids=['&', '>', '<']):
return 1 in [c in text for c in invalids]
def convert_to_serializable_date(date):
if hasattr(date, 'isoformat'):
return date.isoformat()
return date
def _get_attachments_from_instance(instance):
attachments = []
for a in instance.attachments.all():
attachment = dict()
attachment['download_url'] = a.media_file.url
attachment['small_download_url'] = a.media_file.url
attachment['medium_download_url'] = a.media_file.url
attachment['mimetype'] = a.mimetype
attachment['filename'] = a.media_file.name
attachment['name'] = a.name
attachment['instance'] = a.instance.pk
attachment['xform'] = instance.xform.id
attachment['id'] = a.id
attachments.append(attachment)
return attachments
def get_default_content_type():
content_object, created = ContentType.objects.get_or_create(
app_label="xform", model='xform')
return content_object.id
def upload_to(instance, filename):
try:
return os.path.join(
instance.user.username, 'xls',
os.path.split(filename)[1])
except Exception:
folder = "{}_{}".format(instance.instance.xform.id,
instance.instance.xform.id_string)
return os.path.join(
instance.instance.xform.user.username, 'attachments', folder,
os.path.split(filename)[1])
class XLSFormError(Exception):
pass
class FormInactiveError(Exception):
pass
class XForm(models.Model):
dynamic_choices = True
xls = models.FileField(upload_to=upload_to, null=True)
json = models.TextField(default=u'')
description = models.TextField(default=u'', null=True, blank=True)
xml = models.TextField()
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='xforms', null=True, on_delete=models.CASCADE)
id_string = models.SlugField(
editable=False,
verbose_name="ID",
max_length=100)
title = models.CharField(editable=False, max_length=255)
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
last_submission_time = models.DateTimeField(blank=True, null=True)
has_start_time = models.BooleanField(default=False)
uuid = models.CharField(max_length=36, default=u'')
uuid_regex = re.compile(r'(<instance>.*?id="[^"]+">)(.*</instance>)(.*)',
re.DOTALL)
instance_id_regex = re.compile(r'<instance>.*?id="([^"]+)".*</instance>',
re.DOTALL)
instances_with_geopoints = models.BooleanField(default=False)
num_of_submissions = models.IntegerField(default=0)
version = models.CharField(
max_length=255, null=True, blank=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE)
metadata_set = GenericRelation(
'MetaData',
content_type_field='content_type_id',
object_id_field="object_id")
has_hxl_support = models.BooleanField(default=False)
last_updated_at = models.DateTimeField(auto_now=True)
hash = models.CharField("Hash", max_length=36, blank=True, null=True,
default=None)
class Meta:
unique_together = ("user", "id_string",)
verbose_name = "XForm"
verbose_name_plural = "XForms"
ordering = ("pk", )
def get_osm_survey_xpaths(self):
return [
elem.get_abbreviated_xpath()
for elem in self.get_survey_elements_of_type('osm')]
def get_media_survey_xpaths(self):
return [
e.get_abbreviated_xpath()
for e in sum([
self.get_survey_elements_of_type(m) for m in KNOWN_MEDIA_TYPES
], [])
]
def file_name(self):
return self.id_string + ".xml"
def get_survey_elements_of_type(self, element_type):
return [
e for e in self.get_survey_elements() if e.type == element_type
]
def _set_uuid_in_xml(self, file_name=None):
if not file_name:
file_name = self.file_name()
file_name, file_ext = os.path.splitext(file_name)
doc = clean_and_parse_xml(self.xml)
model_nodes = doc.getElementsByTagName("model")
if len(model_nodes) != 1:
raise Exception(u"xml contains multiple model nodes")
model_node = model_nodes[0]
instance_nodes = [
node for node in model_node.childNodes
if node.nodeType == Node.ELEMENT_NODE and
node.tagName.lower() == "instance" and not node.hasAttribute("id")
]
if len(instance_nodes) != 1:
raise Exception("Multiple instance nodes without the id "
"attribute, can't tell which is the main one")
instance_node = instance_nodes[0]
# get the first child whose id attribute matches our id_string
survey_nodes = [
node for node in instance_node.childNodes
if node.nodeType == Node.ELEMENT_NODE and
(node.tagName == file_name or node.attributes.get('id'))
]
if len(survey_nodes) != 1:
raise Exception(
"Multiple survey nodes with the id '%s'" % self.id_string)
survey_node = survey_nodes[0]
formhub_nodes = [
n for n in survey_node.childNodes
if n.nodeType == Node.ELEMENT_NODE and n.tagName == "formhub"
]
if len(formhub_nodes) > 1:
raise Exception(
"Multiple formhub nodes within main instance node")
elif len(formhub_nodes) == 1:
formhub_node = formhub_nodes[0]
else:
formhub_node = survey_node.insertBefore(
doc.createElement("formhub"), survey_node.firstChild)
uuid_nodes = [
node for node in formhub_node.childNodes
if node.nodeType == Node.ELEMENT_NODE and node.tagName == "uuid"
]
if len(uuid_nodes) == 0:
formhub_node.appendChild(doc.createElement("uuid"))
if len(formhub_nodes) == 0:
# append the calculate bind node
calculate_node = doc.createElement("bind")
calculate_node.setAttribute(
"nodeset", "/%s/formhub/uuid" % survey_node.tagName)
calculate_node.setAttribute("type", "string")
calculate_node.setAttribute("calculate", "'%s'" % self.uuid)
model_node.appendChild(calculate_node)
self.xml = doc.toprettyxml(indent=" ", encoding='utf-8')
# hack
# http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\
# and-silly-whitespace/
        text_re = re.compile(r'(>)\n\s*(\s[^<>\s].*?)\n\s*(\s</)', re.DOTALL)
        output_re = re.compile(r'\n.*(<output.*>)\n( )*')
        pretty_xml = text_re.sub(lambda m: ''.join(m.group(1, 2, 3)),
                                 self.xml.decode('utf-8'))
        inline_output = output_re.sub(r'\g<1>', pretty_xml)
        inline_output = re.compile(r'<label>\s*\n*\s*\n*\s*</label>').sub(
            '<label></label>', inline_output)
self.xml = inline_output
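    # Hedged sketch of what _set_uuid_in_xml above produces (illustration
    # only): the main instance gains a <formhub><uuid/></formhub> child and,
    # when the formhub node was newly created, a calculate bind of the form
    # <bind nodeset="/<root>/formhub/uuid" type="string" calculate="'<uuid>'"/>.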
def _mark_start_time_boolean(self):
starttime_substring = 'jr:preloadParams="start"'
if self.xml.find(starttime_substring) != -1:
self.has_start_time = True
else:
self.has_start_time = False
def _id_string_already_exists_in_account(self, id_string):
try:
XForm.objects.get(id_string__iexact=id_string)
except XForm.DoesNotExist:
return False
return True
def get_unique_id_string(self, id_string, count=0):
# used to generate a new id_string for new data_dictionary object if
# id_string already existed
if self._id_string_already_exists_in_account(id_string):
if count != 0:
if re.match(r'\w+_\d+$', id_string):
a = id_string.split('_')
id_string = "_".join(a[:-1])
count += 1
id_string = "{}_{}".format(id_string, count)
return self.get_unique_id_string(id_string, count)
return id_string
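    # Hedged example of the renaming scheme above: if 'survey' is taken,
    # get_unique_id_string('survey') yields 'survey_1'; if 'survey_1' is also
    # taken, the numeric suffix is stripped and bumped, yielding 'survey_2'.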
def _set_title(self):
xml = re.sub(r"\s+", " ", self.xml)
matches = title_pattern.findall(xml)
if len(matches) != 1:
raise XLSFormError(("There should be a single title."), matches)
if matches:
title_xml = matches[0][:XFORM_TITLE_LENGTH]
else:
title_xml = self.title[:XFORM_TITLE_LENGTH] if self.title else ''
if self.title and title_xml != self.title:
title_xml = self.title[:XFORM_TITLE_LENGTH]
if isinstance(self.xml, bytes):
self.xml = self.xml.decode('utf-8')
self.xml = title_pattern.sub(u"<h:title>%s</h:title>" % title_xml,
self.xml)
self._set_hash()
if contains_xml_invalid_char(title_xml):
raise XLSFormError(
"Title shouldn't have any invalid xml "
"characters ('>' '&' '<')"
)
self.title = title_xml
def get_hash(self):
return u'md5:%s' % md5(self.xml.encode('utf8')).hexdigest()
def get_random_hash(self):
return u'md5:%s' % md5(
("%s-%s" % (
self.xml,
random.randint(0, 25101991)
)).encode('utf8')
).hexdigest()
@property
def random_hash(self):
return self.get_random_hash()
def _set_hash(self):
self.hash = self.get_hash()
def _set_id_string(self):
matches = self.instance_id_regex.findall(self.xml)
if len(matches) != 1:
raise XLSFormError("There should be a single id string.")
self.id_string = matches[0]
def save(self, *args, **kwargs):
update_fields = kwargs.get('update_fields')
if update_fields:
kwargs['update_fields'] = list(
set(list(update_fields) + ['date_modified']))
if update_fields is None or 'title' in update_fields:
self._set_title()
if self.pk is None:
self._set_hash()
if update_fields is None or 'id_string' in update_fields:
old_id_string = self.id_string
self._set_id_string()
if self.pk and old_id_string and old_id_string != self.id_string \
and self.num_of_submissions > 0:
raise XLSFormError(
"Your updated form's id_string '%(new_id)s' must match "
"the existing forms' id_string '%(old_id)s'." % {
'new_id': self.id_string,
'old_id': old_id_string
})
if getattr(settings, 'STRICT', True) and \
not re.search(r"^[\w-]+$", self.id_string):
raise XLSFormError(
'In strict mode, the XForm ID must be a '
'valid slug and contain no spaces.')
if 'skip_xls_read' in kwargs:
del kwargs['skip_xls_read']
super(XForm, self).save(*args, **kwargs)
def get_survey(self):
if not hasattr(self, "_survey"):
try:
builder = SurveyElementBuilder()
self._survey = \
builder.create_survey_element_from_json(self.json)
except ValueError:
xml = bytes(bytearray(self.xml, encoding='utf-8'))
self._survey = create_survey_element_from_xml(xml)
return self._survey
survey = property(get_survey)
def get_survey_elements(self):
return self.survey.iter_descendants()
def geopoint_xpaths(self):
survey_elements = self.get_survey_elements()
return [
e.get_abbreviated_xpath() for e in survey_elements
if e.bind.get(u'type') == u'geopoint'
]
def __str__(self):
return self.id_string
def type_for_form(content_object, data_type):
content_type = ContentType.objects.get_for_model(content_object)
return MetaData.objects.filter(object_id=content_object.id,
content_type=content_type,
data_type=data_type)
def is_valid_url(uri):
try:
URLValidator(uri)
except ValidationError:
return False
return True
def create_media(media):
if is_valid_url(media.data_value):
filename = media.data_value.split('/')[-1]
data_file = NamedTemporaryFile()
content_type = mimetypes.guess_type(filename)
with closing(requests.get(media.data_value, stream=True)) as r:
for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
if chunk:
data_file.write(chunk)
data_file.seek(os.SEEK_SET, os.SEEK_END)
size = os.path.getsize(data_file.name)
data_file.seek(os.SEEK_SET)
media.data_value = filename
media.data_file = InMemoryUploadedFile(
data_file, 'data_file', filename, content_type,
size, charset=None)
return media
return None
def media_resources(media_list, download=False):
data = []
for media in media_list:
if media.data_file.name == '' and download:
media = create_media(media)
if media:
data.append(media)
else:
data.append(media)
return data
def meta_data_upload_to(instance, filename):
username = None
if instance.content_object.user is None and \
instance.content_type.model == 'instance':
username = instance.content_object.xform.user.username
else:
username = instance.content_object.user.username
if instance.data_type == 'media':
return os.path.join(username, 'formid-media', filename)
return os.path.join(username, 'docs', filename)
class MetaData(models.Model):
data_type = models.CharField(max_length=255)
data_value = models.CharField(max_length=255)
data_file = models.FileField(
upload_to=meta_data_upload_to, blank=True, null=True)
data_file_type = models.CharField(max_length=255, blank=True, null=True)
file_hash = models.CharField(max_length=50, blank=True, null=True)
date_created = models.DateTimeField(null=True, auto_now_add=True)
date_modified = models.DateTimeField(null=True, auto_now=True)
deleted_at = models.DateTimeField(null=True, default=None)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE,
default=get_default_content_type)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = GenericForeignKey('content_type', 'object_id')
objects = models.Manager()
class Meta:
unique_together = ('object_id', 'data_type', 'data_value',
'content_type')
def __str__(self):
return self.data_value
def file(self, username=None):
if hasattr(self, '_file'):
return self._file
url = requests.Request(
'GET', self.data_value, params={
'username': username
}
).prepare().url
self._file = MetaData.get_file(url)
return self._file
@staticmethod
def media_upload(content_object, data_file=None, download=False):
data_type = 'media'
if data_file:
allowed_types = settings.XFORM_SUPPORTED_MEDIA_UPLOAD_TYPES
data_content_type = data_file.content_type \
if data_file.content_type in allowed_types else \
mimetypes.guess_type(data_file.name)[0]
if data_content_type in allowed_types:
content_type = ContentType.objects.get_for_model(
content_object)
media, created = MetaData.objects.update_or_create(
data_type=data_type,
content_type=content_type,
object_id=content_object.id,
data_value=data_file.name,
defaults={
'data_file': data_file,
'data_file_type': data_content_type
})
return media_resources(
type_for_form(content_object, data_type), download)
@staticmethod
def get_md5(data_file):
hash_md5 = md5()
for chunk in iter(lambda: data_file.read(4096), b""):
hash_md5.update(chunk)
return 'md5:%s' % hash_md5.hexdigest()
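    # Hedged usage sketch of get_md5 (illustration only; the path is
    # hypothetical):
    #   with open('itemsets.csv', 'rb') as f:
    #       MetaData.get_md5(f)  # -> 'md5:<hexdigest>', hashed in 4 KB chunks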
@staticmethod
def get_file(url):
data_file = None
output = BytesIO()
def getsize(f):
f.seek(0)
f.read()
s = f.tell()
f.seek(0)
return s
r = requests.get(url, allow_redirects=True)
d = r.headers['content-disposition']
fname = re.findall("filename=\"(.+)\"", d)[0]
content_type = r.headers.get('content-type')
output.write(r.content)
size = getsize(output)
data_file = InMemoryUploadedFile(
file=output, name=fname,
field_name=None,
content_type=content_type,
charset='utf-8', size=size
)
return data_file
@staticmethod
def add_url(content_object, url=None, download=False):
data_type = 'url'
try:
data_file = MetaData.get_file(url)
except Exception:
return None
allowed_types = settings.XFORM_SUPPORTED_MEDIA_UPLOAD_TYPES
data_content_type = data_file.content_type \
if data_file.content_type in allowed_types else \
mimetypes.guess_type(data_file.name)[0]
if data_content_type in allowed_types:
content_type = ContentType.objects.get_for_model(
content_object)
media, created = MetaData.objects.update_or_create(
data_type=data_type,
content_type=content_type,
object_id=content_object.id,
data_value=url,
defaults={
'data_file': None,
'data_file_type': data_content_type
})
return media_resources(
type_for_form(content_object, data_type), download)
def save(self, *args, **kwargs):
self._set_hash()
super(MetaData, self).save(*args, **kwargs)
@property
def hash(self):
if self.file_hash is not None and self.file_hash != '':
return self.file_hash
else:
return self._set_hash()
def _set_hash(self):
if not self.data_file:
return None
file_exists = self.data_file.storage.exists(self.data_file.name)
if (file_exists and self.data_file.name != '') \
or (not file_exists and self.data_file):
try:
self.data_file.seek(os.SEEK_SET)
except IOError:
return ''
else:
self.file_hash = 'md5:%s' % md5(
self.data_file.read()).hexdigest()
return self.file_hash
return ''
class Instance(models.Model):
json = JSONField(default=dict, null=False)
xml = models.TextField()
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='instances', null=True, on_delete=models.CASCADE)
xform = models.ForeignKey('xform.XForm', null=False,
related_name='instances', on_delete=models.CASCADE)
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
last_edited = models.DateTimeField(null=True, default=None)
status = models.CharField(max_length=20,
default=u'submitted_via_web')
uuid = models.CharField(max_length=249, default=u'', db_index=True)
version = models.CharField(max_length=255, null=True)
geom = models.GeometryCollectionField(null=True)
media_all_received = models.NullBooleanField(
"Received All Media Attachemts",
null=True,
default=True)
total_media = models.PositiveIntegerField("Total Media Attachments",
null=True,
default=0)
media_count = models.PositiveIntegerField("Received Media Attachments",
null=True,
default=0)
checksum = models.CharField(max_length=64, null=True, blank=True,
db_index=True)
class Meta:
unique_together = ('xform', 'uuid')
def __str__(self):
return "Status: %s" % self.status
@property
def point(self):
gc = self.geom
if gc and len(gc):
return gc[0]
def get_duration(self):
data = self.get_dict()
start_name = _get_tag_or_element_type_xpath(self.xform, START)
end_name = _get_tag_or_element_type_xpath(self.xform, END)
start_time, end_time = data.get(start_name), data.get(end_name)
return calculate_duration(start_time, end_time)
@property
def num_of_media(self):
if not hasattr(self, '_num_of_media'):
self._num_of_media = len(self.get_expected_media())
return self._num_of_media
@property
def attachments_count(self):
return len(set(self.attachments.filter(
name__in=self.get_expected_media()
).values_list('name', flat=True)))
def get_expected_media(self):
if not hasattr(self, '_expected_media'):
data = self.get_dict()
media_list = []
if 'encryptedXmlFile' in data and self.xform.encrypted:
media_list.append(data['encryptedXmlFile'])
if 'media' in data:
media_list.extend([i['media/file'] for i in data['media']])
else:
media_xpaths = (self.xform.get_media_survey_xpaths() +
self.xform.get_osm_survey_xpaths())
for media_xpath in media_xpaths:
media_list.extend(
get_values_matching_key(data, media_xpath))
self._expected_media = list(set(media_list))
return self._expected_media
def numeric_converter(self, json_dict, numeric_fields=None):
if numeric_fields is None:
numeric_fields = get_numeric_fields(self.xform)
for key, value in json_dict.items():
if isinstance(value, (str, bytes)) and key in numeric_fields:
converted_value = numeric_checker(value)
if converted_value:
json_dict[key] = converted_value
elif isinstance(value, dict):
json_dict[key] = self.numeric_converter(
value, numeric_fields)
elif isinstance(value, list):
for k, v in enumerate(value):
if isinstance(v, (str, bytes)) and key in numeric_fields:
converted_value = numeric_checker(v)
if converted_value:
json_dict[key] = converted_value
elif isinstance(v, dict):
value[k] = self.numeric_converter(
v, numeric_fields)
return json_dict
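    # Hedged example: with numeric_fields == ['age'], numeric_converter maps
    # {'age': '34', 'name': 'Ada'} to {'age': 34, 'name': 'Ada'}; the actual
    # string-to-number conversion is delegated to numeric_checker.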
def _set_geom(self):
xform = self.xform
geo_xpaths = xform.geopoint_xpaths()
doc = self.get_dict()
points = []
if geo_xpaths:
for xpath in geo_xpaths:
for gps in get_values_matching_key(doc, xpath):
try:
geometry = [float(s) for s in gps.split()]
lat, lng = geometry[0:2]
points.append(Point(lng, lat))
except ValueError:
return
if not xform.instances_with_geopoints and len(points):
xform.instances_with_geopoints = True
xform.save()
self.geom = GeometryCollection(points)
def _check_active(self, force):
pass
def _set_json(self):
self.json = self.get_full_dict()
def get_full_dict(self, load_existing=True):
doc = self.json or {} if load_existing else {}
doc = self.get_dict()
if self.id:
doc.update({
UUID: self.uuid,
ID: self.id,
ATTACHMENTS: _get_attachments_from_instance(self),
STATUS: self.status,
NOTES: [],
VERSION: self.version,
DURATION: self.get_duration(),
XFORM_ID_STRING: self._parser.get_xform_id_string(),
XFORM_ID: self.xform.pk,
GEOLOCATION: [self.point.y, self.point.x] if self.point
else [None, None],
SUBMITTED_BY: self.user.username if self.user else None
})
if not self.date_created:
self.date_created = timezone.now()
doc[SUBMISSION_TIME] = self.date_created.strftime(
'%Y-%m-%dT%H:%M:%S')
doc[TOTAL_MEDIA] = self.total_media
doc[MEDIA_COUNT] = self.media_count
doc[MEDIA_ALL_RECEIVED] = self.media_all_received
edited = False
if hasattr(self, 'last_edited'):
edited = self.last_edited is not None
doc[EDITED] = edited
            if edited:
                doc.update({
                    LAST_EDITED: convert_to_serializable_date(self.last_edited)
                })
return doc
def get_dict(self, force_new=False, flat=True):
self._set_parser()
instance_dict = self._parser.get_flat_dict_with_attributes() if flat \
else self._parser.to_dict()
return self.numeric_converter(instance_dict)
def _set_survey_type(self):
self.survey_type = self.get_root_node_name()
def _set_parser(self):
if not hasattr(self, "_parser"):
self._parser = XFormInstanceParser(self.xml, self.xform)
def get_root_node_name(self):
self._set_parser()
return self._parser.get_root_node_name()
def _set_uuid(self):
if self.xml and not self.uuid:
uuid = get_uuid_from_xml(self.xml)
if uuid is not None:
self.uuid = uuid
set_uuid(self)
def save(self, *args, **kwargs):
force = kwargs.get('force')
if force:
del kwargs['force']
self._check_active(force)
self._set_geom()
self._set_json()
self._set_survey_type()
self._set_uuid()
self.version = self.json.get(VERSION, self.xform.version)
super(Instance, self).save(*args, **kwargs)
class Attachment(models.Model):
OSM = 'osm'
instance = models.ForeignKey(
Instance, related_name="attachments", on_delete=models.CASCADE)
media_file = models.FileField(
max_length=255, upload_to=upload_to)
mimetype = models.CharField(
max_length=100, null=False, blank=True, default='')
extension = models.CharField(
max_length=10, null=False, blank=False, default=u"non", db_index=True)
date_created = models.DateTimeField(null=True, auto_now_add=True)
date_modified = models.DateTimeField(null=True, auto_now=True)
file_size = models.PositiveIntegerField(default=0)
name = models.CharField(max_length=100, null=True, blank=True)
class Meta:
ordering = ("pk", )
def save(self, *args, **kwargs):
if self.media_file and self.mimetype == '':
mimetype, encoding = mimetypes.guess_type(self.media_file.name)
if mimetype:
self.mimetype = mimetype
if self.media_file and len(self.media_file.name) > 255:
            raise ValueError(
                "Length of the media file name should be less than or equal to 255")
try:
f_size = self.media_file.size
if f_size:
self.file_size = f_size
except (OSError, AttributeError):
pass
try:
self.name = self.filename
self.extension = self.name.rsplit('.', 1)[1]
except Exception:
pass
super(Attachment, self).save(*args, **kwargs)
@property
def file_hash(self):
if self.media_file.storage.exists(self.media_file.name):
return u'%s' % md5(self.media_file.read()).hexdigest()
return u''
@property
def filename(self):
if self.media_file:
return os.path.basename(self.media_file.name)
def is_newline_error(e):
newline_error = u'new-line character seen in unquoted field - do you need'\
u' to open the file in universal-newline mode?'
return newline_error == str(e)
def process_xlsform(xls, default_name):
file_object = None
if xls.name.endswith('csv'):
xls.seek(0)
file_object = BytesIO()
file_object.write(xls.read())
file_object.seek(0)
xls.seek(0)
try:
return parse_file_to_json(xls.name, file_object=file_object or xls)
except csv.Error as e:
if is_newline_error(e):
xls.seek(0)
file_object = StringIO(
u'\n'.join(xls.read().splitlines()))
return parse_file_to_json(
xls.name, default_name=default_name, file_object=file_object)
raise e
def get_columns_with_hxl(survey_elements):
return survey_elements and {
se.get('name'): val.get('hxl')
for se in survey_elements
for key, val in se.items()
if key == 'instance' and val and 'hxl' in val
}
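# Hedged example of the comprehension above: survey elements such as
#   [{'name': 'sex', 'instance': {'hxl': '#sex'}}]
# yield {'sex': '#sex'}; elements without an 'instance'/'hxl' entry are
# skipped.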
def check_version_set(survey):
survey_json = json.loads(survey.to_json())
if not survey_json.get("version"):
survey_json['version'] = \
timezone.now().strftime("%Y%m%d%H%M")
builder = SurveyElementBuilder()
survey = builder.create_survey_element_from_json(
json.dumps(survey_json))
return survey
class DataDictionary(XForm):
def __init__(self, *args, **kwargs):
self.instances_for_export = lambda d: d.instances.all()
self.has_external_choices = False
self._id_string_changed = False
super(DataDictionary, self).__init__(*args, **kwargs)
def __str__(self):
return getattr(self, "id_string", "")
def save(self, *args, **kwargs):
skip_xls_read = kwargs.get('skip_xls_read')
if self.xls and not skip_xls_read:
default_name = None \
if not self.pk else self.survey.xml_instance().tagName
survey_dict = process_xlsform(self.xls, default_name)
if has_external_choices(survey_dict):
self.has_external_choices = True
survey = create_survey_element_from_dict(survey_dict)
survey = check_version_set(survey)
if get_columns_with_hxl(survey.get('children')):
self.has_hxl_support = True
if self.pk is None:
new_id_string = self.get_unique_id_string(
survey.get('id_string'))
self._id_string_changed = \
new_id_string != survey.get('id_string')
survey['id_string'] = new_id_string
# For flow results packages use the user defined id/uuid
elif self.id_string != survey.get('id_string'):
raise XLSFormError(
("Your updated form's id_string '%(new_id)s' must match "
"the existing forms' id_string '%(old_id)s'." % {
'new_id': survey.get('id_string'),
'old_id': self.id_string}))
elif default_name and default_name != survey.get('name'):
survey['name'] = default_name
else:
survey['id_string'] = self.id_string
self.json = survey.to_json()
self.xml = survey.to_xml()
self.version = survey.get('version')
self.last_updated_at = timezone.now()
self.title = survey.get('title')
self._mark_start_time_boolean()
set_uuid(self)
self._set_uuid_in_xml()
self._set_hash()
if 'skip_xls_read' in kwargs:
del kwargs['skip_xls_read']
super(DataDictionary, self).save(*args, **kwargs)
def file_name(self):
return os.path.split(self.xls.name)[-1]
def sheet_to_csv(xls_content, sheet_name):
workbook = xlrd.open_workbook(file_contents=xls_content)
sheet = workbook.sheet_by_name(sheet_name)
if not sheet or sheet.nrows < 2:
raise Exception("Sheet <'%(sheet_name)s'> has no data." % {
'sheet_name': sheet_name})
    csv_file = StringIO()
    writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
mask = [v and len(v.strip()) > 0 for v in sheet.row_values(0)]
header = [v for v, m in zip(sheet.row_values(0), mask) if m]
writer.writerow(header)
name_column = None
try:
name_column = header.index('name')
except ValueError:
pass
integer_fields = False
date_fields = False
if name_column:
name_column_values = sheet.col_values(name_column)
for index in range(len(name_column_values)):
if sheet.cell_type(index, name_column) == xlrd.XL_CELL_NUMBER:
integer_fields = True
elif sheet.cell_type(index, name_column) == xlrd.XL_CELL_DATE:
date_fields = True
for row in range(1, sheet.nrows):
if integer_fields or date_fields:
# convert integers to string/datetime if name has numbers/dates
row_values = []
for index, val in enumerate(sheet.row_values(row)):
if sheet.cell_type(row, index) == xlrd.XL_CELL_NUMBER:
try:
val = str(
float(val) if (
float(val) > int(val)
) else int(val)
)
except ValueError:
pass
elif sheet.cell_type(row, index) == xlrd.XL_CELL_DATE:
val = xlrd.xldate_as_datetime(
val, workbook.datemode).isoformat()
row_values.append(val)
writer.writerow([v for v, m in zip(row_values, mask) if m])
else:
writer.writerow(
[v for v, m in zip(sheet.row_values(row), mask) if m])
return csv_file
def set_object_permissions(sender, instance=None, created=False, **kwargs):
# seems the super is not called, have to get xform from here
xform = XForm.objects.get(pk=instance.pk)
if hasattr(instance, 'has_external_choices') \
and instance.has_external_choices:
instance.xls.seek(0)
f = sheet_to_csv(instance.xls.read(), 'external_choices')
f.seek(0, os.SEEK_END)
size = f.tell()
f.seek(0)
data_file = InMemoryUploadedFile(
file=f,
field_name='data_file',
name='itemsets.csv',
content_type='text/csv',
size=size,
charset=None
)
MetaData.media_upload(xform, data_file)
post_save.connect(set_object_permissions, sender=DataDictionary,
dispatch_uid='xform_object_permissions')
| true
| true
|
1c459720c843885a8386143a876fd1904e17dd73
| 3,345
|
py
|
Python
|
leaderboard_service/leaderboard_service/settings.py
|
AVatch/leaderboard-service
|
9b70e24866fe862ba5d71dc3404e123303325431
|
[
"Apache-2.0"
] | 1
|
2016-02-25T22:50:22.000Z
|
2016-02-25T22:50:22.000Z
|
leaderboard_service/leaderboard_service/settings.py
|
AVatch/leaderboard-service
|
9b70e24866fe862ba5d71dc3404e123303325431
|
[
"Apache-2.0"
] | null | null | null |
leaderboard_service/leaderboard_service/settings.py
|
AVatch/leaderboard-service
|
9b70e24866fe862ba5d71dc3404e123303325431
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for leaderboard_service project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y(9k(&i#f11*to()nc^qy9nnokkwg^d(7g1zk9^p8%4!@cz)td'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
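# A hedged sketch (not part of the original settings): a production
# deployment would typically pull these values from the environment, e.g.:
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'
#   ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',')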
# Application definition
CORE_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = ['rest_framework', 'rest_framework.authtoken']
APPS = ['leaderboards']
INSTALLED_APPS = CORE_APPS + THIRD_PARTY_APPS + APPS
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'leaderboard_service.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'leaderboard_service.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| 26.338583
| 91
| 0.704933
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'y(9k(&i#f11*to()nc^qy9nnokkwg^d(7g1zk9^p8%4!@cz)td'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
CORE_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = ['rest_framework', 'rest_framework.authtoken']
APPS = ['leaderboards']
INSTALLED_APPS = CORE_APPS + THIRD_PARTY_APPS + APPS
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'leaderboard_service.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'leaderboard_service.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| true
| true
|
1c459747e39517110330c01929492f60ac06c5aa
| 503
|
py
|
Python
|
vqseg/wordseg_algorithms.py
|
janinerugayan/VectorQuantizedCPC
|
b4e9fe6aeebca3b792ab604a770e8c3e289a46a1
|
[
"MIT"
] | null | null | null |
vqseg/wordseg_algorithms.py
|
janinerugayan/VectorQuantizedCPC
|
b4e9fe6aeebca3b792ab604a770e8c3e289a46a1
|
[
"MIT"
] | null | null | null |
vqseg/wordseg_algorithms.py
|
janinerugayan/VectorQuantizedCPC
|
b4e9fe6aeebca3b792ab604a770e8c3e289a46a1
|
[
"MIT"
] | null | null | null |
"""
Word segmentation algorithms.
Author: Herman Kamper
Contact: kamperh@gmail.com
Date: 2020
"""
from wordseg.algos import tp, puddle, dpseg, baseline, dibs, ag
import wordseg.algos
def ag(utterance_list, **kwargs):
return list(wordseg.algos.ag.segment(utterance_list, **kwargs))
def tp(utterance_list, **kwargs):
return list(wordseg.algos.tp.segment(utterance_list, **kwargs))
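# Hedged usage sketch: wordseg expects utterances as space-separated phones,
# and the accepted keyword arguments depend on the installed wordseg version.
#   segmented = tp(['h e l l o w o r l d'])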
def dpseg(utterance_list, **kwargs):
return list(wordseg.algos.dpseg.segment(utterance_list, **kwargs))
| 21.869565
| 70
| 0.745527
|
from wordseg.algos import tp, puddle, dpseg, baseline, dibs, ag
import wordseg.algos
def ag(utterance_list, **kwargs):
return list(wordseg.algos.ag.segment(utterance_list, **kwargs))
def tp(utterance_list, **kwargs):
return list(wordseg.algos.tp.segment(utterance_list, **kwargs))
def dpseg(utterance_list, **kwargs):
return list(wordseg.algos.dpseg.segment(utterance_list, **kwargs))
| true
| true
|
1c45984c4c6ee38da52bda0420ddc998d5a7f5a2
| 2,024
|
py
|
Python
|
tests/test_git.py
|
igorbernstein2/synthtool
|
6b33cffb4301c3f05cc6976fff0022d98b47772f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_git.py
|
igorbernstein2/synthtool
|
6b33cffb4301c3f05cc6976fff0022d98b47772f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_git.py
|
igorbernstein2/synthtool
|
6b33cffb4301c3f05cc6976fff0022d98b47772f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import pytest
from synthtool.sources import git
def test_make_repo_clone_url(monkeypatch):
monkeypatch.setattr(git, "USE_SSH", True)
assert (
git.make_repo_clone_url("theacodes/nox") == "git@github.com:theacodes/nox.git"
)
def test_make_repo_clone_url_https(monkeypatch):
monkeypatch.setattr(git, "USE_SSH", False)
assert (
git.make_repo_clone_url("theacodes/nox")
== "https://github.com/theacodes/nox.git"
)
@pytest.mark.parametrize(
("input, expected"),
[
("git@github.com:theacodes/nox.git", {"owner": "theacodes", "name": "nox"}),
("https://github.com/theacodes/nox.git", {"owner": "theacodes", "name": "nox"}),
("theacodes/nox", {"owner": "theacodes", "name": "nox"}),
("theacodes/nox.git", {"owner": "theacodes", "name": "nox"}),
],
)
def test_parse_repo_url(input, expected):
assert git.parse_repo_url(input) == expected
@mock.patch("subprocess.check_output", autospec=True)
def test_get_latest_commit(check_call):
check_call.return_value = b"abc123\ncommit\nmessage."
sha, message = git.get_latest_commit()
assert sha == "abc123"
assert message == "commit\nmessage."
def test_extract_commit_message_metadata():
message = """\
Hello, world!
One: Hello!
Two: 1234
"""
metadata = git.extract_commit_message_metadata(message)
assert metadata == {"One": "Hello!", "Two": "1234"}
| 28.914286
| 88
| 0.6917
|
from unittest import mock
import pytest
from synthtool.sources import git
def test_make_repo_clone_url(monkeypatch):
monkeypatch.setattr(git, "USE_SSH", True)
assert (
git.make_repo_clone_url("theacodes/nox") == "git@github.com:theacodes/nox.git"
)
def test_make_repo_clone_url_https(monkeypatch):
monkeypatch.setattr(git, "USE_SSH", False)
assert (
git.make_repo_clone_url("theacodes/nox")
== "https://github.com/theacodes/nox.git"
)
@pytest.mark.parametrize(
("input, expected"),
[
("git@github.com:theacodes/nox.git", {"owner": "theacodes", "name": "nox"}),
("https://github.com/theacodes/nox.git", {"owner": "theacodes", "name": "nox"}),
("theacodes/nox", {"owner": "theacodes", "name": "nox"}),
("theacodes/nox.git", {"owner": "theacodes", "name": "nox"}),
],
)
def test_parse_repo_url(input, expected):
assert git.parse_repo_url(input) == expected
@mock.patch("subprocess.check_output", autospec=True)
def test_get_latest_commit(check_call):
check_call.return_value = b"abc123\ncommit\nmessage."
sha, message = git.get_latest_commit()
assert sha == "abc123"
assert message == "commit\nmessage."
def test_extract_commit_message_metadata():
message = """\
Hello, world!
One: Hello!
Two: 1234
"""
metadata = git.extract_commit_message_metadata(message)
assert metadata == {"One": "Hello!", "Two": "1234"}
| true
| true
|
1c45988afdd14740a571c6b781a72451a6d25636
| 3,162
|
py
|
Python
|
dispel4py/seismo/obspy_stream.py
|
AndreiFrunze/wrangler
|
076a07de00fc966dcf18ca6b6a6e804be5245ed9
|
[
"Apache-2.0"
] | 2
|
2017-09-07T04:33:18.000Z
|
2019-01-07T13:32:15.000Z
|
dispel4py/seismo/obspy_stream.py
|
AndreiFrunze/wrangler
|
076a07de00fc966dcf18ca6b6a6e804be5245ed9
|
[
"Apache-2.0"
] | 2
|
2016-10-06T13:07:05.000Z
|
2017-12-20T09:47:08.000Z
|
dispel4py/seismo/obspy_stream.py
|
AndreiFrunze/wrangler
|
076a07de00fc966dcf18ca6b6a6e804be5245ed9
|
[
"Apache-2.0"
] | 5
|
2016-09-01T08:38:20.000Z
|
2018-08-28T12:08:39.000Z
|
from dispel4py.seismo.seismo import SeismoPE
import traceback
INPUT_NAME = 'input'
OUTPUT_NAME = 'output'
class ObspyStreamPE(SeismoPE):
'''
A SeismoPE that calls a function to process an input stream.
'''
def __init__(self):
SeismoPE.__init__(self)
def setCompute(self, compute_fn, params={}):
        '''
        Define the compute function that this PE uses for processing input
        streams, and any input parameters for the function. The function is
        called as compute_fn(pe, stream, **params): it must accept the PE and
        an obspy stream, and any further parameters must be provided before
        the PE is executed.
        '''
self.compute_fn = compute_fn, dict(params)
def setInputTypes(self, types):
self.inout_types = { OUTPUT_NAME : types[INPUT_NAME] }
def getOutputTypes(self):
# output = input
return self.inout_types
def compute(self):
'''
Calls the processing function with the given parameters and one input stream.
'''
try:
try:
func, params = self.compute_fn
except TypeError:
func = self.compute_fn
params = {}
output = func(self, self.st, **params)
self.outputstreams.append(output)
except:
self.log(traceback.format_exc())
self.error+=traceback.format_exc()
self.log("Failed to execute function '%s' with parameters %s" % (func.__name__, params))
from dispel4py.workflow_graph import WorkflowGraph
def createProcessingComposite(chain, suffix='', controlParameters={}, provRecorder=None):
    '''
    Creates a composite PE wrapping a pipeline that processes obspy streams.
    :param chain: list of functions, or (function, params) tuples, that process obspy streams. Each function takes the PE and an input stream and returns an output stream.
    :param suffix: suffix appended to the name of each PE that is created
    :param controlParameters: environment parameters for the processing elements
    :param provRecorder: optional PE that records provenance metadata
    :return: a WorkflowGraph whose 'input' and 'output' mappings expose the first and last PE of the pipeline
    '''
prev = None
first = None
graph = WorkflowGraph()
for fn_desc in chain:
pe = ObspyStreamPE()
try:
fn = fn_desc[0]
params = fn_desc[1]
except TypeError:
fn = fn_desc
params = {}
pe.compute_fn = fn
pe.name = 'ObspyStreamPE_' + fn.__name__ + suffix
pe.controlParameters = controlParameters
pe.appParameters = dict(params)
pe.setCompute(fn, params)
# connect the metadata output to the provenance recorder PE if there is one
if provRecorder:
graph.connect(pe, 'metadata', provRecorder, 'metadata')
if prev:
graph.connect(prev, OUTPUT_NAME, pe, INPUT_NAME)
else:
first = pe
prev = pe
# Map inputs and outputs of the wrapper to the nodes in the subgraph
graph.inputmappings = { 'input' : (first, INPUT_NAME) }
graph.outputmappings = { 'output' : (prev, OUTPUT_NAME) }
return graph
| 34.747253
| 141
| 0.624921
|
from dispel4py.seismo.seismo import SeismoPE
import traceback
INPUT_NAME = 'input'
OUTPUT_NAME = 'output'
class ObspyStreamPE(SeismoPE):
def __init__(self):
SeismoPE.__init__(self)
def setCompute(self, compute_fn, params={}):
self.compute_fn = compute_fn, dict(params)
def setInputTypes(self, types):
self.inout_types = { OUTPUT_NAME : types[INPUT_NAME] }
def getOutputTypes(self):
return self.inout_types
def compute(self):
try:
try:
func, params = self.compute_fn
except TypeError:
func = self.compute_fn
params = {}
output = func(self, self.st, **params)
self.outputstreams.append(output)
except:
self.log(traceback.format_exc())
self.error+=traceback.format_exc()
self.log("Failed to execute function '%s' with parameters %s" % (func.__name__, params))
from dispel4py.workflow_graph import WorkflowGraph
def createProcessingComposite(chain, suffix='', controlParameters={}, provRecorder=None):
prev = None
first = None
graph = WorkflowGraph()
for fn_desc in chain:
pe = ObspyStreamPE()
try:
fn = fn_desc[0]
params = fn_desc[1]
except TypeError:
fn = fn_desc
params = {}
pe.compute_fn = fn
pe.name = 'ObspyStreamPE_' + fn.__name__ + suffix
pe.controlParameters = controlParameters
pe.appParameters = dict(params)
pe.setCompute(fn, params)
if provRecorder:
graph.connect(pe, 'metadata', provRecorder, 'metadata')
if prev:
graph.connect(prev, OUTPUT_NAME, pe, INPUT_NAME)
else:
first = pe
prev = pe
graph.inputmappings = { 'input' : (first, INPUT_NAME) }
graph.outputmappings = { 'output' : (prev, OUTPUT_NAME) }
return graph
| true
| true
|
1c4598e6f314bfee7c1a31680ad93afaa47b3067
| 4,132
|
py
|
Python
|
plugins/samanage/komand_samanage/actions/list_users/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/samanage/komand_samanage/actions/list_users/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/samanage/komand_samanage/actions/list_users/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
import komand
from .schema import ListUsersInput, ListUsersOutput
# Custom imports below
class ListUsers(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="list_users",
description="List all users",
input=ListUsersInput(),
output=ListUsersOutput(),
)
def run(self, params={}):
return {"users": self.connection.api.list_users()}
def test(self):
return {
"users": [
{
"id": 4245115,
"name": "Anon",
"disabled": False,
"email": "123@service.hmail.eu",
"created_at": "2018-11-22T08:13:00.000-05:00",
"role": {
"id": 461180,
"name": "Requester",
"description": "Requester role to view and submit service request.",
"portal": True,
"show_my_tasks": False,
},
"salt": "04f20390ecf0c97571167c6c3350782663b6a7e0",
"group_ids": [4492327],
"custom_fields_values": [],
"avatar": {"type": "initials", "color": "#dfcd00", "initials": "AN"},
"mfa_enabled": False,
},
{
"id": 4244043,
"name": "Tom",
"disabled": False,
"title": "Panic",
"email": "20180913dp@gmail.com",
"created_at": "2018-11-21T12:28:31.000-05:00",
"phone": "12345678",
"mobile_phone": "87654321",
"department": {
"id": 133361,
"name": "Information Technology",
"default_assignee_id": 4485265,
},
"role": {
"id": 461179,
"name": "Service Agent User",
"description": "Almost like an administrator but no access to setup.",
"portal": False,
"show_my_tasks": False,
},
"salt": "b3e360e65de5b592ce1ff92e1d90acedbaddbcf7",
"group_ids": [4491226],
"custom_fields_values": [],
"avatar": {"type": "initials", "color": "#dfcd00", "initials": "TO"},
"mfa_enabled": False,
"reports_to": {
"id": 4485266,
"name": "Helpdesk",
"disabled": False,
"is_user": False,
"reports_to": {"id": -1, "href": "https://api.samanage.com/groups/-1.json"},
"avatar": {"type": "group", "color": "#0bc46f"},
},
"site": {"id": 96691, "name": "Headquarters", "location": "Main Office"},
},
{
"id": 4238379,
"name": "WW WW",
"disabled": False,
"email": "wwww@service.hmail.eu",
"created_at": "2018-11-20T05:29:00.000-05:00",
"last_login": "2018-11-21T17:20:46.000-05:00",
"phone": "+37254312367",
"role": {
"id": 461178,
"name": "Administrator",
"description": "This is the all powerful administrator user!",
"portal": False,
"show_my_tasks": False,
},
"salt": "7e2c35f51cc6ccdf727f7e48bc42403adbf6534d",
"group_ids": [4485265, 4485266],
"custom_fields_values": [],
"avatar": {"type": "initials", "color": "#dfcd00", "initials": "WW"},
"mfa_enabled": False,
},
]
}
| 41.32
| 100
| 0.393272
|
import komand
from .schema import ListUsersInput, ListUsersOutput
class ListUsers(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="list_users",
description="List all users",
input=ListUsersInput(),
output=ListUsersOutput(),
)
def run(self, params={}):
return {"users": self.connection.api.list_users()}
def test(self):
return {
"users": [
{
"id": 4245115,
"name": "Anon",
"disabled": False,
"email": "123@service.hmail.eu",
"created_at": "2018-11-22T08:13:00.000-05:00",
"role": {
"id": 461180,
"name": "Requester",
"description": "Requester role to view and submit service request.",
"portal": True,
"show_my_tasks": False,
},
"salt": "04f20390ecf0c97571167c6c3350782663b6a7e0",
"group_ids": [4492327],
"custom_fields_values": [],
"avatar": {"type": "initials", "color": "#dfcd00", "initials": "AN"},
"mfa_enabled": False,
},
{
"id": 4244043,
"name": "Tom",
"disabled": False,
"title": "Panic",
"email": "20180913dp@gmail.com",
"created_at": "2018-11-21T12:28:31.000-05:00",
"phone": "12345678",
"mobile_phone": "87654321",
"department": {
"id": 133361,
"name": "Information Technology",
"default_assignee_id": 4485265,
},
"role": {
"id": 461179,
"name": "Service Agent User",
"description": "Almost like an administrator but no access to setup.",
"portal": False,
"show_my_tasks": False,
},
"salt": "b3e360e65de5b592ce1ff92e1d90acedbaddbcf7",
"group_ids": [4491226],
"custom_fields_values": [],
"avatar": {"type": "initials", "color": "#dfcd00", "initials": "TO"},
"mfa_enabled": False,
"reports_to": {
"id": 4485266,
"name": "Helpdesk",
"disabled": False,
"is_user": False,
"reports_to": {"id": -1, "href": "https://api.samanage.com/groups/-1.json"},
"avatar": {"type": "group", "color": "#0bc46f"},
},
"site": {"id": 96691, "name": "Headquarters", "location": "Main Office"},
},
{
"id": 4238379,
"name": "WW WW",
"disabled": False,
"email": "wwww@service.hmail.eu",
"created_at": "2018-11-20T05:29:00.000-05:00",
"last_login": "2018-11-21T17:20:46.000-05:00",
"phone": "+37254312367",
"role": {
"id": 461178,
"name": "Administrator",
"description": "This is the all powerful administrator user!",
"portal": False,
"show_my_tasks": False,
},
"salt": "7e2c35f51cc6ccdf727f7e48bc42403adbf6534d",
"group_ids": [4485265, 4485266],
"custom_fields_values": [],
"avatar": {"type": "initials", "color": "#dfcd00", "initials": "WW"},
"mfa_enabled": False,
},
]
}
| true
| true
|
1c4598f31962fb4914c01183dfd2b5367f20731a
| 136
|
py
|
Python
|
al_phonebook/types.py
|
vtrvtr/al_phonebook
|
7bcdb7fa0323c873c523036da99b4b1616c0e00e
|
[
"MIT"
] | null | null | null |
al_phonebook/types.py
|
vtrvtr/al_phonebook
|
7bcdb7fa0323c873c523036da99b4b1616c0e00e
|
[
"MIT"
] | 1
|
2022-01-17T14:45:50.000Z
|
2022-01-17T14:45:51.000Z
|
al_phonebook/types.py
|
vtrvtr/al_phonebook
|
7bcdb7fa0323c873c523036da99b4b1616c0e00e
|
[
"MIT"
] | null | null | null |
from typing import Any, Union
import os
DictItem = dict[str, Any]
OptionalDictItem = DictItem | None
PathLike = Union[os.PathLike, str]
| 22.666667
| 34
| 0.764706
|
from typing import Any, Union
import os
DictItem = dict[str, Any]
OptionalDictItem = DictItem | None
PathLike = Union[os.PathLike, str]
| true
| true
|
1c459b1eb973ce00d988425faa2a536d4bd861cd
| 744
|
py
|
Python
|
dm_control/composer/constants.py
|
h8907283/dm_control
|
fe4449606742a7b8bec81930790b98244cddc538
|
[
"Apache-2.0"
] | 2,863
|
2018-01-03T01:38:52.000Z
|
2022-03-30T09:49:50.000Z
|
dm_control/composer/constants.py
|
krakhit/dm_control
|
4e1a35595124742015ae0c7a829e099a5aa100f5
|
[
"Apache-2.0"
] | 266
|
2018-01-03T16:00:04.000Z
|
2022-03-26T15:45:48.000Z
|
dm_control/composer/constants.py
|
krakhit/dm_control
|
4e1a35595124742015ae0c7a829e099a5aa100f5
|
[
"Apache-2.0"
] | 580
|
2018-01-03T03:17:27.000Z
|
2022-03-31T19:29:32.000Z
|
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module defining constant values for Composer."""
SENSOR_SITES_GROUP = 4
| 37.2
| 78
| 0.681452
|
SENSOR_SITES_GROUP = 4
| true
| true
|
1c459b5a3be59498565c981523bb698670abd0ef
| 255
|
py
|
Python
|
manage.py
|
justsostephen/track
|
b1749f7db664d76fab0c501c23f0d0705cc95fce
|
[
"MIT"
] | null | null | null |
manage.py
|
justsostephen/track
|
b1749f7db664d76fab0c501c23f0d0705cc95fce
|
[
"MIT"
] | null | null | null |
manage.py
|
justsostephen/track
|
b1749f7db664d76fab0c501c23f0d0705cc95fce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "stockcontrol.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.181818
| 76
| 0.776471
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "stockcontrol.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| true
| true
|
1c459b9e9ba258120e841df624eb7641c3121e90
| 4,297
|
py
|
Python
|
aiograph/utils/html.py
|
fakegit/aiograph
|
a00aacebb04c1e743055ba524b978a06027e31ed
|
[
"MIT"
] | 45
|
2018-05-05T12:31:43.000Z
|
2022-03-23T11:20:03.000Z
|
aiographfix/utils/html.py
|
Yyonging/aiograph
|
78d291f9e1157720c949e336a9aa2711ad707285
|
[
"MIT"
] | 6
|
2019-03-04T11:23:49.000Z
|
2022-03-30T11:25:46.000Z
|
aiographfix/utils/html.py
|
Yyonging/aiograph
|
78d291f9e1157720c949e336a9aa2711ad707285
|
[
"MIT"
] | 16
|
2019-02-22T19:10:19.000Z
|
2021-09-15T22:12:55.000Z
|
from html import escape
from html.entities import name2codepoint
from html.parser import HTMLParser
from typing import List, Union
import attr
from ..types import NodeElement
ALLOWED_TAGS = [
'a', 'aside', 'b', 'blockquote', 'br', 'code', 'em', 'figcaption', 'figure',
'h3', 'h4', 'hr', 'i', 'iframe', 'img', 'li', 'ol', 'p', 'pre', 's',
'strong', 'u', 'ul', 'video'
]
VOID_ELEMENTS = {
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen',
'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr'
}
ALLOWED_ATTRS = ['href', 'src']
def node_to_html(node: Union[str, NodeElement, list]) -> str:
"""
Convert Nodes to HTML
:param node:
:return:
"""
if isinstance(node, str): # Text
return escape(node)
elif isinstance(node, list): # List of nodes
result = ''
for child_node in node:
result += node_to_html(child_node)
return result
elif not isinstance(node, NodeElement):
raise TypeError(f"Node must be instance of str or NodeElement, not {type(node)}")
# NodeElement
# Open
result = "<" + node.tag
if node.attrs:
result += ' ' + ' '.join(f"{k}=\"{v}\"" for k, v in node.attrs.items())
if node.tag in VOID_ELEMENTS: # Close void element
result += '/>'
else:
result += '>'
for child_node in node.children: # Container body
result += node_to_html(child_node)
result += '</' + node.tag + '>' # Close tag
return result
def html_to_nodes(html_content: str) -> List[Union[str, NodeElement]]:
"""
Convert HTML code to Nodes
:param html_content:
:return:
"""
parser = HtmlToNodesParser()
parser.feed(html_content)
return parser.get_nodes()
def _node_converter_filter(attribute, value) -> bool:
return bool(value)
def nodes_to_json(nodes: List[Union[str, NodeElement]]) -> List[Union[str, dict]]:
"""
Convert Nodes to JSON
:param nodes:
:return:
"""
result = []
for node in nodes:
if isinstance(node, str):
result.append(node)
elif isinstance(node, NodeElement):
result.append(attr.asdict(node, filter=_node_converter_filter))
return result
def html_to_json(content: str) -> List[Union[str, dict]]:
"""
Convert HTML to JSON
:param content:
:return:
"""
return nodes_to_json(html_to_nodes(content))
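# A minimal usage sketch (illustration only); empty attrs/children are
# dropped from the JSON by _node_converter_filter:
#   html_to_json('<p>Hello, <b>world</b>!</p>')
#   -> [{'tag': 'p',
#        'children': ['Hello, ', {'tag': 'b', 'children': ['world']}, '!']}]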
class HtmlToNodesParser(HTMLParser):
def __init__(self):
super(HtmlToNodesParser, self).__init__()
self.current_nodes = []
self.parent_nodes = []
def error(self, message):
raise ValueError(message)
def add_str_node(self, s):
if self.current_nodes and isinstance(self.current_nodes[-1], str):
self.current_nodes[-1] += s
else:
self.current_nodes.append(s)
def handle_starttag(self, tag, attrs_list):
if tag not in ALLOWED_TAGS:
self.error(f"{tag} tag is not allowed")
node = NodeElement(tag=tag)
if attrs_list:
for attr, value in attrs_list:
node.attrs[attr] = value
self.current_nodes.append(node)
if tag not in VOID_ELEMENTS:
self.parent_nodes.append(self.current_nodes)
self.current_nodes = node.children = []
def handle_endtag(self, tag):
if tag in VOID_ELEMENTS:
return
self.current_nodes = self.parent_nodes.pop()
last_node = self.current_nodes[-1]
if last_node.tag != tag:
self.error(f"\"{tag}\" tag closed instead of \"{last_node.tag}\"")
if not last_node.children:
last_node.children.clear()
def handle_data(self, data):
self.add_str_node(data)
def handle_entityref(self, name):
self.add_str_node(chr(name2codepoint[name]))
def handle_charref(self, name):
if name.startswith('x'):
c = chr(int(name[1:], 16))
else:
c = chr(int(name))
self.add_str_node(c)
def get_nodes(self):
if self.parent_nodes:
not_closed_tag = self.parent_nodes[-1][-1].tag
self.error(f"\"{not_closed_tag}\" tag is not closed")
return self.current_nodes
| 25.577381
| 89
| 0.594601
|
from html import escape
from html.entities import name2codepoint
from html.parser import HTMLParser
from typing import List, Union
import attr
from ..types import NodeElement
ALLOWED_TAGS = [
'a', 'aside', 'b', 'blockquote', 'br', 'code', 'em', 'figcaption', 'figure',
'h3', 'h4', 'hr', 'i', 'iframe', 'img', 'li', 'ol', 'p', 'pre', 's',
'strong', 'u', 'ul', 'video'
]
VOID_ELEMENTS = {
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen',
'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr'
}
ALLOWED_ATTRS = ['href', 'src']
def node_to_html(node: Union[str, NodeElement, list]) -> str:
if isinstance(node, str):
return escape(node)
elif isinstance(node, list):
result = ''
for child_node in node:
result += node_to_html(child_node)
return result
elif not isinstance(node, NodeElement):
raise TypeError(f"Node must be instance of str or NodeElement, not {type(node)}")
result = "<" + node.tag
if node.attrs:
result += ' ' + ' '.join(f"{k}=\"{v}\"" for k, v in node.attrs.items())
if node.tag in VOID_ELEMENTS:
result += '/>'
else:
result += '>'
for child_node in node.children:
result += node_to_html(child_node)
result += '</' + node.tag + '>'
return result
def html_to_nodes(html_content: str) -> List[Union[str, NodeElement]]:
parser = HtmlToNodesParser()
parser.feed(html_content)
return parser.get_nodes()
def _node_converter_filter(attribute, value) -> bool:
return bool(value)
def nodes_to_json(nodes: List[Union[str, NodeElement]]) -> List[Union[str, dict]]:
result = []
for node in nodes:
if isinstance(node, str):
result.append(node)
elif isinstance(node, NodeElement):
result.append(attr.asdict(node, filter=_node_converter_filter))
return result
def html_to_json(content: str) -> List[Union[str, dict]]:
return nodes_to_json(html_to_nodes(content))
class HtmlToNodesParser(HTMLParser):
def __init__(self):
super(HtmlToNodesParser, self).__init__()
self.current_nodes = []
self.parent_nodes = []
def error(self, message):
raise ValueError(message)
def add_str_node(self, s):
if self.current_nodes and isinstance(self.current_nodes[-1], str):
self.current_nodes[-1] += s
else:
self.current_nodes.append(s)
def handle_starttag(self, tag, attrs_list):
if tag not in ALLOWED_TAGS:
self.error(f"{tag} tag is not allowed")
node = NodeElement(tag=tag)
if attrs_list:
for attr, value in attrs_list:
node.attrs[attr] = value
self.current_nodes.append(node)
if tag not in VOID_ELEMENTS:
self.parent_nodes.append(self.current_nodes)
self.current_nodes = node.children = []
def handle_endtag(self, tag):
if tag in VOID_ELEMENTS:
return
self.current_nodes = self.parent_nodes.pop()
last_node = self.current_nodes[-1]
if last_node.tag != tag:
self.error(f"\"{tag}\" tag closed instead of \"{last_node.tag}\"")
if not last_node.children:
last_node.children.clear()
def handle_data(self, data):
self.add_str_node(data)
def handle_entityref(self, name):
self.add_str_node(chr(name2codepoint[name]))
def handle_charref(self, name):
if name.startswith('x'):
c = chr(int(name[1:], 16))
else:
c = chr(int(name))
self.add_str_node(c)
def get_nodes(self):
if self.parent_nodes:
not_closed_tag = self.parent_nodes[-1][-1].tag
self.error(f"\"{not_closed_tag}\" tag is not closed")
return self.current_nodes
| true
| true
|
1c459cb9695ce51149e5eae19d31908ca788d5d5
| 6,562
|
py
|
Python
|
seq2seq/tasks/decode_text.py
|
chunfengh/seq2seq
|
cc6e1a15f523c2ead809d48b1f6eebbeb94e3f0b
|
[
"Apache-2.0"
] | null | null | null |
seq2seq/tasks/decode_text.py
|
chunfengh/seq2seq
|
cc6e1a15f523c2ead809d48b1f6eebbeb94e3f0b
|
[
"Apache-2.0"
] | null | null | null |
seq2seq/tasks/decode_text.py
|
chunfengh/seq2seq
|
cc6e1a15f523c2ead809d48b1f6eebbeb94e3f0b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Task where both the input and output sequence are plain text.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
from pydoc import locate
import numpy as np
import tensorflow as tf
from tensorflow import gfile
from seq2seq.tasks.inference_task import InferenceTask, unbatch_dict
def _get_prediction_length(predictions_dict):
"""Returns the length of the prediction based on the index
of the first SEQUENCE_END token.
"""
tokens_iter = enumerate(predictions_dict["predicted_tokens"])
return next(((i + 1) for i, _ in tokens_iter if _ == "SEQUENCE_END"),
len(predictions_dict["predicted_tokens"]))
def _get_unk_mapping(filename):
"""Reads a file that specifies a mapping from source to target tokens.
    The file must contain lines of the form <source>\t<target>.
Args:
filename: path to the mapping file
Returns:
A dictionary that maps from source -> target tokens.
"""
with gfile.GFile(filename, "r") as mapping_file:
lines = mapping_file.readlines()
mapping = dict([_.split("\t")[0:2] for _ in lines])
mapping = {k.strip(): v.strip() for k, v in mapping.items()}
return mapping
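# Hedged sketch of the expected mapping file contents (tokens are made up):
#
#   Hund\tdog
#   Katze\tcat
#
# which _get_unk_mapping parses into {'Hund': 'dog', 'Katze': 'cat'}.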
def _unk_replace(source_tokens,
predicted_tokens,
attention_scores,
mapping=None):
"""Replaces UNK tokens with tokens from the source or a
provided mapping based on the attention scores.
Args:
source_tokens: A numpy array of strings.
predicted_tokens: A numpy array of strings.
attention_scores: A numeric numpy array
of shape `[prediction_length, source_length]` that contains
the attention scores.
mapping: If not provided, an UNK token is replaced with the
      source token that has the highest attention score. If provided,
      the token is instead replaced with `mapping[chosen_source_token]`.
Returns:
A new `predicted_tokens` array.
"""
result = []
for token, scores in zip(predicted_tokens, attention_scores):
if token == "UNK":
max_score_index = np.argmax(scores)
chosen_source_token = source_tokens[max_score_index]
new_target = chosen_source_token
if mapping is not None and chosen_source_token in mapping:
new_target = mapping[chosen_source_token]
result.append(new_target)
else:
result.append(token)
return np.array(result)
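# Hedged, illustrative example of _unk_replace (values are made up): the middle
# prediction is UNK and its attention row points at source position 0.
#
#   source = np.array(['Hund', 'beisst', 'Mann'])
#   predicted = np.array(['dog', 'UNK', 'man'])
#   scores = np.array([[0.8, 0.1, 0.1],
#                      [0.7, 0.2, 0.1],
#                      [0.1, 0.1, 0.8]])
#   _unk_replace(source, predicted, scores)
#   # -> array(['dog', 'Hund', 'man'], ...); with mapping={'Hund': 'dog'}
#   # the UNK would become 'dog' instead.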
class DecodeText(InferenceTask):
"""Defines inference for tasks where both the input and output sequences
are plain text.
Params:
delimiter: Character by which tokens are delimited. Defaults to space.
unk_replace: If true, enable unknown token replacement based on attention
scores.
unk_mapping: If `unk_replace` is true, this can be the path to a file
defining a dictionary to improve UNK token replacement. Refer to the
documentation for more details.
dump_attention_dir: Save attention scores and plots to this directory.
dump_attention_no_plot: If true, only save attention scores, not
attention plots.
dump_beams: Write beam search debugging information to this file.
"""
def __init__(self, params):
super(DecodeText, self).__init__(params)
self._unk_mapping = None
self._unk_replace_fn = None
if self.params["unk_mapping"] is not None:
self._unk_mapping = _get_unk_mapping(self.params["unk_mapping"])
if self.params["unk_replace"]:
self._unk_replace_fn = functools.partial(
_unk_replace, mapping=self._unk_mapping)
self._postproc_fn = None
if self.params["postproc_fn"]:
self._postproc_fn = locate(self.params["postproc_fn"])
if self._postproc_fn is None:
raise ValueError("postproc_fn not found: {}".format(
self.params["postproc_fn"]))
@staticmethod
def default_params():
params = {}
params.update({
"delimiter": " ",
"postproc_fn": "",
"unk_replace": False,
"unk_mapping": None,
})
return params
def before_run(self, _run_context):
fetches = {}
fetches["predicted_tokens"] = self._predictions["predicted_tokens"]
fetches["features.source_len"] = self._predictions["features.source_len"]
fetches["features.source_tokens"] = self._predictions[
"features.source_tokens"]
if "attention_scores" in self._predictions:
fetches["attention_scores"] = self._predictions["attention_scores"]
return tf.train.SessionRunArgs(fetches)
def after_run(self, _run_context, run_values):
fetches_batch = run_values.results
        print(fetches_batch)
for fetches in unbatch_dict(fetches_batch):
# Convert to unicode
fetches["predicted_tokens"] = np.char.decode(
fetches["predicted_tokens"].astype("S"), "utf-8")
predicted_tokens = fetches["predicted_tokens"]
# If we're using beam search we take the first beam
if np.ndim(predicted_tokens) > 1:
predicted_tokens = predicted_tokens[:, 0]
fetches["features.source_tokens"] = np.char.decode(
fetches["features.source_tokens"].astype("S"), "utf-8")
source_tokens = fetches["features.source_tokens"]
source_len = fetches["features.source_len"]
if self._unk_replace_fn is not None:
# We slice the attention scores so that we do not
            # accidentally replace UNK with a SEQUENCE_END token
attention_scores = fetches["attention_scores"]
attention_scores = attention_scores[:, :source_len - 1]
predicted_tokens = self._unk_replace_fn(
source_tokens=source_tokens,
predicted_tokens=predicted_tokens,
attention_scores=attention_scores)
sent = self.params["delimiter"].join(predicted_tokens).split(
"SEQUENCE_END")[0]
# Apply postproc
if self._postproc_fn:
sent = self._postproc_fn(sent)
sent = sent.strip()
print(sent.encode('utf-8'))
| 34.536842
| 77
| 0.704358
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
from pydoc import locate
import numpy as np
import tensorflow as tf
from tensorflow import gfile
from seq2seq.tasks.inference_task import InferenceTask, unbatch_dict
def _get_prediction_length(predictions_dict):
tokens_iter = enumerate(predictions_dict["predicted_tokens"])
return next(((i + 1) for i, _ in tokens_iter if _ == "SEQUENCE_END"),
len(predictions_dict["predicted_tokens"]))
def _get_unk_mapping(filename):
with gfile.GFile(filename, "r") as mapping_file:
lines = mapping_file.readlines()
mapping = dict([_.split("\t")[0:2] for _ in lines])
mapping = {k.strip(): v.strip() for k, v in mapping.items()}
return mapping
def _unk_replace(source_tokens,
predicted_tokens,
attention_scores,
mapping=None):
result = []
for token, scores in zip(predicted_tokens, attention_scores):
if token == "UNK":
max_score_index = np.argmax(scores)
chosen_source_token = source_tokens[max_score_index]
new_target = chosen_source_token
if mapping is not None and chosen_source_token in mapping:
new_target = mapping[chosen_source_token]
result.append(new_target)
else:
result.append(token)
return np.array(result)
class DecodeText(InferenceTask):
def __init__(self, params):
super(DecodeText, self).__init__(params)
self._unk_mapping = None
self._unk_replace_fn = None
if self.params["unk_mapping"] is not None:
self._unk_mapping = _get_unk_mapping(self.params["unk_mapping"])
if self.params["unk_replace"]:
self._unk_replace_fn = functools.partial(
_unk_replace, mapping=self._unk_mapping)
self._postproc_fn = None
if self.params["postproc_fn"]:
self._postproc_fn = locate(self.params["postproc_fn"])
if self._postproc_fn is None:
raise ValueError("postproc_fn not found: {}".format(
self.params["postproc_fn"]))
@staticmethod
def default_params():
params = {}
params.update({
"delimiter": " ",
"postproc_fn": "",
"unk_replace": False,
"unk_mapping": None,
})
return params
def before_run(self, _run_context):
fetches = {}
fetches["predicted_tokens"] = self._predictions["predicted_tokens"]
fetches["features.source_len"] = self._predictions["features.source_len"]
fetches["features.source_tokens"] = self._predictions[
"features.source_tokens"]
if "attention_scores" in self._predictions:
fetches["attention_scores"] = self._predictions["attention_scores"]
return tf.train.SessionRunArgs(fetches)
def after_run(self, _run_context, run_values):
fetches_batch = run_values.results
        print(fetches_batch)
for fetches in unbatch_dict(fetches_batch):
fetches["predicted_tokens"] = np.char.decode(
fetches["predicted_tokens"].astype("S"), "utf-8")
predicted_tokens = fetches["predicted_tokens"]
if np.ndim(predicted_tokens) > 1:
predicted_tokens = predicted_tokens[:, 0]
fetches["features.source_tokens"] = np.char.decode(
fetches["features.source_tokens"].astype("S"), "utf-8")
source_tokens = fetches["features.source_tokens"]
source_len = fetches["features.source_len"]
if self._unk_replace_fn is not None:
# We slice the attention scores so that we do not
            # accidentally replace UNK with a SEQUENCE_END token
attention_scores = fetches["attention_scores"]
attention_scores = attention_scores[:, :source_len - 1]
predicted_tokens = self._unk_replace_fn(
source_tokens=source_tokens,
predicted_tokens=predicted_tokens,
attention_scores=attention_scores)
sent = self.params["delimiter"].join(predicted_tokens).split(
"SEQUENCE_END")[0]
# Apply postproc
if self._postproc_fn:
sent = self._postproc_fn(sent)
sent = sent.strip()
print(sent.encode('utf-8'))
| true
| true
|
1c459cbafa7959829a0eb6b44a0612c737b7663e
| 782
|
py
|
Python
|
jorldy/config/m_dqn/cartpole.py
|
Kyushik/JORLDY
|
6a24a2195e5e87ade157ee53f631af2221f0a188
|
[
"Apache-2.0"
] | 300
|
2021-11-03T07:06:34.000Z
|
2022-03-24T02:23:56.000Z
|
jorldy/config/m_dqn/cartpole.py
|
Kyushik/JORLDY
|
6a24a2195e5e87ade157ee53f631af2221f0a188
|
[
"Apache-2.0"
] | 37
|
2021-11-04T04:31:07.000Z
|
2022-03-30T01:40:49.000Z
|
jorldy/config/m_dqn/cartpole.py
|
Kyushik/JORLDY
|
6a24a2195e5e87ade157ee53f631af2221f0a188
|
[
"Apache-2.0"
] | 45
|
2021-11-03T08:05:56.000Z
|
2022-03-24T08:35:05.000Z
|
### Munchausen DQN CartPole Config ###
env = {
"name": "cartpole",
"action_type": "discrete",
"render": False,
}
agent = {
"name": "m_dqn",
"network": "discrete_q_network",
"gamma": 0.99,
"epsilon_init": 1.0,
"epsilon_min": 0.01,
"explore_ratio": 0.2,
"buffer_size": 50000,
"batch_size": 32,
"start_train_step": 2000,
"target_update_period": 500,
"lr_decay": True,
# M-DQN Parameters
"alpha": 0.9,
"tau": 0.03,
"l_0": -1,
}
optim = {
"name": "adam",
"lr": 0.0001,
}
train = {
"training": True,
"load_path": None,
"run_step": 100000,
"print_period": 1000,
"save_period": 10000,
"eval_iteration": 10,
# distributed setting
"update_period": 32,
"num_workers": 8,
}
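# Illustrative note on how the M-DQN parameters above (alpha, tau, l_0) enter
# the Munchausen learning target (cf. Vieillard et al., "Munchausen
# Reinforcement Learning", 2020); this sketch is not part of the original
# config. With pi = softmax(q / tau):
#
#   munchausen_bonus = clip(tau * log pi(a_t | s_t), l_0, 0)
#   target = r_t + alpha * munchausen_bonus
#            + gamma * sum_a' pi(a'|s_{t+1}) * (q(s_{t+1}, a')
#                                               - tau * log pi(a'|s_{t+1}))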
| 18.186047
| 38
| 0.553708
|
False,
}
agent = {
"name": "m_dqn",
"network": "discrete_q_network",
"gamma": 0.99,
"epsilon_init": 1.0,
"epsilon_min": 0.01,
"explore_ratio": 0.2,
"buffer_size": 50000,
"batch_size": 32,
"start_train_step": 2000,
"target_update_period": 500,
"lr_decay": True,
"alpha": 0.9,
"tau": 0.03,
"l_0": -1,
}
optim = {
"name": "adam",
"lr": 0.0001,
}
train = {
"training": True,
"load_path": None,
"run_step": 100000,
"print_period": 1000,
"save_period": 10000,
"eval_iteration": 10,
"update_period": 32,
"num_workers": 8,
}
| true
| true
|
1c459d4fd01576a1d2a19cab06b15dcefae8bd24
| 336
|
py
|
Python
|
setup.py
|
ippee/py_init
|
0d997ec5ddaee95ef71562f14542e74f40e88646
|
[
"CC0-1.0"
] | null | null | null |
setup.py
|
ippee/py_init
|
0d997ec5ddaee95ef71562f14542e74f40e88646
|
[
"CC0-1.0"
] | null | null | null |
setup.py
|
ippee/py_init
|
0d997ec5ddaee95ef71562f14542e74f40e88646
|
[
"CC0-1.0"
] | null | null | null |
# coding: UTF-8
from setuptools import setup
install_requires = []
packages = []
setup(
name='',
version='0.1.0',
license='',
description='',
author='you',
author_email='',
url='',
packages=packages,
install_requires=install_requires,
setup_requires=['pytest-runner'],
tests_require=['pytest', "pytest-cov"]
)
| 15.272727
| 40
| 0.660714
|
from setuptools import setup
install_requires = []
packages = []
setup(
name='',
version='0.1.0',
license='',
description='',
author='you',
author_email='',
url='',
packages=packages,
install_requires=install_requires,
setup_requires=['pytest-runner'],
tests_require=['pytest', "pytest-cov"]
)
| true
| true
|
1c459d5ed4db13f7e8ef93008315c97790ecb9b7
| 5,618
|
py
|
Python
|
commitizen/commands/init.py
|
christian-hawk/commitizen
|
5c0dd546866f2bd2ab6b4ecd27035441b7b4692b
|
[
"MIT"
] | null | null | null |
commitizen/commands/init.py
|
christian-hawk/commitizen
|
5c0dd546866f2bd2ab6b4ecd27035441b7b4692b
|
[
"MIT"
] | null | null | null |
commitizen/commands/init.py
|
christian-hawk/commitizen
|
5c0dd546866f2bd2ab6b4ecd27035441b7b4692b
|
[
"MIT"
] | null | null | null |
import os
import questionary
import yaml
from packaging.version import Version
from commitizen import cmd, factory, out
from commitizen.__version__ import __version__
from commitizen.config import BaseConfig, TomlConfig
from commitizen.cz import registry
from commitizen.defaults import config_files
from commitizen.exceptions import NoAnswersError
from commitizen.git import get_latest_tag_name, get_tag_names
class Init:
def __init__(self, config: BaseConfig, *args):
self.config: BaseConfig = config
self.cz = factory.commiter_factory(self.config)
def __call__(self):
values_to_add = {}
# No config for commitizen exist
if not self.config.path:
config_path = self._ask_config_path()
if "toml" in config_path:
self.config = TomlConfig(data="", path=config_path)
self.config.init_empty_config_content()
values_to_add["name"] = self._ask_name()
tag = self._ask_tag()
values_to_add["version"] = Version(tag).public
values_to_add["tag_format"] = self._ask_tag_format(tag)
self._update_config_file(values_to_add)
if questionary.confirm("Do you want to install pre-commit hook?").ask():
self._install_pre_commit_hook()
out.write("You can bump the version and create changelog running:\n")
out.info("cz bump --changelog")
out.success("The configuration are all set.")
else:
out.line(f"Config file {self.config.path} already exists")
def _ask_config_path(self) -> str:
name = questionary.select(
"Please choose a supported config file: (default: pyproject.toml)",
choices=config_files,
default="pyproject.toml",
style=self.cz.style,
).ask()
return name
def _ask_name(self) -> str:
name = questionary.select(
"Please choose a cz (commit rule): (default: cz_conventional_commits)",
choices=list(registry.keys()),
default="cz_conventional_commits",
style=self.cz.style,
).ask()
return name
def _ask_tag(self) -> str:
latest_tag = get_latest_tag_name()
if not latest_tag:
out.error("No Existing Tag. Set tag to v0.0.1")
return "0.0.1"
is_correct_tag = questionary.confirm(
f"Is {latest_tag} the latest tag?", style=self.cz.style, default=False
).ask()
if not is_correct_tag:
tags = get_tag_names()
if not tags:
out.error("No Existing Tag. Set tag to v0.0.1")
return "0.0.1"
latest_tag = questionary.select(
"Please choose the latest tag: ",
choices=get_tag_names(),
style=self.cz.style,
).ask()
if not latest_tag:
raise NoAnswersError("Tag is required!")
return latest_tag
def _ask_tag_format(self, latest_tag) -> str:
is_correct_format = False
if latest_tag.startswith("v"):
tag_format = r"v$version"
is_correct_format = questionary.confirm(
f'Is "{tag_format}" the correct tag format?', style=self.cz.style
).ask()
if not is_correct_format:
tag_format = questionary.text(
'Please enter the correct version format: (default: "$version")',
style=self.cz.style,
).ask()
if not tag_format:
tag_format = "$version"
return tag_format
def _install_pre_commit_hook(self):
pre_commit_config_filename = ".pre-commit-config.yaml"
cz_hook_config = {
"repo": "https://github.com/commitizen-tools/commitizen",
"rev": f"v{__version__}",
"hooks": [{"id": "commitizen", "stages": ["commit-msg"]}],
}
config_data = {}
if not os.path.isfile(pre_commit_config_filename):
# .pre-commit-config does not exist
config_data["repos"] = [cz_hook_config]
else:
# breakpoint()
with open(pre_commit_config_filename) as config_file:
yaml_data = yaml.safe_load(config_file)
if yaml_data:
config_data = yaml_data
if "repos" in config_data:
for pre_commit_hook in config_data["repos"]:
if "commitizen" in pre_commit_hook["repo"]:
out.write("commitizen already in pre-commit config")
break
else:
config_data["repos"].append(cz_hook_config)
else:
# .pre-commit-config exists but there's no "repos" key
config_data["repos"] = [cz_hook_config]
with open(pre_commit_config_filename, "w") as config_file:
yaml.safe_dump(config_data, stream=config_file)
c = cmd.run("pre-commit install --hook-type commit-msg")
if c.return_code == 127:
out.error(
"pre-commit is not installed in current environement.\n"
"Run 'pre-commit install --hook-type commit-msg' again after it's installed"
)
elif c.return_code != 0:
out.error(c.err)
else:
out.write("commitizen pre-commit hook is now installed in your '.git'\n")
def _update_config_file(self, values):
for key, value in values.items():
self.config.set_key(key, value)
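# Illustrative shape of the .pre-commit-config.yaml entry that
# _install_pre_commit_hook above writes (the rev is a placeholder for the
# installed commitizen version, not a real pin):
#
#   repos:
#     - repo: https://github.com/commitizen-tools/commitizen
#       rev: v<__version__>
#       hooks:
#         - id: commitizen
#           stages: [commit-msg]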
| 36.245161
| 92
| 0.58455
|
import os
import questionary
import yaml
from packaging.version import Version
from commitizen import cmd, factory, out
from commitizen.__version__ import __version__
from commitizen.config import BaseConfig, TomlConfig
from commitizen.cz import registry
from commitizen.defaults import config_files
from commitizen.exceptions import NoAnswersError
from commitizen.git import get_latest_tag_name, get_tag_names
class Init:
def __init__(self, config: BaseConfig, *args):
self.config: BaseConfig = config
self.cz = factory.commiter_factory(self.config)
def __call__(self):
values_to_add = {}
if not self.config.path:
config_path = self._ask_config_path()
if "toml" in config_path:
self.config = TomlConfig(data="", path=config_path)
self.config.init_empty_config_content()
values_to_add["name"] = self._ask_name()
tag = self._ask_tag()
values_to_add["version"] = Version(tag).public
values_to_add["tag_format"] = self._ask_tag_format(tag)
self._update_config_file(values_to_add)
if questionary.confirm("Do you want to install pre-commit hook?").ask():
self._install_pre_commit_hook()
out.write("You can bump the version and create changelog running:\n")
out.info("cz bump --changelog")
out.success("The configuration are all set.")
else:
out.line(f"Config file {self.config.path} already exists")
def _ask_config_path(self) -> str:
name = questionary.select(
"Please choose a supported config file: (default: pyproject.toml)",
choices=config_files,
default="pyproject.toml",
style=self.cz.style,
).ask()
return name
def _ask_name(self) -> str:
name = questionary.select(
"Please choose a cz (commit rule): (default: cz_conventional_commits)",
choices=list(registry.keys()),
default="cz_conventional_commits",
style=self.cz.style,
).ask()
return name
def _ask_tag(self) -> str:
latest_tag = get_latest_tag_name()
if not latest_tag:
out.error("No Existing Tag. Set tag to v0.0.1")
return "0.0.1"
is_correct_tag = questionary.confirm(
f"Is {latest_tag} the latest tag?", style=self.cz.style, default=False
).ask()
if not is_correct_tag:
tags = get_tag_names()
if not tags:
out.error("No Existing Tag. Set tag to v0.0.1")
return "0.0.1"
latest_tag = questionary.select(
"Please choose the latest tag: ",
choices=get_tag_names(),
style=self.cz.style,
).ask()
if not latest_tag:
raise NoAnswersError("Tag is required!")
return latest_tag
def _ask_tag_format(self, latest_tag) -> str:
is_correct_format = False
if latest_tag.startswith("v"):
tag_format = r"v$version"
is_correct_format = questionary.confirm(
f'Is "{tag_format}" the correct tag format?', style=self.cz.style
).ask()
if not is_correct_format:
tag_format = questionary.text(
'Please enter the correct version format: (default: "$version")',
style=self.cz.style,
).ask()
if not tag_format:
tag_format = "$version"
return tag_format
def _install_pre_commit_hook(self):
pre_commit_config_filename = ".pre-commit-config.yaml"
cz_hook_config = {
"repo": "https://github.com/commitizen-tools/commitizen",
"rev": f"v{__version__}",
"hooks": [{"id": "commitizen", "stages": ["commit-msg"]}],
}
config_data = {}
if not os.path.isfile(pre_commit_config_filename):
config_data["repos"] = [cz_hook_config]
else:
with open(pre_commit_config_filename) as config_file:
yaml_data = yaml.safe_load(config_file)
if yaml_data:
config_data = yaml_data
if "repos" in config_data:
for pre_commit_hook in config_data["repos"]:
if "commitizen" in pre_commit_hook["repo"]:
out.write("commitizen already in pre-commit config")
break
else:
config_data["repos"].append(cz_hook_config)
else:
config_data["repos"] = [cz_hook_config]
with open(pre_commit_config_filename, "w") as config_file:
yaml.safe_dump(config_data, stream=config_file)
c = cmd.run("pre-commit install --hook-type commit-msg")
if c.return_code == 127:
out.error(
"pre-commit is not installed in current environement.\n"
"Run 'pre-commit install --hook-type commit-msg' again after it's installed"
)
elif c.return_code != 0:
out.error(c.err)
else:
out.write("commitizen pre-commit hook is now installed in your '.git'\n")
def _update_config_file(self, values):
for key, value in values.items():
self.config.set_key(key, value)
| true
| true
|
1c459db6c393559c2cd965467577c6bdcb250d28
| 1,090
|
py
|
Python
|
hpc-historias-clinicas/fojas_quirurgicas/migrations/0005_auto_20150505_0101.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
hpc-historias-clinicas/fojas_quirurgicas/migrations/0005_auto_20150505_0101.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
hpc-historias-clinicas/fojas_quirurgicas/migrations/0005_auto_20150505_0101.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('fojas_quirurgicas', '0004_auto_20150504_2120'),
]
operations = [
migrations.AlterField(
model_name='fojasquirurgicas',
name='fecha',
field=models.DateField(default=datetime.datetime(2015, 5, 5, 1, 1, 1, 702694)),
preserve_default=True,
),
migrations.AlterField(
model_name='fojasquirurgicas',
name='hora_comienzo',
field=models.TimeField(default=datetime.datetime(2015, 5, 5, 1, 1, 1, 702739), verbose_name='Hora / Comienzo Operac\xf3n'),
preserve_default=True,
),
migrations.AlterField(
model_name='fojasquirurgicas',
name='hora_fin',
field=models.TimeField(default=datetime.datetime(2015, 5, 5, 1, 1, 1, 702778), verbose_name='Hora / Termin\xf3 Operac\xf3n'),
preserve_default=True,
),
]
| 32.058824
| 137
| 0.612844
|
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('fojas_quirurgicas', '0004_auto_20150504_2120'),
]
operations = [
migrations.AlterField(
model_name='fojasquirurgicas',
name='fecha',
field=models.DateField(default=datetime.datetime(2015, 5, 5, 1, 1, 1, 702694)),
preserve_default=True,
),
migrations.AlterField(
model_name='fojasquirurgicas',
name='hora_comienzo',
field=models.TimeField(default=datetime.datetime(2015, 5, 5, 1, 1, 1, 702739), verbose_name='Hora / Comienzo Operac\xf3n'),
preserve_default=True,
),
migrations.AlterField(
model_name='fojasquirurgicas',
name='hora_fin',
field=models.TimeField(default=datetime.datetime(2015, 5, 5, 1, 1, 1, 702778), verbose_name='Hora / Termin\xf3 Operac\xf3n'),
preserve_default=True,
),
]
| true
| true
|
1c459dbc87ad166cc650a1298f694761c0c2d4ae
| 14,998
|
py
|
Python
|
utils.py
|
RachithP/rpg_public_dronet
|
244b44c6d321e77cfe326071f8413ea1f7e438cb
|
[
"MIT"
] | null | null | null |
utils.py
|
RachithP/rpg_public_dronet
|
244b44c6d321e77cfe326071f8413ea1f7e438cb
|
[
"MIT"
] | null | null | null |
utils.py
|
RachithP/rpg_public_dronet
|
244b44c6d321e77cfe326071f8413ea1f7e438cb
|
[
"MIT"
] | 1
|
2019-12-10T02:48:20.000Z
|
2019-12-10T02:48:20.000Z
|
import re
import os
import numpy as np
import tensorflow as tf
import json
import time
from keras import backend as K
from keras.preprocessing.image import Iterator
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.generic_utils import Progbar
from keras.models import model_from_json
import img_utils
class DroneDataGenerator(ImageDataGenerator):
"""
    Generate minibatches of images and labels with real-time augmentation.
    The only function that changes w.r.t. the parent class is the flow that
    generates data: it needed adaptation for a different directory structure
    and labels. All the remaining functions are unchanged.
    For an example usage, see the evaluate.py script.
"""
def flow_from_directory(self, directory, target_size=(224,224),
crop_size=(250,250), color_mode='grayscale', batch_size=32,
shuffle=True, seed=None, follow_links=False):
return DroneDirectoryIterator(
directory, self,
target_size=target_size, crop_size=crop_size, color_mode=color_mode,
batch_size=batch_size, shuffle=shuffle, seed=seed,
follow_links=follow_links)
class DroneDirectoryIterator(Iterator):
"""
Class for managing data loading.of images and labels
We assume that the folder structure is:
root_folder/
folder_1/
images/
sync_steering.txt or labels.txt
folder_2/
images/
sync_steering.txt or labels.txt
.
.
folder_n/
images/
sync_steering.txt or labels.txt
# Arguments
directory: Path to the root directory to read data from.
image_data_generator: Image Generator.
target_size: tuple of integers, dimensions to resize input images to.
crop_size: tuple of integers, dimensions to crop input images.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
batch_size: The desired batch size
shuffle: Whether to shuffle data or not
seed : numpy seed to shuffle data
follow_links: Bool, whether to follow symbolic links or not
# TODO: Add functionality to save images to have a look at the augmentation
"""
def __init__(self, directory, image_data_generator,
target_size=(224,224), crop_size = (250,250), color_mode='grayscale',
batch_size=32, shuffle=True, seed=None, follow_links=False):
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
self.crop_size = tuple(crop_size)
self.follow_links = follow_links
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
if self.color_mode == 'rgb':
self.image_shape = self.crop_size + (3,)
else:
self.image_shape = self.crop_size + (1,)
# First count how many experiments are out there
self.samples = 0
experiments = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
experiments.append(subdir)
self.num_experiments = len(experiments)
self.formats = {'png', 'jpg'}
# Idea = associate each filename with a corresponding steering or label
self.filenames = []
self.ground_truth = []
# Determine the type of experiment (steering or collision) to compute
# the loss
self.exp_type = []
for subdir in experiments:
subpath = os.path.join(directory, subdir)
self._decode_experiment_dir(subpath)
# Conversion of list into array
self.ground_truth = np.array(self.ground_truth, dtype = K.floatx())
assert self.samples > 0, "Did not find any data"
print('Found {} images belonging to {} experiments.'.format(
self.samples, self.num_experiments))
super(DroneDirectoryIterator, self).__init__(self.samples,
batch_size, shuffle, seed)
def _recursive_list(self, subpath):
return sorted(os.walk(subpath, followlinks=self.follow_links),
key=lambda tpl: tpl[0])
def _decode_experiment_dir(self, dir_subpath):
# Load steerings or labels in the experiment dir
steerings_filename = os.path.join(dir_subpath, "sync_steering.txt")
labels_filename = os.path.join(dir_subpath, "labels.txt")
# Try to load steerings first. Make sure that the steering angle or the
# label file is in the first column. Note also that the first line are
# comments so it should be skipped.
try:
ground_truth = np.loadtxt(steerings_filename, usecols=0,
delimiter=',', skiprows=1)
exp_type = 1
except OSError as e:
            # Try to load collision labels if there are no steerings
try:
ground_truth = np.loadtxt(labels_filename, usecols=0)
exp_type = 0
except OSError as e:
print("Neither steerings nor labels found in dir {}".format(
dir_subpath))
raise IOError
# Now fetch all images in the image subdir
image_dir_path = os.path.join(dir_subpath, "images")
for root, _, files in self._recursive_list(image_dir_path):
sorted_files = sorted(files,
key = lambda fname: int(re.search(r'\d+',fname).group()))
for frame_number, fname in enumerate(sorted_files):
is_valid = False
for extension in self.formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
absolute_path = os.path.join(root, fname)
self.filenames.append(os.path.relpath(absolute_path,
self.directory))
self.ground_truth.append(ground_truth[frame_number])
self.exp_type.append(exp_type)
self.samples += 1
def next(self):
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
    def _get_batches_of_transformed_samples(self, index_array):
        """
        Fetch the next batch of transformed samples.
        # Returns
            The next batch of images and labels.
        """
current_batch_size = index_array.shape[0]
# Image transformation is not under thread lock, so it can be done in
# parallel
batch_x = np.zeros((current_batch_size,) + self.image_shape,
dtype=K.floatx())
batch_steer = np.zeros((current_batch_size, 2,),
dtype=K.floatx())
batch_coll = np.zeros((current_batch_size, 2,),
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# Build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
x = img_utils.load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
crop_size=self.crop_size,
target_size=self.target_size)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# Build batch of steering and collision data
if self.exp_type[index_array[i]] == 1:
# Steering experiment (t=1)
                batch_steer[i,0] = 1.0
batch_steer[i,1] = self.ground_truth[index_array[i]]
batch_coll[i] = np.array([1.0, 0.0])
else:
# Collision experiment (t=0)
batch_steer[i] = np.array([0.0, 0.0])
batch_coll[i,0] = 0.0
batch_coll[i,1] = self.ground_truth[index_array[i]]
batch_y = [batch_steer, batch_coll]
return batch_x, batch_y
def compute_predictions_and_gt(model, generator, steps,
max_q_size=10,
pickle_safe=False, verbose=0):
"""
Generate predictions and associated ground truth
for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Function adapted from keras `predict_generator`.
# Arguments
generator: Generator yielding batches of input samples.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_q_size: Maximum size for the generator queue.
        pickle_safe: If `True`, use process-based threading. Note that because
            this implementation relies on multiprocessing, you should not pass
            non-picklable arguments to the generator as they can't be passed
            easily to children processes.
verbose: verbosity mode, 0 or 1.
# Returns
Numpy array(s) of predictions and associated ground truth.
# Raises
ValueError: In case the generator yields
data in an invalid format.
"""
steps_done = 0
all_outs = []
all_labels = []
all_ts = []
if verbose == 1:
progbar = Progbar(target=steps)
while steps_done < steps:
generator_output = next(generator)
if isinstance(generator_output, tuple):
if len(generator_output) == 2:
x, gt_lab = generator_output
elif len(generator_output) == 3:
x, gt_lab, _ = generator_output
else:
raise ValueError('output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' +
str(generator_output))
else:
raise ValueError('Output not valid for current evaluation')
start_time = time.time()
outs = model.predict_on_batch(x)
time_diff = time.time() - start_time
print("\n Time Diff: ", time_diff)
print("Batch Size: ", len(x))
print("FPS: ", len(x)/time_diff)
if not isinstance(outs, list):
outs = [outs]
if not isinstance(gt_lab, list):
gt_lab = [gt_lab]
if not all_outs:
for out in outs:
# Len of this list is related to the number of
# outputs per model(1 in our case)
all_outs.append([])
if not all_labels:
# Len of list related to the number of gt_commands
# per model (1 in our case )
for lab in gt_lab:
all_labels.append([])
all_ts.append([])
for i, out in enumerate(outs):
all_outs[i].append(out)
for i, lab in enumerate(gt_lab):
all_labels[i].append(lab[:,1])
all_ts[i].append(lab[:,0])
steps_done += 1
if verbose == 1:
progbar.update(steps_done)
if steps_done == 1:
return [out for out in all_outs], [lab for lab in all_labels], np.concatenate(all_ts[0])
else:
return np.squeeze(np.array([np.concatenate(out) for out in all_outs])).T, \
np.array([np.concatenate(lab) for lab in all_labels]).T, \
np.concatenate(all_ts[0])
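# Hedged usage sketch (the path and the trained Keras `model` are
# illustrative, not part of this module):
#
#   test_gen = DroneDataGenerator(rescale=1. / 255).flow_from_directory(
#       'testing/', shuffle=False, color_mode='grayscale', batch_size=32)
#   preds, ground_truth, t = compute_predictions_and_gt(
#       model, test_gen, steps=10, verbose=1)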
def hard_mining_mse(k):
"""
Compute MSE for steering evaluation and hard-mining for the current batch.
# Arguments
k: number of samples for hard-mining.
# Returns
custom_mse: average MSE for the current batch.
"""
def custom_mse(y_true, y_pred):
# Parameter t indicates the type of experiment
t = y_true[:,0]
# Number of steering samples
samples_steer = tf.cast(tf.equal(t,1), tf.int32)
n_samples_steer = tf.reduce_sum(samples_steer)
if n_samples_steer == 0:
return 0.0
else:
# Predicted and real steerings
pred_steer = tf.squeeze(y_pred, squeeze_dims=-1)
true_steer = y_true[:,1]
# Steering loss
l_steer = tf.multiply(t, K.square(pred_steer - true_steer))
# Hard mining
k_min = tf.minimum(k, n_samples_steer)
_, indices = tf.nn.top_k(l_steer, k=k_min)
max_l_steer = tf.gather(l_steer, indices)
hard_l_steer = tf.divide(tf.reduce_sum(max_l_steer), tf.cast(k,tf.float32))
return hard_l_steer
return custom_mse
def hard_mining_entropy(k):
"""
Compute binary cross-entropy for collision evaluation and hard-mining.
# Arguments
k: Number of samples for hard-mining.
# Returns
custom_bin_crossentropy: average binary cross-entropy for the current batch.
"""
def custom_bin_crossentropy(y_true, y_pred):
# Parameter t indicates the type of experiment
t = y_true[:,0]
# Number of collision samples
samples_coll = tf.cast(tf.equal(t,0), tf.int32)
n_samples_coll = tf.reduce_sum(samples_coll)
if n_samples_coll == 0:
return 0.0
else:
# Predicted and real labels
pred_coll = tf.squeeze(y_pred, squeeze_dims=-1)
true_coll = y_true[:,1]
# Collision loss
l_coll = tf.multiply((1-t), K.binary_crossentropy(true_coll, pred_coll))
# Hard mining
k_min = tf.minimum(k, n_samples_coll)
_, indices = tf.nn.top_k(l_coll, k=k_min)
max_l_coll = tf.gather(l_coll, indices)
hard_l_coll = tf.divide(tf.reduce_sum(max_l_coll), tf.cast(k, tf.float32))
return hard_l_coll
return custom_bin_crossentropy
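# Hedged sketch of how these closures plug into a two-headed Keras model
# (the optimizer and k values below are illustrative):
#
#   model.compile(optimizer='adam',
#                 loss=[hard_mining_mse(k=32), hard_mining_entropy(k=32)],
#                 loss_weights=[1.0, 1.0])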
def modelToJson(model, json_model_path):
"""
Serialize model into json.
"""
model_json = model.to_json()
with open(json_model_path,"w") as f:
f.write(model_json)
def jsonToModel(json_model_path):
"""
Serialize json into model.
"""
with open(json_model_path, 'r') as json_file:
loaded_model_json = json_file.read()
model = model_from_json(loaded_model_json)
return model
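# Hedged round-trip sketch ('model_struct.json' and 'model_weights.h5' are
# illustrative file names). The JSON carries only the architecture, so the
# weights have to be loaded separately:
#
#   modelToJson(model, 'model_struct.json')
#   restored = jsonToModel('model_struct.json')
#   restored.load_weights('model_weights.h5')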
def write_to_file(dictionary, fname):
"""
Writes everything is in a dictionary in json model.
"""
with open(fname, "w") as f:
json.dump(dictionary,f)
print("Written file {}".format(fname))
| 35.206573
| 96
| 0.595146
|
import re
import os
import numpy as np
import tensorflow as tf
import json
import time
from keras import backend as K
from keras.preprocessing.image import Iterator
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.generic_utils import Progbar
from keras.models import model_from_json
import img_utils
class DroneDataGenerator(ImageDataGenerator):
def flow_from_directory(self, directory, target_size=(224,224),
crop_size=(250,250), color_mode='grayscale', batch_size=32,
shuffle=True, seed=None, follow_links=False):
return DroneDirectoryIterator(
directory, self,
target_size=target_size, crop_size=crop_size, color_mode=color_mode,
batch_size=batch_size, shuffle=shuffle, seed=seed,
follow_links=follow_links)
class DroneDirectoryIterator(Iterator):
def __init__(self, directory, image_data_generator,
target_size=(224,224), crop_size = (250,250), color_mode='grayscale',
batch_size=32, shuffle=True, seed=None, follow_links=False):
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
self.crop_size = tuple(crop_size)
self.follow_links = follow_links
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
if self.color_mode == 'rgb':
self.image_shape = self.crop_size + (3,)
else:
self.image_shape = self.crop_size + (1,)
self.samples = 0
experiments = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
experiments.append(subdir)
self.num_experiments = len(experiments)
self.formats = {'png', 'jpg'}
self.filenames = []
self.ground_truth = []
self.exp_type = []
for subdir in experiments:
subpath = os.path.join(directory, subdir)
self._decode_experiment_dir(subpath)
self.ground_truth = np.array(self.ground_truth, dtype = K.floatx())
assert self.samples > 0, "Did not find any data"
print('Found {} images belonging to {} experiments.'.format(
self.samples, self.num_experiments))
super(DroneDirectoryIterator, self).__init__(self.samples,
batch_size, shuffle, seed)
def _recursive_list(self, subpath):
return sorted(os.walk(subpath, followlinks=self.follow_links),
key=lambda tpl: tpl[0])
def _decode_experiment_dir(self, dir_subpath):
steerings_filename = os.path.join(dir_subpath, "sync_steering.txt")
labels_filename = os.path.join(dir_subpath, "labels.txt")
try:
ground_truth = np.loadtxt(steerings_filename, usecols=0,
delimiter=',', skiprows=1)
exp_type = 1
except OSError as e:
try:
ground_truth = np.loadtxt(labels_filename, usecols=0)
exp_type = 0
except OSError as e:
print("Neither steerings nor labels found in dir {}".format(
dir_subpath))
raise IOError
image_dir_path = os.path.join(dir_subpath, "images")
for root, _, files in self._recursive_list(image_dir_path):
sorted_files = sorted(files,
key = lambda fname: int(re.search(r'\d+',fname).group()))
for frame_number, fname in enumerate(sorted_files):
is_valid = False
for extension in self.formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
absolute_path = os.path.join(root, fname)
self.filenames.append(os.path.relpath(absolute_path,
self.directory))
self.ground_truth.append(ground_truth[frame_number])
self.exp_type.append(exp_type)
self.samples += 1
def next(self):
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array)
    def _get_batches_of_transformed_samples(self, index_array):
current_batch_size = index_array.shape[0]
batch_x = np.zeros((current_batch_size,) + self.image_shape,
dtype=K.floatx())
batch_steer = np.zeros((current_batch_size, 2,),
dtype=K.floatx())
batch_coll = np.zeros((current_batch_size, 2,),
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
for i, j in enumerate(index_array):
fname = self.filenames[j]
x = img_utils.load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
crop_size=self.crop_size,
target_size=self.target_size)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.exp_type[index_array[i]] == 1:
                batch_steer[i,0] = 1.0
batch_steer[i,1] = self.ground_truth[index_array[i]]
batch_coll[i] = np.array([1.0, 0.0])
else:
batch_steer[i] = np.array([0.0, 0.0])
batch_coll[i,0] = 0.0
batch_coll[i,1] = self.ground_truth[index_array[i]]
batch_y = [batch_steer, batch_coll]
return batch_x, batch_y
def compute_predictions_and_gt(model, generator, steps,
max_q_size=10,
pickle_safe=False, verbose=0):
steps_done = 0
all_outs = []
all_labels = []
all_ts = []
if verbose == 1:
progbar = Progbar(target=steps)
while steps_done < steps:
generator_output = next(generator)
if isinstance(generator_output, tuple):
if len(generator_output) == 2:
x, gt_lab = generator_output
elif len(generator_output) == 3:
x, gt_lab, _ = generator_output
else:
raise ValueError('output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' +
str(generator_output))
else:
raise ValueError('Output not valid for current evaluation')
start_time = time.time()
outs = model.predict_on_batch(x)
time_diff = time.time() - start_time
print("\n Time Diff: ", time_diff)
print("Batch Size: ", len(x))
print("FPS: ", len(x)/time_diff)
if not isinstance(outs, list):
outs = [outs]
if not isinstance(gt_lab, list):
gt_lab = [gt_lab]
if not all_outs:
for out in outs:
all_outs.append([])
if not all_labels:
for lab in gt_lab:
all_labels.append([])
all_ts.append([])
for i, out in enumerate(outs):
all_outs[i].append(out)
for i, lab in enumerate(gt_lab):
all_labels[i].append(lab[:,1])
all_ts[i].append(lab[:,0])
steps_done += 1
if verbose == 1:
progbar.update(steps_done)
if steps_done == 1:
return [out for out in all_outs], [lab for lab in all_labels], np.concatenate(all_ts[0])
else:
return np.squeeze(np.array([np.concatenate(out) for out in all_outs])).T, \
np.array([np.concatenate(lab) for lab in all_labels]).T, \
np.concatenate(all_ts[0])
def hard_mining_mse(k):
def custom_mse(y_true, y_pred):
t = y_true[:,0]
samples_steer = tf.cast(tf.equal(t,1), tf.int32)
n_samples_steer = tf.reduce_sum(samples_steer)
if n_samples_steer == 0:
return 0.0
else:
pred_steer = tf.squeeze(y_pred, squeeze_dims=-1)
true_steer = y_true[:,1]
l_steer = tf.multiply(t, K.square(pred_steer - true_steer))
k_min = tf.minimum(k, n_samples_steer)
_, indices = tf.nn.top_k(l_steer, k=k_min)
max_l_steer = tf.gather(l_steer, indices)
hard_l_steer = tf.divide(tf.reduce_sum(max_l_steer), tf.cast(k,tf.float32))
return hard_l_steer
return custom_mse
def hard_mining_entropy(k):
def custom_bin_crossentropy(y_true, y_pred):
t = y_true[:,0]
samples_coll = tf.cast(tf.equal(t,0), tf.int32)
n_samples_coll = tf.reduce_sum(samples_coll)
if n_samples_coll == 0:
return 0.0
else:
pred_coll = tf.squeeze(y_pred, squeeze_dims=-1)
true_coll = y_true[:,1]
l_coll = tf.multiply((1-t), K.binary_crossentropy(true_coll, pred_coll))
k_min = tf.minimum(k, n_samples_coll)
_, indices = tf.nn.top_k(l_coll, k=k_min)
max_l_coll = tf.gather(l_coll, indices)
hard_l_coll = tf.divide(tf.reduce_sum(max_l_coll), tf.cast(k, tf.float32))
return hard_l_coll
return custom_bin_crossentropy
def modelToJson(model, json_model_path):
model_json = model.to_json()
with open(json_model_path,"w") as f:
f.write(model_json)
def jsonToModel(json_model_path):
with open(json_model_path, 'r') as json_file:
loaded_model_json = json_file.read()
model = model_from_json(loaded_model_json)
return model
def write_to_file(dictionary, fname):
with open(fname, "w") as f:
json.dump(dictionary,f)
print("Written file {}".format(fname))
| true
| true
|
1c459f2b6ff309defaa99622a9e67444b25d1a67
| 309
|
py
|
Python
|
testing_8709/main.py
|
akvrdata/testing_8709
|
b9987a6a14d582a062f08d9de13f9b46f38989b1
|
[
"MIT"
] | null | null | null |
testing_8709/main.py
|
akvrdata/testing_8709
|
b9987a6a14d582a062f08d9de13f9b46f38989b1
|
[
"MIT"
] | null | null | null |
testing_8709/main.py
|
akvrdata/testing_8709
|
b9987a6a14d582a062f08d9de13f9b46f38989b1
|
[
"MIT"
] | null | null | null |
import sys
import click
@click.command()
@click.option('--count',default=1,help='Number of prints required')
@click.option('--name',help='name to print')
def hello(count,name):
    '''Click CLI testing.'''
for x in range(count):
click.echo('Hello %s' %name)
if __name__ == '__main__':
hello()
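# Illustrative shell invocation (assuming this module is run as main.py):
#
#   $ python main.py --count 2 --name World
#   Hello World
#   Hello World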
| 23.769231
| 67
| 0.653722
|
import sys
import click
@click.command()
@click.option('--count',default=1,help='Number of prints required')
@click.option('--name',help='name to print')
def hello(count,name):
for x in range(count):
click.echo('Hello %s' %name)
if __name__ == '__main__':
hello()
| true
| true
|
1c459f2e63f5d6cbc44f6b3304bb888e1f9f90a0
| 3,711
|
py
|
Python
|
bauh/api/http.py
|
Flash1232/bauh
|
6f65556c05ae272c1dbbd557c7f80a606658eb56
|
[
"Zlib"
] | 507
|
2019-08-12T16:15:55.000Z
|
2022-03-28T15:49:39.000Z
|
bauh/api/http.py
|
Flash1232/bauh
|
6f65556c05ae272c1dbbd557c7f80a606658eb56
|
[
"Zlib"
] | 176
|
2019-08-14T02:35:21.000Z
|
2022-03-31T21:43:56.000Z
|
bauh/api/http.py
|
Flash1232/bauh
|
6f65556c05ae272c1dbbd557c7f80a606658eb56
|
[
"Zlib"
] | 57
|
2019-09-02T04:09:22.000Z
|
2022-03-21T21:37:16.000Z
|
import logging
import time
import traceback
from typing import Optional
import requests
import yaml
from bauh.commons import system
class HttpClient:
def __init__(self, logger: logging.Logger, max_attempts: int = 2, timeout: int = 30, sleep: float = 0.5):
self.max_attempts = max_attempts
self.session = requests.Session()
self.timeout = timeout
self.sleep = sleep
self.logger = logger
def get(self, url: str, params: dict = None, headers: dict = None, allow_redirects: bool = True, ignore_ssl: bool = False, single_call: bool = False, session: bool = True) -> Optional[requests.Response]:
cur_attempts = 1
while cur_attempts <= self.max_attempts:
cur_attempts += 1
try:
args = {'timeout': self.timeout, 'allow_redirects': allow_redirects}
if params:
args['params'] = params
if headers:
args['headers'] = headers
if ignore_ssl:
args['verify'] = False
if session:
res = self.session.get(url, **args)
else:
res = requests.get(url, **args)
if res.status_code == 200:
return res
if single_call:
return res
if self.sleep > 0:
time.sleep(self.sleep)
except Exception as e:
if isinstance(e, requests.exceptions.ConnectionError):
self.logger.error('Internet seems to be off')
raise
self.logger.error("Could not retrieve data from '{}'".format(url))
traceback.print_exc()
continue
self.logger.warning("Could not retrieve data from '{}'".format(url))
def get_json(self, url: str, params: dict = None, headers: dict = None, allow_redirects: bool = True, session: bool = True):
res = self.get(url, params=params, headers=headers, allow_redirects=allow_redirects, session=session)
return res.json() if res else None
def get_yaml(self, url: str, params: dict = None, headers: dict = None, allow_redirects: bool = True, session: bool = True):
res = self.get(url, params=params, headers=headers, allow_redirects=allow_redirects, session=session)
return yaml.safe_load(res.text) if res else None
def get_content_length_in_bytes(self, url: str, session: bool = True) -> Optional[int]:
params = {'url': url, 'allow_redirects': True, 'stream': True}
try:
if session:
res = self.session.get(**params)
else:
res = requests.get(**params)
except requests.exceptions.ConnectionError:
self.logger.info("Internet seems to be off. Could not reach '{}'".format(url))
return
if res.status_code == 200:
size = res.headers.get('Content-Length')
if size:
try:
return int(size)
                except (TypeError, ValueError):
pass
def get_content_length(self, url: str, session: bool = True) -> Optional[str]:
size = self.get_content_length_in_bytes(url, session)
if size:
return system.get_human_size_str(size)
def exists(self, url: str, session: bool = True, timeout: int = 5) -> bool:
params = {'url': url, 'allow_redirects': True, 'verify': False, 'timeout': timeout}
if session:
res = self.session.head(**params)
else:
res = self.session.get(**params)
return res.status_code in (200, 403)
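# Hedged usage sketch (logger wiring and URLs below are illustrative):
#
#   import logging
#   client = HttpClient(logging.getLogger('bauh.http'))
#   data = client.get_json('https://example.com/api/releases')  # None on failure
#   size = client.get_content_length('https://example.com/pkg.tar.gz')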
| 35.009434
| 207
| 0.565346
|
import logging
import time
import traceback
from typing import Optional
import requests
import yaml
from bauh.commons import system
class HttpClient:
def __init__(self, logger: logging.Logger, max_attempts: int = 2, timeout: int = 30, sleep: float = 0.5):
self.max_attempts = max_attempts
self.session = requests.Session()
self.timeout = timeout
self.sleep = sleep
self.logger = logger
def get(self, url: str, params: dict = None, headers: dict = None, allow_redirects: bool = True, ignore_ssl: bool = False, single_call: bool = False, session: bool = True) -> Optional[requests.Response]:
cur_attempts = 1
while cur_attempts <= self.max_attempts:
cur_attempts += 1
try:
args = {'timeout': self.timeout, 'allow_redirects': allow_redirects}
if params:
args['params'] = params
if headers:
args['headers'] = headers
if ignore_ssl:
args['verify'] = False
if session:
res = self.session.get(url, **args)
else:
res = requests.get(url, **args)
if res.status_code == 200:
return res
if single_call:
return res
if self.sleep > 0:
time.sleep(self.sleep)
except Exception as e:
if isinstance(e, requests.exceptions.ConnectionError):
self.logger.error('Internet seems to be off')
raise
self.logger.error("Could not retrieve data from '{}'".format(url))
traceback.print_exc()
continue
self.logger.warning("Could not retrieve data from '{}'".format(url))
def get_json(self, url: str, params: dict = None, headers: dict = None, allow_redirects: bool = True, session: bool = True):
res = self.get(url, params=params, headers=headers, allow_redirects=allow_redirects, session=session)
return res.json() if res else None
def get_yaml(self, url: str, params: dict = None, headers: dict = None, allow_redirects: bool = True, session: bool = True):
res = self.get(url, params=params, headers=headers, allow_redirects=allow_redirects, session=session)
return yaml.safe_load(res.text) if res else None
def get_content_length_in_bytes(self, url: str, session: bool = True) -> Optional[int]:
params = {'url': url, 'allow_redirects': True, 'stream': True}
try:
if session:
res = self.session.get(**params)
else:
res = requests.get(**params)
except requests.exceptions.ConnectionError:
self.logger.info("Internet seems to be off. Could not reach '{}'".format(url))
return
if res.status_code == 200:
size = res.headers.get('Content-Length')
if size:
try:
return int(size)
                except (TypeError, ValueError):
pass
def get_content_length(self, url: str, session: bool = True) -> Optional[str]:
size = self.get_content_length_in_bytes(url, session)
if size:
return system.get_human_size_str(size)
def exists(self, url: str, session: bool = True, timeout: int = 5) -> bool:
params = {'url': url, 'allow_redirects': True, 'verify': False, 'timeout': timeout}
if session:
res = self.session.head(**params)
else:
res = self.session.get(**params)
return res.status_code in (200, 403)
| true
| true
|
1c459fbfb4d5f376b961ba213a2581525628f906
| 398
|
py
|
Python
|
accounts/migrations/0002_account_points.py
|
ebar0n/palermo-coin
|
63dc14fce31fbeae50ec7ebf5ea97efbb1ec18fd
|
[
"MIT"
] | null | null | null |
accounts/migrations/0002_account_points.py
|
ebar0n/palermo-coin
|
63dc14fce31fbeae50ec7ebf5ea97efbb1ec18fd
|
[
"MIT"
] | 15
|
2019-05-13T23:40:06.000Z
|
2022-03-11T23:39:57.000Z
|
accounts/migrations/0002_account_points.py
|
ebar0n/leviatan-backend
|
63dc14fce31fbeae50ec7ebf5ea97efbb1ec18fd
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-03-07 07:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='account',
name='points',
field=models.PositiveIntegerField(default=0, editable=False),
),
]
| 20.947368
| 73
| 0.603015
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='account',
name='points',
field=models.PositiveIntegerField(default=0, editable=False),
),
]
| true
| true
|
1c45a0ca7dda5396a87bdbca7a0a71105cce95b6
| 1,359
|
py
|
Python
|
python2/timeout.py
|
SLongofono/Python-Misc
|
c6c2735f65b7f06e31996140c2921315b1a6cf9e
|
[
"MIT"
] | 2
|
2017-07-24T17:46:13.000Z
|
2017-12-09T16:00:40.000Z
|
python2/timeout.py
|
SLongofono/Python-Misc
|
c6c2735f65b7f06e31996140c2921315b1a6cf9e
|
[
"MIT"
] | null | null | null |
python2/timeout.py
|
SLongofono/Python-Misc
|
c6c2735f65b7f06e31996140c2921315b1a6cf9e
|
[
"MIT"
] | 1
|
2018-09-18T15:18:47.000Z
|
2018-09-18T15:18:47.000Z
|
def timed_func(f, args=(), kwargs=None, timeout=30, default=None, errormsg="Timeout error"):
    # A mutable default argument would be shared across calls, so kwargs
    # defaults to None here; use the caller's dict if one was passed,
    # otherwise fall back to an empty dict and proceed.
kwargs = kwargs or {}
import signal
class TimeoutError(Exception):
pass
def timeout_handler(signum, frame):
raise TimeoutError
# Register a signal to our handler
signal.signal(signal.SIGALRM, timeout_handler)
# Trigger an alarm after timeout seconds
signal.alarm(timeout)
# Try a function call:
# If it returns normally before the timeout, pass along the value
# Otherwise, print the specific error and return the default value
try:
result = f(*args, **kwargs)
except TimeoutError:
result = default
print(errormsg)
finally:
signal.alarm(0)
return result
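# signal.SIGALRM exists only on Unix, only fires in the main thread, and
# signal.alarm() has whole-second resolution. A rough cross-platform
# alternative (an illustrative sketch, with made-up names) runs f in a daemon
# thread instead; the caveat is that the worker keeps running in the
# background after a timeout.
def timed_func_threaded(f, args=(), kwargs=None, timeout=30, default=None):
    import threading
    kwargs = kwargs or {}
    box = {}

    def runner():
        box['result'] = f(*args, **kwargs)

    worker = threading.Thread(target=runner)
    worker.daemon = True
    worker.start()
    worker.join(timeout)
    # If the worker finished in time, its result is in the box; otherwise
    # fall back to the default value.
    return box.get('result', default)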
# Silly function that never returns
def forever():
import time
while True:
time.sleep(1)
# Function that may or may not complete depending on the timeout
def andever(a,b):
result = a
while True:
result += b
#if result > 200000000:
if result > 100000000:
return result
# Test
print(timed_func(forever, timeout=2, default="no response", errormsg="failed to update"))
print(timed_func(andever, (1,2), timeout=5, default=-1, errormsg="computation timeout"))
| 28.3125
| 103
| 0.734364
|
def timed_func(f, args=(), kwargs=None, timeout=30, default=None, errormsg="Timeout error"):
    # Substitute an empty dict when no kwargs were passed (mutable-default pitfall).
kwargs = kwargs or {}
import signal
class TimeoutError(Exception):
pass
def timeout_handler(signum, frame):
raise TimeoutError
# Register a signal to our handler
signal.signal(signal.SIGALRM, timeout_handler)
# Trigger an alarm after timeout seconds
signal.alarm(timeout)
# Try a function call:
# If it returns normally before the timeout, pass along the value
# Otherwise, print the specific error and return the default value
try:
result = f(*args, **kwargs)
except TimeoutError:
result = default
print(errormsg)
finally:
signal.alarm(0)
return result
# Silly function that never returns
def forever():
import time
while True:
time.sleep(1)
# Function that may or may not complete depending on the timeout
def andever(a,b):
result = a
while True:
result += b
#if result > 200000000:
if result > 100000000:
return result
# Test
print(timed_func(forever, timeout=2, default="no response", errormsg="failed to update"))
print(timed_func(andever, (1,2), timeout=5, default=-1, errormsg="computation timeout"))
| true
| true
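One caveat about timeout.py above: signal.SIGALRM exists only on Unix and only works in the main thread. A portable sketch of the same timed-call idea built on concurrent.futures; unlike the alarm approach it cannot interrupt the running function, it merely stops waiting for it:

from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeout

def timed_func_portable(f, args=(), kwargs=None, timeout=30, default=None, errormsg="Timeout error"):
    kwargs = kwargs or {}
    pool = ThreadPoolExecutor(max_workers=1)
    future = pool.submit(f, *args, **kwargs)
    try:
        return future.result(timeout=timeout)
    except FutureTimeout:
        print(errormsg)
        return default
    finally:
        # Don't block on the (possibly still running) worker; the thread itself
        # runs to completion in the background.
        pool.shutdown(wait=False)

def slow_add(a, b):
    import time
    time.sleep(1)
    return a + b

print(timed_func_portable(slow_add, (1, 2), timeout=5))                                     # 3
print(timed_func_portable(slow_add, (1, 2), timeout=0.1, default=-1, errormsg="too slow"))  # -1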
|
1c45a0f0a16e4c957d53072ae53309de03cc22ef
| 6,090
|
py
|
Python
|
docs/conf.py
|
open-datastudio/datastudio
|
5055579adf969ad6d7491454b30ab2fedbaaa067
|
[
"MIT"
] | 10
|
2020-06-23T13:45:44.000Z
|
2021-11-04T13:31:43.000Z
|
docs/conf.py
|
open-datastudio/datastudio
|
5055579adf969ad6d7491454b30ab2fedbaaa067
|
[
"MIT"
] | 1
|
2020-06-23T23:15:10.000Z
|
2020-08-11T04:41:25.000Z
|
docs/conf.py
|
open-datastudio/datastudio
|
5055579adf969ad6d7491454b30ab2fedbaaa067
|
[
"MIT"
] | 2
|
2021-11-20T21:24:36.000Z
|
2022-01-05T03:35:32.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'Open Data Studio'
copyright = u'Open Data Studio Authors'
author = u'Open Data Studio Authors'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autosectionlabel',
'aafigure.sphinxext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_logo = '_static/open-datastudio-logo.svg'
# 'View page source' link; uncomment the next line to hide it
#html_show_sourcelink = False
# Add 'Edit on Github' link instead of 'View page source'
# reference:https://docs.readthedocs.io/en/latest/vcs.html
html_context = {
# Enable the "Edit in GitHub link within the header of each page.
'display_github': True,
# Set the following variables to generate the resulting github URL for each page.
# Format Template: https://{{ github_host|default("github.com") }}/{{ github_user }}
#/{{ github_repo }}/blob/{{ github_version }}{{ conf_py_path }}{{ pagename }}{{ suffix }}
#https://github.com/runawayhorse001/SphinxGithub/blob/master/doc/index.rst
'github_user': 'open-datastudio',
'github_repo': 'datastudio',
'github_version': 'master/docs/',
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenDataStudioDoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'OPENDATASTUDIO.tex', u'Open Data Studio Documentation',
u'Open Data Studio', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'open data studio', u'Open Data Studio Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Open Data Studio', u'Open Data Studio Documentation',
author, 'Open Data Studio', 'Cloud data tools',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| 31.71875
| 93
| 0.663054
|
project = u'Open Data Studio'
copyright = u'Open Data Studio Authors'
author = u'Open Data Studio Authors'
version = u''
release = u''
extensions = [
'sphinx.ext.autosectionlabel',
'aafigure.sphinxext'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
pygments_style = None
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_logo = '_static/open-datastudio-logo.svg'
# 'View page source' link; uncomment the next line to hide it
#html_show_sourcelink = False
# Add 'Edit on Github' link instead of 'View page source'
# reference:https://docs.readthedocs.io/en/latest/vcs.html
html_context = {
# Enable the "Edit in GitHub link within the header of each page.
'display_github': True,
# Set the following variables to generate the resulting github URL for each page.
# Format Template: https://{{ github_host|default("github.com") }}/{{ github_user }}
#/{{ github_repo }}/blob/{{ github_version }}{{ conf_py_path }}{{ pagename }}{{ suffix }}
#https://github.com/runawayhorse001/SphinxGithub/blob/master/doc/index.rst
'github_user': 'open-datastudio',
'github_repo': 'datastudio',
'github_version': 'master/docs/',
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenDataStudioDoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'OPENDATASTUDIO.tex', u'Open Data Studio Documentation',
u'Open Data Studio', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'open data studio', u'Open Data Studio Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Open Data Studio', u'Open Data Studio Documentation',
author, 'Open Data Studio', 'Cloud data tools',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| true
| true
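Stripped of the generated boilerplate, the configuration above reduces to a handful of settings. A minimal conf.py sketch with the same theme and "Edit on GitHub" header link, with values copied from the record (sphinx_rtd_theme and aafigure must be installed):

project = 'Open Data Studio'
author = 'Open Data Studio Authors'
copyright = 'Open Data Studio Authors'

extensions = ['sphinx.ext.autosectionlabel', 'aafigure.sphinxext']
master_doc = 'index'
html_theme = 'sphinx_rtd_theme'

# Replaces "View page source" with an "Edit on GitHub" link on every page.
html_context = {
    'display_github': True,
    'github_user': 'open-datastudio',
    'github_repo': 'datastudio',
    'github_version': 'master/docs/',
}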
|
1c45a10a9ddde743dce9b343e4d18f568bb05e72
| 3,531
|
py
|
Python
|
python/paddle/fluid/tests/unittests/dist_mnist.py
|
hshen14/Paddle
|
0962be9c800d29e0804fc3135163bdfba1564c61
|
[
"Apache-2.0"
] | 2
|
2019-04-03T05:36:17.000Z
|
2020-04-29T03:38:54.000Z
|
python/paddle/fluid/tests/unittests/dist_mnist.py
|
hshen14/Paddle
|
0962be9c800d29e0804fc3135163bdfba1564c61
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/dist_mnist.py
|
hshen14/Paddle
|
0962be9c800d29e0804fc3135163bdfba1564c61
|
[
"Apache-2.0"
] | 3
|
2019-01-07T06:50:29.000Z
|
2019-03-13T08:48:23.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import signal
from functools import reduce
from test_dist_base import TestDistRunnerBase, runtime_main
DTYPE = "float32"
paddle.dataset.mnist.fetch()
# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
def cnn_model(data):
conv_pool_1 = fluid.nets.simple_img_conv_pool(
input=data,
filter_size=5,
num_filters=20,
pool_size=2,
pool_stride=2,
act="relu",
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
value=0.01)))
conv_pool_2 = fluid.nets.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=50,
pool_size=2,
pool_stride=2,
act="relu",
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
value=0.01)))
SIZE = 10
input_shape = conv_pool_2.shape
param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE]
scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5
predict = fluid.layers.fc(
input=conv_pool_2,
size=SIZE,
act="softmax",
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01)))
return predict
class TestDistMnist2x2(TestDistRunnerBase):
def get_model(self, batch_size=2):
# Input data
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# Train program
predict = cnn_model(images)
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_acc = fluid.layers.accuracy(
input=predict, label=label, total=batch_size_tensor)
inference_program = fluid.default_main_program().clone()
# Optimization
# TODO(typhoonzero): fix distributed adam optimizer
# opt = fluid.optimizer.AdamOptimizer(
# learning_rate=0.001, beta1=0.9, beta2=0.999)
opt = fluid.optimizer.Momentum(learning_rate=self.lr, momentum=0.9)
# Reader
train_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=batch_size)
opt.minimize(avg_cost)
return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict
if __name__ == "__main__":
runtime_main(TestDistMnist2x2)
| 32.394495
| 89
| 0.687624
|
from __future__ import print_function
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import signal
from functools import reduce
from test_dist_base import TestDistRunnerBase, runtime_main
DTYPE = "float32"
paddle.dataset.mnist.fetch()
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
def cnn_model(data):
conv_pool_1 = fluid.nets.simple_img_conv_pool(
input=data,
filter_size=5,
num_filters=20,
pool_size=2,
pool_stride=2,
act="relu",
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
value=0.01)))
conv_pool_2 = fluid.nets.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=50,
pool_size=2,
pool_stride=2,
act="relu",
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
value=0.01)))
SIZE = 10
input_shape = conv_pool_2.shape
param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE]
scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5
predict = fluid.layers.fc(
input=conv_pool_2,
size=SIZE,
act="softmax",
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01)))
return predict
class TestDistMnist2x2(TestDistRunnerBase):
def get_model(self, batch_size=2):
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
predict = cnn_model(images)
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_acc = fluid.layers.accuracy(
input=predict, label=label, total=batch_size_tensor)
inference_program = fluid.default_main_program().clone()
opt = fluid.optimizer.Momentum(learning_rate=self.lr, momentum=0.9)
train_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=batch_size)
opt.minimize(avg_cost)
return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict
if __name__ == "__main__":
runtime_main(TestDistMnist2x2)
| true
| true
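In cnn_model above, the fully connected layer's weight shape comes from flattening every non-batch dimension of the conv output with functools.reduce. A tiny standalone illustration of that shape arithmetic; the concrete input shape is hypothetical, not derived from this network:

from functools import reduce

input_shape = (-1, 50, 4, 4)   # (batch, channels, height, width); illustrative numbers
SIZE = 10                      # number of output classes

flat = reduce(lambda a, b: a * b, input_shape[1:], 1)     # 50 * 4 * 4 = 800
param_shape = [flat, SIZE]                                # fc weight matrix shape
scale = (2.0 / (param_shape[0] ** 2 * SIZE)) ** 0.5       # same init-scale formula as above

print(flat, param_shape, scale)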
|
1c45a12fb0bf22d70b2259e645866d62d1c2fa9f
| 5,240
|
py
|
Python
|
tests/test_cli.py
|
steffenschumacher/NIPAP
|
200ec08ce02ba9f782b276510bc7bb23b20d7570
|
[
"MIT"
] | 1
|
2018-12-07T15:59:27.000Z
|
2018-12-07T15:59:27.000Z
|
tests/test_cli.py
|
steffenschumacher/NIPAP
|
200ec08ce02ba9f782b276510bc7bb23b20d7570
|
[
"MIT"
] | 1
|
2021-07-24T14:44:10.000Z
|
2021-07-24T14:44:10.000Z
|
tests/test_cli.py
|
steffenschumacher/NIPAP
|
200ec08ce02ba9f782b276510bc7bb23b20d7570
|
[
"MIT"
] | 1
|
2020-05-27T15:28:03.000Z
|
2020-05-27T15:28:03.000Z
|
#!/usr/bin/env python
# vim: et :
import logging
import unittest
import sys
sys.path.append('../nipap/')
from nipap.backend import Nipap
from nipap.authlib import SqliteAuth
from nipap.nipapconfig import NipapConfig
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
log_format = "%(levelname)-8s %(message)s"
import xmlrpclib
server_url = "http://unittest:gottatest@127.0.0.1:1337/XMLRPC"
s = xmlrpclib.Server(server_url, allow_none=1);
ad = { 'authoritative_source': 'nipap' }
nipap_bin = '../nipap-cli/nipap'
class NipapCliTest(unittest.TestCase):
""" Tests the NIPAP CLI
We presume the database is empty
"""
maxDiff = None
logger = None
cfg = None
nipap = None
def setUp(self):
# logging
self.logger = logging.getLogger(self.__class__.__name__)
# NIPAP
self.cfg = NipapConfig('/etc/nipap/nipap.conf')
self.nipap = Nipap()
# create dummy auth object
# As the authentication is performed before the query hits the Nipap
# class, it does not matter what user we use here
self.auth = SqliteAuth('local', 'unittest', 'unittest', 'unittest')
self.auth.authenticated_as = 'unittest'
self.auth.full_name = 'Unit test'
# have to delete hosts before we can delete the rest
self.nipap._execute("DELETE FROM ip_net_plan WHERE masklen(prefix) = 32")
# the rest
self.nipap._execute("DELETE FROM ip_net_plan")
# delete all except for the default VRF with id 0
self.nipap._execute("DELETE FROM ip_net_vrf WHERE id > 0")
# set default info for VRF 0
self.nipap._execute("UPDATE ip_net_vrf SET name = 'default', description = 'The default VRF, typically the Internet.' WHERE id = 0")
self.nipap._execute("DELETE FROM ip_net_pool")
self.nipap._execute("DELETE FROM ip_net_asn")
def _mangle_prefix_result(self, res):
""" Mangle prefix result for easier testing
We can never predict the values of things like the ID (okay, that
one is actually kind of doable) or the added and last_modified
timestamp. This function will make sure the values are present but
then strip them to make it easier to test against an expected
result.
"""
if isinstance(res, list):
# res from list_prefix
for p in res:
self.assertIn('added', p)
self.assertIn('last_modified', p)
del(p['added'])
del(p['last_modified'])
del(p['total_addresses'])
del(p['used_addresses'])
del(p['free_addresses'])
elif isinstance(res, dict) and 'result' in res:
# res from smart search
for p in res['result']:
self.assertIn('added', p)
self.assertIn('last_modified', p)
del(p['added'])
del(p['last_modified'])
del(p['total_addresses'])
del(p['used_addresses'])
del(p['free_addresses'])
elif isinstance(res, dict):
# just one single prefix
            self.assertIn('added', res)
            self.assertIn('last_modified', res)
            del(res['added'])
            del(res['last_modified'])
del(res['total_addresses'])
del(res['used_addresses'])
del(res['free_addresses'])
return res
def _run_cmd(self, cmd):
""" Run a command
"""
import subprocess
return subprocess.check_output(cmd)
def test_prefix_add_list(self):
""" Add a prefix and verify result in database
"""
ref = {
'prefix': '1.3.3.0/24',
'type': 'assignment',
'status': 'assigned',
'description': 'foo description',
'comment': 'comment bar',
'country': 'AB',
'alarm_priority': 'high',
'monitor': 'true',
'order_id': '123',
'customer_id': '66'
}
cmd = [nipap_bin, 'address', 'add']
for key in ref:
cmd.append(key)
cmd.append(ref[key])
ref['display_prefix'] = '1.3.3.0/24'
ref['indent'] = 0
ref['family'] = 4
ref['monitor'] = True
ref['pool_id'] = None
ref['pool_name'] = None
ref['vrf_id'] = 0
ref['vrf_name'] = 'default'
ref['vrf_rt'] = None
ref['external_key'] = None
ref['node'] = None
ref['authoritative_source'] = 'nipap'
ref['vlan'] = None
ref['inherited_tags'] = []
ref['tags'] = []
ref['avps'] = {}
ref['expires'] = None
self._run_cmd(cmd)
res = self._mangle_prefix_result(s.list_prefix({ 'auth': ad, 'spec': {} }))
del(res[0]['id'])
self.assertEqual(res, [ ref, ])
if __name__ == '__main__':
# set up logging
log = logging.getLogger()
logging.basicConfig()
log.setLevel(logging.INFO)
if sys.version_info >= (2,7):
unittest.main(verbosity=2)
else:
unittest.main()
| 29.438202
| 140
| 0.55687
|
import logging
import unittest
import sys
sys.path.append('../nipap/')
from nipap.backend import Nipap
from nipap.authlib import SqliteAuth
from nipap.nipapconfig import NipapConfig
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
log_format = "%(levelname)-8s %(message)s"
import xmlrpclib
server_url = "http://unittest:gottatest@127.0.0.1:1337/XMLRPC"
s = xmlrpclib.Server(server_url, allow_none=1);
ad = { 'authoritative_source': 'nipap' }
nipap_bin = '../nipap-cli/nipap'
class NipapCliTest(unittest.TestCase):
maxDiff = None
logger = None
cfg = None
nipap = None
def setUp(self):
self.logger = logging.getLogger(self.__class__.__name__)
self.cfg = NipapConfig('/etc/nipap/nipap.conf')
self.nipap = Nipap()
self.auth = SqliteAuth('local', 'unittest', 'unittest', 'unittest')
self.auth.authenticated_as = 'unittest'
self.auth.full_name = 'Unit test'
self.nipap._execute("DELETE FROM ip_net_plan WHERE masklen(prefix) = 32")
self.nipap._execute("DELETE FROM ip_net_plan")
self.nipap._execute("DELETE FROM ip_net_vrf WHERE id > 0")
self.nipap._execute("UPDATE ip_net_vrf SET name = 'default', description = 'The default VRF, typically the Internet.' WHERE id = 0")
self.nipap._execute("DELETE FROM ip_net_pool")
self.nipap._execute("DELETE FROM ip_net_asn")
def _mangle_prefix_result(self, res):
if isinstance(res, list):
for p in res:
self.assertIn('added', p)
self.assertIn('last_modified', p)
del(p['added'])
del(p['last_modified'])
del(p['total_addresses'])
del(p['used_addresses'])
del(p['free_addresses'])
elif isinstance(res, dict) and 'result' in res:
for p in res['result']:
self.assertIn('added', p)
self.assertIn('last_modified', p)
del(p['added'])
del(p['last_modified'])
del(p['total_addresses'])
del(p['used_addresses'])
del(p['free_addresses'])
elif isinstance(res, dict):
            self.assertIn('added', res)
            self.assertIn('last_modified', res)
            del(res['added'])
            del(res['last_modified'])
del(res['total_addresses'])
del(res['used_addresses'])
del(res['free_addresses'])
return res
def _run_cmd(self, cmd):
import subprocess
return subprocess.check_output(cmd)
def test_prefix_add_list(self):
ref = {
'prefix': '1.3.3.0/24',
'type': 'assignment',
'status': 'assigned',
'description': 'foo description',
'comment': 'comment bar',
'country': 'AB',
'alarm_priority': 'high',
'monitor': 'true',
'order_id': '123',
'customer_id': '66'
}
cmd = [nipap_bin, 'address', 'add']
for key in ref:
cmd.append(key)
cmd.append(ref[key])
ref['display_prefix'] = '1.3.3.0/24'
ref['indent'] = 0
ref['family'] = 4
ref['monitor'] = True
ref['pool_id'] = None
ref['pool_name'] = None
ref['vrf_id'] = 0
ref['vrf_name'] = 'default'
ref['vrf_rt'] = None
ref['external_key'] = None
ref['node'] = None
ref['authoritative_source'] = 'nipap'
ref['vlan'] = None
ref['inherited_tags'] = []
ref['tags'] = []
ref['avps'] = {}
ref['expires'] = None
self._run_cmd(cmd)
res = self._mangle_prefix_result(s.list_prefix({ 'auth': ad, 'spec': {} }))
del(res[0]['id'])
self.assertEqual(res, [ ref, ])
if __name__ == '__main__':
log = logging.getLogger()
logging.basicConfig()
log.setLevel(logging.INFO)
if sys.version_info >= (2,7):
unittest.main(verbosity=2)
else:
unittest.main()
| true
| true
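The _mangle_prefix_result helper above encodes a common testing trick: assert that unpredictable fields exist, then strip them so the remainder can be compared exactly. A minimal standalone sketch of the same idea, with hypothetical sample data:

VOLATILE = ("added", "last_modified", "total_addresses", "used_addresses", "free_addresses")

def strip_volatile(prefix):
    # The timestamps must be present, but their values cannot be predicted.
    for key in ("added", "last_modified"):
        assert key in prefix, "missing expected field: %s" % key
    return {k: v for k, v in prefix.items() if k not in VOLATILE}

sample = {"prefix": "1.3.3.0/24", "added": "2021-01-01", "last_modified": "2021-01-02",
          "total_addresses": 256, "used_addresses": 1, "free_addresses": 255}
print(strip_volatile(sample))   # {'prefix': '1.3.3.0/24'}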
|
1c45a18a21fd6fbd0b288b2271b398a0ed9f080d
| 12,540
|
py
|
Python
|
napari/_qt/widgets/qt_viewer_dock_widget.py
|
Mishrasubha/napari
|
c4d1038fc3ed30dc228949cbdedf12826ec2efc2
|
[
"BSD-3-Clause"
] | null | null | null |
napari/_qt/widgets/qt_viewer_dock_widget.py
|
Mishrasubha/napari
|
c4d1038fc3ed30dc228949cbdedf12826ec2efc2
|
[
"BSD-3-Clause"
] | 3
|
2020-11-14T08:35:18.000Z
|
2021-07-26T10:06:32.000Z
|
napari/_qt/widgets/qt_viewer_dock_widget.py
|
Mishrasubha/napari
|
c4d1038fc3ed30dc228949cbdedf12826ec2efc2
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings
from functools import reduce
from itertools import count
from operator import ior
from typing import List, Optional
from qtpy.QtCore import Qt
from qtpy.QtWidgets import (
QDockWidget,
QFrame,
QHBoxLayout,
QLabel,
QPushButton,
QSizePolicy,
QVBoxLayout,
QWidget,
)
from ...utils.translations import trans
from ..utils import combine_widgets, qt_signals_blocked
counter = count()
_sentinel = object()
_SHORTCUT_DEPRECATION_STRING = trans._(
'The shortcut parameter is deprecated since version 0.4.8, please use the action and shortcut manager APIs. The new action manager and shortcut API allow user configuration and localisation. (got {shortcut})',
shortcut="{shortcut}",
)
class QtViewerDockWidget(QDockWidget):
"""Wrap a QWidget in a QDockWidget and forward viewer events
Parameters
----------
qt_viewer : QtViewer
The QtViewer instance that this dock widget will belong to.
widget : QWidget
`widget` that will be added as QDockWidget's main widget.
name : str
Name of dock widget.
area : str
Side of the main window to which the new dock widget will be added.
Must be in {'left', 'right', 'top', 'bottom'}
allowed_areas : list[str], optional
Areas, relative to main window, that the widget is allowed dock.
Each item in list must be in {'left', 'right', 'top', 'bottom'}
By default, all areas are allowed.
shortcut : str, optional
Keyboard shortcut to appear in dropdown menu.
.. deprecated:: 0.4.8
The shortcut parameter is deprecated since version 0.4.8, please use
the action and shortcut manager APIs. The new action manager and
shortcut API allow user configuration and localisation.
add_vertical_stretch : bool, optional
Whether to add stretch to the bottom of vertical widgets (pushing
widgets up towards the top of the allotted area, instead of letting
them distribute across the vertical space). By default, True.
"""
def __init__(
self,
qt_viewer,
widget: QWidget,
*,
name: str = '',
area: str = 'right',
allowed_areas: Optional[List[str]] = None,
shortcut=_sentinel,
object_name: str = '',
add_vertical_stretch=True,
):
self.qt_viewer = qt_viewer
super().__init__(name)
self._parent = qt_viewer
self.name = name
areas = {
'left': Qt.LeftDockWidgetArea,
'right': Qt.RightDockWidgetArea,
'top': Qt.TopDockWidgetArea,
'bottom': Qt.BottomDockWidgetArea,
}
if area not in areas:
raise ValueError(
trans._(
'area argument must be in {areas}',
deferred=True,
areas=list(areas.keys()),
)
)
self.area = area
self.qt_area = areas[area]
if shortcut is not _sentinel:
warnings.warn(
_SHORTCUT_DEPRECATION_STRING.format(shortcut=shortcut),
FutureWarning,
stacklevel=2,
)
else:
shortcut = None
self._shortcut = shortcut
if allowed_areas:
if not isinstance(allowed_areas, (list, tuple)):
raise TypeError(
trans._(
'`allowed_areas` must be a list or tuple',
deferred=True,
)
)
if any(area not in areas for area in allowed_areas):
raise ValueError(
trans._(
'all allowed_areas argument must be in {areas}',
deferred=True,
areas=list(areas.keys()),
)
)
allowed_areas = reduce(ior, [areas[a] for a in allowed_areas])
else:
allowed_areas = Qt.AllDockWidgetAreas
self.setAllowedAreas(allowed_areas)
self.setMinimumHeight(50)
self.setMinimumWidth(50)
# FIXME:
self.setObjectName(object_name or name)
is_vertical = area in {'left', 'right'}
widget = combine_widgets(widget, vertical=is_vertical)
self.setWidget(widget)
if is_vertical and add_vertical_stretch:
self._maybe_add_vertical_stretch(widget)
self._features = self.features()
self.dockLocationChanged.connect(self._set_title_orientation)
# custom title bar
self.title = QtCustomTitleBar(self, title=self.name)
self.setTitleBarWidget(self.title)
self.visibilityChanged.connect(self._on_visibility_changed)
def destroyOnClose(self):
"""Destroys dock plugin dock widget when 'x' is clicked."""
self.qt_viewer.viewer.window.remove_dock_widget(self)
def _maybe_add_vertical_stretch(self, widget):
"""Add vertical stretch to the bottom of a vertical layout only
...if there is not already a widget that wants vertical space
(like a textedit or listwidget or something).
"""
exempt_policies = {
QSizePolicy.Expanding,
QSizePolicy.MinimumExpanding,
QSizePolicy.Ignored,
}
if widget.sizePolicy().verticalPolicy() in exempt_policies:
return
# not uncommon to see people shadow the builtin layout() method
# which breaks our ability to add vertical stretch...
try:
wlayout = widget.layout()
if wlayout is None:
return
except TypeError:
return
for i in range(wlayout.count()):
wdg = wlayout.itemAt(i).widget()
if (
wdg is not None
and wdg.sizePolicy().verticalPolicy() in exempt_policies
):
return
# not all widgets have addStretch...
if hasattr(wlayout, 'addStretch'):
wlayout.addStretch(next(counter))
@property
def shortcut(self):
warnings.warn(
_SHORTCUT_DEPRECATION_STRING,
FutureWarning,
stacklevel=2,
)
return self._shortcut
def setFeatures(self, features):
super().setFeatures(features)
self._features = self.features()
def keyPressEvent(self, event):
# if you subclass QtViewerDockWidget and override the keyPressEvent
# method, be sure to call super().keyPressEvent(event) at the end of
# your method to pass uncaught key-combinations to the viewer.
return self.qt_viewer.keyPressEvent(event)
def _set_title_orientation(self, area):
if area in (Qt.LeftDockWidgetArea, Qt.RightDockWidgetArea):
features = self._features
if features & self.DockWidgetVerticalTitleBar:
features = features ^ self.DockWidgetVerticalTitleBar
else:
features = self._features | self.DockWidgetVerticalTitleBar
self.setFeatures(features)
@property
def is_vertical(self):
if not self.isFloating():
par = self.parent()
if par and hasattr(par, 'dockWidgetArea'):
return par.dockWidgetArea(self) in (
Qt.LeftDockWidgetArea,
Qt.RightDockWidgetArea,
)
return self.size().height() > self.size().width()
def _on_visibility_changed(self, visible):
try:
actions = [
action.text()
for action in self.qt_viewer.viewer.window.plugins_menu.actions()
]
idx = actions.index(self.name)
current_action = (
self.qt_viewer.viewer.window.plugins_menu.actions()[idx]
)
current_action.setChecked(visible)
self.setVisible(visible)
except (AttributeError, ValueError):
# AttributeError: This error happens when the plugins menu is not yet built.
# ValueError: This error is when the action is from the windows menu.
pass
if not visible:
return
with qt_signals_blocked(self):
self.setTitleBarWidget(None)
if not self.isFloating():
self.title = QtCustomTitleBar(
self, title=self.name, vertical=not self.is_vertical
)
self.setTitleBarWidget(self.title)
def setWidget(self, widget):
widget._parent = self
super().setWidget(widget)
class QtCustomTitleBar(QLabel):
"""A widget to be used as the titleBar in the QtViewerDockWidget.
Keeps vertical size minimal, has a hand cursor and styles (in stylesheet)
for hover. Close and float buttons.
Parameters
----------
parent : QDockWidget
The QtViewerDockWidget to which this titlebar belongs
title : str
A string to put in the titlebar.
vertical : bool
Whether this titlebar is oriented vertically or not.
"""
def __init__(self, parent, title: str = '', vertical=False):
super().__init__(parent)
self.setObjectName("QtCustomTitleBar")
self.setProperty('vertical', str(vertical))
self.vertical = vertical
self.setToolTip(trans._('drag to move. double-click to float'))
line = QFrame(self)
line.setObjectName("QtCustomTitleBarLine")
add_close = False
try:
# if the plugins menu is already created, check to see if this is a plugin
# dock widget. If it is, then add the close button option to the title bar.
actions = [
action.text()
for action in self.parent().qt_viewer.viewer.window.plugins_menu.actions()
]
if self.parent().name in actions:
add_close = True
self.close_button = QPushButton(self)
self.close_button.setToolTip(trans._('close this panel'))
self.close_button.setObjectName("QTitleBarCloseButton")
self.close_button.setCursor(Qt.ArrowCursor)
self.close_button.clicked.connect(
lambda: self.parent().destroyOnClose()
)
else:
add_close = False
except AttributeError:
pass
self.hide_button = QPushButton(self)
self.hide_button.setToolTip(trans._('hide this panel'))
self.hide_button.setObjectName("QTitleBarHideButton")
self.hide_button.setCursor(Qt.ArrowCursor)
self.hide_button.clicked.connect(lambda: self.parent().close())
self.float_button = QPushButton(self)
self.float_button.setToolTip(trans._('float this panel'))
self.float_button.setObjectName("QTitleBarFloatButton")
self.float_button.setCursor(Qt.ArrowCursor)
self.float_button.clicked.connect(
lambda: self.parent().setFloating(not self.parent().isFloating())
)
self.title = QLabel(title, self)
self.title.setSizePolicy(
QSizePolicy(QSizePolicy.Policy.Maximum, QSizePolicy.Policy.Maximum)
)
if vertical:
layout = QVBoxLayout()
layout.setSpacing(4)
layout.setContentsMargins(0, 8, 0, 8)
line.setFixedWidth(1)
if add_close:
layout.addWidget(self.close_button, 0, Qt.AlignHCenter)
layout.addWidget(self.hide_button, 0, Qt.AlignHCenter)
layout.addWidget(self.float_button, 0, Qt.AlignHCenter)
layout.addWidget(line, 0, Qt.AlignHCenter)
self.title.hide()
else:
layout = QHBoxLayout()
layout.setSpacing(4)
layout.setContentsMargins(8, 1, 8, 0)
line.setFixedHeight(1)
if add_close:
layout.addWidget(self.close_button)
layout.addWidget(self.hide_button)
layout.addWidget(self.float_button)
layout.addWidget(line)
layout.addWidget(self.title)
self.setLayout(layout)
self.setCursor(Qt.OpenHandCursor)
def sizeHint(self):
# this seems to be the correct way to set the height of the titlebar
szh = super().sizeHint()
if self.vertical:
szh.setWidth(20)
else:
szh.setHeight(20)
return szh
| 35.12605
| 213
| 0.596332
|
import warnings
from functools import reduce
from itertools import count
from operator import ior
from typing import List, Optional
from qtpy.QtCore import Qt
from qtpy.QtWidgets import (
QDockWidget,
QFrame,
QHBoxLayout,
QLabel,
QPushButton,
QSizePolicy,
QVBoxLayout,
QWidget,
)
from ...utils.translations import trans
from ..utils import combine_widgets, qt_signals_blocked
counter = count()
_sentinel = object()
_SHORTCUT_DEPRECATION_STRING = trans._(
'The shortcut parameter is deprecated since version 0.4.8, please use the action and shortcut manager APIs. The new action manager and shortcut API allow user configuration and localisation. (got {shortcut})',
shortcut="{shortcut}",
)
class QtViewerDockWidget(QDockWidget):
def __init__(
self,
qt_viewer,
widget: QWidget,
*,
name: str = '',
area: str = 'right',
allowed_areas: Optional[List[str]] = None,
shortcut=_sentinel,
object_name: str = '',
add_vertical_stretch=True,
):
self.qt_viewer = qt_viewer
super().__init__(name)
self._parent = qt_viewer
self.name = name
areas = {
'left': Qt.LeftDockWidgetArea,
'right': Qt.RightDockWidgetArea,
'top': Qt.TopDockWidgetArea,
'bottom': Qt.BottomDockWidgetArea,
}
if area not in areas:
raise ValueError(
trans._(
'area argument must be in {areas}',
deferred=True,
areas=list(areas.keys()),
)
)
self.area = area
self.qt_area = areas[area]
if shortcut is not _sentinel:
warnings.warn(
_SHORTCUT_DEPRECATION_STRING.format(shortcut=shortcut),
FutureWarning,
stacklevel=2,
)
else:
shortcut = None
self._shortcut = shortcut
if allowed_areas:
if not isinstance(allowed_areas, (list, tuple)):
raise TypeError(
trans._(
'`allowed_areas` must be a list or tuple',
deferred=True,
)
)
if any(area not in areas for area in allowed_areas):
raise ValueError(
trans._(
'all allowed_areas argument must be in {areas}',
deferred=True,
areas=list(areas.keys()),
)
)
allowed_areas = reduce(ior, [areas[a] for a in allowed_areas])
else:
allowed_areas = Qt.AllDockWidgetAreas
self.setAllowedAreas(allowed_areas)
self.setMinimumHeight(50)
self.setMinimumWidth(50)
self.setObjectName(object_name or name)
is_vertical = area in {'left', 'right'}
widget = combine_widgets(widget, vertical=is_vertical)
self.setWidget(widget)
if is_vertical and add_vertical_stretch:
self._maybe_add_vertical_stretch(widget)
self._features = self.features()
self.dockLocationChanged.connect(self._set_title_orientation)
self.title = QtCustomTitleBar(self, title=self.name)
self.setTitleBarWidget(self.title)
self.visibilityChanged.connect(self._on_visibility_changed)
def destroyOnClose(self):
self.qt_viewer.viewer.window.remove_dock_widget(self)
def _maybe_add_vertical_stretch(self, widget):
exempt_policies = {
QSizePolicy.Expanding,
QSizePolicy.MinimumExpanding,
QSizePolicy.Ignored,
}
if widget.sizePolicy().verticalPolicy() in exempt_policies:
return
try:
wlayout = widget.layout()
if wlayout is None:
return
except TypeError:
return
for i in range(wlayout.count()):
wdg = wlayout.itemAt(i).widget()
if (
wdg is not None
and wdg.sizePolicy().verticalPolicy() in exempt_policies
):
return
if hasattr(wlayout, 'addStretch'):
wlayout.addStretch(next(counter))
@property
def shortcut(self):
warnings.warn(
_SHORTCUT_DEPRECATION_STRING,
FutureWarning,
stacklevel=2,
)
return self._shortcut
def setFeatures(self, features):
super().setFeatures(features)
self._features = self.features()
def keyPressEvent(self, event):
return self.qt_viewer.keyPressEvent(event)
def _set_title_orientation(self, area):
if area in (Qt.LeftDockWidgetArea, Qt.RightDockWidgetArea):
features = self._features
if features & self.DockWidgetVerticalTitleBar:
features = features ^ self.DockWidgetVerticalTitleBar
else:
features = self._features | self.DockWidgetVerticalTitleBar
self.setFeatures(features)
@property
def is_vertical(self):
if not self.isFloating():
par = self.parent()
if par and hasattr(par, 'dockWidgetArea'):
return par.dockWidgetArea(self) in (
Qt.LeftDockWidgetArea,
Qt.RightDockWidgetArea,
)
return self.size().height() > self.size().width()
def _on_visibility_changed(self, visible):
try:
actions = [
action.text()
for action in self.qt_viewer.viewer.window.plugins_menu.actions()
]
idx = actions.index(self.name)
current_action = (
self.qt_viewer.viewer.window.plugins_menu.actions()[idx]
)
current_action.setChecked(visible)
self.setVisible(visible)
except (AttributeError, ValueError):
pass
if not visible:
return
with qt_signals_blocked(self):
self.setTitleBarWidget(None)
if not self.isFloating():
self.title = QtCustomTitleBar(
self, title=self.name, vertical=not self.is_vertical
)
self.setTitleBarWidget(self.title)
def setWidget(self, widget):
widget._parent = self
super().setWidget(widget)
class QtCustomTitleBar(QLabel):
def __init__(self, parent, title: str = '', vertical=False):
super().__init__(parent)
self.setObjectName("QtCustomTitleBar")
self.setProperty('vertical', str(vertical))
self.vertical = vertical
self.setToolTip(trans._('drag to move. double-click to float'))
line = QFrame(self)
line.setObjectName("QtCustomTitleBarLine")
add_close = False
try:
actions = [
action.text()
for action in self.parent().qt_viewer.viewer.window.plugins_menu.actions()
]
if self.parent().name in actions:
add_close = True
self.close_button = QPushButton(self)
self.close_button.setToolTip(trans._('close this panel'))
self.close_button.setObjectName("QTitleBarCloseButton")
self.close_button.setCursor(Qt.ArrowCursor)
self.close_button.clicked.connect(
lambda: self.parent().destroyOnClose()
)
else:
add_close = False
except AttributeError:
pass
self.hide_button = QPushButton(self)
self.hide_button.setToolTip(trans._('hide this panel'))
self.hide_button.setObjectName("QTitleBarHideButton")
self.hide_button.setCursor(Qt.ArrowCursor)
self.hide_button.clicked.connect(lambda: self.parent().close())
self.float_button = QPushButton(self)
self.float_button.setToolTip(trans._('float this panel'))
self.float_button.setObjectName("QTitleBarFloatButton")
self.float_button.setCursor(Qt.ArrowCursor)
self.float_button.clicked.connect(
lambda: self.parent().setFloating(not self.parent().isFloating())
)
self.title = QLabel(title, self)
self.title.setSizePolicy(
QSizePolicy(QSizePolicy.Policy.Maximum, QSizePolicy.Policy.Maximum)
)
if vertical:
layout = QVBoxLayout()
layout.setSpacing(4)
layout.setContentsMargins(0, 8, 0, 8)
line.setFixedWidth(1)
if add_close:
layout.addWidget(self.close_button, 0, Qt.AlignHCenter)
layout.addWidget(self.hide_button, 0, Qt.AlignHCenter)
layout.addWidget(self.float_button, 0, Qt.AlignHCenter)
layout.addWidget(line, 0, Qt.AlignHCenter)
self.title.hide()
else:
layout = QHBoxLayout()
layout.setSpacing(4)
layout.setContentsMargins(8, 1, 8, 0)
line.setFixedHeight(1)
if add_close:
layout.addWidget(self.close_button)
layout.addWidget(self.hide_button)
layout.addWidget(self.float_button)
layout.addWidget(line)
layout.addWidget(self.title)
self.setLayout(layout)
self.setCursor(Qt.OpenHandCursor)
def sizeHint(self):
szh = super().sizeHint()
if self.vertical:
szh.setWidth(20)
else:
szh.setHeight(20)
return szh
| true
| true
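QtViewerDockWidget above turns a list of area names into a single Qt flag value with reduce(ior, ...). The same pattern, demonstrated without Qt using a stand-in IntFlag (the Area enum and combine function here are hypothetical; Qt's real constants are the Qt.*DockWidgetArea values used above):

from enum import IntFlag
from functools import reduce
from operator import ior

class Area(IntFlag):
    LEFT = 1
    RIGHT = 2
    TOP = 4
    BOTTOM = 8

AREAS = {"left": Area.LEFT, "right": Area.RIGHT, "top": Area.TOP, "bottom": Area.BOTTOM}

def combine(names):
    if not names:
        return Area(0)
    unknown = [n for n in names if n not in AREAS]
    if unknown:
        raise ValueError("unknown areas: %s" % unknown)
    # OR all the selected flags together into one value.
    return reduce(ior, (AREAS[n] for n in names))

print(combine(["left", "right"]))   # LEFT|RIGHT (exact repr varies by Python version)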
|
1c45a1a090a13d50476e4eb2e61b77dfeabe3a7e
| 22,311
|
py
|
Python
|
test/functional/importmulti.py
|
DeepPool/test
|
c6d99f019667ea4bf51139adff2a98d46c0015ed
|
[
"MIT"
] | null | null | null |
test/functional/importmulti.py
|
DeepPool/test
|
c6d99f019667ea4bf51139adff2a98d46c0015ed
|
[
"MIT"
] | null | null | null |
test/functional/importmulti.py
|
DeepPool/test
|
c6d99f019667ea4bf51139adff2a98d46c0015ed
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC."""
from test_framework.test_framework import DietBitcoinTestFramework
from test_framework.util import *
class ImportMultiTest (DietBitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.setup_nodes()
def run_test (self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
# keyword definition
PRIV_KEY = 'privkey'
PUB_KEY = 'pubkey'
ADDRESS_KEY = 'address'
SCRIPT_KEY = 'script'
node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
node0_address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
node0_address3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
#Check only one address
assert_equal(node0_address1['ismine'], True)
#Node 1 sync test
assert_equal(self.nodes[1].getblockcount(),1)
#Address Test - before import
address_info = self.nodes[1].validateaddress(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# DietBitcoin Address
self.log.info("Should import an address")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_address = address['address']
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": "not valid address",
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Invalid address')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + !internal
self.log.info("Should not import a scriptPubKey without internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Public key + !Internal
self.log.info("Should import an address with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + !internal
self.log.info("Should not import a scriptPubKey without internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
self.log.info("Should not import an address with private key if is already imported")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -4)
assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script')
# Address + Private key + watchonly
self.log.info("Should not import an address with private key and with watchonly")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Private key + !internal
self.log.info("Should not import a scriptPubKey without internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# P2SH address
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['isscript'], True)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript']
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should not import a scriptPubKey with internal and with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should not import an address with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should not import a scriptPubKey with internal and with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": watchonly_address,
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.start_nodes()
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_message(JSONRPCException, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
}])
assert_raises_message(JSONRPCException, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "",
}])
if __name__ == '__main__':
ImportMultiTest ().main ()
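# Illustrative sketch (not part of the test): every importmulti call above
# passes a list of request entries of the same shape. The hypothetical helper
# below only assembles that JSON-serialisable payload; the string values are
# placeholders and nothing here talks to a node.
def make_importmulti_entry(script_pub_key, timestamp="now", pubkeys=None,
                           keys=None, redeemscript=None, internal=None,
                           watchonly=None):
    # script_pub_key is either {"address": "..."} or a raw hex script string.
    entry = {"scriptPubKey": script_pub_key, "timestamp": timestamp}
    if pubkeys is not None:
        entry["pubkeys"] = pubkeys
    if keys is not None:
        entry["keys"] = keys
    if redeemscript is not None:
        entry["redeemscript"] = redeemscript
    if internal is not None:
        entry["internal"] = internal
    if watchonly is not None:
        entry["watchonly"] = watchonly
    return entry
# Rebuild the "private keys + watchonly" combination, which the node is
# expected to reject with error code -8:
entry = make_importmulti_entry({"address": "<p2sh address>"},
                               redeemscript="<redeem script hex>",
                               keys=["<wif 1>", "<wif 2>"],
                               watchonly=True)
assert "keys" in entry and entry["watchonly"] is True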
| 48.084052
| 137
| 0.631841
|
from test_framework.test_framework import DietBitcoinTestFramework
from test_framework.util import *
class ImportMultiTest (DietBitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.setup_nodes()
def run_test (self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
PRIV_KEY = 'privkey'
PUB_KEY = 'pubkey'
ADDRESS_KEY = 'address'
SCRIPT_KEY = 'script'
node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
node0_address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
node0_address3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
assert_equal(node0_address1['ismine'], True)
assert_equal(self.nodes[1].getblockcount(),1)
address_info = self.nodes[1].validateaddress(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
self.log.info("Should import an address")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_address = address['address']
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": "not valid address",
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Invalid address')
self.log.info("Should import a scriptPubKey with internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
self.log.info("Should not import a scriptPubKey without internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
self.log.info("Should import an address with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
self.log.info("Should import a scriptPubKey with internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
self.log.info("Should not import a scriptPubKey without internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
self.log.info("Should import an address with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
self.log.info("Should not import an address with private key if is already imported")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -4)
assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script')
self.log.info("Should not import an address with private key and with watchonly")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
self.log.info("Should import a scriptPubKey with internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
self.log.info("Should not import a scriptPubKey without internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['isscript'], True)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript']
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
self.log.info("Should not import an address with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
self.log.info("Should not import a scriptPubKey with internal and with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
self.log.info("Should not import an address with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
self.log.info("Should not import a scriptPubKey with internal and with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": watchonly_address,
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_timestamp = timestamp
self.stop_nodes()
self.start_nodes()
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], watchonly_timestamp)
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_message(JSONRPCException, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
}])
assert_raises_message(JSONRPCException, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "",
}])
if __name__ == '__main__':
ImportMultiTest ().main ()
| true
| true
|
1c45a2de98069c080d2cca90e61524a21453a51c
| 1,957
|
py
|
Python
|
examples/get-started/play_mp3/example_test.py
|
kigor302/esp-adf
|
7feaf6c4b23d2a06850f96c302eebb814516239c
|
[
"MIT-0"
] | 12
|
2021-04-15T14:15:27.000Z
|
2022-01-17T03:40:35.000Z
|
examples/get-started/play_mp3/example_test.py
|
Tianxiaomo/esp-adf
|
fae539c3035b2c041f49c5b01cdc4c99038595b0
|
[
"MIT-0"
] | 2
|
2021-04-03T22:00:11.000Z
|
2021-10-03T18:27:39.000Z
|
examples/get-started/play_mp3/example_test.py
|
Tianxiaomo/esp-adf
|
fae539c3035b2c041f49c5b01cdc4c99038595b0
|
[
"MIT-0"
] | 4
|
2021-06-22T10:08:07.000Z
|
2021-11-17T23:21:04.000Z
|
import os
import sys
# This is a test case written with tiny-test-fw.
# To run test cases outside tiny-test-fw,
# we need to set the environment variable `TEST_FW_PATH`,
# then get and insert `TEST_FW_PATH` into sys.path before importing the FW module.
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
auto_test_path = os.getenv("AUTO_TEST_PATH")
if auto_test_path and auto_test_path not in sys.path:
sys.path.insert(0, auto_test_path)
import TinyFW
import NormalProject
from NormalProject.ProjectDUT import ProDUT
from NormalProject.ProjectApp import Example
from BasicUtility.RecordAudioFile import AudioRecord
import ADFExampleTest
@NormalProject.example_test(env_tag="Example_AUDIO_PLAY", ignore=True)
@ADFExampleTest.play_test(os.path.join(os.getenv("ADF_PATH"), "examples/get-started/play_mp3/main/adf_music.mp3"),
os.path.join(os.getenv("ADF_PATH"), "examples/get-started/play_mp3/main/dest.wav"))
def example_test_play_mp3(env, extra_data):
dut1 = env.get_dut("play_mp3", "examples/get-started/play_mp3", pro_path=os.getenv("ADF_PATH"))
# start test
dut1.start_app()
dut1.reset()
dut1.expect("[ 1 ] Start audio codec chip", timeout=30)
dut1.expect("[ 2 ] Create audio pipeline, add all elements to pipeline, and subscribe pipeline event")
dut1.expect("[2.1] Create mp3 decoder to decode mp3 file and set custom read callback")
dut1.expect("[2.2] Create i2s stream to write data to codec chip")
dut1.expect("[2.3] Register all elements to audio pipeline")
dut1.expect("[2.4] Link it together [mp3_music_read_cb]-->mp3_decoder-->i2s_stream-->[codec_chip]")
dut1.expect("[ 3 ] Setup event listener")
dut1.expect("[3.1] Listening event from all elements of pipeline")
dut1.expect("[ 4 ] Start audio_pipeline")
dut1.expect("[ 5 ] Stop audio_pipeline", timeout=30)
if __name__ == '__main__':
example_test_play_mp3()
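# Illustrative sketch (not part of the test, no TinyFW dependency): the
# run_test body above is essentially an ordered log match. The standalone
# helper below captures what the dut1.expect() calls check, namely that each
# marker appears in the captured console output in order.
def check_markers_in_order(console_output, markers):
    position = 0
    for marker in markers:
        position = console_output.find(marker, position)
        if position < 0:
            raise AssertionError("marker not found in order: %r" % marker)
        position += len(marker)
sample_output = ("[ 1 ] Start audio codec chip\n"
                 "[ 2 ] Create audio pipeline, add all elements to pipeline, "
                 "and subscribe pipeline event\n")
check_markers_in_order(sample_output,
                       ["[ 1 ] Start audio codec chip",
                        "[ 2 ] Create audio pipeline"])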
| 39.938776
| 114
| 0.748084
|
import os
import sys
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
auto_test_path = os.getenv("AUTO_TEST_PATH")
if auto_test_path and auto_test_path not in sys.path:
sys.path.insert(0, auto_test_path)
import TinyFW
import NormalProject
from NormalProject.ProjectDUT import ProDUT
from NormalProject.ProjectApp import Example
from BasicUtility.RecordAudioFile import AudioRecord
import ADFExampleTest
@NormalProject.example_test(env_tag="Example_AUDIO_PLAY", ignore=True)
@ADFExampleTest.play_test(os.path.join(os.getenv("ADF_PATH"), "examples/get-started/play_mp3/main/adf_music.mp3"),
os.path.join(os.getenv("ADF_PATH"), "examples/get-started/play_mp3/main/dest.wav"))
def example_test_play_mp3(env, extra_data):
dut1 = env.get_dut("play_mp3", "examples/get-started/play_mp3", pro_path=os.getenv("ADF_PATH"))
dut1.start_app()
dut1.reset()
dut1.expect("[ 1 ] Start audio codec chip", timeout=30)
dut1.expect("[ 2 ] Create audio pipeline, add all elements to pipeline, and subscribe pipeline event")
dut1.expect("[2.1] Create mp3 decoder to decode mp3 file and set custom read callback")
dut1.expect("[2.2] Create i2s stream to write data to codec chip")
dut1.expect("[2.3] Register all elements to audio pipeline")
dut1.expect("[2.4] Link it together [mp3_music_read_cb]-->mp3_decoder-->i2s_stream-->[codec_chip]")
dut1.expect("[ 3 ] Setup event listener")
dut1.expect("[3.1] Listening event from all elements of pipeline")
dut1.expect("[ 4 ] Start audio_pipeline")
dut1.expect("[ 5 ] Stop audio_pipeline", timeout=30)
if __name__ == '__main__':
example_test_play_mp3()
| true
| true
|
1c45a4aba3bdd23727ad80971a816dcd80684560
| 2,390
|
py
|
Python
|
lib/util.py
|
ks-tec/Hydroponic
|
d9347f82698841d85c0a45908e8671b36c50ffce
|
[
"MIT"
] | 1
|
2021-05-27T13:32:45.000Z
|
2021-05-27T13:32:45.000Z
|
lib/util.py
|
ks-tec/Hydroponic
|
d9347f82698841d85c0a45908e8671b36c50ffce
|
[
"MIT"
] | null | null | null |
lib/util.py
|
ks-tec/Hydroponic
|
d9347f82698841d85c0a45908e8671b36c50ffce
|
[
"MIT"
] | null | null | null |
# MicroPython utility methods.
#
# Copyright (c) 2020 ks-tec
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
def strtobool(value):
"""
  This method converts a string to a bool.
  Return False for the keywords "false", "f", "no", "n", "off", "0" or the value 0.
  Return True for the keywords "true", "t", "yes", "y", "on", "1" or the value 1.
  Any other value raises a ValueError.
  Args:
    value : string value, or the integer 0 or 1
  Return:
    False for the keywords "false", "f", "no", "n", "off", "0" or the value 0.
    True for the keywords "true", "t", "yes", "y", "on", "1" or the value 1.
  Raises:
    TypeError : The type of the parameter is neither string nor 0 or 1.
    ValueError : The parameter value can not be interpreted as a bool value.
"""
  if type(value) is not str and value not in [0, 1]:
    raise TypeError("The type of parameter value must be string or 0/1.")
  ret_value = None
  # Check the integer values first: calling .lower() on 0 or 1 would raise
  # an AttributeError.
  if value in [0, 1]:
    ret_value = bool(value)
  elif value.lower() in ["false", "f", "no", "n", "off", "0"]:
    ret_value = False
  elif value.lower() in ["true", "t", "yes", "y", "on", "1"]:
    ret_value = True
  else:
    raise ValueError("not supported bool value.")
return ret_value
def conv_temperature_unit(value, unit):
"""
"""
if type(value) is str and value.upper() in ["C", "F"]:
raise TypeError("the type of paramter unit must be string.")
if unit.upper() == "C":
pass
elif unit.upper() == "F":
value = value * 1.8 + 32
else:
raise ValueError("not supported temperature unit.")
return value
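# Illustrative usage of the two helpers above (not part of the module),
# exercising the corrected argument checks.
assert strtobool("Yes") is True
assert strtobool("off") is False
assert strtobool(1) is True
assert conv_temperature_unit(100, "C") == 100   # Celsius passes through
assert conv_temperature_unit(0, "f") == 32      # 0 degC -> 32 degF, case-insensitive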
| 35.147059
| 82
| 0.684519
|
def strtobool(value):
  if type(value) is not str and value not in [0, 1]:
    raise TypeError("The type of parameter value must be string or 0/1.")
  ret_value = None
  if value in [0, 1]:
    ret_value = bool(value)
  elif value.lower() in ["false", "f", "no", "n", "off", "0"]:
    ret_value = False
  elif value.lower() in ["true", "t", "yes", "y", "on", "1"]:
    ret_value = True
  else:
    raise ValueError("not supported bool value.")
return ret_value
def conv_temperature_unit(value, unit):
  if type(unit) is not str:
    raise TypeError("the type of parameter unit must be string.")
if unit.upper() == "C":
pass
elif unit.upper() == "F":
value = value * 1.8 + 32
else:
raise ValueError("not supported temperature unit.")
return value
| true
| true
|
1c45a56482a78277a224da1cf5efdb87161f30b9
| 626
|
py
|
Python
|
manage.py
|
agamgn/django-Tourism
|
ee8fae54981d135cbd7ddaf9131eb77ea7b2fb8a
|
[
"MIT"
] | 9
|
2019-06-30T06:34:22.000Z
|
2021-11-09T17:21:16.000Z
|
manage.py
|
agamgn/django-Tourism
|
ee8fae54981d135cbd7ddaf9131eb77ea7b2fb8a
|
[
"MIT"
] | 14
|
2019-12-22T02:04:18.000Z
|
2022-03-11T23:44:38.000Z
|
manage.py
|
agamgn/django-Tourism
|
ee8fae54981d135cbd7ddaf9131eb77ea7b2fb8a
|
[
"MIT"
] | 3
|
2019-06-30T06:35:57.000Z
|
2019-12-18T03:42:43.000Z
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'treval.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
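# Illustrative sketch (not part of manage.py): the same entry point can be
# driven programmatically, assuming Django is installed and the
# treval.settings module is importable.
def run_system_checks():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'treval.settings')
    from django.core.management import execute_from_command_line
    execute_from_command_line(['manage.py', 'check'])  # Django's built-in system checks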
| 28.454545
| 73
| 0.682109
|
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'treval.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true
| true
|
1c45a58e90e653de1bb431003c78566d25a7d67b
| 57,074
|
py
|
Python
|
improver/ensemble_copula_coupling/ensemble_copula_coupling.py
|
VictoriaLouiseS/improver
|
86470bff973e21fbd5f24e26047871ad3bc2f3db
|
[
"BSD-3-Clause"
] | null | null | null |
improver/ensemble_copula_coupling/ensemble_copula_coupling.py
|
VictoriaLouiseS/improver
|
86470bff973e21fbd5f24e26047871ad3bc2f3db
|
[
"BSD-3-Clause"
] | 3
|
2020-04-25T12:55:42.000Z
|
2020-07-23T11:50:46.000Z
|
improver/ensemble_copula_coupling/ensemble_copula_coupling.py
|
Kat-90/improver
|
a5c31be3430df429ae38e7c16e267fcbc2af1858
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This module defines the plugins required for Ensemble Copula Coupling.
"""
import warnings
import iris
import numpy as np
from iris.exceptions import CoordinateNotFoundError, InvalidCubeError
from scipy import stats
from improver import BasePlugin
from improver.calibration.utilities import convert_cube_data_to_2d
from improver.ensemble_copula_coupling.utilities import (
choose_set_of_percentiles,
concatenate_2d_array_with_2d_array_endpoints,
create_cube_with_percentiles,
get_bounds_of_distribution,
insert_lower_and_upper_endpoint_to_1d_array,
restore_non_percentile_dimensions,
)
from improver.metadata.probabilistic import (
extract_diagnostic_name,
find_percentile_coordinate,
find_threshold_coordinate,
)
from improver.utilities.cube_checker import (
check_cube_coordinates,
check_for_x_and_y_axes,
)
from improver.utilities.cube_manipulation import (
MergeCubes,
enforce_coordinate_ordering,
get_dim_coord_names,
)
from improver.utilities.indexing_operations import choose
class RebadgePercentilesAsRealizations(BasePlugin):
"""
Class to rebadge percentiles as ensemble realizations.
This will allow the quantisation to percentiles to be completed, without
a subsequent EnsembleReordering step to restore spatial correlations,
if required.
"""
@staticmethod
def process(cube, ensemble_realization_numbers=None):
"""
Rebadge percentiles as ensemble realizations. The ensemble
realization numbering will depend upon the number of percentiles in
the input cube i.e. 0, 1, 2, 3, ..., n-1, if there are n percentiles.
Args:
cube (iris.cube.Cube):
Cube containing a percentile coordinate, which will be
rebadged as ensemble realization.
ensemble_realization_numbers (numpy.ndarray):
An array containing the ensemble numbers required in the output
realization coordinate. Default is None, meaning the
realization coordinate will be numbered 0, 1, 2 ... n-1 for n
percentiles on the input cube.
Raises:
InvalidCubeError:
If the realization coordinate already exists on the cube.
"""
percentile_coord_name = find_percentile_coordinate(cube).name()
if ensemble_realization_numbers is None:
ensemble_realization_numbers = np.arange(
len(cube.coord(percentile_coord_name).points), dtype=np.int32
)
cube.coord(percentile_coord_name).points = ensemble_realization_numbers
# we can't rebadge if the realization coordinate already exists:
try:
realization_coord = cube.coord("realization")
except CoordinateNotFoundError:
realization_coord = None
if realization_coord:
raise InvalidCubeError(
"Cannot rebadge percentile coordinate to realization "
"coordinate because a realization coordinate already exists."
)
cube.coord(percentile_coord_name).rename("realization")
cube.coord("realization").units = "1"
cube.coord("realization").points = cube.coord("realization").points.astype(
np.int32
)
return cube
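# Illustrative usage sketch (not part of the module; assumes iris is
# available). The coordinate is named "percentile" so that
# find_percentile_coordinate() recognises it; all values are assumed.
from iris.coords import DimCoord
_perc = DimCoord(np.array([25.0, 50.0, 75.0], dtype=np.float32),
                 long_name="percentile", units="%")
_lat = DimCoord(np.arange(2, dtype=np.float32),
                standard_name="latitude", units="degrees")
_lon = DimCoord(np.arange(2, dtype=np.float32),
                standard_name="longitude", units="degrees")
_cube = iris.cube.Cube(np.zeros((3, 2, 2), dtype=np.float32),
                       long_name="air_temperature", units="K",
                       dim_coords_and_dims=[(_perc, 0), (_lat, 1), (_lon, 2)])
_rebadged = RebadgePercentilesAsRealizations().process(_cube)
# _rebadged.coord("realization").points -> array([0, 1, 2], dtype=int32)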
class ResamplePercentiles(BasePlugin):
"""
Class for resampling percentiles from an existing set of percentiles.
In combination with the Ensemble Reordering plugin, this is a variant of
Ensemble Copula Coupling.
This class includes the ability to linearly interpolate from an
input set of percentiles to a different output set of percentiles.
"""
def __init__(self, ecc_bounds_warning=False):
"""
Initialise the class.
Args:
ecc_bounds_warning (bool):
If true and ECC bounds are exceeded by the percentile values,
a warning will be generated rather than an exception.
Default value is FALSE.
"""
self.ecc_bounds_warning = ecc_bounds_warning
def _add_bounds_to_percentiles_and_forecast_at_percentiles(
self, percentiles, forecast_at_percentiles, bounds_pairing
):
"""
Padding of the lower and upper bounds of the percentiles for a
given phenomenon, and padding of forecast values using the
constant lower and upper bounds.
Args:
percentiles (numpy.ndarray):
Array of percentiles from a Cumulative Distribution Function.
forecast_at_percentiles (numpy.ndarray):
Array containing the underlying forecast values at each
percentile.
bounds_pairing (tuple):
Lower and upper bound to be used as the ends of the
cumulative distribution function.
Raises:
ValueError: If the percentile points are outside the ECC bounds
and self.ecc_bounds_warning is False.
ValueError: If the percentiles are not in ascending order.
Warns:
Warning: If the percentile points are outside the ECC bounds
and self.ecc_bounds_warning is True.
"""
lower_bound, upper_bound = bounds_pairing
percentiles = insert_lower_and_upper_endpoint_to_1d_array(percentiles, 0, 100)
forecast_at_percentiles_with_endpoints = concatenate_2d_array_with_2d_array_endpoints(
forecast_at_percentiles, lower_bound, upper_bound
)
if np.any(np.diff(forecast_at_percentiles_with_endpoints) < 0):
out_of_bounds_vals = forecast_at_percentiles_with_endpoints[
np.where(np.diff(forecast_at_percentiles_with_endpoints) < 0)
]
msg = (
"Forecast values exist that fall outside the expected extrema "
"values that are defined as bounds in "
"ensemble_copula_coupling/constants.py. "
"Applying the extrema values as end points to the distribution "
"would result in non-monotonically increasing values. "
"The defined extremes are {}, whilst the following forecast "
"values exist outside this range: {}.".format(
bounds_pairing, out_of_bounds_vals
)
)
if self.ecc_bounds_warning:
warn_msg = msg + (
" The percentile values that have "
"exceeded the existing bounds will be used "
"as new bounds."
)
warnings.warn(warn_msg)
if upper_bound < forecast_at_percentiles_with_endpoints.max():
upper_bound = forecast_at_percentiles_with_endpoints.max()
if lower_bound > forecast_at_percentiles_with_endpoints.min():
lower_bound = forecast_at_percentiles_with_endpoints.min()
forecast_at_percentiles_with_endpoints = concatenate_2d_array_with_2d_array_endpoints(
forecast_at_percentiles, lower_bound, upper_bound
)
else:
raise ValueError(msg)
if np.any(np.diff(percentiles) < 0):
msg = (
"The percentiles must be in ascending order."
"The input percentiles were {}".format(percentiles)
)
raise ValueError(msg)
return percentiles, forecast_at_percentiles_with_endpoints
def _interpolate_percentiles(
self,
forecast_at_percentiles,
desired_percentiles,
bounds_pairing,
percentile_coord_name,
):
"""
Interpolation of forecast for a set of percentiles from an initial
set of percentiles to a new set of percentiles. This is constructed
by linearly interpolating between the original set of percentiles
to a new set of percentiles.
Args:
forecast_at_percentiles (iris.cube.Cube):
Cube containing a percentile coordinate.
desired_percentiles (numpy.ndarray):
Array of the desired percentiles.
bounds_pairing (tuple):
Lower and upper bound to be used as the ends of the
cumulative distribution function.
percentile_coord_name (str):
Name of required percentile coordinate.
Returns:
iris.cube.Cube:
Cube containing values for the required diagnostic e.g.
air_temperature at the required percentiles.
"""
original_percentiles = forecast_at_percentiles.coord(
percentile_coord_name
).points
# Ensure that the percentile dimension is first, so that the
# conversion to a 2d array produces data in the desired order.
enforce_coordinate_ordering(forecast_at_percentiles, percentile_coord_name)
forecast_at_reshaped_percentiles = convert_cube_data_to_2d(
forecast_at_percentiles, coord=percentile_coord_name
)
(
original_percentiles,
forecast_at_reshaped_percentiles,
) = self._add_bounds_to_percentiles_and_forecast_at_percentiles(
original_percentiles, forecast_at_reshaped_percentiles, bounds_pairing
)
forecast_at_interpolated_percentiles = np.empty(
(len(desired_percentiles), forecast_at_reshaped_percentiles.shape[0]),
dtype=np.float32,
)
for index in range(forecast_at_reshaped_percentiles.shape[0]):
forecast_at_interpolated_percentiles[:, index] = np.interp(
desired_percentiles,
original_percentiles,
forecast_at_reshaped_percentiles[index, :],
)
# Reshape forecast_at_percentiles, so the percentiles dimension is
# first, and any other dimension coordinates follow.
forecast_at_percentiles_data = restore_non_percentile_dimensions(
forecast_at_interpolated_percentiles,
next(forecast_at_percentiles.slices_over(percentile_coord_name)),
len(desired_percentiles),
)
template_cube = next(forecast_at_percentiles.slices_over(percentile_coord_name))
template_cube.remove_coord(percentile_coord_name)
percentile_cube = create_cube_with_percentiles(
desired_percentiles, template_cube, forecast_at_percentiles_data,
)
return percentile_cube
def process(
self, forecast_at_percentiles, no_of_percentiles=None, sampling="quantile"
):
"""
1. Creates a list of percentiles.
2. Accesses the lower and upper bound pair of the forecast values,
in order to specify lower and upper bounds for the percentiles.
3. Interpolate the percentile coordinate into an alternative
set of percentiles using linear interpolation.
Args:
forecast_at_percentiles (iris.cube.Cube):
Cube expected to contain a percentile coordinate.
no_of_percentiles (int or None):
Number of percentiles
If None, the number of percentiles within the input
forecast_at_percentiles cube is used as the
number of percentiles.
sampling (str):
Type of sampling of the distribution to produce a set of
percentiles e.g. quantile or random.
Accepted options for sampling are:
* Quantile: A regular set of equally-spaced percentiles aimed
at dividing a Cumulative Distribution Function into
blocks of equal probability.
* Random: A random set of ordered percentiles.
Returns:
iris.cube.Cube:
Cube with forecast values at the desired set of percentiles.
The percentile coordinate is always the zeroth dimension.
"""
percentile_coord = find_percentile_coordinate(forecast_at_percentiles)
if no_of_percentiles is None:
no_of_percentiles = len(
forecast_at_percentiles.coord(percentile_coord).points
)
percentiles = choose_set_of_percentiles(no_of_percentiles, sampling=sampling)
cube_units = forecast_at_percentiles.units
bounds_pairing = get_bounds_of_distribution(
forecast_at_percentiles.name(), cube_units
)
forecast_at_percentiles = self._interpolate_percentiles(
forecast_at_percentiles,
percentiles,
bounds_pairing,
percentile_coord.name(),
)
return forecast_at_percentiles
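# Standalone numpy sketch (not part of the module) of the numerical core of
# _interpolate_percentiles: once the 0th/100th endpoints and the ECC bounds
# have been added, resampling is plain linear interpolation. All values
# below are assumed, for a single grid point.
_source_percentiles = np.array([0.0, 25.0, 50.0, 75.0, 100.0])    # padded with endpoints
_forecast_values = np.array([-40.0, 278.0, 280.0, 282.0, 330.0])  # padded with bounds
_desired_percentiles = np.array([20.0, 40.0, 60.0, 80.0])
_resampled = np.interp(_desired_percentiles, _source_percentiles, _forecast_values)
# _resampled -> forecast values at the new percentiles for this grid point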
class ConvertProbabilitiesToPercentiles(BasePlugin):
"""
Class for generating percentiles from probabilities.
In combination with the Ensemble Reordering plugin, this is a variant
    of Ensemble Copula Coupling.
This class includes the ability to interpolate between probabilities
specified using multiple thresholds in order to generate the percentiles,
see Figure 1 from Flowerdew, 2014.
Scientific Reference:
Flowerdew, J., 2014.
Calibrated ensemble reliability whilst preserving spatial structure.
Tellus Series A, Dynamic Meteorology and Oceanography, 66, 22662.
"""
def __init__(self, ecc_bounds_warning=False):
"""
Initialise the class.
Args:
ecc_bounds_warning (bool):
If true and ECC bounds are exceeded by the percentile values,
a warning will be generated rather than an exception.
Default value is FALSE.
"""
self.ecc_bounds_warning = ecc_bounds_warning
def _add_bounds_to_thresholds_and_probabilities(
self, threshold_points, probabilities_for_cdf, bounds_pairing
):
"""
Padding of the lower and upper bounds of the distribution for a
given phenomenon for the threshold_points, and padding of
probabilities of 0 and 1 to the forecast probabilities.
Args:
threshold_points (numpy.ndarray):
Array of threshold values used to calculate the probabilities.
probabilities_for_cdf (numpy.ndarray):
                Array containing the probabilities used for constructing a
cumulative distribution function i.e. probabilities
below threshold.
bounds_pairing (tuple):
Lower and upper bound to be used as the ends of the
cumulative distribution function.
Returns:
(tuple): tuple containing:
**threshold_points** (numpy.ndarray):
Array of threshold values padded with the lower and upper
bound of the distribution.
**probabilities_for_cdf** (numpy.ndarray):
Array containing the probabilities padded with 0 and 1 at
each end.
Raises:
ValueError: If the thresholds exceed the ECC bounds for
the diagnostic and self.ecc_bounds_warning is False.
Warns:
Warning: If the thresholds exceed the ECC bounds for
the diagnostic and self.ecc_bounds_warning is True.
"""
lower_bound, upper_bound = bounds_pairing
threshold_points_with_endpoints = insert_lower_and_upper_endpoint_to_1d_array(
threshold_points, lower_bound, upper_bound
)
probabilities_for_cdf = concatenate_2d_array_with_2d_array_endpoints(
probabilities_for_cdf, 0, 1
)
if np.any(np.diff(threshold_points_with_endpoints) < 0):
msg = (
"The calculated threshold values {} are not in ascending "
"order as required for the cumulative distribution "
"function (CDF). This is due to the threshold values "
"exceeding the range given by the ECC bounds {}.".format(
threshold_points_with_endpoints, bounds_pairing
)
)
# If ecc_bounds_warning has been set, generate a warning message
# rather than raising an exception so that subsequent processing
# can continue. Then apply the new bounds as necessary to
# ensure the threshold values and endpoints are in ascending
# order and avoid problems further along the processing chain.
if self.ecc_bounds_warning:
warn_msg = msg + (
" The threshold points that have "
"exceeded the existing bounds will be used "
"as new bounds."
)
warnings.warn(warn_msg)
if upper_bound < max(threshold_points_with_endpoints):
upper_bound = max(threshold_points_with_endpoints)
if lower_bound > min(threshold_points_with_endpoints):
lower_bound = min(threshold_points_with_endpoints)
threshold_points_with_endpoints = insert_lower_and_upper_endpoint_to_1d_array(
threshold_points, lower_bound, upper_bound
)
else:
raise ValueError(msg)
return threshold_points_with_endpoints, probabilities_for_cdf
def _probabilities_to_percentiles(
self, forecast_probabilities, percentiles, bounds_pairing
):
"""
Conversion of probabilities to percentiles through the construction
        of a cumulative distribution function. This is effectively
constructed by linear interpolation from the probabilities associated
with each threshold to a set of percentiles.
Args:
forecast_probabilities (iris.cube.Cube):
Cube with a threshold coordinate.
percentiles (numpy.ndarray):
Array of percentiles, at which the corresponding values will be
calculated.
bounds_pairing (tuple):
Lower and upper bound to be used as the ends of the
cumulative distribution function.
Returns:
iris.cube.Cube:
Cube containing values for the required diagnostic e.g.
air_temperature at the required percentiles.
Raises:
NotImplementedError: If the threshold coordinate has an
spp__relative_to_threshold attribute that is not either
"above" or "below".
Warns:
Warning: If the probability values are not ascending, so the
resulting cdf is not monotonically increasing.
"""
threshold_coord = find_threshold_coordinate(forecast_probabilities)
threshold_unit = threshold_coord.units
threshold_points = threshold_coord.points
# Ensure that the percentile dimension is first, so that the
# conversion to a 2d array produces data in the desired order.
enforce_coordinate_ordering(forecast_probabilities, threshold_coord.name())
prob_slices = convert_cube_data_to_2d(
forecast_probabilities, coord=threshold_coord.name()
)
# The requirement below for a monotonically changing probability
# across thresholds can be thwarted by precision errors of order 1E-10,
# as such, here we round to a precision of 9 decimal places.
prob_slices = np.around(prob_slices, 9)
# Invert probabilities for data thresholded above thresholds.
relation = find_threshold_coordinate(forecast_probabilities).attributes[
"spp__relative_to_threshold"
]
if relation == "above":
probabilities_for_cdf = 1 - prob_slices
elif relation == "below":
probabilities_for_cdf = prob_slices
else:
msg = (
"Probabilities to percentiles only implemented for "
"thresholds above or below a given value."
"The relation to threshold is given as {}".format(relation)
)
raise NotImplementedError(msg)
(
threshold_points,
probabilities_for_cdf,
) = self._add_bounds_to_thresholds_and_probabilities(
threshold_points, probabilities_for_cdf, bounds_pairing
)
if np.any(np.diff(probabilities_for_cdf) < 0):
msg = (
"The probability values used to construct the "
"Cumulative Distribution Function (CDF) "
"must be ascending i.e. in order to yield "
"a monotonically increasing CDF."
"The probabilities are {}".format(probabilities_for_cdf)
)
warnings.warn(msg)
# Convert percentiles into fractions.
percentiles_as_fractions = np.array(
[x / 100.0 for x in percentiles], dtype=np.float32
)
forecast_at_percentiles = (
# pylint: disable=unsubscriptable-object
np.empty(
(len(percentiles), probabilities_for_cdf.shape[0]), dtype=np.float32
)
)
# pylint: disable=unsubscriptable-object
for index in range(probabilities_for_cdf.shape[0]):
forecast_at_percentiles[:, index] = np.interp(
percentiles_as_fractions,
probabilities_for_cdf[index, :],
threshold_points,
)
# Reshape forecast_at_percentiles, so the percentiles dimension is
# first, and any other dimension coordinates follow.
forecast_at_percentiles = restore_non_percentile_dimensions(
forecast_at_percentiles,
next(forecast_probabilities.slices_over(threshold_coord)),
len(percentiles),
)
template_cube = next(forecast_probabilities.slices_over(threshold_coord.name()))
template_cube.rename(extract_diagnostic_name(template_cube.name()))
template_cube.remove_coord(threshold_coord.name())
percentile_cube = create_cube_with_percentiles(
percentiles,
template_cube,
forecast_at_percentiles,
cube_unit=threshold_unit,
)
return percentile_cube
def process(
self,
forecast_probabilities,
no_of_percentiles=None,
percentiles=None,
sampling="quantile",
):
"""
1. Concatenates cubes with a threshold coordinate.
2. Creates a list of percentiles.
3. Accesses the lower and upper bound pair to find the ends of the
cumulative distribution function.
4. Convert the threshold coordinate into
values at a set of percentiles using linear interpolation,
see Figure 1 from Flowerdew, 2014.
Args:
forecast_probabilities (iris.cube.Cube):
Cube containing a threshold coordinate.
no_of_percentiles (int):
Number of percentiles. If None and percentiles is not set,
the number of thresholds within the input
forecast_probabilities cube is used as the number of
percentiles. This argument is mutually exclusive with
percentiles.
percentiles (list of float):
The desired percentile values in the interval [0, 100].
This argument is mutually exclusive with no_of_percentiles.
sampling (str):
Type of sampling of the distribution to produce a set of
percentiles e.g. quantile or random.
Accepted options for sampling are:
* Quantile: A regular set of equally-spaced percentiles aimed
at dividing a Cumulative Distribution Function into
blocks of equal probability.
* Random: A random set of ordered percentiles.
Returns:
iris.cube.Cube:
Cube with forecast values at the desired set of percentiles.
The threshold coordinate is always the zeroth dimension.
Raises:
ValueError: If both no_of_percentiles and percentiles are provided
"""
if no_of_percentiles is not None and percentiles is not None:
raise ValueError(
"Cannot specify both no_of_percentiles and percentiles to "
"{}".format(self.__class__.__name__)
)
threshold_coord = find_threshold_coordinate(forecast_probabilities)
phenom_name = extract_diagnostic_name(forecast_probabilities.name())
if no_of_percentiles is None:
no_of_percentiles = len(
forecast_probabilities.coord(threshold_coord.name()).points
)
if percentiles is None:
percentiles = choose_set_of_percentiles(
no_of_percentiles, sampling=sampling
)
elif not isinstance(percentiles, (tuple, list)):
percentiles = [percentiles]
percentiles = np.array(percentiles, dtype=np.float32)
cube_units = forecast_probabilities.coord(threshold_coord.name()).units
bounds_pairing = get_bounds_of_distribution(phenom_name, cube_units)
# If a cube still has multiple realizations, slice over these to reduce
# the memory requirements into manageable chunks.
try:
slices_over_realization = forecast_probabilities.slices_over("realization")
except CoordinateNotFoundError:
slices_over_realization = [forecast_probabilities]
cubelist = iris.cube.CubeList([])
for cube_realization in slices_over_realization:
cubelist.append(
self._probabilities_to_percentiles(
cube_realization, percentiles, bounds_pairing
)
)
forecast_at_percentiles = cubelist.merge_cube()
return forecast_at_percentiles
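# Standalone numpy sketch (not part of the module) of the two numerical
# steps above for a single grid point, with assumed values: pad the
# thresholds with the ECC bounds and the probabilities with 0 and 1, then
# invert the resulting CDF at the requested percentiles via np.interp.
_bounds_pairing = (-40.0, 330.0)                  # assumed ECC bounds
_thresholds = np.array([273.0, 275.0, 277.0])
_probs_below = np.array([0.1, 0.5, 0.9])          # probability below each threshold
_thresholds_padded = np.concatenate(
    ([_bounds_pairing[0]], _thresholds, [_bounds_pairing[1]]))
_probs_padded = np.concatenate(([0.0], _probs_below, [1.0]))
_fractions = np.array([25.0, 50.0, 75.0]) / 100.0  # percentiles as fractions
_values = np.interp(_fractions, _probs_padded, _thresholds_padded)
# _values -> forecast values at the 25th, 50th and 75th percentiles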
class ConvertLocationAndScaleParameters:
"""
Base Class to support the plugins that compute percentiles and
probabilities from the location and scale parameters.
"""
def __init__(self, distribution="norm", shape_parameters=None):
"""
Initialise the class.
In order to construct percentiles or probabilities from the location
or scale parameter, the distribution for the resulting output needs
to be selected. For use with the outputs from EMOS, where it has been
assumed that the outputs from minimising the CRPS follow a particular
distribution, then the same distribution should be selected, as used
for the CRPS minimisation. The conversion to percentiles and
probabilities from the location and scale parameter relies upon
functionality within scipy.stats.
Args:
distribution (str):
Name of a distribution supported by scipy.stats.
shape_parameters (numpy.ndarray or None):
For use with distributions in scipy.stats (e.g. truncnorm) that
require the specification of shape parameters to be able to
define the shape of the distribution. For the truncated normal
distribution, the shape parameters should be appropriate for
the distribution constructed from the location and scale
parameters provided.
Please note that for use with
:meth:`~improver.calibration.\
ensemble_calibration.ContinuousRankedProbabilityScoreMinimisers.\
calculate_truncated_normal_crps`,
the shape parameters for a truncated normal distribution with
a lower bound of zero should be [0, np.inf].
"""
try:
self.distribution = getattr(stats, distribution)
except AttributeError as err:
msg = (
"The distribution requested {} is not a valid distribution "
"in scipy.stats. {}".format(distribution, err)
)
raise AttributeError(msg)
if shape_parameters is None:
if self.distribution.name == "truncnorm":
raise ValueError(
"For the truncated normal distribution, "
"shape parameters must be specified."
)
shape_parameters = []
self.shape_parameters = shape_parameters
def __repr__(self):
"""Represent the configured plugin instance as a string."""
result = (
"<ConvertLocationAndScaleParameters: distribution: {}; "
"shape_parameters: {}>"
)
return result.format(self.distribution.name, self.shape_parameters)
def _rescale_shape_parameters(self, location_parameter, scale_parameter):
"""
Rescale the shape parameters for the desired location and scale
parameters for the truncated normal distribution. The shape parameters
for any other distribution will remain unchanged.
For the truncated normal distribution, if the shape parameters are not
rescaled, then :data:`scipy.stats.truncnorm` will assume that the shape
parameters are appropriate for a standard normal distribution. As the
aim is to construct a distribution using specific values for the
location and scale parameters, the assumption of a standard normal
distribution is not appropriate. Therefore the shape parameters are
rescaled using the equations:
.. math::
a\\_rescaled = (a - location\\_parameter)/scale\\_parameter
b\\_rescaled = (b - location\\_parameter)/scale\\_parameter
Please see :data:`scipy.stats.truncnorm` for some further information.
Args:
location_parameter (numpy.ndarray):
Location parameter to be used to scale the shape parameters.
scale_parameter (numpy.ndarray):
Scale parameter to be used to scale the shape parameters.
"""
if self.distribution.name == "truncnorm":
rescaled_values = []
for value in self.shape_parameters:
rescaled_values.append((value - location_parameter) / scale_parameter)
self.shape_parameters = rescaled_values
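# Worked example (not part of the module) of the rescaling above, using
# scipy directly with assumed numbers: a truncated normal with a lower
# bound of zero in data space, location 2.0 and scale 1.5.
_loc, _scale = 2.0, 1.5
_a, _b = 0.0, np.inf                  # truncation bounds in data space
_a_rescaled = (_a - _loc) / _scale    # shape parameters in the standardised
_b_rescaled = (_b - _loc) / _scale    # form that scipy.stats.truncnorm expects
_dist = stats.truncnorm(_a_rescaled, _b_rescaled, loc=_loc, scale=_scale)
# _dist.ppf(0.5) -> median of N(2.0, 1.5) truncated below at zero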
class ConvertLocationAndScaleParametersToPercentiles(
BasePlugin, ConvertLocationAndScaleParameters
):
"""
Plugin focusing on generating percentiles from location and scale
parameters. In combination with the EnsembleReordering plugin, this is
Ensemble Copula Coupling.
"""
def __repr__(self):
"""Represent the configured plugin instance as a string."""
result = (
"<ConvertLocationAndScaleParametersToPercentiles: "
"distribution: {}; shape_parameters: {}>"
)
return result.format(self.distribution.name, self.shape_parameters)
def _location_and_scale_parameters_to_percentiles(
self, location_parameter, scale_parameter, template_cube, percentiles
):
"""
Function returning percentiles based on the supplied location and
scale parameters.
Args:
location_parameter (iris.cube.Cube):
Location parameter of calibrated distribution.
scale_parameter (iris.cube.Cube):
Scale parameter of the calibrated distribution.
template_cube (iris.cube.Cube):
Template cube containing either a percentile or realization
coordinate. All coordinates apart from the percentile or
realization coordinate will be copied from the template cube.
Metadata will also be copied from this cube.
            percentiles (list):
                Percentiles at which to calculate the value of the
                phenomenon.
Returns:
iris.cube.Cube:
Cube containing the values for the phenomenon at each of the
percentiles requested.
Raises:
ValueError: If any of the resulting percentile values are
nans and these nans are not caused by a scale parameter of
zero.
"""
# Remove any mask that may be applied to location and scale parameters
# and replace with ones
location_data = np.ma.filled(location_parameter.data, 1).flatten()
scale_data = np.ma.filled(scale_parameter.data, 1).flatten()
# Convert percentiles into fractions.
percentiles = np.array([x / 100.0 for x in percentiles], dtype=np.float32)
result = np.zeros((len(percentiles), location_data.shape[0]), dtype=np.float32)
self._rescale_shape_parameters(location_data, np.sqrt(scale_data))
percentile_method = self.distribution(
*self.shape_parameters, loc=location_data, scale=np.sqrt(scale_data)
)
# Loop over percentiles, and use the distribution as the
# "percentile_method" with the location and scale parameter to
# calculate the values at each percentile.
for index, percentile in enumerate(percentiles):
percentile_list = np.repeat(percentile, len(location_data))
result[index, :] = percentile_method.ppf(percentile_list)
# If percent point function (PPF) returns NaNs, fill in
# mean instead of NaN values. NaN will only be generated if the
# variance is zero. Therefore, if the variance is zero, the mean
# value is used for all gridpoints with a NaN.
if np.any(scale_data == 0):
nan_index = np.argwhere(np.isnan(result[index, :]))
result[index, nan_index] = location_data[nan_index]
if np.any(np.isnan(result)):
msg = (
"NaNs are present within the result for the {} "
"percentile. Unable to calculate the percent point "
"function."
)
                raise ValueError(msg.format(percentile * 100))
# Convert percentiles back into percentages.
percentiles = [x * 100.0 for x in percentiles]
# Reshape forecast_at_percentiles, so the percentiles dimension is
# first, and any other dimension coordinates follow.
result = result.reshape((len(percentiles),) + location_parameter.data.shape)
for prob_coord_name in ["realization", "percentile"]:
if template_cube.coords(prob_coord_name, dim_coords=True):
prob_coord = template_cube.coord(prob_coord_name)
template_slice = next(template_cube.slices_over(prob_coord))
template_slice.remove_coord(prob_coord)
percentile_cube = create_cube_with_percentiles(
percentiles, template_slice, result
)
# Define a mask to be reapplied later
mask = np.logical_or(
np.ma.getmaskarray(location_parameter.data),
np.ma.getmaskarray(scale_parameter.data),
)
# Make the mask defined above fit the data size and then apply to the
# percentile cube.
mask_array = np.stack([mask] * len(percentiles))
percentile_cube.data = np.ma.masked_where(mask_array, percentile_cube.data)
# Remove cell methods associated with finding the ensemble mean
percentile_cube.cell_methods = {}
return percentile_cube
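    # The core calculation above is a percent point function (inverse CDF)
    # evaluation; a standalone sketch with assumed numbers:
    #   >>> from scipy import stats
    #   >>> stats.norm(loc=2.0, scale=1.0).ppf([0.25, 0.5, 0.75])
    #   # -> approximately [1.33, 2.0, 2.67]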
def process(
self,
location_parameter,
scale_parameter,
template_cube,
no_of_percentiles=None,
percentiles=None,
):
"""
Generate ensemble percentiles from the location and scale parameters.
Args:
location_parameter (iris.cube.Cube):
Cube containing the location parameters.
scale_parameter (iris.cube.Cube):
Cube containing the scale parameters.
template_cube (iris.cube.Cube):
Template cube containing either a percentile or realization
coordinate. All coordinates apart from the percentile or
realization coordinate will be copied from the template cube.
Metadata will also be copied from this cube.
no_of_percentiles (int):
Integer defining the number of percentiles that will be
calculated from the location and scale parameters.
percentiles (list):
List of percentiles that will be generated from the location
and scale parameters provided.
Returns:
iris.cube.Cube:
Cube for calibrated percentiles.
The percentile coordinate is always the zeroth dimension.
Raises:
            ValueError: If both "no_of_percentiles" and "percentiles" are
                supplied simultaneously as keyword arguments.
"""
if no_of_percentiles and percentiles:
msg = (
"Please specify either the number of percentiles or "
"provide a list of percentiles. The number of percentiles "
"provided was {} and the list of percentiles "
"provided was {}".format(no_of_percentiles, percentiles)
)
raise ValueError(msg)
if no_of_percentiles:
percentiles = choose_set_of_percentiles(no_of_percentiles)
calibrated_forecast_percentiles = self._location_and_scale_parameters_to_percentiles(
location_parameter, scale_parameter, template_cube, percentiles
)
return calibrated_forecast_percentiles
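    # Hypothetical call pattern (the cube names are placeholders, not from
    # the original source):
    #   plugin = ConvertLocationAndScaleParametersToPercentiles(
    #       distribution="norm")
    #   percentile_cube = plugin(location_cube, scale_cube, template_cube,
    #                            no_of_percentiles=19)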
class ConvertLocationAndScaleParametersToProbabilities(
BasePlugin, ConvertLocationAndScaleParameters
):
"""
Plugin to generate probabilities relative to given thresholds from the
location and scale parameters of a distribution.
"""
def __repr__(self):
"""Represent the configured plugin instance as a string."""
result = (
"<ConvertLocationAndScaleParametersToProbabilities: "
"distribution: {}; shape_parameters: {}>"
)
return result.format(self.distribution.name, self.shape_parameters)
def _check_template_cube(self, cube):
"""
The template cube is expected to contain a leading threshold dimension
followed by spatial (y/x) dimensions. This check raises an error if
this is not the case. If the cube contains the expected dimensions,
a threshold leading order is enforced.
Args:
cube (iris.cube.Cube):
A cube whose dimensions are checked to ensure they match what
is expected.
Raises:
ValueError: If cube is not of the expected dimensions.
"""
check_for_x_and_y_axes(cube, require_dim_coords=True)
dim_coords = get_dim_coord_names(cube)
msg = (
"{} expects a cube with only a leading threshold dimension, "
"followed by spatial (y/x) dimensions. "
"Got dimensions: {}".format(self.__class__.__name__, dim_coords)
)
try:
threshold_coord = find_threshold_coordinate(cube)
except CoordinateNotFoundError:
raise ValueError(msg)
if len(dim_coords) < 4:
enforce_coordinate_ordering(cube, threshold_coord.name())
return
raise ValueError(msg)
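    # Sketch of the expected layout (coordinate names are assumed): a valid
    # template cube has dimensions (threshold, y, x). A cube arriving as
    # (y, x, threshold) is reordered by enforce_coordinate_ordering so that
    # the threshold dimension leads; any additional dimension raises the
    # ValueError above.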
@staticmethod
def _check_unit_compatibility(
location_parameter, scale_parameter, probability_cube_template
):
"""
The location parameter, scale parameters, and threshold values come
from three different cubes. They should all be in the same base unit,
with the units of the scale parameter being the squared units of the
location parameter and threshold values. This is a sanity check to
ensure the units are as expected, converting units of the location
parameter and scale parameter if possible.
Args:
location_parameter (iris.cube.Cube):
Cube of location parameter values.
scale_parameter (iris.cube.Cube):
Cube of scale parameter values.
probability_cube_template (iris.cube.Cube):
Cube containing threshold values.
Raises:
ValueError: If units of input cubes are not compatible.
"""
threshold_units = find_threshold_coordinate(probability_cube_template).units
try:
location_parameter.convert_units(threshold_units)
scale_parameter.convert_units(threshold_units ** 2)
except ValueError as err:
msg = (
"Error: {} This is likely because the mean "
"variance and template cube threshold units are "
"not equivalent/compatible.".format(err)
)
raise ValueError(msg)
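    # Sketch of the unit relationship checked above (units are assumed
    # examples): for thresholds in kelvin, the location parameter must be
    # convertible to "K" and the scale parameter to "K" squared.
    #   >>> from cf_units import Unit
    #   >>> scale_units = Unit("K") ** 2  # unit expected of the scale parameter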
def _location_and_scale_parameters_to_probabilities(
self, location_parameter, scale_parameter, probability_cube_template
):
"""
Function returning probabilities relative to provided thresholds based
on the supplied location and scale parameters.
Args:
location_parameter (iris.cube.Cube):
Predictor for the calibrated forecast location parameter.
scale_parameter (iris.cube.Cube):
Scale parameter for the calibrated forecast.
probability_cube_template (iris.cube.Cube):
A probability cube that has a threshold coordinate, where the
probabilities are defined as above or below the threshold by
the spp__relative_to_threshold attribute. This cube matches
the desired output cube format.
Returns:
iris.cube.Cube:
Cube containing the data expressed as probabilities relative to
the provided thresholds in the way described by
spp__relative_to_threshold.
"""
# Define a mask to be reapplied later
loc_mask = np.ma.getmaskarray(location_parameter.data)
scale_mask = np.ma.getmaskarray(scale_parameter.data)
mask = np.logical_or(loc_mask, scale_mask)
# Remove any mask that may be applied to location and scale parameters
# and replace with ones
location_parameter.data = np.ma.filled(location_parameter.data, 1)
scale_parameter.data = np.ma.filled(scale_parameter.data, 1)
thresholds = find_threshold_coordinate(probability_cube_template).points
relative_to_threshold = find_threshold_coordinate(
probability_cube_template
).attributes["spp__relative_to_threshold"]
self._rescale_shape_parameters(
location_parameter.data.flatten(), np.sqrt(scale_parameter.data).flatten()
)
# Loop over thresholds, and use the specified distribution with the
# location and scale parameter to calculate the probabilities relative
# to each threshold.
probabilities = np.empty_like(probability_cube_template.data)
distribution = self.distribution(
*self.shape_parameters,
loc=location_parameter.data.flatten(),
scale=np.sqrt(scale_parameter.data.flatten()),
)
probability_method = distribution.cdf
if relative_to_threshold == "above":
probability_method = distribution.sf
for index, threshold in enumerate(thresholds):
# pylint: disable=unsubscriptable-object
probabilities[index, ...] = np.reshape(
probability_method(threshold), probabilities.shape[1:]
)
probability_cube = probability_cube_template.copy(data=probabilities)
# Make the mask defined above fit the data size and then apply to the
# probability cube.
mask_array = np.array([mask] * len(probabilities))
probability_cube.data = np.ma.masked_where(mask_array, probability_cube.data)
return probability_cube
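    # The probability calculation above reduces to a CDF (below threshold)
    # or survival function (above threshold) evaluation; a standalone sketch
    # with assumed numbers:
    #   >>> from scipy import stats
    #   >>> dist = stats.norm(loc=280.0, scale=2.0)
    #   >>> dist.sf(282.0)   # probability of exceeding the threshold
    #   # -> approximately 0.159
    #   >>> dist.cdf(282.0)  # probability of being below the threshold
    #   # -> approximately 0.841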
def process(self, location_parameter, scale_parameter, probability_cube_template):
"""
Generate probabilities from the location and scale parameters of the
distribution.
Args:
location_parameter (iris.cube.Cube):
Cube containing the location parameters.
scale_parameter (iris.cube.Cube):
Cube containing the scale parameters.
probability_cube_template (iris.cube.Cube):
A probability cube that has a threshold coordinate, where the
probabilities are defined as above or below the threshold by
the spp__relative_to_threshold attribute. This cube matches
the desired output cube format.
Returns:
iris.cube.Cube:
A cube of diagnostic data expressed as probabilities relative
to the thresholds found in the probability_cube_template.
"""
self._check_template_cube(probability_cube_template)
self._check_unit_compatibility(
location_parameter, scale_parameter, probability_cube_template
)
probability_cube = self._location_and_scale_parameters_to_probabilities(
location_parameter, scale_parameter, probability_cube_template
)
return probability_cube
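    # Hypothetical call pattern (the cube names are placeholders, not from
    # the original source):
    #   plugin = ConvertLocationAndScaleParametersToProbabilities(
    #       distribution="norm")
    #   probability_cube = plugin(location_cube, scale_cube,
    #                             probability_cube_template)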
class EnsembleReordering(BasePlugin):
"""
Plugin for applying the reordering step of Ensemble Copula Coupling,
in order to generate ensemble realizations with multivariate structure
from percentiles. The percentiles are assumed to be in ascending order.
Reference:
Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013.
Uncertainty Quantification in Complex Simulation Models Using Ensemble
Copula Coupling.
Statistical Science, 28(4), pp.616-640.
"""
@staticmethod
def _recycle_raw_ensemble_realizations(
post_processed_forecast_percentiles,
raw_forecast_realizations,
percentile_coord_name,
):
"""
Function to determine whether there is a mismatch between the number
of percentiles and the number of raw forecast realizations. If more
percentiles are requested than ensemble realizations, then the ensemble
realizations are recycled. This assumes that the identity of the
ensemble realizations within the raw ensemble forecast is random, such
that the raw ensemble realizations are exchangeable. If fewer
percentiles are requested than ensemble realizations, then only the
first n ensemble realizations are used.
Args:
post_processed_forecast_percentiles (iris.cube.Cube):
Cube for post-processed percentiles.
The percentiles are assumed
to be in ascending order.
raw_forecast_realizations (iris.cube.Cube):
Cube containing the raw (not post-processed) forecasts.
percentile_coord_name (str):
Name of required percentile coordinate.
Returns:
            iris.cube.Cube:
Cube for the raw ensemble forecast, where the raw ensemble
realizations have either been recycled or constrained,
depending upon the number of percentiles present
in the post-processed forecast cube.
"""
plen = len(
post_processed_forecast_percentiles.coord(percentile_coord_name).points
)
mlen = len(raw_forecast_realizations.coord("realization").points)
if plen == mlen:
pass
else:
raw_forecast_realizations_extended = iris.cube.CubeList()
realization_list = []
mpoints = raw_forecast_realizations.coord("realization").points
            # Loop over the number of percentiles and find the corresponding
            # ensemble realization number. The ensemble realization numbers
            # are recycled e.g. 1, 2, 3, 1, 2, 3, etc.
for index in range(plen):
realization_list.append(mpoints[index % len(mpoints)])
# Assume that the ensemble realizations are ascending linearly.
new_realization_numbers = realization_list[0] + list(range(plen))
# Extract the realizations required in the realization_list from
# the raw_forecast_realizations. Edit the realization number as
# appropriate and append to a cubelist containing rebadged
# raw ensemble realizations.
for realization, index in zip(realization_list, new_realization_numbers):
constr = iris.Constraint(realization=realization)
raw_forecast_realization = raw_forecast_realizations.extract(constr)
raw_forecast_realization.coord("realization").points = index
raw_forecast_realizations_extended.append(raw_forecast_realization)
raw_forecast_realizations = MergeCubes()(
raw_forecast_realizations_extended, slice_over_realization=True
)
return raw_forecast_realizations
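    # Minimal sketch of the recycling rule above (numbers are assumed): with
    # raw realization numbers [0, 1, 2] and 5 requested percentiles,
    #   [mpoints[index % 3] for index in range(5)]  ->  [0, 1, 2, 0, 1]
    # and the extracted realizations are then rebadged with new, linearly
    # ascending realization numbers.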
@staticmethod
def rank_ecc(
post_processed_forecast_percentiles,
raw_forecast_realizations,
random_ordering=False,
random_seed=None,
):
"""
Function to apply Ensemble Copula Coupling. This ranks the
post-processed forecast realizations based on a ranking determined from
the raw forecast realizations.
Args:
post_processed_forecast_percentiles (iris.cube.Cube):
Cube for post-processed percentiles. The percentiles are
assumed to be in ascending order.
raw_forecast_realizations (iris.cube.Cube):
Cube containing the raw (not post-processed) forecasts.
The probabilistic dimension is assumed to be the zeroth
dimension.
random_ordering (bool):
If random_ordering is True, the post-processed forecasts are
reordered randomly, rather than using the ordering of the
raw ensemble.
random_seed (int or None):
If random_seed is an integer, the integer value is used for
the random seed.
If random_seed is None, no random seed is set, so the random
values generated are not reproducible.
Returns:
iris.cube.Cube:
Cube for post-processed realizations where at a particular grid
point, the ranking of the values within the ensemble matches
the ranking from the raw ensemble.
"""
results = iris.cube.CubeList([])
for rawfc, calfc in zip(
raw_forecast_realizations.slices_over("time"),
post_processed_forecast_percentiles.slices_over("time"),
):
if random_seed is not None:
random_seed = int(random_seed)
random_seed = np.random.RandomState(random_seed)
random_data = random_seed.rand(*rawfc.data.shape)
if random_ordering:
# Returns the indices that would sort the array.
# As these indices are from a random dataset, only an argsort
# is used.
ranking = np.argsort(random_data, axis=0)
else:
# Lexsort returns the indices sorted firstly by the
# primary key, the raw forecast data (unless random_ordering
# is enabled), and secondly by the secondary key, an array of
# random data, in order to split tied values randomly.
sorting_index = np.lexsort((random_data, rawfc.data), axis=0)
# Returns the indices that would sort the array.
ranking = np.argsort(sorting_index, axis=0)
# Index the post-processed forecast data using the ranking array.
# The following uses a custom choose function that reproduces the
# required elements of the np.choose method without the limitation
# of having < 32 arrays or a leading dimension < 32 in the
# input data array. This function allows indexing of a 3d array
# using a 3d array.
mask = np.ma.getmask(calfc.data)
calfc.data = choose(ranking, calfc.data)
if mask is not np.ma.nomask:
calfc.data = np.ma.MaskedArray(calfc.data, mask, dtype=np.float32)
results.append(calfc)
# Ensure we haven't lost any dimensional coordinates with only one
# value in.
results = results.merge_cube()
results = check_cube_coordinates(post_processed_forecast_percentiles, results)
return results
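    # Standalone sketch of the reordering above for a single grid point
    # (values are assumed examples):
    #   >>> import numpy as np
    #   >>> raw = np.array([2.0, 0.5, 1.0])    # raw ensemble members
    #   >>> cal = np.array([0.1, 0.4, 0.9])    # calibrated percentiles, ascending
    #   >>> noise = np.array([0.3, 0.7, 0.2])  # random tie-breaker
    #   >>> ranking = np.argsort(np.lexsort((noise, raw), axis=0), axis=0)
    #   >>> cal[ranking]                       # equivalent to choose() in 1D
    #   array([0.9, 0.1, 0.4])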
def process(
self,
post_processed_forecast,
raw_forecast,
random_ordering=False,
random_seed=None,
):
"""
Reorder post-processed forecast using the ordering of the
raw ensemble.
Args:
post_processed_forecast (iris.cube.Cube):
The cube containing the post-processed
forecast realizations.
raw_forecast (iris.cube.Cube):
The cube containing the raw (not post-processed)
forecast.
random_ordering (bool):
If random_ordering is True, the post-processed forecasts are
reordered randomly, rather than using the ordering of the
raw ensemble.
            random_seed (int or None):
If random_seed is an integer, the integer value is used for
the random seed.
If random_seed is None, no random seed is set, so the random
values generated are not reproducible.
Returns:
iris.cube.Cube:
Cube containing the new ensemble realizations where all points
within the dataset have been reordered in comparison to the
input percentiles.
"""
percentile_coord_name = find_percentile_coordinate(
post_processed_forecast
).name()
enforce_coordinate_ordering(post_processed_forecast, percentile_coord_name)
enforce_coordinate_ordering(raw_forecast, "realization")
raw_forecast = self._recycle_raw_ensemble_realizations(
post_processed_forecast, raw_forecast, percentile_coord_name
)
post_processed_forecast_realizations = self.rank_ecc(
post_processed_forecast,
raw_forecast,
random_ordering=random_ordering,
random_seed=random_seed,
)
plugin = RebadgePercentilesAsRealizations()
post_processed_forecast_realizations = plugin(
post_processed_forecast_realizations
)
enforce_coordinate_ordering(post_processed_forecast_realizations, "realization")
return post_processed_forecast_realizations
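    # Hypothetical end-to-end usage (the cube names are placeholders, not
    # from the original source):
    #   plugin = EnsembleReordering()
    #   realization_cube = plugin(calibrated_percentile_cube,
    #                             raw_realization_cube,
    #                             random_ordering=False)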
| 42.816204
| 102
| 0.644286
|
import warnings
import iris
import numpy as np
from iris.exceptions import CoordinateNotFoundError, InvalidCubeError
from scipy import stats
from improver import BasePlugin
from improver.calibration.utilities import convert_cube_data_to_2d
from improver.ensemble_copula_coupling.utilities import (
choose_set_of_percentiles,
concatenate_2d_array_with_2d_array_endpoints,
create_cube_with_percentiles,
get_bounds_of_distribution,
insert_lower_and_upper_endpoint_to_1d_array,
restore_non_percentile_dimensions,
)
from improver.metadata.probabilistic import (
extract_diagnostic_name,
find_percentile_coordinate,
find_threshold_coordinate,
)
from improver.utilities.cube_checker import (
check_cube_coordinates,
check_for_x_and_y_axes,
)
from improver.utilities.cube_manipulation import (
MergeCubes,
enforce_coordinate_ordering,
get_dim_coord_names,
)
from improver.utilities.indexing_operations import choose
class RebadgePercentilesAsRealizations(BasePlugin):
@staticmethod
def process(cube, ensemble_realization_numbers=None):
percentile_coord_name = find_percentile_coordinate(cube).name()
if ensemble_realization_numbers is None:
ensemble_realization_numbers = np.arange(
len(cube.coord(percentile_coord_name).points), dtype=np.int32
)
cube.coord(percentile_coord_name).points = ensemble_realization_numbers
try:
realization_coord = cube.coord("realization")
except CoordinateNotFoundError:
realization_coord = None
if realization_coord:
raise InvalidCubeError(
"Cannot rebadge percentile coordinate to realization "
"coordinate because a realization coordinate already exists."
)
cube.coord(percentile_coord_name).rename("realization")
cube.coord("realization").units = "1"
cube.coord("realization").points = cube.coord("realization").points.astype(
np.int32
)
return cube
class ResamplePercentiles(BasePlugin):
def __init__(self, ecc_bounds_warning=False):
self.ecc_bounds_warning = ecc_bounds_warning
def _add_bounds_to_percentiles_and_forecast_at_percentiles(
self, percentiles, forecast_at_percentiles, bounds_pairing
):
lower_bound, upper_bound = bounds_pairing
percentiles = insert_lower_and_upper_endpoint_to_1d_array(percentiles, 0, 100)
forecast_at_percentiles_with_endpoints = concatenate_2d_array_with_2d_array_endpoints(
forecast_at_percentiles, lower_bound, upper_bound
)
if np.any(np.diff(forecast_at_percentiles_with_endpoints) < 0):
out_of_bounds_vals = forecast_at_percentiles_with_endpoints[
np.where(np.diff(forecast_at_percentiles_with_endpoints) < 0)
]
msg = (
"Forecast values exist that fall outside the expected extrema "
"values that are defined as bounds in "
"ensemble_copula_coupling/constants.py. "
"Applying the extrema values as end points to the distribution "
"would result in non-monotonically increasing values. "
"The defined extremes are {}, whilst the following forecast "
"values exist outside this range: {}.".format(
bounds_pairing, out_of_bounds_vals
)
)
if self.ecc_bounds_warning:
warn_msg = msg + (
" The percentile values that have "
"exceeded the existing bounds will be used "
"as new bounds."
)
warnings.warn(warn_msg)
if upper_bound < forecast_at_percentiles_with_endpoints.max():
upper_bound = forecast_at_percentiles_with_endpoints.max()
if lower_bound > forecast_at_percentiles_with_endpoints.min():
lower_bound = forecast_at_percentiles_with_endpoints.min()
forecast_at_percentiles_with_endpoints = concatenate_2d_array_with_2d_array_endpoints(
forecast_at_percentiles, lower_bound, upper_bound
)
else:
raise ValueError(msg)
if np.any(np.diff(percentiles) < 0):
msg = (
"The percentiles must be in ascending order."
"The input percentiles were {}".format(percentiles)
)
raise ValueError(msg)
return percentiles, forecast_at_percentiles_with_endpoints
def _interpolate_percentiles(
self,
forecast_at_percentiles,
desired_percentiles,
bounds_pairing,
percentile_coord_name,
):
original_percentiles = forecast_at_percentiles.coord(
percentile_coord_name
).points
# Ensure that the percentile dimension is first, so that the
# conversion to a 2d array produces data in the desired order.
enforce_coordinate_ordering(forecast_at_percentiles, percentile_coord_name)
forecast_at_reshaped_percentiles = convert_cube_data_to_2d(
forecast_at_percentiles, coord=percentile_coord_name
)
(
original_percentiles,
forecast_at_reshaped_percentiles,
) = self._add_bounds_to_percentiles_and_forecast_at_percentiles(
original_percentiles, forecast_at_reshaped_percentiles, bounds_pairing
)
forecast_at_interpolated_percentiles = np.empty(
(len(desired_percentiles), forecast_at_reshaped_percentiles.shape[0]),
dtype=np.float32,
)
for index in range(forecast_at_reshaped_percentiles.shape[0]):
forecast_at_interpolated_percentiles[:, index] = np.interp(
desired_percentiles,
original_percentiles,
forecast_at_reshaped_percentiles[index, :],
)
# Reshape forecast_at_percentiles, so the percentiles dimension is
# first, and any other dimension coordinates follow.
forecast_at_percentiles_data = restore_non_percentile_dimensions(
forecast_at_interpolated_percentiles,
next(forecast_at_percentiles.slices_over(percentile_coord_name)),
len(desired_percentiles),
)
template_cube = next(forecast_at_percentiles.slices_over(percentile_coord_name))
template_cube.remove_coord(percentile_coord_name)
percentile_cube = create_cube_with_percentiles(
desired_percentiles, template_cube, forecast_at_percentiles_data,
)
return percentile_cube
def process(
self, forecast_at_percentiles, no_of_percentiles=None, sampling="quantile"
):
percentile_coord = find_percentile_coordinate(forecast_at_percentiles)
if no_of_percentiles is None:
no_of_percentiles = len(
forecast_at_percentiles.coord(percentile_coord).points
)
percentiles = choose_set_of_percentiles(no_of_percentiles, sampling=sampling)
cube_units = forecast_at_percentiles.units
bounds_pairing = get_bounds_of_distribution(
forecast_at_percentiles.name(), cube_units
)
forecast_at_percentiles = self._interpolate_percentiles(
forecast_at_percentiles,
percentiles,
bounds_pairing,
percentile_coord.name(),
)
return forecast_at_percentiles
class ConvertProbabilitiesToPercentiles(BasePlugin):
def __init__(self, ecc_bounds_warning=False):
self.ecc_bounds_warning = ecc_bounds_warning
def _add_bounds_to_thresholds_and_probabilities(
self, threshold_points, probabilities_for_cdf, bounds_pairing
):
lower_bound, upper_bound = bounds_pairing
threshold_points_with_endpoints = insert_lower_and_upper_endpoint_to_1d_array(
threshold_points, lower_bound, upper_bound
)
probabilities_for_cdf = concatenate_2d_array_with_2d_array_endpoints(
probabilities_for_cdf, 0, 1
)
if np.any(np.diff(threshold_points_with_endpoints) < 0):
msg = (
"The calculated threshold values {} are not in ascending "
"order as required for the cumulative distribution "
"function (CDF). This is due to the threshold values "
"exceeding the range given by the ECC bounds {}.".format(
threshold_points_with_endpoints, bounds_pairing
)
)
# If ecc_bounds_warning has been set, generate a warning message
# rather than raising an exception so that subsequent processing
# can continue. Then apply the new bounds as necessary to
# ensure the threshold values and endpoints are in ascending
# order and avoid problems further along the processing chain.
if self.ecc_bounds_warning:
warn_msg = msg + (
" The threshold points that have "
"exceeded the existing bounds will be used "
"as new bounds."
)
warnings.warn(warn_msg)
if upper_bound < max(threshold_points_with_endpoints):
upper_bound = max(threshold_points_with_endpoints)
if lower_bound > min(threshold_points_with_endpoints):
lower_bound = min(threshold_points_with_endpoints)
threshold_points_with_endpoints = insert_lower_and_upper_endpoint_to_1d_array(
threshold_points, lower_bound, upper_bound
)
else:
raise ValueError(msg)
return threshold_points_with_endpoints, probabilities_for_cdf
def _probabilities_to_percentiles(
self, forecast_probabilities, percentiles, bounds_pairing
):
threshold_coord = find_threshold_coordinate(forecast_probabilities)
threshold_unit = threshold_coord.units
threshold_points = threshold_coord.points
# Ensure that the percentile dimension is first, so that the
# conversion to a 2d array produces data in the desired order.
enforce_coordinate_ordering(forecast_probabilities, threshold_coord.name())
prob_slices = convert_cube_data_to_2d(
forecast_probabilities, coord=threshold_coord.name()
)
# The requirement below for a monotonically changing probability
# across thresholds can be thwarted by precision errors of order 1E-10,
# as such, here we round to a precision of 9 decimal places.
prob_slices = np.around(prob_slices, 9)
# Invert probabilities for data thresholded above thresholds.
relation = find_threshold_coordinate(forecast_probabilities).attributes[
"spp__relative_to_threshold"
]
if relation == "above":
probabilities_for_cdf = 1 - prob_slices
elif relation == "below":
probabilities_for_cdf = prob_slices
else:
msg = (
"Probabilities to percentiles only implemented for "
"thresholds above or below a given value."
"The relation to threshold is given as {}".format(relation)
)
raise NotImplementedError(msg)
(
threshold_points,
probabilities_for_cdf,
) = self._add_bounds_to_thresholds_and_probabilities(
threshold_points, probabilities_for_cdf, bounds_pairing
)
if np.any(np.diff(probabilities_for_cdf) < 0):
msg = (
"The probability values used to construct the "
"Cumulative Distribution Function (CDF) "
"must be ascending i.e. in order to yield "
"a monotonically increasing CDF."
"The probabilities are {}".format(probabilities_for_cdf)
)
warnings.warn(msg)
# Convert percentiles into fractions.
percentiles_as_fractions = np.array(
[x / 100.0 for x in percentiles], dtype=np.float32
)
forecast_at_percentiles = (
# pylint: disable=unsubscriptable-object
np.empty(
(len(percentiles), probabilities_for_cdf.shape[0]), dtype=np.float32
)
)
# pylint: disable=unsubscriptable-object
for index in range(probabilities_for_cdf.shape[0]):
forecast_at_percentiles[:, index] = np.interp(
percentiles_as_fractions,
probabilities_for_cdf[index, :],
threshold_points,
)
# Reshape forecast_at_percentiles, so the percentiles dimension is
# first, and any other dimension coordinates follow.
forecast_at_percentiles = restore_non_percentile_dimensions(
forecast_at_percentiles,
next(forecast_probabilities.slices_over(threshold_coord)),
len(percentiles),
)
template_cube = next(forecast_probabilities.slices_over(threshold_coord.name()))
template_cube.rename(extract_diagnostic_name(template_cube.name()))
template_cube.remove_coord(threshold_coord.name())
percentile_cube = create_cube_with_percentiles(
percentiles,
template_cube,
forecast_at_percentiles,
cube_unit=threshold_unit,
)
return percentile_cube
def process(
self,
forecast_probabilities,
no_of_percentiles=None,
percentiles=None,
sampling="quantile",
):
if no_of_percentiles is not None and percentiles is not None:
raise ValueError(
"Cannot specify both no_of_percentiles and percentiles to "
"{}".format(self.__class__.__name__)
)
threshold_coord = find_threshold_coordinate(forecast_probabilities)
phenom_name = extract_diagnostic_name(forecast_probabilities.name())
if no_of_percentiles is None:
no_of_percentiles = len(
forecast_probabilities.coord(threshold_coord.name()).points
)
if percentiles is None:
percentiles = choose_set_of_percentiles(
no_of_percentiles, sampling=sampling
)
elif not isinstance(percentiles, (tuple, list)):
percentiles = [percentiles]
percentiles = np.array(percentiles, dtype=np.float32)
cube_units = forecast_probabilities.coord(threshold_coord.name()).units
bounds_pairing = get_bounds_of_distribution(phenom_name, cube_units)
# If a cube still has multiple realizations, slice over these to reduce
# the memory requirements into manageable chunks.
try:
slices_over_realization = forecast_probabilities.slices_over("realization")
except CoordinateNotFoundError:
slices_over_realization = [forecast_probabilities]
cubelist = iris.cube.CubeList([])
for cube_realization in slices_over_realization:
cubelist.append(
self._probabilities_to_percentiles(
cube_realization, percentiles, bounds_pairing
)
)
forecast_at_percentiles = cubelist.merge_cube()
return forecast_at_percentiles
class ConvertLocationAndScaleParameters:
def __init__(self, distribution="norm", shape_parameters=None):
try:
self.distribution = getattr(stats, distribution)
except AttributeError as err:
msg = (
"The distribution requested {} is not a valid distribution "
"in scipy.stats. {}".format(distribution, err)
)
raise AttributeError(msg)
if shape_parameters is None:
if self.distribution.name == "truncnorm":
raise ValueError(
"For the truncated normal distribution, "
"shape parameters must be specified."
)
shape_parameters = []
self.shape_parameters = shape_parameters
def __repr__(self):
result = (
"<ConvertLocationAndScaleParameters: distribution: {}; "
"shape_parameters: {}>"
)
return result.format(self.distribution.name, self.shape_parameters)
def _rescale_shape_parameters(self, location_parameter, scale_parameter):
if self.distribution.name == "truncnorm":
rescaled_values = []
for value in self.shape_parameters:
rescaled_values.append((value - location_parameter) / scale_parameter)
self.shape_parameters = rescaled_values
class ConvertLocationAndScaleParametersToPercentiles(
BasePlugin, ConvertLocationAndScaleParameters
):
def __repr__(self):
result = (
"<ConvertLocationAndScaleParametersToPercentiles: "
"distribution: {}; shape_parameters: {}>"
)
return result.format(self.distribution.name, self.shape_parameters)
def _location_and_scale_parameters_to_percentiles(
self, location_parameter, scale_parameter, template_cube, percentiles
):
# Remove any mask that may be applied to location and scale parameters
# and replace with ones
location_data = np.ma.filled(location_parameter.data, 1).flatten()
scale_data = np.ma.filled(scale_parameter.data, 1).flatten()
# Convert percentiles into fractions.
percentiles = np.array([x / 100.0 for x in percentiles], dtype=np.float32)
result = np.zeros((len(percentiles), location_data.shape[0]), dtype=np.float32)
self._rescale_shape_parameters(location_data, np.sqrt(scale_data))
percentile_method = self.distribution(
*self.shape_parameters, loc=location_data, scale=np.sqrt(scale_data)
)
# Loop over percentiles, and use the distribution as the
# "percentile_method" with the location and scale parameter to
# calculate the values at each percentile.
for index, percentile in enumerate(percentiles):
percentile_list = np.repeat(percentile, len(location_data))
result[index, :] = percentile_method.ppf(percentile_list)
# If percent point function (PPF) returns NaNs, fill in
# mean instead of NaN values. NaN will only be generated if the
# variance is zero. Therefore, if the variance is zero, the mean
# value is used for all gridpoints with a NaN.
if np.any(scale_data == 0):
nan_index = np.argwhere(np.isnan(result[index, :]))
result[index, nan_index] = location_data[nan_index]
if np.any(np.isnan(result)):
msg = (
"NaNs are present within the result for the {} "
"percentile. Unable to calculate the percent point "
"function."
)
                raise ValueError(msg.format(percentile * 100))
# Convert percentiles back into percentages.
percentiles = [x * 100.0 for x in percentiles]
# Reshape forecast_at_percentiles, so the percentiles dimension is
# first, and any other dimension coordinates follow.
result = result.reshape((len(percentiles),) + location_parameter.data.shape)
for prob_coord_name in ["realization", "percentile"]:
if template_cube.coords(prob_coord_name, dim_coords=True):
prob_coord = template_cube.coord(prob_coord_name)
template_slice = next(template_cube.slices_over(prob_coord))
template_slice.remove_coord(prob_coord)
percentile_cube = create_cube_with_percentiles(
percentiles, template_slice, result
)
# Define a mask to be reapplied later
mask = np.logical_or(
np.ma.getmaskarray(location_parameter.data),
np.ma.getmaskarray(scale_parameter.data),
)
# Make the mask defined above fit the data size and then apply to the
# percentile cube.
mask_array = np.stack([mask] * len(percentiles))
percentile_cube.data = np.ma.masked_where(mask_array, percentile_cube.data)
# Remove cell methods associated with finding the ensemble mean
percentile_cube.cell_methods = {}
return percentile_cube
def process(
self,
location_parameter,
scale_parameter,
template_cube,
no_of_percentiles=None,
percentiles=None,
):
if no_of_percentiles and percentiles:
msg = (
"Please specify either the number of percentiles or "
"provide a list of percentiles. The number of percentiles "
"provided was {} and the list of percentiles "
"provided was {}".format(no_of_percentiles, percentiles)
)
raise ValueError(msg)
if no_of_percentiles:
percentiles = choose_set_of_percentiles(no_of_percentiles)
calibrated_forecast_percentiles = self._location_and_scale_parameters_to_percentiles(
location_parameter, scale_parameter, template_cube, percentiles
)
return calibrated_forecast_percentiles
class ConvertLocationAndScaleParametersToProbabilities(
BasePlugin, ConvertLocationAndScaleParameters
):
def __repr__(self):
result = (
"<ConvertLocationAndScaleParametersToProbabilities: "
"distribution: {}; shape_parameters: {}>"
)
return result.format(self.distribution.name, self.shape_parameters)
def _check_template_cube(self, cube):
check_for_x_and_y_axes(cube, require_dim_coords=True)
dim_coords = get_dim_coord_names(cube)
msg = (
"{} expects a cube with only a leading threshold dimension, "
"followed by spatial (y/x) dimensions. "
"Got dimensions: {}".format(self.__class__.__name__, dim_coords)
)
try:
threshold_coord = find_threshold_coordinate(cube)
except CoordinateNotFoundError:
raise ValueError(msg)
if len(dim_coords) < 4:
enforce_coordinate_ordering(cube, threshold_coord.name())
return
raise ValueError(msg)
@staticmethod
def _check_unit_compatibility(
location_parameter, scale_parameter, probability_cube_template
):
threshold_units = find_threshold_coordinate(probability_cube_template).units
try:
location_parameter.convert_units(threshold_units)
scale_parameter.convert_units(threshold_units ** 2)
except ValueError as err:
msg = (
"Error: {} This is likely because the mean "
"variance and template cube threshold units are "
"not equivalent/compatible.".format(err)
)
raise ValueError(msg)
def _location_and_scale_parameters_to_probabilities(
self, location_parameter, scale_parameter, probability_cube_template
):
# Define a mask to be reapplied later
loc_mask = np.ma.getmaskarray(location_parameter.data)
scale_mask = np.ma.getmaskarray(scale_parameter.data)
mask = np.logical_or(loc_mask, scale_mask)
# Remove any mask that may be applied to location and scale parameters
# and replace with ones
location_parameter.data = np.ma.filled(location_parameter.data, 1)
scale_parameter.data = np.ma.filled(scale_parameter.data, 1)
thresholds = find_threshold_coordinate(probability_cube_template).points
relative_to_threshold = find_threshold_coordinate(
probability_cube_template
).attributes["spp__relative_to_threshold"]
self._rescale_shape_parameters(
location_parameter.data.flatten(), np.sqrt(scale_parameter.data).flatten()
)
# Loop over thresholds, and use the specified distribution with the
# location and scale parameter to calculate the probabilities relative
# to each threshold.
probabilities = np.empty_like(probability_cube_template.data)
distribution = self.distribution(
*self.shape_parameters,
loc=location_parameter.data.flatten(),
scale=np.sqrt(scale_parameter.data.flatten()),
)
probability_method = distribution.cdf
if relative_to_threshold == "above":
probability_method = distribution.sf
for index, threshold in enumerate(thresholds):
# pylint: disable=unsubscriptable-object
probabilities[index, ...] = np.reshape(
probability_method(threshold), probabilities.shape[1:]
)
probability_cube = probability_cube_template.copy(data=probabilities)
# Make the mask defined above fit the data size and then apply to the
# probability cube.
mask_array = np.array([mask] * len(probabilities))
probability_cube.data = np.ma.masked_where(mask_array, probability_cube.data)
return probability_cube
def process(self, location_parameter, scale_parameter, probability_cube_template):
self._check_template_cube(probability_cube_template)
self._check_unit_compatibility(
location_parameter, scale_parameter, probability_cube_template
)
probability_cube = self._location_and_scale_parameters_to_probabilities(
location_parameter, scale_parameter, probability_cube_template
)
return probability_cube
class EnsembleReordering(BasePlugin):
@staticmethod
def _recycle_raw_ensemble_realizations(
post_processed_forecast_percentiles,
raw_forecast_realizations,
percentile_coord_name,
):
plen = len(
post_processed_forecast_percentiles.coord(percentile_coord_name).points
)
mlen = len(raw_forecast_realizations.coord("realization").points)
if plen == mlen:
pass
else:
raw_forecast_realizations_extended = iris.cube.CubeList()
realization_list = []
mpoints = raw_forecast_realizations.coord("realization").points
            # Loop over the number of percentiles and find the corresponding
            # ensemble realization number. The ensemble realization numbers
            # are recycled e.g. 1, 2, 3, 1, 2, 3, etc.
for index in range(plen):
realization_list.append(mpoints[index % len(mpoints)])
# Assume that the ensemble realizations are ascending linearly.
new_realization_numbers = realization_list[0] + list(range(plen))
# Extract the realizations required in the realization_list from
# the raw_forecast_realizations. Edit the realization number as
# appropriate and append to a cubelist containing rebadged
# raw ensemble realizations.
for realization, index in zip(realization_list, new_realization_numbers):
constr = iris.Constraint(realization=realization)
raw_forecast_realization = raw_forecast_realizations.extract(constr)
raw_forecast_realization.coord("realization").points = index
raw_forecast_realizations_extended.append(raw_forecast_realization)
raw_forecast_realizations = MergeCubes()(
raw_forecast_realizations_extended, slice_over_realization=True
)
return raw_forecast_realizations
@staticmethod
def rank_ecc(
post_processed_forecast_percentiles,
raw_forecast_realizations,
random_ordering=False,
random_seed=None,
):
results = iris.cube.CubeList([])
for rawfc, calfc in zip(
raw_forecast_realizations.slices_over("time"),
post_processed_forecast_percentiles.slices_over("time"),
):
if random_seed is not None:
random_seed = int(random_seed)
random_seed = np.random.RandomState(random_seed)
random_data = random_seed.rand(*rawfc.data.shape)
if random_ordering:
# Returns the indices that would sort the array.
# As these indices are from a random dataset, only an argsort
# is used.
ranking = np.argsort(random_data, axis=0)
else:
# Lexsort returns the indices sorted firstly by the
# primary key, the raw forecast data (unless random_ordering
# is enabled), and secondly by the secondary key, an array of
# random data, in order to split tied values randomly.
sorting_index = np.lexsort((random_data, rawfc.data), axis=0)
# Returns the indices that would sort the array.
ranking = np.argsort(sorting_index, axis=0)
# Index the post-processed forecast data using the ranking array.
# The following uses a custom choose function that reproduces the
# required elements of the np.choose method without the limitation
# of having < 32 arrays or a leading dimension < 32 in the
# input data array. This function allows indexing of a 3d array
# using a 3d array.
mask = np.ma.getmask(calfc.data)
calfc.data = choose(ranking, calfc.data)
if mask is not np.ma.nomask:
calfc.data = np.ma.MaskedArray(calfc.data, mask, dtype=np.float32)
results.append(calfc)
            # Ensure we haven't lost any dimensional coordinates with only one value in.
results = results.merge_cube()
results = check_cube_coordinates(post_processed_forecast_percentiles, results)
return results
def process(
self,
post_processed_forecast,
raw_forecast,
random_ordering=False,
random_seed=None,
):
percentile_coord_name = find_percentile_coordinate(
post_processed_forecast
).name()
enforce_coordinate_ordering(post_processed_forecast, percentile_coord_name)
enforce_coordinate_ordering(raw_forecast, "realization")
raw_forecast = self._recycle_raw_ensemble_realizations(
post_processed_forecast, raw_forecast, percentile_coord_name
)
post_processed_forecast_realizations = self.rank_ecc(
post_processed_forecast,
raw_forecast,
random_ordering=random_ordering,
random_seed=random_seed,
)
plugin = RebadgePercentilesAsRealizations()
post_processed_forecast_realizations = plugin(
post_processed_forecast_realizations
)
enforce_coordinate_ordering(post_processed_forecast_realizations, "realization")
return post_processed_forecast_realizations
| true
| true
|
1c45a614492dc6ca48e3d950527282f5ff9aa377
| 784
|
py
|
Python
|
examples/dagster_examples/intro_tutorial/config.py
|
bambielli-flex/dagster
|
30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
|
[
"Apache-2.0"
] | null | null | null |
examples/dagster_examples/intro_tutorial/config.py
|
bambielli-flex/dagster
|
30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
|
[
"Apache-2.0"
] | null | null | null |
examples/dagster_examples/intro_tutorial/config.py
|
bambielli-flex/dagster
|
30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
# py27 compat
from dagster import Field, PipelineDefinition, execute_pipeline, solid, types
@solid(config_field=Field(types.String, is_optional=True, default_value='en-us'))
def configurable_hello(context):
if len(context.solid_config) >= 3 and context.solid_config[:3] == 'haw':
return 'Aloha honua!'
elif len(context.solid_config) >= 2 and context.solid_config[:2] == 'cn':
return '你好, 世界!'
else:
return 'Hello, world!'
def define_configurable_hello_pipeline():
return PipelineDefinition(name='configurable_hello_pipeline', solids=[configurable_hello])
def test_intro_tutorial_part_four():
execute_pipeline(
define_configurable_hello_pipeline(), {'solids': {'configurable_hello': {'config': 'cn'}}}
)
| 31.36
| 98
| 0.714286
|
from dagster import Field, PipelineDefinition, execute_pipeline, solid, types
@solid(config_field=Field(types.String, is_optional=True, default_value='en-us'))
def configurable_hello(context):
if len(context.solid_config) >= 3 and context.solid_config[:3] == 'haw':
return 'Aloha honua!'
elif len(context.solid_config) >= 2 and context.solid_config[:2] == 'cn':
return '你好, 世界!'
else:
return 'Hello, world!'
def define_configurable_hello_pipeline():
return PipelineDefinition(name='configurable_hello_pipeline', solids=[configurable_hello])
def test_intro_tutorial_part_four():
execute_pipeline(
define_configurable_hello_pipeline(), {'solids': {'configurable_hello': {'config': 'cn'}}}
)
| true
| true
|
1c45a68d0192fabe44b1195622b98bb7d5868d24
| 3,238
|
py
|
Python
|
kaplot/astro/wcsgrid.py
|
maartenbreddels/kaplot
|
305026209f8026094d54373e14541f4f039501d5
|
[
"MIT"
] | null | null | null |
kaplot/astro/wcsgrid.py
|
maartenbreddels/kaplot
|
305026209f8026094d54373e14541f4f039501d5
|
[
"MIT"
] | null | null | null |
kaplot/astro/wcsgrid.py
|
maartenbreddels/kaplot
|
305026209f8026094d54373e14541f4f039501d5
|
[
"MIT"
] | null | null | null |
from kaplot.objects import PlotObject
import numarray
import kaplot
import kaplot.context
import kaplot.vector
class WcsGrid(PlotObject):
def __init__(self, xticks, yticks, projection, longitudeoffset, lock=True, context=None, **kwargs):
PlotObject.__init__(self, lock=False, context=kaplot.context.mergeDicts(context, kwargs))
self.xticks = xticks
self.yticks = yticks
self.projection = projection
self.longitudeoffset = longitudeoffset
self.context = kaplot.context.buildContext(kwargs)
self.callback = self.notifyChange
self.context.addWeakListener(self.callback)
if lock:
self._lock()
def plot(self, device):
#xticks = self.xticks
#yticks = self.yticks
#xmask = (xticks >= lomin) == (xticks <= lomax)
#ymask = (yticks >= lamin) == (yticks <= lamax)
#xticks = compress(xmask, xticks)
#yticks = compress(ymask, yticks)
#la
#yticks = arange(lamin, lamax, lagran)
#xticks = arange(lomin, lomax, logran)
lines = []
xticks = numarray.array(self.xticks)
yticks = numarray.array(self.yticks)
#xticks = (xticks + 180) % 360 - 180
lomin, lomax = min(xticks), max(xticks)
lamin, lamax = min(yticks), max(yticks)
logran = (lomax - lomin) / 40
lagran = (lamax - lamin) / 40
#print lomin, lomax
#print lamin, lamax
#print lomin, lomax, lamin, lamax
#print xticks, yticks
#print xticks, yticks
#print dev.transformation.transform(xticks, yticks)
#print "PHAT", lomin, lomax, len(yticks)
for latitude in yticks[:]: #arange(lamin, lamax+lagran/2, lagran):
x = numarray.arange(lomin, lomax+logran/2.0, logran)
y = numarray.zeros(len(x)) + float(latitude)
nx, ny = self.projection.forwardarray(x, y)
#print "latitude", latitude
#print "x=",x, "y=",y
#print "new"
#print "nx=",nx, "ny=",ny
nx = []
ny = []
longoffset = self.longitudeoffset
offset = 0 #(int(self.longitudeoffset) / 180) * 180
longitudebegin = -180
while ((x[0]-offset) >= (longitudebegin+longoffset)):
offset += 180
#print "offset", offset
sigma = 0.0001
for x, y in zip(x, y):
if ((x-offset) >= (longitudebegin+longoffset)):
#print "jump", longoffset
p = self.projection.forward(longitudebegin+(longoffset-sigma)-offset, y)
if p != None:
nx.append(p[0])
ny.append(p[1])
offset += (180)
if len(nx) >= 2:
#print "plot", nx, ny
device.plotPolyline(nx, ny)
nx = []
ny = []
p = self.projection.forward(longitudebegin+(longoffset+sigma)-(offset-180), y)
if p != None:
nx.append(p[0])
ny.append(p[1])
#else:
# print "no jump"
p = self.projection.forward(x, y)
if p != None:
nx.append(p[0])
ny.append(p[1])
#p = self.projection.forward(lomax, y)
#if p != None:
# nx.append(p[0])
# ny.append(p[1])
if len(nx) >= 2:
#print "plot", nx, ny
device.plotPolyline(nx, ny)
for longitude in xticks: #arange(lomin, lomax+logran/2, logran):
y = numarray.arange(lamin, lamax+lagran/2, lagran)
x = numarray.zeros(len(y)) + float(longitude)
nx, ny = self.projection.forwardarray(x, y)
device.plotPolyline(nx, ny)
#line = Polyline(x, y, linestyle="normal", linewidth=self.linewidth, color=self.color)
#lines.append(line)
| 30.261682
| 100
| 0.647931
|
from kaplot.objects import PlotObject
import numarray
import kaplot
import kaplot.context
import kaplot.vector
class WcsGrid(PlotObject):
def __init__(self, xticks, yticks, projection, longitudeoffset, lock=True, context=None, **kwargs):
PlotObject.__init__(self, lock=False, context=kaplot.context.mergeDicts(context, kwargs))
self.xticks = xticks
self.yticks = yticks
self.projection = projection
self.longitudeoffset = longitudeoffset
self.context = kaplot.context.buildContext(kwargs)
self.callback = self.notifyChange
self.context.addWeakListener(self.callback)
if lock:
self._lock()
def plot(self, device):
lines = []
xticks = numarray.array(self.xticks)
yticks = numarray.array(self.yticks)
lomin, lomax = min(xticks), max(xticks)
lamin, lamax = min(yticks), max(yticks)
logran = (lomax - lomin) / 40
lagran = (lamax - lamin) / 40
for latitude in yticks[:]:
x = numarray.arange(lomin, lomax+logran/2.0, logran)
y = numarray.zeros(len(x)) + float(latitude)
nx, ny = self.projection.forwardarray(x, y)
nx = []
ny = []
longoffset = self.longitudeoffset
offset = 0
longitudebegin = -180
while ((x[0]-offset) >= (longitudebegin+longoffset)):
offset += 180
sigma = 0.0001
for x, y in zip(x, y):
if ((x-offset) >= (longitudebegin+longoffset)):
p = self.projection.forward(longitudebegin+(longoffset-sigma)-offset, y)
if p != None:
nx.append(p[0])
ny.append(p[1])
offset += (180)
if len(nx) >= 2:
device.plotPolyline(nx, ny)
nx = []
ny = []
p = self.projection.forward(longitudebegin+(longoffset+sigma)-(offset-180), y)
if p != None:
nx.append(p[0])
ny.append(p[1])
p = self.projection.forward(x, y)
if p != None:
nx.append(p[0])
ny.append(p[1])
if len(nx) >= 2:
device.plotPolyline(nx, ny)
for longitude in xticks:
y = numarray.arange(lamin, lamax+lagran/2, lagran)
x = numarray.zeros(len(y)) + float(longitude)
nx, ny = self.projection.forwardarray(x, y)
device.plotPolyline(nx, ny)
| true
| true
|
1c45a7a78535500c62f6eb5fd46da6f909d578fb
| 1,034
|
py
|
Python
|
manage.py
|
manuelen12/test_sale
|
1d199fcfca8361edf704e0bb138a07e7d924f327
|
[
"MIT"
] | null | null | null |
manage.py
|
manuelen12/test_sale
|
1d199fcfca8361edf704e0bb138a07e7d924f327
|
[
"MIT"
] | null | null | null |
manage.py
|
manuelen12/test_sale
|
1d199fcfca8361edf704e0bb138a07e7d924f327
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# test_venta directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, 'test_venta'))
execute_from_command_line(sys.argv)
| 34.466667
| 77
| 0.658607
|
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
try:
from django.core.management import execute_from_command_line
except ImportError:
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, 'test_venta'))
execute_from_command_line(sys.argv)
| true
| true
|
1c45a7b3b9bd4e9eca083311a86129a50d7c738e
| 189
|
py
|
Python
|
tests/web_platform/CSS2/normal_flow/test_block_in_inline_insert_014_nosplit_ref.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/CSS2/normal_flow/test_block_in_inline_insert_014_nosplit_ref.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/CSS2/normal_flow/test_block_in_inline_insert_014_nosplit_ref.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | 1
|
2020-01-16T01:56:41.000Z
|
2020-01-16T01:56:41.000Z
|
from tests.utils import W3CTestCase
class TestBlockInInlineInsert014NosplitRef(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'block-in-inline-insert-014-nosplit-ref'))
| 31.5
| 93
| 0.814815
|
from tests.utils import W3CTestCase
class TestBlockInInlineInsert014NosplitRef(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'block-in-inline-insert-014-nosplit-ref'))
| true
| true
|
1c45a859a5271dffa80a1d5cc1763cd482c9913a
| 2,912
|
py
|
Python
|
test/integration_tests/test_roles.py
|
poldracklab/bids-core
|
b87a1ef2d3e1c5a79a98c0f0ba82b1b2634bce0e
|
[
"MIT"
] | 1
|
2016-03-09T01:24:02.000Z
|
2016-03-09T01:24:02.000Z
|
test/integration_tests/test_roles.py
|
poldracklab/bids-core
|
b87a1ef2d3e1c5a79a98c0f0ba82b1b2634bce0e
|
[
"MIT"
] | 15
|
2016-02-17T19:11:32.000Z
|
2018-04-12T23:33:06.000Z
|
test/integration_tests/test_roles.py
|
poldracklab/bids-core
|
b87a1ef2d3e1c5a79a98c0f0ba82b1b2634bce0e
|
[
"MIT"
] | 4
|
2017-04-05T17:34:59.000Z
|
2018-01-22T01:40:51.000Z
|
import requests
import os
import json
import time
from nose.tools import with_setup
base_url = 'http://localhost:8080/api'
adm_user = 'test@user.com'
user = 'other@user.com'
test_data = type('',(object,),{})()
def setup_db():
global session
session = requests.Session()
# all the requests will be performed as root
session.params = {
'user': adm_user,
'root': True
}
# Create a group
test_data.group_id = 'test_group_' + str(int(time.time()*1000))
payload = {
'_id': test_data.group_id
}
payload = json.dumps(payload)
r = session.post(base_url + '/groups', data=payload)
assert r.ok
payload = {
'_id': user,
'firstname': 'Other',
'lastname': 'User',
}
payload = json.dumps(payload)
r = session.post(base_url + '/users', data=payload)
assert r.ok
session.params = {}
def teardown_db():
session.params = {
'user': adm_user,
'root': True
}
r = session.delete(base_url + '/groups/' + test_data.group_id)
assert r.ok
r = session.delete(base_url + '/users/' + user)
assert r.ok
def _build_url_and_payload(method, user, access, site='local'):
url = os.path.join(base_url, 'groups', test_data.group_id, 'roles')
if method == 'POST':
payload = {
'_id': user,
'site': site,
'access': access
}
return url, json.dumps(payload)
else:
return os.path.join(url, site, user), None
@with_setup(setup_db, teardown_db)
def test_roles():
session.params = {
'user': adm_user
}
url_get, _ = _build_url_and_payload('GET', user, None)
r = session.get(url_get)
assert r.status_code == 404
url_post, payload = _build_url_and_payload('POST', user, 'rw')
r = session.post(url_post, data=payload)
assert r.ok
r = session.get(url_get)
assert r.ok
content = json.loads(r.content)
assert content['access'] == 'rw'
assert content['_id'] == user
session.params = {
'user': user
}
url_get_not_auth, _ = _build_url_and_payload('GET', adm_user, None)
r = session.get(url_get_not_auth)
assert r.status_code == 403
session.params = {
'user': adm_user
}
payload = json.dumps({'access':'admin'})
r = session.put(url_get, data=payload)
assert r.ok
session.params = {
'user': user
}
r = session.get(url_get_not_auth)
assert r.ok
session.params = {
'user': adm_user
}
payload = json.dumps({'access':'rw'})
r = session.put(url_get, data=payload)
assert r.ok
session.params = {
'user': user
}
r = session.get(url_get_not_auth)
assert r.status_code == 403
session.params = {
'user': adm_user
}
r = session.delete(url_get)
assert r.ok
r = session.get(url_get)
assert r.status_code == 404
| 25.321739
| 71
| 0.595467
|
import requests
import os
import json
import time
from nose.tools import with_setup
base_url = 'http://localhost:8080/api'
adm_user = 'test@user.com'
user = 'other@user.com'
test_data = type('',(object,),{})()
def setup_db():
global session
session = requests.Session()
session.params = {
'user': adm_user,
'root': True
}
test_data.group_id = 'test_group_' + str(int(time.time()*1000))
payload = {
'_id': test_data.group_id
}
payload = json.dumps(payload)
r = session.post(base_url + '/groups', data=payload)
assert r.ok
payload = {
'_id': user,
'firstname': 'Other',
'lastname': 'User',
}
payload = json.dumps(payload)
r = session.post(base_url + '/users', data=payload)
assert r.ok
session.params = {}
def teardown_db():
session.params = {
'user': adm_user,
'root': True
}
r = session.delete(base_url + '/groups/' + test_data.group_id)
assert r.ok
r = session.delete(base_url + '/users/' + user)
assert r.ok
def _build_url_and_payload(method, user, access, site='local'):
url = os.path.join(base_url, 'groups', test_data.group_id, 'roles')
if method == 'POST':
payload = {
'_id': user,
'site': site,
'access': access
}
return url, json.dumps(payload)
else:
return os.path.join(url, site, user), None
@with_setup(setup_db, teardown_db)
def test_roles():
session.params = {
'user': adm_user
}
url_get, _ = _build_url_and_payload('GET', user, None)
r = session.get(url_get)
assert r.status_code == 404
url_post, payload = _build_url_and_payload('POST', user, 'rw')
r = session.post(url_post, data=payload)
assert r.ok
r = session.get(url_get)
assert r.ok
content = json.loads(r.content)
assert content['access'] == 'rw'
assert content['_id'] == user
session.params = {
'user': user
}
url_get_not_auth, _ = _build_url_and_payload('GET', adm_user, None)
r = session.get(url_get_not_auth)
assert r.status_code == 403
session.params = {
'user': adm_user
}
payload = json.dumps({'access':'admin'})
r = session.put(url_get, data=payload)
assert r.ok
session.params = {
'user': user
}
r = session.get(url_get_not_auth)
assert r.ok
session.params = {
'user': adm_user
}
payload = json.dumps({'access':'rw'})
r = session.put(url_get, data=payload)
assert r.ok
session.params = {
'user': user
}
r = session.get(url_get_not_auth)
assert r.status_code == 403
session.params = {
'user': adm_user
}
r = session.delete(url_get)
assert r.ok
r = session.get(url_get)
assert r.status_code == 404
| true
| true
|
1c45a92868008d359499e2e83998919eb99a0158
| 5,916
|
py
|
Python
|
sdk/python/pulumi_azure_native/migrate/latest/group.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/migrate/latest/group.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/migrate/latest/group.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = ['Group']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:migrate:Group'.""", DeprecationWarning)
class Group(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:migrate:Group'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
e_tag: Optional[pulumi.Input[str]] = None,
group_name: Optional[pulumi.Input[str]] = None,
project_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A group created in a Migration project.
Latest API Version: 2019-10-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] e_tag: For optimistic concurrency control.
:param pulumi.Input[str] group_name: Unique name of a group within a project.
:param pulumi.Input[str] project_name: Name of the Azure Migrate project.
:param pulumi.Input[str] resource_group_name: Name of the Azure Resource Group that project is part of.
"""
pulumi.log.warn("""Group is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:migrate:Group'.""")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['e_tag'] = e_tag
__props__['group_name'] = group_name
if project_name is None and not opts.urn:
raise TypeError("Missing required property 'project_name'")
__props__['project_name'] = project_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['name'] = None
__props__['properties'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:migrate/latest:Group"), pulumi.Alias(type_="azure-native:migrate:Group"), pulumi.Alias(type_="azure-nextgen:migrate:Group"), pulumi.Alias(type_="azure-native:migrate/v20191001:Group"), pulumi.Alias(type_="azure-nextgen:migrate/v20191001:Group")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Group, __self__).__init__(
'azure-native:migrate/latest:Group',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Group':
"""
Get an existing Group resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["e_tag"] = None
__props__["name"] = None
__props__["properties"] = None
__props__["type"] = None
return Group(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
"""
For optimistic concurrency control.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the group.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.GroupPropertiesResponse']:
"""
Properties of the group.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the object = [Microsoft.Migrate/assessmentProjects/groups].
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
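A short, hedged usage sketch for the resource class above; the resource name, project and resource group below are placeholders, not values taken from this file:

import pulumi
from pulumi_azure_native.migrate.latest import Group

# Create a group inside an existing Azure Migrate assessment project.
example = Group(
    "example-group",
    group_name="example-group",
    project_name="example-assessment-project",
    resource_group_name="example-rg",
)

pulumi.export("group_name", example.name)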
| 42.869565
| 333
| 0.644523
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = ['Group']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:migrate:Group'.""", DeprecationWarning)
class Group(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:migrate:Group'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
e_tag: Optional[pulumi.Input[str]] = None,
group_name: Optional[pulumi.Input[str]] = None,
project_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
pulumi.log.warn("""Group is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:migrate:Group'.""")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['e_tag'] = e_tag
__props__['group_name'] = group_name
if project_name is None and not opts.urn:
raise TypeError("Missing required property 'project_name'")
__props__['project_name'] = project_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['name'] = None
__props__['properties'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:migrate/latest:Group"), pulumi.Alias(type_="azure-native:migrate:Group"), pulumi.Alias(type_="azure-nextgen:migrate:Group"), pulumi.Alias(type_="azure-native:migrate/v20191001:Group"), pulumi.Alias(type_="azure-nextgen:migrate/v20191001:Group")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Group, __self__).__init__(
'azure-native:migrate/latest:Group',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Group':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["e_tag"] = None
__props__["name"] = None
__props__["properties"] = None
__props__["type"] = None
return Group(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.GroupPropertiesResponse']:
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true
| true
|
1c45a98c9736d722678cfe3cb4948c956cd7f2d7
| 6,212
|
py
|
Python
|
tempest/api/object_storage/test_container_sync.py
|
azorge/tempest
|
549dfc93fb7e3d6d8566064a60a6069deae5c8eb
|
[
"Apache-2.0"
] | 1
|
2021-05-21T08:24:02.000Z
|
2021-05-21T08:24:02.000Z
|
tempest/api/object_storage/test_container_sync.py
|
azorge/tempest
|
549dfc93fb7e3d6d8566064a60a6069deae5c8eb
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/object_storage/test_container_sync.py
|
azorge/tempest
|
549dfc93fb7e3d6d8566064a60a6069deae5c8eb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from six.moves.urllib import parse as urlparse
import testtools
from tempest.api.object_storage import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest import test
CONF = config.CONF
# This test can be quite long to run due to its
# dependency on the container-sync process running interval.
# You can reduce the container-sync interval in the
# container-server configuration to speed it up.
class ContainerSyncTest(base.BaseObjectTest):
clients = {}
credentials = [['operator', CONF.object_storage.operator_role],
['operator_alt', CONF.object_storage.operator_role]]
@classmethod
def setup_credentials(cls):
super(ContainerSyncTest, cls).setup_credentials()
cls.os = cls.os_roles_operator
cls.os_alt = cls.os_roles_operator_alt
@classmethod
def setup_clients(cls):
super(ContainerSyncTest, cls).setup_clients()
cls.object_client_alt = cls.os_alt.object_client
cls.container_client_alt = cls.os_alt.container_client
@classmethod
def resource_setup(cls):
super(ContainerSyncTest, cls).resource_setup()
cls.containers = []
cls.objects = []
# Default container-server config only allows localhost
cls.local_ip = '127.0.0.1'
        # Must be configured according to the container-sync interval
container_sync_timeout = CONF.object_storage.container_sync_timeout
cls.container_sync_interval = \
CONF.object_storage.container_sync_interval
cls.attempts = \
int(container_sync_timeout / cls.container_sync_interval)
# define container and object clients
cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
(cls.container_client, cls.object_client)
cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
(cls.container_client_alt, cls.object_client_alt)
for cont_name, client in cls.clients.items():
client[0].create_container(cont_name)
cls.containers.append(cont_name)
@classmethod
def resource_cleanup(cls):
for client in cls.clients.values():
cls.delete_containers(client[0], client[1])
super(ContainerSyncTest, cls).resource_cleanup()
def _test_container_synchronization(self, make_headers):
        # Container-to-container synchronization:
        # allow/accept sync requests to/from the other account, turn
        # container synchronization on and create an object in the container.
for cont in (self.containers, self.containers[::-1]):
cont_client = [self.clients[c][0] for c in cont]
obj_client = [self.clients[c][1] for c in cont]
headers = make_headers(cont[1], cont_client[1])
resp, body = \
cont_client[0].put(str(cont[0]), body=None, headers=headers)
# create object in container
object_name = data_utils.rand_name(name='TestSyncObject')
data = object_name[::-1].encode() # Raw data, we need bytes
resp, _ = obj_client[0].create_object(cont[0], object_name, data)
self.objects.append(object_name)
# wait until container contents list is not empty
cont_client = [self.clients[c][0] for c in self.containers]
params = {'format': 'json'}
while self.attempts > 0:
object_lists = []
for c_client, cont in zip(cont_client, self.containers):
resp, object_list = c_client.list_container_contents(
cont, params=params)
object_lists.append(dict(
(obj['name'], obj) for obj in object_list))
# check that containers are not empty and have equal keys()
# or wait for next attempt
if object_lists[0] and object_lists[1] and \
set(object_lists[0].keys()) == set(object_lists[1].keys()):
break
else:
time.sleep(self.container_sync_interval)
self.attempts -= 1
self.assertEqual(object_lists[0], object_lists[1],
'Different object lists in containers.')
# Verify object content
obj_clients = [(self.clients[c][1], c) for c in self.containers]
for obj_client, cont in obj_clients:
for obj_name in object_lists[0]:
resp, object_content = obj_client.get_object(cont, obj_name)
self.assertEqual(object_content, obj_name[::-1].encode())
@test.attr(type='slow')
@decorators.skip_because(bug='1317133')
@decorators.idempotent_id('be008325-1bba-4925-b7dd-93b58f22ce9b')
@testtools.skipIf(
not CONF.object_storage_feature_enabled.container_sync,
'Old-style container sync function is disabled')
def test_container_synchronization(self):
def make_headers(cont, cont_client):
# tell first container to synchronize to a second
client_proxy_ip = \
urlparse.urlparse(cont_client.base_url).netloc.split(':')[0]
client_base_url = \
cont_client.base_url.replace(client_proxy_ip,
self.local_ip)
headers = {'X-Container-Sync-Key': 'sync_key',
'X-Container-Sync-To': "%s/%s" %
(client_base_url, str(cont))}
return headers
self._test_container_synchronization(make_headers)
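For context, a hedged sketch of what the header setup in make_headers amounts to when issued directly against a Swift endpoint; the storage URL, token and sync key are placeholders, and the same call must be repeated on the peer container (pointing back) to make the sync two-way:

import requests

def enable_container_sync(storage_url, token, container, peer_container_url,
                          sync_key='sync_key'):
    # A PUT on an existing container with these headers points it at its peer.
    headers = {
        'X-Auth-Token': token,
        'X-Container-Sync-Key': sync_key,
        'X-Container-Sync-To': peer_container_url,
    }
    return requests.put('%s/%s' % (storage_url.rstrip('/'), container),
                        headers=headers)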
| 41.413333
| 79
| 0.650193
|
import time
from six.moves.urllib import parse as urlparse
import testtools
from tempest.api.object_storage import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest import test
CONF = config.CONF
class ContainerSyncTest(base.BaseObjectTest):
clients = {}
credentials = [['operator', CONF.object_storage.operator_role],
['operator_alt', CONF.object_storage.operator_role]]
@classmethod
def setup_credentials(cls):
super(ContainerSyncTest, cls).setup_credentials()
cls.os = cls.os_roles_operator
cls.os_alt = cls.os_roles_operator_alt
@classmethod
def setup_clients(cls):
super(ContainerSyncTest, cls).setup_clients()
cls.object_client_alt = cls.os_alt.object_client
cls.container_client_alt = cls.os_alt.container_client
@classmethod
def resource_setup(cls):
super(ContainerSyncTest, cls).resource_setup()
cls.containers = []
cls.objects = []
cls.local_ip = '127.0.0.1'
container_sync_timeout = CONF.object_storage.container_sync_timeout
cls.container_sync_interval = \
CONF.object_storage.container_sync_interval
cls.attempts = \
int(container_sync_timeout / cls.container_sync_interval)
cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
(cls.container_client, cls.object_client)
cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
(cls.container_client_alt, cls.object_client_alt)
for cont_name, client in cls.clients.items():
client[0].create_container(cont_name)
cls.containers.append(cont_name)
@classmethod
def resource_cleanup(cls):
for client in cls.clients.values():
cls.delete_containers(client[0], client[1])
super(ContainerSyncTest, cls).resource_cleanup()
def _test_container_synchronization(self, make_headers):
for cont in (self.containers, self.containers[::-1]):
cont_client = [self.clients[c][0] for c in cont]
obj_client = [self.clients[c][1] for c in cont]
headers = make_headers(cont[1], cont_client[1])
resp, body = \
cont_client[0].put(str(cont[0]), body=None, headers=headers)
object_name = data_utils.rand_name(name='TestSyncObject')
data = object_name[::-1].encode()
resp, _ = obj_client[0].create_object(cont[0], object_name, data)
self.objects.append(object_name)
cont_client = [self.clients[c][0] for c in self.containers]
params = {'format': 'json'}
while self.attempts > 0:
object_lists = []
for c_client, cont in zip(cont_client, self.containers):
resp, object_list = c_client.list_container_contents(
cont, params=params)
object_lists.append(dict(
(obj['name'], obj) for obj in object_list))
if object_lists[0] and object_lists[1] and \
set(object_lists[0].keys()) == set(object_lists[1].keys()):
break
else:
time.sleep(self.container_sync_interval)
self.attempts -= 1
self.assertEqual(object_lists[0], object_lists[1],
'Different object lists in containers.')
obj_clients = [(self.clients[c][1], c) for c in self.containers]
for obj_client, cont in obj_clients:
for obj_name in object_lists[0]:
resp, object_content = obj_client.get_object(cont, obj_name)
self.assertEqual(object_content, obj_name[::-1].encode())
@test.attr(type='slow')
@decorators.skip_because(bug='1317133')
@decorators.idempotent_id('be008325-1bba-4925-b7dd-93b58f22ce9b')
@testtools.skipIf(
not CONF.object_storage_feature_enabled.container_sync,
'Old-style container sync function is disabled')
def test_container_synchronization(self):
def make_headers(cont, cont_client):
client_proxy_ip = \
urlparse.urlparse(cont_client.base_url).netloc.split(':')[0]
client_base_url = \
cont_client.base_url.replace(client_proxy_ip,
self.local_ip)
headers = {'X-Container-Sync-Key': 'sync_key',
'X-Container-Sync-To': "%s/%s" %
(client_base_url, str(cont))}
return headers
self._test_container_synchronization(make_headers)
| true
| true
|
1c45ab721c9d7842215f9675276f0e2745f79bac
| 14,462
|
py
|
Python
|
external/workload-automation/wa/framework/signal.py
|
qais-yousef/lisa
|
8343e26bf0565589928a69ccbe67b1be03403db7
|
[
"Apache-2.0"
] | 1
|
2020-11-30T16:14:02.000Z
|
2020-11-30T16:14:02.000Z
|
external/workload-automation/wa/framework/signal.py
|
qais-yousef/lisa
|
8343e26bf0565589928a69ccbe67b1be03403db7
|
[
"Apache-2.0"
] | null | null | null |
external/workload-automation/wa/framework/signal.py
|
qais-yousef/lisa
|
8343e26bf0565589928a69ccbe67b1be03403db7
|
[
"Apache-2.0"
] | 1
|
2020-10-09T11:40:00.000Z
|
2020-10-09T11:40:00.000Z
|
# Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module wraps the louie signalling mechanism. It relies on a modified version of
louie that has prioritization added to handler invocation.
"""
import sys
import logging
from contextlib import contextmanager
import wrapt
from louie import dispatcher # pylint: disable=wrong-import-order
from wa.utils.types import prioritylist, enum
logger = logging.getLogger('signal')
class Signal(object):
"""
    This class implements the signals to be used for notifying callbacks
registered to respond to different states and stages of the execution of workload
automation.
"""
def __init__(self, name, description='no description', invert_priority=False):
"""
Instantiates a Signal.
:param name: name is the identifier of the Signal object. Signal instances with
                     the same name refer to the same execution state/stage.
:param invert_priority: boolean parameter that determines whether multiple
callbacks for the same signal should be
ordered with ascending or descending
priorities. Typically this flag should be
set to True if the Signal is triggered
                                AFTER a state/stage has been reached.
That way callbacks with high priorities
will be called right after the event has
                                occurred.
"""
self.name = name
self.description = description
self.invert_priority = invert_priority
def __str__(self):
return self.name
__repr__ = __str__
def __hash__(self):
return id(self.name)
# Signals associated with run-related events
RUN_STARTED = Signal('run-started', 'sent at the beginning of the run')
RUN_INITIALIZED = Signal('run-initialized', 'set after the run has been initialized')
RUN_ABORTED = Signal('run-aborted', 'set when the run has been aborted due to a keyboard interrupt')
RUN_FAILED = Signal('run-failed', 'set if the run has failed to complete all jobs.')
RUN_FINALIZED = Signal('run-finalized', 'set after the run has been finalized')
RUN_COMPLETED = Signal('run-completed', 'set upon completion of the run (regardless of whether or not it has failed)')
# Signals associated with job-related events
JOB_STARTED = Signal('job-started', 'set when a new job has been started')
JOB_ABORTED = Signal('job-aborted',
description='''
sent if a job has been aborted due to a keyboard interrupt.
.. note:: While the status of every job that has not had a
chance to run due to being interrupted will be
set to "ABORTED", this signal will only be sent
for the job that was actually running at the
time.
''')
JOB_FAILED = Signal('job-failed', description='set if the job has failed')
JOB_RESTARTED = Signal('job-restarted')
JOB_COMPLETED = Signal('job-completed')
# Signals associated with particular stages of workload execution
BEFORE_WORKLOAD_INITIALIZED = Signal('before-workload-initialized',
invert_priority=True)
SUCCESSFUL_WORKLOAD_INITIALIZED = Signal('successful-workload-initialized')
AFTER_WORKLOAD_INITIALIZED = Signal('after-workload-initialized')
BEFORE_WORKLOAD_SETUP = Signal('before-workload-setup', invert_priority=True)
SUCCESSFUL_WORKLOAD_SETUP = Signal('successful-workload-setup')
AFTER_WORKLOAD_SETUP = Signal('after-workload-setup')
BEFORE_WORKLOAD_EXECUTION = Signal('before-workload-execution', invert_priority=True)
SUCCESSFUL_WORKLOAD_EXECUTION = Signal('successful-workload-execution')
AFTER_WORKLOAD_EXECUTION = Signal('after-workload-execution')
BEFORE_WORKLOAD_RESULT_EXTRACTION = Signal('before-workload-result-extracton', invert_priority=True)
SUCCESSFUL_WORKLOAD_RESULT_EXTRACTION = Signal('successful-workload-result-extracton')
AFTER_WORKLOAD_RESULT_EXTRACTION = Signal('after-workload-result-extracton')
BEFORE_WORKLOAD_OUTPUT_UPDATE = Signal('before-workload-output-update',
invert_priority=True)
SUCCESSFUL_WORKLOAD_OUTPUT_UPDATE = Signal('successful-workload-output-update')
AFTER_WORKLOAD_OUTPUT_UPDATE = Signal('after-workload-output-update')
BEFORE_WORKLOAD_TEARDOWN = Signal('before-workload-teardown', invert_priority=True)
SUCCESSFUL_WORKLOAD_TEARDOWN = Signal('successful-workload-teardown')
AFTER_WORKLOAD_TEARDOWN = Signal('after-workload-teardown')
BEFORE_WORKLOAD_FINALIZED = Signal('before-workload-finalized', invert_priority=True)
SUCCESSFUL_WORKLOAD_FINALIZED = Signal('successful-workload-finalized')
AFTER_WORKLOAD_FINALIZED = Signal('after-workload-finalized')
# Signals indicating exceptional conditions
ERROR_LOGGED = Signal('error-logged')
WARNING_LOGGED = Signal('warning-logged')
# These are paired events -- if the before_event is sent, the after_ signal is
# guaranteed to also be sent. In particular, the after_ signals will be sent
# even if there is an error, so you cannot assume in the handler that the
# device has booted successfully. In most cases, you should instead use the
# non-paired signals below.
BEFORE_RUN_INIT = Signal('before-run-init', invert_priority=True)
SUCCESSFUL_RUN_INIT = Signal('successful-run-init')
AFTER_RUN_INIT = Signal('after-run-init')
BEFORE_JOB = Signal('before-job', invert_priority=True)
SUCCESSFUL_JOB = Signal('successful-job')
AFTER_JOB = Signal('after-job')
BEFORE_JOB_QUEUE_EXECUTION = Signal('before-job-queue-execution', invert_priority=True)
SUCCESSFUL_JOB_QUEUE_EXECUTION = Signal('successful-job-queue-execution')
AFTER_JOB_QUEUE_EXECUTION = Signal('after-job-queue-execution')
BEFORE_JOB_TARGET_CONFIG = Signal('before-job-target-config', invert_priority=True)
SUCCESSFUL_JOB_TARGET_CONFIG = Signal('successful-job-target-config')
AFTER_JOB_TARGET_CONFIG = Signal('after-job-target-config')
BEFORE_JOB_OUTPUT_PROCESSED = Signal('before-job-output-processed',
invert_priority=True)
SUCCESSFUL_JOB_OUTPUT_PROCESSED = Signal('successful-job-output-processed')
AFTER_JOB_OUTPUT_PROCESSED = Signal('after-job-output-processed')
BEFORE_FLASHING = Signal('before-flashing', invert_priority=True)
SUCCESSFUL_FLASHING = Signal('successful-flashing')
AFTER_FLASHING = Signal('after-flashing')
BEFORE_REBOOT = Signal('before-reboot', invert_priority=True)
SUCCESSFUL_REBOOT = Signal('successful-reboot')
AFTER_REBOOT = Signal('after-reboot')
BEFORE_TARGET_CONNECT = Signal('before-target-connect', invert_priority=True)
SUCCESSFUL_TARGET_CONNECT = Signal('successful-target-connect')
AFTER_TARGET_CONNECT = Signal('after-target-connect')
BEFORE_TARGET_DISCONNECT = Signal('before-target-disconnect', invert_priority=True)
SUCCESSFUL_TARGET_DISCONNECT = Signal('successful-target-disconnect')
AFTER_TARGET_DISCONNECT = Signal('after-target-disconnect')
BEFORE_RUN_OUTPUT_PROCESSED = Signal(
'before-run-output-processed', invert_priority=True)
SUCCESSFUL_RUN_OUTPUT_PROCESSED = Signal(
'successful-run-output-processed')
AFTER_RUN_OUTPUT_PROCESSED = Signal(
'after-run-output-processed')
CallbackPriority = enum(['extremely_low', 'very_low', 'low', 'normal',
'high', 'very_high', 'extremely_high'], -30, 10)
class _prioritylist_wrapper(prioritylist):
"""
This adds a NOP append() method so that when louie invokes it to add the
handler to receivers, nothing will happen; the handler is actually added inside
the connect() below according to priority, before louie's connect() gets invoked.
"""
def append(self, *args, **kwargs):
pass
def connect(handler, signal, sender=dispatcher.Any, priority=0):
"""
Connects a callback to a signal, so that the callback will be automatically invoked
when that signal is sent.
Parameters:
        :handler: This can be any callable that takes the right arguments for
the signal. For most signals this means a single argument that
will be an ``ExecutionContext`` instance. But please see documentation
for individual signals in the :ref:`signals reference <instruments_method_map>`.
:signal: The signal to which the handler will be subscribed. Please see
:ref:`signals reference <instruments_method_map>` for the list of standard WA
signals.
.. note:: There is nothing that prevents instruments from sending their
own signals that are not part of the standard set. However the signal
must always be an :class:`wa.core.signal.Signal` instance.
:sender: The handler will be invoked only for the signals emitted by this sender. By
default, this is set to :class:`louie.dispatcher.Any`, so the handler will
be invoked for signals from any sender.
        :priority: An integer (positive or negative) that specifies the priority of the handler.
Handlers with higher priority will be called before handlers with lower
priority. The call order of handlers with the same priority is not specified.
Defaults to 0.
.. note:: Priorities for some signals are inverted (so highest priority
handlers get executed last). Please see :ref:`signals reference <instruments_method_map>`
for details.
"""
logger.debug('Connecting {} to {}({}) with priority {}'.format(handler, signal, sender, priority))
if getattr(signal, 'invert_priority', False):
priority = -priority
senderkey = id(sender)
if senderkey in dispatcher.connections:
signals = dispatcher.connections[senderkey]
else:
dispatcher.connections[senderkey] = signals = {}
if signal in signals:
receivers = signals[signal]
else:
receivers = signals[signal] = _prioritylist_wrapper()
receivers.add(handler, priority)
dispatcher.connect(handler, signal, sender)
def disconnect(handler, signal, sender=dispatcher.Any):
"""
    Disconnect a previously connected handler from the specified signal, optionally, only
for the specified sender.
Parameters:
:handler: The callback to be disconnected.
        :signal: The signal the handler is to be disconnected from. It will
be an :class:`wa.core.signal.Signal` instance.
:sender: If specified, the handler will only be disconnected from the signal
sent by this sender.
"""
logger.debug('Disconnecting {} from {}({})'.format(handler, signal, sender))
dispatcher.disconnect(handler, signal, sender)
def send(signal, sender=dispatcher.Anonymous, *args, **kwargs):
"""
Sends a signal, causing connected handlers to be invoked.
    Parameters:
:signal: Signal to be sent. This must be an instance of :class:`wa.core.signal.Signal`
or its subclasses.
:sender: The sender of the signal (typically, this would be ``self``). Some handlers may only
be subscribed to signals from a particular sender.
    The rest of the parameters will be passed on as arguments to the handler.
"""
logger.debug('Sending {} from {}'.format(signal, sender))
return dispatcher.send(signal, sender, *args, **kwargs)
# This will normally be set to log_error() by init_logging(); see wa.utils.log
# Done this way to prevent a circular import dependency.
log_error_func = logger.error
def safe_send(signal, sender=dispatcher.Anonymous,
propagate=None, *args, **kwargs):
"""
Same as ``send``, except this will catch and log all exceptions raised
    by handlers, except those specified in the ``propagate`` argument (defaults
to just ``[KeyboardInterrupt]``).
"""
if propagate is None:
propagate = [KeyboardInterrupt]
try:
logger.debug('Safe-sending {} from {}'.format(signal, sender))
send(signal, sender, *args, **kwargs)
except Exception as e: # pylint: disable=broad-except
if any(isinstance(e, p) for p in propagate):
raise e
log_error_func(e)
@contextmanager
def wrap(signal_name, sender=dispatcher.Anonymous, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg
"""Wraps the suite in before/after signals, ensuring
that after signal is always sent."""
safe = kwargs.pop('safe', False)
signal_name = signal_name.upper().replace('-', '_')
send_func = safe_send if safe else send
try:
before_signal = globals()['BEFORE_' + signal_name]
success_signal = globals()['SUCCESSFUL_' + signal_name]
after_signal = globals()['AFTER_' + signal_name]
except KeyError:
raise ValueError('Invalid wrapped signal name: {}'.format(signal_name))
try:
send_func(before_signal, sender, *args, **kwargs)
yield
send_func(success_signal, sender, *args, **kwargs)
finally:
_, exc, _ = sys.exc_info()
if exc:
log_error_func(exc)
send_func(after_signal, sender, *args, **kwargs)
def wrapped(signal_name, sender=dispatcher.Anonymous, safe=False):
"""A decorator for wrapping function in signal dispatch."""
@wrapt.decorator
def signal_wrapped(wrapped_func, _, args, kwargs):
def signal_wrapper(*args, **kwargs):
with wrap(signal_name, sender, safe):
return wrapped_func(*args, **kwargs)
return signal_wrapper(*args, **kwargs)
return signal_wrapped
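A hedged usage sketch of the connect/send API documented above; the handler, sender and priority value are illustrative only:

from wa.framework import signal

def on_job_started(*args, **kwargs):
    # Called whenever JOB_STARTED is sent; real handlers usually take the
    # execution context passed along by the sender.
    print('job started')

# Higher-priority handlers run earlier; the order flips for signals created
# with invert_priority=True.
signal.connect(on_job_started, signal.JOB_STARTED, priority=10)
signal.send(signal.JOB_STARTED, sender=object())
signal.disconnect(on_job_started, signal.JOB_STARTED)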
| 42.163265
| 118
| 0.691675
|
import sys
import logging
from contextlib import contextmanager
import wrapt
from louie import dispatcher
from wa.utils.types import prioritylist, enum
logger = logging.getLogger('signal')
class Signal(object):
def __init__(self, name, description='no description', invert_priority=False):
self.name = name
self.description = description
self.invert_priority = invert_priority
def __str__(self):
return self.name
__repr__ = __str__
def __hash__(self):
return id(self.name)
RUN_STARTED = Signal('run-started', 'sent at the beginning of the run')
RUN_INITIALIZED = Signal('run-initialized', 'set after the run has been initialized')
RUN_ABORTED = Signal('run-aborted', 'set when the run has been aborted due to a keyboard interrupt')
RUN_FAILED = Signal('run-failed', 'set if the run has failed to complete all jobs.')
RUN_FINALIZED = Signal('run-finalized', 'set after the run has been finalized')
RUN_COMPLETED = Signal('run-completed', 'set upon completion of the run (regardless of whether or not it has failed)')
JOB_STARTED = Signal('job-started', 'set when a new job has been started')
JOB_ABORTED = Signal('job-aborted',
description='''
sent if a job has been aborted due to a keyboard interrupt.
.. note:: While the status of every job that has not had a
chance to run due to being interrupted will be
set to "ABORTED", this signal will only be sent
for the job that was actually running at the
time.
''')
JOB_FAILED = Signal('job-failed', description='set if the job has failed')
JOB_RESTARTED = Signal('job-restarted')
JOB_COMPLETED = Signal('job-completed')
BEFORE_WORKLOAD_INITIALIZED = Signal('before-workload-initialized',
invert_priority=True)
SUCCESSFUL_WORKLOAD_INITIALIZED = Signal('successful-workload-initialized')
AFTER_WORKLOAD_INITIALIZED = Signal('after-workload-initialized')
BEFORE_WORKLOAD_SETUP = Signal('before-workload-setup', invert_priority=True)
SUCCESSFUL_WORKLOAD_SETUP = Signal('successful-workload-setup')
AFTER_WORKLOAD_SETUP = Signal('after-workload-setup')
BEFORE_WORKLOAD_EXECUTION = Signal('before-workload-execution', invert_priority=True)
SUCCESSFUL_WORKLOAD_EXECUTION = Signal('successful-workload-execution')
AFTER_WORKLOAD_EXECUTION = Signal('after-workload-execution')
BEFORE_WORKLOAD_RESULT_EXTRACTION = Signal('before-workload-result-extracton', invert_priority=True)
SUCCESSFUL_WORKLOAD_RESULT_EXTRACTION = Signal('successful-workload-result-extracton')
AFTER_WORKLOAD_RESULT_EXTRACTION = Signal('after-workload-result-extracton')
BEFORE_WORKLOAD_OUTPUT_UPDATE = Signal('before-workload-output-update',
invert_priority=True)
SUCCESSFUL_WORKLOAD_OUTPUT_UPDATE = Signal('successful-workload-output-update')
AFTER_WORKLOAD_OUTPUT_UPDATE = Signal('after-workload-output-update')
BEFORE_WORKLOAD_TEARDOWN = Signal('before-workload-teardown', invert_priority=True)
SUCCESSFUL_WORKLOAD_TEARDOWN = Signal('successful-workload-teardown')
AFTER_WORKLOAD_TEARDOWN = Signal('after-workload-teardown')
BEFORE_WORKLOAD_FINALIZED = Signal('before-workload-finalized', invert_priority=True)
SUCCESSFUL_WORKLOAD_FINALIZED = Signal('successful-workload-finalized')
AFTER_WORKLOAD_FINALIZED = Signal('after-workload-finalized')
ERROR_LOGGED = Signal('error-logged')
WARNING_LOGGED = Signal('warning-logged')
BEFORE_RUN_INIT = Signal('before-run-init', invert_priority=True)
SUCCESSFUL_RUN_INIT = Signal('successful-run-init')
AFTER_RUN_INIT = Signal('after-run-init')
BEFORE_JOB = Signal('before-job', invert_priority=True)
SUCCESSFUL_JOB = Signal('successful-job')
AFTER_JOB = Signal('after-job')
BEFORE_JOB_QUEUE_EXECUTION = Signal('before-job-queue-execution', invert_priority=True)
SUCCESSFUL_JOB_QUEUE_EXECUTION = Signal('successful-job-queue-execution')
AFTER_JOB_QUEUE_EXECUTION = Signal('after-job-queue-execution')
BEFORE_JOB_TARGET_CONFIG = Signal('before-job-target-config', invert_priority=True)
SUCCESSFUL_JOB_TARGET_CONFIG = Signal('successful-job-target-config')
AFTER_JOB_TARGET_CONFIG = Signal('after-job-target-config')
BEFORE_JOB_OUTPUT_PROCESSED = Signal('before-job-output-processed',
invert_priority=True)
SUCCESSFUL_JOB_OUTPUT_PROCESSED = Signal('successful-job-output-processed')
AFTER_JOB_OUTPUT_PROCESSED = Signal('after-job-output-processed')
BEFORE_FLASHING = Signal('before-flashing', invert_priority=True)
SUCCESSFUL_FLASHING = Signal('successful-flashing')
AFTER_FLASHING = Signal('after-flashing')
BEFORE_REBOOT = Signal('before-reboot', invert_priority=True)
SUCCESSFUL_REBOOT = Signal('successful-reboot')
AFTER_REBOOT = Signal('after-reboot')
BEFORE_TARGET_CONNECT = Signal('before-target-connect', invert_priority=True)
SUCCESSFUL_TARGET_CONNECT = Signal('successful-target-connect')
AFTER_TARGET_CONNECT = Signal('after-target-connect')
BEFORE_TARGET_DISCONNECT = Signal('before-target-disconnect', invert_priority=True)
SUCCESSFUL_TARGET_DISCONNECT = Signal('successful-target-disconnect')
AFTER_TARGET_DISCONNECT = Signal('after-target-disconnect')
BEFORE_RUN_OUTPUT_PROCESSED = Signal(
'before-run-output-processed', invert_priority=True)
SUCCESSFUL_RUN_OUTPUT_PROCESSED = Signal(
'successful-run-output-processed')
AFTER_RUN_OUTPUT_PROCESSED = Signal(
'after-run-output-processed')
CallbackPriority = enum(['extremely_low', 'very_low', 'low', 'normal',
'high', 'very_high', 'extremely_high'], -30, 10)
class _prioritylist_wrapper(prioritylist):
def append(self, *args, **kwargs):
pass
def connect(handler, signal, sender=dispatcher.Any, priority=0):
logger.debug('Connecting {} to {}({}) with priority {}'.format(handler, signal, sender, priority))
if getattr(signal, 'invert_priority', False):
priority = -priority
senderkey = id(sender)
if senderkey in dispatcher.connections:
signals = dispatcher.connections[senderkey]
else:
dispatcher.connections[senderkey] = signals = {}
if signal in signals:
receivers = signals[signal]
else:
receivers = signals[signal] = _prioritylist_wrapper()
receivers.add(handler, priority)
dispatcher.connect(handler, signal, sender)
def disconnect(handler, signal, sender=dispatcher.Any):
logger.debug('Disconnecting {} from {}({})'.format(handler, signal, sender))
dispatcher.disconnect(handler, signal, sender)
def send(signal, sender=dispatcher.Anonymous, *args, **kwargs):
logger.debug('Sending {} from {}'.format(signal, sender))
return dispatcher.send(signal, sender, *args, **kwargs)
log_error_func = logger.error
def safe_send(signal, sender=dispatcher.Anonymous,
propagate=None, *args, **kwargs):
if propagate is None:
propagate = [KeyboardInterrupt]
try:
logger.debug('Safe-sending {} from {}'.format(signal, sender))
send(signal, sender, *args, **kwargs)
except Exception as e:
if any(isinstance(e, p) for p in propagate):
raise e
log_error_func(e)
@contextmanager
def wrap(signal_name, sender=dispatcher.Anonymous, *args, **kwargs):
safe = kwargs.pop('safe', False)
signal_name = signal_name.upper().replace('-', '_')
send_func = safe_send if safe else send
try:
before_signal = globals()['BEFORE_' + signal_name]
success_signal = globals()['SUCCESSFUL_' + signal_name]
after_signal = globals()['AFTER_' + signal_name]
except KeyError:
raise ValueError('Invalid wrapped signal name: {}'.format(signal_name))
try:
send_func(before_signal, sender, *args, **kwargs)
yield
send_func(success_signal, sender, *args, **kwargs)
finally:
_, exc, _ = sys.exc_info()
if exc:
log_error_func(exc)
send_func(after_signal, sender, *args, **kwargs)
def wrapped(signal_name, sender=dispatcher.Anonymous, safe=False):
@wrapt.decorator
def signal_wrapped(wrapped_func, _, args, kwargs):
def signal_wrapper(*args, **kwargs):
with wrap(signal_name, sender, safe):
return wrapped_func(*args, **kwargs)
return signal_wrapper(*args, **kwargs)
return signal_wrapped
| true
| true
|
1c45ac250287c61459664f4104f27b4fea00e83d
| 61
|
py
|
Python
|
language-python-test/test/features/comprehensions/set_comprehension.py
|
wbadart/language-python
|
6c048c215ff7fe4a5d5cc36ba3c17a666af74821
|
[
"BSD-3-Clause"
] | null | null | null |
language-python-test/test/features/comprehensions/set_comprehension.py
|
wbadart/language-python
|
6c048c215ff7fe4a5d5cc36ba3c17a666af74821
|
[
"BSD-3-Clause"
] | null | null | null |
language-python-test/test/features/comprehensions/set_comprehension.py
|
wbadart/language-python
|
6c048c215ff7fe4a5d5cc36ba3c17a666af74821
|
[
"BSD-3-Clause"
] | null | null | null |
{ x + y for x in [1,2,3] if x > 1 for y in [4,5,6] if y < 6}
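A worked check of the comprehension above: x ranges over {2, 3} and y over {4, 5}, so the sums form the set {6, 7, 8}.

assert { x + y for x in [1,2,3] if x > 1 for y in [4,5,6] if y < 6} == {6, 7, 8}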
| 30.5
| 60
| 0.459016
|
{ x + y for x in [1,2,3] if x > 1 for y in [4,5,6] if y < 6}
| true
| true
|
1c45ad4927dd2f22598e965b4d772bbae5f47434
| 1,172
|
py
|
Python
|
tests/api/ils/eitems/test_eitems_crud.py
|
NRodriguezcuellar/invenio-app-ils
|
144a25a6c56330b214c6fd0b832220fa71f2e68a
|
[
"MIT"
] | 41
|
2018-09-04T13:00:46.000Z
|
2022-03-24T20:45:56.000Z
|
tests/api/ils/eitems/test_eitems_crud.py
|
NRodriguezcuellar/invenio-app-ils
|
144a25a6c56330b214c6fd0b832220fa71f2e68a
|
[
"MIT"
] | 720
|
2017-03-10T08:02:41.000Z
|
2022-01-14T15:36:37.000Z
|
tests/api/ils/eitems/test_eitems_crud.py
|
NRodriguezcuellar/invenio-app-ils
|
144a25a6c56330b214c6fd0b832220fa71f2e68a
|
[
"MIT"
] | 54
|
2017-03-09T16:05:29.000Z
|
2022-03-17T08:34:51.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
#
# Invenio-Circulation is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Tests eitems CRUD."""
import pytest
from invenio_app_ils.eitems.api import EItem
from invenio_app_ils.errors import DocumentNotFoundError
def test_eitem_refs(app, testdata):
"""Test creation of an eitem."""
eitem = EItem.create(
dict(
pid="eitemid-99",
document_pid="docid-1",
created_by=dict(type="script", value="demo"),
)
)
assert "$schema" in eitem
assert "document" in eitem and "$ref" in eitem["document"]
eitem = EItem.get_record_by_pid("eitemid-4")
eitem = eitem.replace_refs()
assert "document" in eitem and eitem["document"]["title"]
def test_eitem_validation(db, testdata):
"""Test validation when updating an eitem."""
eitem_pid = testdata["eitems"][0]["pid"]
eitem = EItem.get_record_by_pid(eitem_pid)
# change document pid
eitem["document_pid"] = "not_found_doc"
with pytest.raises(DocumentNotFoundError):
eitem.commit()
| 27.904762
| 77
| 0.669795
|
import pytest
from invenio_app_ils.eitems.api import EItem
from invenio_app_ils.errors import DocumentNotFoundError
def test_eitem_refs(app, testdata):
eitem = EItem.create(
dict(
pid="eitemid-99",
document_pid="docid-1",
created_by=dict(type="script", value="demo"),
)
)
assert "$schema" in eitem
assert "document" in eitem and "$ref" in eitem["document"]
eitem = EItem.get_record_by_pid("eitemid-4")
eitem = eitem.replace_refs()
assert "document" in eitem and eitem["document"]["title"]
def test_eitem_validation(db, testdata):
eitem_pid = testdata["eitems"][0]["pid"]
eitem = EItem.get_record_by_pid(eitem_pid)
eitem["document_pid"] = "not_found_doc"
with pytest.raises(DocumentNotFoundError):
eitem.commit()
| true
| true
|
1c45ad5c3147af9dff358391d91445cf2f8d76bf
| 3,131
|
py
|
Python
|
from_cpython/Lib/test/test_normalization.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 9
|
2015-04-15T10:58:49.000Z
|
2018-09-24T09:11:33.000Z
|
Lib/test/test_normalization.py
|
odsod/cpython-internals-course
|
55fffca28e83ac0f30029c60113a3110451dfa08
|
[
"PSF-2.0"
] | 2
|
2020-02-17T22:31:09.000Z
|
2020-02-18T04:31:55.000Z
|
Lib/test/test_normalization.py
|
odsod/cpython-internals-course
|
55fffca28e83ac0f30029c60113a3110451dfa08
|
[
"PSF-2.0"
] | 9
|
2015-03-13T18:27:27.000Z
|
2018-12-03T15:38:51.000Z
|
from test.test_support import run_unittest, open_urlresource
import unittest
from httplib import HTTPException
import sys
import os
from unicodedata import normalize, unidata_version
TESTDATAFILE = "NormalizationTest.txt"
TESTDATAURL = "http://www.unicode.org/Public/" + unidata_version + "/ucd/" + TESTDATAFILE
def check_version(testfile):
hdr = testfile.readline()
return unidata_version in hdr
class RangeError(Exception):
pass
def NFC(str):
return normalize("NFC", str)
def NFKC(str):
return normalize("NFKC", str)
def NFD(str):
return normalize("NFD", str)
def NFKD(str):
return normalize("NFKD", str)
def unistr(data):
data = [int(x, 16) for x in data.split(" ")]
for x in data:
if x > sys.maxunicode:
raise RangeError
return u"".join([unichr(x) for x in data])
class NormalizationTest(unittest.TestCase):
def test_main(self):
part = None
part1_data = {}
# Hit the exception early
try:
testdata = open_urlresource(TESTDATAURL, check_version)
except (IOError, HTTPException):
self.skipTest("Could not retrieve " + TESTDATAURL)
for line in testdata:
if '#' in line:
line = line.split('#')[0]
line = line.strip()
if not line:
continue
if line.startswith("@Part"):
part = line.split()[0]
continue
try:
c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
except RangeError:
# Skip unsupported characters;
# try at least adding c1 if we are in part1
if part == "@Part1":
try:
c1 = unistr(line.split(';')[0])
except RangeError:
pass
else:
part1_data[c1] = 1
continue
# Perform tests
self.assertTrue(c2 == NFC(c1) == NFC(c2) == NFC(c3), line)
self.assertTrue(c4 == NFC(c4) == NFC(c5), line)
self.assertTrue(c3 == NFD(c1) == NFD(c2) == NFD(c3), line)
self.assertTrue(c5 == NFD(c4) == NFD(c5), line)
self.assertTrue(c4 == NFKC(c1) == NFKC(c2) == \
NFKC(c3) == NFKC(c4) == NFKC(c5),
line)
self.assertTrue(c5 == NFKD(c1) == NFKD(c2) == \
NFKD(c3) == NFKD(c4) == NFKD(c5),
line)
# Record part 1 data
if part == "@Part1":
part1_data[c1] = 1
# Perform tests for all other data
for c in range(sys.maxunicode+1):
X = unichr(c)
if X in part1_data:
continue
self.assertTrue(X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c)
def test_bug_834676(self):
# Check for bug 834676
normalize('NFC', u'\ud55c\uae00')
def test_main():
run_unittest(NormalizationTest)
if __name__ == "__main__":
test_main()
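A small hedged illustration of the invariants exercised above, using a precomposed/decomposed pair and a compatibility character:

from unicodedata import normalize

# U+00E9 (e with acute) and u'e' + U+0301 (combining acute) are canonically equivalent.
assert normalize('NFC', u'e\u0301') == u'\xe9'
assert normalize('NFD', u'\xe9') == u'e\u0301'
# U+2460 (CIRCLED DIGIT ONE) only folds to '1' under the compatibility (K) forms.
assert normalize('NFKC', u'\u2460') == u'1'
assert normalize('NFC', u'\u2460') == u'\u2460'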
| 30.398058
| 89
| 0.516448
|
from test.test_support import run_unittest, open_urlresource
import unittest
from httplib import HTTPException
import sys
import os
from unicodedata import normalize, unidata_version
TESTDATAFILE = "NormalizationTest.txt"
TESTDATAURL = "http://www.unicode.org/Public/" + unidata_version + "/ucd/" + TESTDATAFILE
def check_version(testfile):
hdr = testfile.readline()
return unidata_version in hdr
class RangeError(Exception):
pass
def NFC(str):
return normalize("NFC", str)
def NFKC(str):
return normalize("NFKC", str)
def NFD(str):
return normalize("NFD", str)
def NFKD(str):
return normalize("NFKD", str)
def unistr(data):
data = [int(x, 16) for x in data.split(" ")]
for x in data:
if x > sys.maxunicode:
raise RangeError
return u"".join([unichr(x) for x in data])
class NormalizationTest(unittest.TestCase):
def test_main(self):
part = None
part1_data = {}
try:
testdata = open_urlresource(TESTDATAURL, check_version)
except (IOError, HTTPException):
self.skipTest("Could not retrieve " + TESTDATAURL)
for line in testdata:
if '#' in line:
line = line.split('#')[0]
line = line.strip()
if not line:
continue
if line.startswith("@Part"):
part = line.split()[0]
continue
try:
c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
except RangeError:
if part == "@Part1":
try:
c1 = unistr(line.split(';')[0])
except RangeError:
pass
else:
part1_data[c1] = 1
continue
self.assertTrue(c2 == NFC(c1) == NFC(c2) == NFC(c3), line)
self.assertTrue(c4 == NFC(c4) == NFC(c5), line)
self.assertTrue(c3 == NFD(c1) == NFD(c2) == NFD(c3), line)
self.assertTrue(c5 == NFD(c4) == NFD(c5), line)
self.assertTrue(c4 == NFKC(c1) == NFKC(c2) == \
NFKC(c3) == NFKC(c4) == NFKC(c5),
line)
self.assertTrue(c5 == NFKD(c1) == NFKD(c2) == \
NFKD(c3) == NFKD(c4) == NFKD(c5),
line)
if part == "@Part1":
part1_data[c1] = 1
for c in range(sys.maxunicode+1):
X = unichr(c)
if X in part1_data:
continue
self.assertTrue(X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c)
def test_bug_834676(self):
normalize('NFC', u'\ud55c\uae00')
def test_main():
run_unittest(NormalizationTest)
if __name__ == "__main__":
test_main()
| true
| true
|
1c45af1163ca30e3f1de7ee012519613a5a4350b
| 66,206
|
py
|
Python
|
test/test_datasets.py
|
CellEight/vision
|
e8dded4c05ee403633529cef2e09bf94b07f6170
|
[
"BSD-3-Clause"
] | 1
|
2021-04-12T09:42:25.000Z
|
2021-04-12T09:42:25.000Z
|
test/test_datasets.py
|
mvpzhangqiu/vision
|
e8dded4c05ee403633529cef2e09bf94b07f6170
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_datasets.py
|
mvpzhangqiu/vision
|
e8dded4c05ee403633529cef2e09bf94b07f6170
|
[
"BSD-3-Clause"
] | null | null | null |
import contextlib
import sys
import os
import unittest
from unittest import mock
import numpy as np
import PIL
from PIL import Image
from torch._utils_internal import get_file_path_2
import torchvision
from torchvision.datasets import utils
from common_utils import get_tmp_dir
from fakedata_generation import svhn_root, places365_root, widerface_root, stl10_root
import xml.etree.ElementTree as ET
from urllib.request import Request, urlopen
import itertools
import datasets_utils
import pathlib
import pickle
from torchvision import datasets
import torch
import shutil
import json
import random
import bz2
import torch.nn.functional as F
import string
import io
import zipfile
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
try:
import av
HAS_PYAV = True
except ImportError:
HAS_PYAV = False
class DatasetTestcase(unittest.TestCase):
def generic_classification_dataset_test(self, dataset, num_images=1):
self.assertEqual(len(dataset), num_images)
img, target = dataset[0]
self.assertTrue(isinstance(img, PIL.Image.Image))
self.assertTrue(isinstance(target, int))
def generic_segmentation_dataset_test(self, dataset, num_images=1):
self.assertEqual(len(dataset), num_images)
img, target = dataset[0]
self.assertTrue(isinstance(img, PIL.Image.Image))
self.assertTrue(isinstance(target, PIL.Image.Image))
class Tester(DatasetTestcase):
@mock.patch('torchvision.datasets.SVHN._check_integrity')
@unittest.skipIf(not HAS_SCIPY, "scipy unavailable")
def test_svhn(self, mock_check):
mock_check.return_value = True
with svhn_root() as root:
dataset = torchvision.datasets.SVHN(root, split="train")
self.generic_classification_dataset_test(dataset, num_images=2)
dataset = torchvision.datasets.SVHN(root, split="test")
self.generic_classification_dataset_test(dataset, num_images=2)
dataset = torchvision.datasets.SVHN(root, split="extra")
self.generic_classification_dataset_test(dataset, num_images=2)
def test_places365(self):
for split, small in itertools.product(("train-standard", "train-challenge", "val"), (False, True)):
with places365_root(split=split, small=small) as places365:
root, data = places365
dataset = torchvision.datasets.Places365(root, split=split, small=small, download=True)
self.generic_classification_dataset_test(dataset, num_images=len(data["imgs"]))
def test_places365_transforms(self):
expected_image = "image"
expected_target = "target"
def transform(image):
return expected_image
def target_transform(target):
return expected_target
with places365_root() as places365:
root, data = places365
dataset = torchvision.datasets.Places365(
root, transform=transform, target_transform=target_transform, download=True
)
actual_image, actual_target = dataset[0]
self.assertEqual(actual_image, expected_image)
self.assertEqual(actual_target, expected_target)
def test_places365_devkit_download(self):
for split in ("train-standard", "train-challenge", "val"):
with self.subTest(split=split):
with places365_root(split=split) as places365:
root, data = places365
dataset = torchvision.datasets.Places365(root, split=split, download=True)
with self.subTest("classes"):
self.assertSequenceEqual(dataset.classes, data["classes"])
with self.subTest("class_to_idx"):
self.assertDictEqual(dataset.class_to_idx, data["class_to_idx"])
with self.subTest("imgs"):
self.assertSequenceEqual(dataset.imgs, data["imgs"])
def test_places365_devkit_no_download(self):
for split in ("train-standard", "train-challenge", "val"):
with self.subTest(split=split):
with places365_root(split=split) as places365:
root, data = places365
with self.assertRaises(RuntimeError):
torchvision.datasets.Places365(root, split=split, download=False)
def test_places365_images_download(self):
for split, small in itertools.product(("train-standard", "train-challenge", "val"), (False, True)):
with self.subTest(split=split, small=small):
with places365_root(split=split, small=small) as places365:
root, data = places365
dataset = torchvision.datasets.Places365(root, split=split, small=small, download=True)
assert all(os.path.exists(item[0]) for item in dataset.imgs)
def test_places365_images_download_preexisting(self):
split = "train-standard"
small = False
images_dir = "data_large_standard"
with places365_root(split=split, small=small) as places365:
root, data = places365
os.mkdir(os.path.join(root, images_dir))
with self.assertRaises(RuntimeError):
torchvision.datasets.Places365(root, split=split, small=small, download=True)
def test_places365_repr_smoke(self):
with places365_root() as places365:
root, data = places365
dataset = torchvision.datasets.Places365(root, download=True)
self.assertIsInstance(repr(dataset), str)
class STL10Tester(DatasetTestcase):
@contextlib.contextmanager
def mocked_root(self):
with stl10_root() as (root, data):
yield root, data
@contextlib.contextmanager
def mocked_dataset(self, pre_extract=False, download=True, **kwargs):
with self.mocked_root() as (root, data):
if pre_extract:
utils.extract_archive(os.path.join(root, data["archive"]))
dataset = torchvision.datasets.STL10(root, download=download, **kwargs)
yield dataset, data
def test_not_found(self):
with self.assertRaises(RuntimeError):
with self.mocked_dataset(download=False):
pass
def test_splits(self):
for split in ('train', 'train+unlabeled', 'unlabeled', 'test'):
with self.mocked_dataset(split=split) as (dataset, data):
num_images = sum([data["num_images_in_split"][part] for part in split.split("+")])
self.generic_classification_dataset_test(dataset, num_images=num_images)
def test_folds(self):
for fold in range(10):
with self.mocked_dataset(split="train", folds=fold) as (dataset, data):
num_images = data["num_images_in_folds"][fold]
self.assertEqual(len(dataset), num_images)
def test_invalid_folds1(self):
with self.assertRaises(ValueError):
with self.mocked_dataset(folds=10):
pass
def test_invalid_folds2(self):
with self.assertRaises(ValueError):
with self.mocked_dataset(folds="0"):
pass
def test_transforms(self):
expected_image = "image"
expected_target = "target"
def transform(image):
return expected_image
def target_transform(target):
return expected_target
with self.mocked_dataset(transform=transform, target_transform=target_transform) as (dataset, _):
actual_image, actual_target = dataset[0]
self.assertEqual(actual_image, expected_image)
self.assertEqual(actual_target, expected_target)
def test_unlabeled(self):
with self.mocked_dataset(split="unlabeled") as (dataset, _):
labels = [dataset[idx][1] for idx in range(len(dataset))]
self.assertTrue(all([label == -1 for label in labels]))
@unittest.mock.patch("torchvision.datasets.stl10.download_and_extract_archive")
def test_download_preexisting(self, mock):
with self.mocked_dataset(pre_extract=True) as (dataset, data):
mock.assert_not_called()
def test_repr_smoke(self):
with self.mocked_dataset() as (dataset, _):
self.assertIsInstance(repr(dataset), str)
class Caltech101TestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.Caltech101
FEATURE_TYPES = (PIL.Image.Image, (int, np.ndarray, tuple))
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(
target_type=("category", "annotation", ["category", "annotation"])
)
REQUIRED_PACKAGES = ("scipy",)
def inject_fake_data(self, tmpdir, config):
root = pathlib.Path(tmpdir) / "caltech101"
images = root / "101_ObjectCategories"
annotations = root / "Annotations"
categories = (("Faces", "Faces_2"), ("helicopter", "helicopter"), ("ying_yang", "ying_yang"))
num_images_per_category = 2
for image_category, annotation_category in categories:
datasets_utils.create_image_folder(
root=images,
name=image_category,
file_name_fn=lambda idx: f"image_{idx + 1:04d}.jpg",
num_examples=num_images_per_category,
)
self._create_annotation_folder(
root=annotations,
name=annotation_category,
file_name_fn=lambda idx: f"annotation_{idx + 1:04d}.mat",
num_examples=num_images_per_category,
)
# This is included in the original archive, but is removed by the dataset. Thus, an empty directory suffices.
os.makedirs(images / "BACKGROUND_Google")
return num_images_per_category * len(categories)
def _create_annotation_folder(self, root, name, file_name_fn, num_examples):
root = pathlib.Path(root) / name
os.makedirs(root)
for idx in range(num_examples):
self._create_annotation_file(root, file_name_fn(idx))
def _create_annotation_file(self, root, name):
mdict = dict(obj_contour=torch.rand((2, torch.randint(3, 6, size=())), dtype=torch.float64).numpy())
datasets_utils.lazy_importer.scipy.io.savemat(str(pathlib.Path(root) / name), mdict)
def test_combined_targets(self):
target_types = ["category", "annotation"]
individual_targets = []
for target_type in target_types:
with self.create_dataset(target_type=target_type) as (dataset, _):
_, target = dataset[0]
individual_targets.append(target)
with self.create_dataset(target_type=target_types) as (dataset, _):
_, combined_targets = dataset[0]
actual = len(individual_targets)
expected = len(combined_targets)
self.assertEqual(
actual,
expected,
f"The number of the returned combined targets does not match the the number targets if requested "
f"individually: {actual} != {expected}",
)
for target_type, combined_target, individual_target in zip(target_types, combined_targets, individual_targets):
with self.subTest(target_type=target_type):
actual = type(combined_target)
expected = type(individual_target)
self.assertIs(
actual,
expected,
f"Type of the combined target does not match the type of the corresponding individual target: "
f"{actual} is not {expected}",
)
class Caltech256TestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.Caltech256
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir) / "caltech256" / "256_ObjectCategories"
categories = ((1, "ak47"), (127, "laptop-101"), (257, "clutter"))
num_images_per_category = 2
for idx, category in categories:
datasets_utils.create_image_folder(
tmpdir,
name=f"{idx:03d}.{category}",
file_name_fn=lambda image_idx: f"{idx:03d}_{image_idx + 1:04d}.jpg",
num_examples=num_images_per_category,
)
return num_images_per_category * len(categories)
class WIDERFaceTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.WIDERFace
FEATURE_TYPES = (PIL.Image.Image, (dict, type(None))) # test split returns None as target
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(split=('train', 'val', 'test'))
def inject_fake_data(self, tmpdir, config):
widerface_dir = pathlib.Path(tmpdir) / 'widerface'
annotations_dir = widerface_dir / 'wider_face_split'
os.makedirs(annotations_dir)
split_to_idx = split_to_num_examples = {
"train": 1,
"val": 2,
"test": 3,
}
# We need to create all folders regardless of the split in config
for split in ('train', 'val', 'test'):
split_idx = split_to_idx[split]
num_examples = split_to_num_examples[split]
datasets_utils.create_image_folder(
root=tmpdir,
name=widerface_dir / f'WIDER_{split}' / 'images' / '0--Parade',
file_name_fn=lambda image_idx: f"0_Parade_marchingband_1_{split_idx + image_idx}.jpg",
num_examples=num_examples,
)
annotation_file_name = {
'train': annotations_dir / 'wider_face_train_bbx_gt.txt',
'val': annotations_dir / 'wider_face_val_bbx_gt.txt',
'test': annotations_dir / 'wider_face_test_filelist.txt',
}[split]
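            # Fake annotation blocks follow the WIDER FACE layout: the relative image path, the number of faces,
            # and one bounding box line (x, y, w, h plus six attribute flags) per face; the test split file only
            # lists the image paths.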
annotation_content = {
"train": "".join(
f"0--Parade/0_Parade_marchingband_1_{split_idx + image_idx}.jpg\n1\n449 330 122 149 0 0 0 0 0 0\n"
for image_idx in range(num_examples)
),
"val": "".join(
f"0--Parade/0_Parade_marchingband_1_{split_idx + image_idx}.jpg\n1\n501 160 285 443 0 0 0 0 0 0\n"
for image_idx in range(num_examples)
),
"test": "".join(
f"0--Parade/0_Parade_marchingband_1_{split_idx + image_idx}.jpg\n"
for image_idx in range(num_examples)
),
}[split]
with open(annotation_file_name, "w") as annotation_file:
annotation_file.write(annotation_content)
return split_to_num_examples[config["split"]]
class CityScapesTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.Cityscapes
TARGET_TYPES = (
"instance",
"semantic",
"polygon",
"color",
)
ADDITIONAL_CONFIGS = (
*datasets_utils.combinations_grid(
mode=("fine",), split=("train", "test", "val"), target_type=TARGET_TYPES
),
*datasets_utils.combinations_grid(
mode=("coarse",),
split=("train", "train_extra", "val"),
target_type=TARGET_TYPES,
),
)
FEATURE_TYPES = (PIL.Image.Image, (dict, PIL.Image.Image))
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
mode_to_splits = {
"Coarse": ["train", "train_extra", "val"],
"Fine": ["train", "test", "val"],
}
if config["split"] == "train": # just for coverage of the number of samples
cities = ["bochum", "bremen"]
else:
cities = ["bochum"]
polygon_target = {
"imgHeight": 1024,
"imgWidth": 2048,
"objects": [
{
"label": "sky",
"polygon": [
[1241, 0],
[1234, 156],
[1478, 197],
[1611, 172],
[1606, 0],
],
},
{
"label": "road",
"polygon": [
[0, 448],
[1331, 274],
[1473, 265],
[2047, 605],
[2047, 1023],
[0, 1023],
],
},
],
}
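        # Mimic the Cityscapes directory layout: gtCoarse/gtFine hold the *_instanceIds.png, *_labelIds.png,
        # *_color.png and *_polygons.json targets per split and city, while leftImg8bit holds the input images.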
for mode in ["Coarse", "Fine"]:
gt_dir = tmpdir / f"gt{mode}"
for split in mode_to_splits[mode]:
for city in cities:
def make_image(name, size=10):
datasets_utils.create_image_folder(
root=gt_dir / split,
name=city,
file_name_fn=lambda _: name,
size=size,
num_examples=1,
)
make_image(f"{city}_000000_000000_gt{mode}_instanceIds.png")
make_image(f"{city}_000000_000000_gt{mode}_labelIds.png")
make_image(f"{city}_000000_000000_gt{mode}_color.png", size=(4, 10, 10))
polygon_target_name = gt_dir / split / city / f"{city}_000000_000000_gt{mode}_polygons.json"
with open(polygon_target_name, "w") as outfile:
json.dump(polygon_target, outfile)
# Create leftImg8bit folder
for split in ['test', 'train_extra', 'train', 'val']:
for city in cities:
datasets_utils.create_image_folder(
root=tmpdir / "leftImg8bit" / split,
name=city,
file_name_fn=lambda _: f"{city}_000000_000000_leftImg8bit.png",
num_examples=1,
)
info = {'num_examples': len(cities)}
if config['target_type'] == 'polygon':
info['expected_polygon_target'] = polygon_target
return info
def test_combined_targets(self):
target_types = ['semantic', 'polygon', 'color']
with self.create_dataset(target_type=target_types) as (dataset, _):
output = dataset[0]
self.assertTrue(isinstance(output, tuple))
self.assertTrue(len(output) == 2)
self.assertTrue(isinstance(output[0], PIL.Image.Image))
self.assertTrue(isinstance(output[1], tuple))
self.assertTrue(len(output[1]) == 3)
self.assertTrue(isinstance(output[1][0], PIL.Image.Image)) # semantic
self.assertTrue(isinstance(output[1][1], dict)) # polygon
self.assertTrue(isinstance(output[1][2], PIL.Image.Image)) # color
def test_feature_types_target_color(self):
with self.create_dataset(target_type='color') as (dataset, _):
color_img, color_target = dataset[0]
self.assertTrue(isinstance(color_img, PIL.Image.Image))
self.assertTrue(np.array(color_target).shape[2] == 4)
def test_feature_types_target_polygon(self):
with self.create_dataset(target_type='polygon') as (dataset, info):
polygon_img, polygon_target = dataset[0]
self.assertTrue(isinstance(polygon_img, PIL.Image.Image))
self.assertEqual(polygon_target, info['expected_polygon_target'])
class ImageNetTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.ImageNet
REQUIRED_PACKAGES = ('scipy',)
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(split=('train', 'val'))
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
wnid = 'n01234567'
if config['split'] == 'train':
num_examples = 3
datasets_utils.create_image_folder(
root=tmpdir,
name=tmpdir / 'train' / wnid / wnid,
file_name_fn=lambda image_idx: f"{wnid}_{image_idx}.JPEG",
num_examples=num_examples,
)
else:
num_examples = 1
datasets_utils.create_image_folder(
root=tmpdir,
name=tmpdir / 'val' / wnid,
                file_name_fn=lambda image_idx: f"ILSVRC2012_val_0000000{image_idx}.JPEG",
num_examples=num_examples,
)
wnid_to_classes = {wnid: [1]}
torch.save((wnid_to_classes, None), tmpdir / 'meta.bin')
return num_examples
class CIFAR10TestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.CIFAR10
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))
_VERSION_CONFIG = dict(
base_folder="cifar-10-batches-py",
train_files=tuple(f"data_batch_{idx}" for idx in range(1, 6)),
test_files=("test_batch",),
labels_key="labels",
meta_file="batches.meta",
num_categories=10,
categories_key="label_names",
)
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir) / self._VERSION_CONFIG["base_folder"]
os.makedirs(tmpdir)
num_images_per_file = 1
for name in itertools.chain(self._VERSION_CONFIG["train_files"], self._VERSION_CONFIG["test_files"]):
self._create_batch_file(tmpdir, name, num_images_per_file)
categories = self._create_meta_file(tmpdir)
return dict(
num_examples=num_images_per_file
* len(self._VERSION_CONFIG["train_files"] if config["train"] else self._VERSION_CONFIG["test_files"]),
categories=categories,
)
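    # Each fake batch file is a pickled dict holding a flat image array under "data" and the integer labels under
    # the version-specific labels key ("labels" for CIFAR10, "fine_labels" for CIFAR100).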
def _create_batch_file(self, root, name, num_images):
data = datasets_utils.create_image_or_video_tensor((num_images, 32 * 32 * 3))
labels = np.random.randint(0, self._VERSION_CONFIG["num_categories"], size=num_images).tolist()
self._create_binary_file(root, name, {"data": data, self._VERSION_CONFIG["labels_key"]: labels})
def _create_meta_file(self, root):
categories = [
f"{idx:0{len(str(self._VERSION_CONFIG['num_categories'] - 1))}d}"
for idx in range(self._VERSION_CONFIG["num_categories"])
]
self._create_binary_file(
root, self._VERSION_CONFIG["meta_file"], {self._VERSION_CONFIG["categories_key"]: categories}
)
return categories
def _create_binary_file(self, root, name, content):
with open(pathlib.Path(root) / name, "wb") as fh:
pickle.dump(content, fh)
def test_class_to_idx(self):
with self.create_dataset() as (dataset, info):
expected = {category: label for label, category in enumerate(info["categories"])}
actual = dataset.class_to_idx
self.assertEqual(actual, expected)
class CIFAR100(CIFAR10TestCase):
DATASET_CLASS = datasets.CIFAR100
_VERSION_CONFIG = dict(
base_folder="cifar-100-python",
train_files=("train",),
test_files=("test",),
labels_key="fine_labels",
meta_file="meta",
num_categories=100,
categories_key="fine_label_names",
)
class CelebATestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.CelebA
FEATURE_TYPES = (PIL.Image.Image, (torch.Tensor, int, tuple, type(None)))
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(
split=("train", "valid", "test", "all"),
target_type=("attr", "identity", "bbox", "landmarks", ["attr", "identity"]),
)
REQUIRED_PACKAGES = ("pandas",)
_SPLIT_TO_IDX = dict(train=0, valid=1, test=2)
def inject_fake_data(self, tmpdir, config):
base_folder = pathlib.Path(tmpdir) / "celeba"
os.makedirs(base_folder)
num_images, num_images_per_split = self._create_split_txt(base_folder)
datasets_utils.create_image_folder(
base_folder, "img_align_celeba", lambda idx: f"{idx + 1:06d}.jpg", num_images
)
attr_names = self._create_attr_txt(base_folder, num_images)
self._create_identity_txt(base_folder, num_images)
self._create_bbox_txt(base_folder, num_images)
self._create_landmarks_txt(base_folder, num_images)
return dict(num_examples=num_images_per_split[config["split"]], attr_names=attr_names)
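    # list_eval_partition.txt assigns every image to a split via an integer index (0=train, 1=valid, 2=test);
    # the "all" split simply spans the whole file.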
def _create_split_txt(self, root):
num_images_per_split = dict(train=3, valid=2, test=1)
data = [
[self._SPLIT_TO_IDX[split]] for split, num_images in num_images_per_split.items() for _ in range(num_images)
]
self._create_txt(root, "list_eval_partition.txt", data)
num_images_per_split["all"] = num_images = sum(num_images_per_split.values())
return num_images, num_images_per_split
def _create_attr_txt(self, root, num_images):
header = ("5_o_Clock_Shadow", "Young")
data = torch.rand((num_images, len(header))).ge(0.5).int().mul(2).sub(1).tolist()
self._create_txt(root, "list_attr_celeba.txt", data, header=header, add_num_examples=True)
return header
def _create_identity_txt(self, root, num_images):
data = torch.randint(1, 4, size=(num_images, 1)).tolist()
self._create_txt(root, "identity_CelebA.txt", data)
def _create_bbox_txt(self, root, num_images):
header = ("x_1", "y_1", "width", "height")
data = torch.randint(10, size=(num_images, len(header))).tolist()
self._create_txt(
root, "list_bbox_celeba.txt", data, header=header, add_num_examples=True, add_image_id_to_header=True
)
def _create_landmarks_txt(self, root, num_images):
header = ("lefteye_x", "rightmouth_y")
data = torch.randint(10, size=(num_images, len(header))).tolist()
self._create_txt(root, "list_landmarks_align_celeba.txt", data, header=header, add_num_examples=True)
def _create_txt(self, root, name, data, header=None, add_num_examples=False, add_image_id_to_header=False):
with open(pathlib.Path(root) / name, "w") as fh:
if add_num_examples:
fh.write(f"{len(data)}\n")
if header:
if add_image_id_to_header:
header = ("image_id", *header)
fh.write(f"{' '.join(header)}\n")
for idx, line in enumerate(data, 1):
fh.write(f"{' '.join((f'{idx:06d}.jpg', *[str(value) for value in line]))}\n")
def test_combined_targets(self):
target_types = ["attr", "identity", "bbox", "landmarks"]
individual_targets = []
for target_type in target_types:
with self.create_dataset(target_type=target_type) as (dataset, _):
_, target = dataset[0]
individual_targets.append(target)
with self.create_dataset(target_type=target_types) as (dataset, _):
_, combined_targets = dataset[0]
actual = len(individual_targets)
expected = len(combined_targets)
self.assertEqual(
actual,
expected,
f"The number of the returned combined targets does not match the the number targets if requested "
f"individually: {actual} != {expected}",
)
for target_type, combined_target, individual_target in zip(target_types, combined_targets, individual_targets):
with self.subTest(target_type=target_type):
actual = type(combined_target)
expected = type(individual_target)
self.assertIs(
actual,
expected,
f"Type of the combined target does not match the type of the corresponding individual target: "
f"{actual} is not {expected}",
)
def test_no_target(self):
with self.create_dataset(target_type=[]) as (dataset, _):
_, target = dataset[0]
self.assertIsNone(target)
def test_attr_names(self):
with self.create_dataset() as (dataset, info):
self.assertEqual(tuple(dataset.attr_names), info["attr_names"])
class VOCSegmentationTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.VOCSegmentation
FEATURE_TYPES = (PIL.Image.Image, PIL.Image.Image)
ADDITIONAL_CONFIGS = (
*datasets_utils.combinations_grid(
year=[f"20{year:02d}" for year in range(7, 13)], image_set=("train", "val", "trainval")
),
dict(year="2007", image_set="test"),
dict(year="2007-test", image_set="test"),
)
def inject_fake_data(self, tmpdir, config):
year, is_test_set = (
("2007", True)
if config["year"] == "2007-test" or config["image_set"] == "test"
else (config["year"], False)
)
image_set = config["image_set"]
base_dir = pathlib.Path(tmpdir)
if year == "2011":
base_dir /= "TrainVal"
base_dir = base_dir / "VOCdevkit" / f"VOC{year}"
os.makedirs(base_dir)
num_images, num_images_per_image_set = self._create_image_set_files(base_dir, "ImageSets", is_test_set)
datasets_utils.create_image_folder(base_dir, "JPEGImages", lambda idx: f"{idx:06d}.jpg", num_images)
datasets_utils.create_image_folder(base_dir, "SegmentationClass", lambda idx: f"{idx:06d}.png", num_images)
annotation = self._create_annotation_files(base_dir, "Annotations", num_images)
return dict(num_examples=num_images_per_image_set[image_set], annotation=annotation)
def _create_image_set_files(self, root, name, is_test_set):
root = pathlib.Path(root) / name
src = pathlib.Path(root) / "Main"
os.makedirs(src, exist_ok=True)
idcs = dict(train=(0, 1, 2), val=(3, 4), test=(5,))
idcs["trainval"] = (*idcs["train"], *idcs["val"])
for image_set in ("test",) if is_test_set else ("train", "val", "trainval"):
self._create_image_set_file(src, image_set, idcs[image_set])
shutil.copytree(src, root / "Segmentation")
num_images = max(itertools.chain(*idcs.values())) + 1
num_images_per_image_set = dict([(image_set, len(idcs_)) for image_set, idcs_ in idcs.items()])
return num_images, num_images_per_image_set
def _create_image_set_file(self, root, image_set, idcs):
with open(pathlib.Path(root) / f"{image_set}.txt", "w") as fh:
fh.writelines([f"{idx:06d}\n" for idx in idcs])
def _create_annotation_files(self, root, name, num_images):
root = pathlib.Path(root) / name
os.makedirs(root)
for idx in range(num_images):
annotation = self._create_annotation_file(root, f"{idx:06d}.xml")
return annotation
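    # Builds a minimal Pascal VOC style XML annotation with a single <object> holding a <name> and a <bndbox>,
    # and returns the equivalent dict so test_annotations() can compare against the parsed target.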
def _create_annotation_file(self, root, name):
def add_child(parent, name, text=None):
child = ET.SubElement(parent, name)
child.text = text
return child
def add_name(obj, name="dog"):
add_child(obj, "name", name)
return name
def add_bndbox(obj, bndbox=None):
if bndbox is None:
bndbox = {"xmin": "1", "xmax": "2", "ymin": "3", "ymax": "4"}
obj = add_child(obj, "bndbox")
for name, text in bndbox.items():
add_child(obj, name, text)
return bndbox
annotation = ET.Element("annotation")
obj = add_child(annotation, "object")
data = dict(name=add_name(obj), bndbox=add_bndbox(obj))
with open(pathlib.Path(root) / name, "wb") as fh:
fh.write(ET.tostring(annotation))
return data
class VOCDetectionTestCase(VOCSegmentationTestCase):
DATASET_CLASS = datasets.VOCDetection
FEATURE_TYPES = (PIL.Image.Image, dict)
def test_annotations(self):
with self.create_dataset() as (dataset, info):
_, target = dataset[0]
self.assertIn("annotation", target)
annotation = target["annotation"]
self.assertIn("object", annotation)
objects = annotation["object"]
self.assertEqual(len(objects), 1)
object = objects[0]
self.assertEqual(object, info["annotation"])
class CocoDetectionTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.CocoDetection
FEATURE_TYPES = (PIL.Image.Image, list)
REQUIRED_PACKAGES = ("pycocotools",)
_IMAGE_FOLDER = "images"
_ANNOTATIONS_FOLDER = "annotations"
_ANNOTATIONS_FILE = "annotations.json"
def dataset_args(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
root = tmpdir / self._IMAGE_FOLDER
annotation_file = tmpdir / self._ANNOTATIONS_FOLDER / self._ANNOTATIONS_FILE
return root, annotation_file
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
num_images = 3
num_annotations_per_image = 2
files = datasets_utils.create_image_folder(
tmpdir, name=self._IMAGE_FOLDER, file_name_fn=lambda idx: f"{idx:012d}.jpg", num_examples=num_images
)
file_names = [file.relative_to(tmpdir / self._IMAGE_FOLDER) for file in files]
annotation_folder = tmpdir / self._ANNOTATIONS_FOLDER
os.makedirs(annotation_folder)
info = self._create_annotation_file(
annotation_folder, self._ANNOTATIONS_FILE, file_names, num_annotations_per_image
)
info["num_examples"] = num_images
return info
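    # Writes a minimal COCO-style JSON file with "images" (file_name and id) and "annotations" (image_id plus a
    # bbox or caption and a unique id), which is enough for the COCO API used by these tests.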
def _create_annotation_file(self, root, name, file_names, num_annotations_per_image):
image_ids = [int(file_name.stem) for file_name in file_names]
images = [dict(file_name=str(file_name), id=id) for file_name, id in zip(file_names, image_ids)]
annotations, info = self._create_annotations(image_ids, num_annotations_per_image)
self._create_json(root, name, dict(images=images, annotations=annotations))
return info
def _create_annotations(self, image_ids, num_annotations_per_image):
annotations = datasets_utils.combinations_grid(
image_id=image_ids, bbox=([1.0, 2.0, 3.0, 4.0],) * num_annotations_per_image
)
for id, annotation in enumerate(annotations):
annotation["id"] = id
return annotations, dict()
def _create_json(self, root, name, content):
file = pathlib.Path(root) / name
with open(file, "w") as fh:
json.dump(content, fh)
return file
class CocoCaptionsTestCase(CocoDetectionTestCase):
DATASET_CLASS = datasets.CocoCaptions
def _create_annotations(self, image_ids, num_annotations_per_image):
captions = [str(idx) for idx in range(num_annotations_per_image)]
annotations = datasets_utils.combinations_grid(image_id=image_ids, caption=captions)
for id, annotation in enumerate(annotations):
annotation["id"] = id
return annotations, dict(captions=captions)
def test_captions(self):
with self.create_dataset() as (dataset, info):
_, captions = dataset[0]
self.assertEqual(tuple(captions), tuple(info["captions"]))
class UCF101TestCase(datasets_utils.VideoDatasetTestCase):
DATASET_CLASS = datasets.UCF101
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(fold=(1, 2, 3), train=(True, False))
_VIDEO_FOLDER = "videos"
_ANNOTATIONS_FOLDER = "annotations"
def dataset_args(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
root = tmpdir / self._VIDEO_FOLDER
annotation_path = tmpdir / self._ANNOTATIONS_FOLDER
return root, annotation_path
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
video_folder = tmpdir / self._VIDEO_FOLDER
os.makedirs(video_folder)
video_files = self._create_videos(video_folder)
annotations_folder = tmpdir / self._ANNOTATIONS_FOLDER
os.makedirs(annotations_folder)
num_examples = self._create_annotation_files(annotations_folder, video_files, config["fold"], config["train"])
return num_examples
def _create_videos(self, root, num_examples_per_class=3):
def file_name_fn(cls, idx, clips_per_group=2):
return f"v_{cls}_g{(idx // clips_per_group) + 1:02d}_c{(idx % clips_per_group) + 1:02d}.avi"
video_files = [
datasets_utils.create_video_folder(root, cls, lambda idx: file_name_fn(cls, idx), num_examples_per_class)
for cls in ("ApplyEyeMakeup", "YoYo")
]
return [path.relative_to(root) for path in itertools.chain(*video_files)]
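    # The annotation file matching the requested fold/train combination lists a random subset of the fake videos;
    # every other <train|test>list<fold>.txt receives the complement, so only the "current" videos are picked up.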
def _create_annotation_files(self, root, video_files, fold, train):
current_videos = random.sample(video_files, random.randrange(1, len(video_files) - 1))
current_annotation = self._annotation_file_name(fold, train)
self._create_annotation_file(root, current_annotation, current_videos)
other_videos = set(video_files) - set(current_videos)
other_annotations = [
self._annotation_file_name(fold, train) for fold, train in itertools.product((1, 2, 3), (True, False))
]
other_annotations.remove(current_annotation)
for name in other_annotations:
self._create_annotation_file(root, name, other_videos)
return len(current_videos)
def _annotation_file_name(self, fold, train):
return f"{'train' if train else 'test'}list{fold:02d}.txt"
def _create_annotation_file(self, root, name, video_files):
with open(pathlib.Path(root) / name, "w") as fh:
fh.writelines(f"{file}\n" for file in sorted(video_files))
class LSUNTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.LSUN
REQUIRED_PACKAGES = ("lmdb",)
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(
classes=("train", "test", "val", ["bedroom_train", "church_outdoor_train"])
)
_CATEGORIES = (
"bedroom",
"bridge",
"church_outdoor",
"classroom",
"conference_room",
"dining_room",
"kitchen",
"living_room",
"restaurant",
"tower",
)
def inject_fake_data(self, tmpdir, config):
root = pathlib.Path(tmpdir)
num_images = 0
for cls in self._parse_classes(config["classes"]):
num_images += self._create_lmdb(root, cls)
return num_images
@contextlib.contextmanager
def create_dataset(
self,
*args, **kwargs
):
with super().create_dataset(*args, **kwargs) as output:
yield output
# Currently datasets.LSUN caches the keys in the current directory rather than in the root directory. Thus,
# this creates a number of unique _cache_* files in the current directory that will not be removed together
# with the temporary directory
for file in os.listdir(os.getcwd()):
if file.startswith("_cache_"):
os.remove(file)
def _parse_classes(self, classes):
if not isinstance(classes, str):
return classes
split = classes
if split == "test":
return [split]
return [f"{category}_{split}" for category in self._CATEGORIES]
def _create_lmdb(self, root, cls):
lmdb = datasets_utils.lazy_importer.lmdb
hexdigits_lowercase = string.digits + string.ascii_lowercase[:6]
folder = f"{cls}_lmdb"
num_images = torch.randint(1, 4, size=()).item()
format = "png"
files = datasets_utils.create_image_folder(root, folder, lambda idx: f"{idx}.{format}", num_images)
with lmdb.open(str(root / folder)) as env, env.begin(write=True) as txn:
for file in files:
key = "".join(random.choice(hexdigits_lowercase) for _ in range(40)).encode()
buffer = io.BytesIO()
Image.open(file).save(buffer, format)
buffer.seek(0)
value = buffer.read()
txn.put(key, value)
os.remove(file)
return num_images
def test_not_found_or_corrupted(self):
        # LSUN does not raise a built-in exception, but a custom one. It is expressive enough not to 'cast' it to
        # the RuntimeError or FileNotFoundError that are normally checked by this test.
with self.assertRaises(datasets_utils.lazy_importer.lmdb.Error):
super().test_not_found_or_corrupted()
class Kinetics400TestCase(datasets_utils.VideoDatasetTestCase):
DATASET_CLASS = datasets.Kinetics400
def inject_fake_data(self, tmpdir, config):
classes = ("Abseiling", "Zumba")
num_videos_per_class = 2
digits = string.ascii_letters + string.digits + "-_"
for cls in classes:
datasets_utils.create_video_folder(
tmpdir,
cls,
lambda _: f"{datasets_utils.create_random_string(11, digits)}.avi",
num_videos_per_class,
)
return num_videos_per_class * len(classes)
class HMDB51TestCase(datasets_utils.VideoDatasetTestCase):
DATASET_CLASS = datasets.HMDB51
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(fold=(1, 2, 3), train=(True, False))
_VIDEO_FOLDER = "videos"
_SPLITS_FOLDER = "splits"
_CLASSES = ("brush_hair", "wave")
def dataset_args(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
root = tmpdir / self._VIDEO_FOLDER
annotation_path = tmpdir / self._SPLITS_FOLDER
return root, annotation_path
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
video_folder = tmpdir / self._VIDEO_FOLDER
os.makedirs(video_folder)
video_files = self._create_videos(video_folder)
splits_folder = tmpdir / self._SPLITS_FOLDER
os.makedirs(splits_folder)
num_examples = self._create_split_files(splits_folder, video_files, config["fold"], config["train"])
return num_examples
def _create_videos(self, root, num_examples_per_class=3):
def file_name_fn(cls, idx, clips_per_group=2):
return f"{cls}_{(idx // clips_per_group) + 1:d}_{(idx % clips_per_group) + 1:d}.avi"
return [
(
cls,
datasets_utils.create_video_folder(
root,
cls,
lambda idx: file_name_fn(cls, idx),
num_examples_per_class,
),
)
for cls in self._CLASSES
]
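    # <class>_test_split<fold>.txt tags every video with 1 (train) or 2 (test); a random subset per class is
    # marked as train, and the return value is the number of samples the configured split should contain.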
def _create_split_files(self, root, video_files, fold, train):
num_videos = num_train_videos = 0
for cls, videos in video_files:
num_videos += len(videos)
train_videos = set(random.sample(videos, random.randrange(1, len(videos) - 1)))
num_train_videos += len(train_videos)
with open(pathlib.Path(root) / f"{cls}_test_split{fold}.txt", "w") as fh:
fh.writelines(f"{file.name} {1 if file in train_videos else 2}\n" for file in videos)
return num_train_videos if train else (num_videos - num_train_videos)
class OmniglotTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.Omniglot
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(background=(True, False))
def inject_fake_data(self, tmpdir, config):
target_folder = (
pathlib.Path(tmpdir) / "omniglot-py" / f"images_{'background' if config['background'] else 'evaluation'}"
)
os.makedirs(target_folder)
num_images = 0
for name in ("Alphabet_of_the_Magi", "Tifinagh"):
num_images += self._create_alphabet_folder(target_folder, name)
return num_images
def _create_alphabet_folder(self, root, name):
num_images_total = 0
for idx in range(torch.randint(1, 4, size=()).item()):
num_images = torch.randint(1, 4, size=()).item()
num_images_total += num_images
datasets_utils.create_image_folder(
root / name, f"character{idx:02d}", lambda image_idx: f"{image_idx:02d}.png", num_images
)
return num_images_total
class SBUTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.SBU
FEATURE_TYPES = (PIL.Image.Image, str)
def inject_fake_data(self, tmpdir, config):
num_images = 3
dataset_folder = pathlib.Path(tmpdir) / "dataset"
images = datasets_utils.create_image_folder(tmpdir, "dataset", self._create_file_name, num_images)
self._create_urls_txt(dataset_folder, images)
self._create_captions_txt(dataset_folder, num_images)
return num_images
def _create_file_name(self, idx):
part1 = datasets_utils.create_random_string(10, string.digits)
part2 = datasets_utils.create_random_string(10, string.ascii_lowercase, string.digits[:6])
return f"{part1}_{part2}.jpg"
def _create_urls_txt(self, root, images):
with open(root / "SBU_captioned_photo_dataset_urls.txt", "w") as fh:
for image in images:
fh.write(
f"http://static.flickr.com/{datasets_utils.create_random_string(4, string.digits)}/{image.name}\n"
)
def _create_captions_txt(self, root, num_images):
with open(root / "SBU_captioned_photo_dataset_captions.txt", "w") as fh:
for _ in range(num_images):
fh.write(f"{datasets_utils.create_random_string(10)}\n")
class SEMEIONTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.SEMEION
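    # semeion.data stores one sample per line: 256 pixel intensities followed by a 10-column one-hot label.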
def inject_fake_data(self, tmpdir, config):
num_images = 3
images = torch.rand(num_images, 256)
labels = F.one_hot(torch.randint(10, size=(num_images,)))
with open(pathlib.Path(tmpdir) / "semeion.data", "w") as fh:
for image, one_hot_labels in zip(images, labels):
image_columns = " ".join([f"{pixel.item():.4f}" for pixel in image])
labels_columns = " ".join([str(label.item()) for label in one_hot_labels])
fh.write(f"{image_columns} {labels_columns}\n")
return num_images
class USPSTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.USPS
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))
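    # The fake usps(.t).bz2 file uses a libsvm-like "<label> <index>:<value>" format with pixel values in [-1, 1]
    # and labels in 1..10.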
def inject_fake_data(self, tmpdir, config):
num_images = 2 if config["train"] else 1
images = torch.rand(num_images, 256) * 2 - 1
labels = torch.randint(1, 11, size=(num_images,))
with bz2.open(pathlib.Path(tmpdir) / f"usps{'.t' if not config['train'] else ''}.bz2", "w") as fh:
for image, label in zip(images, labels):
line = " ".join((str(label.item()), *[f"{idx}:{pixel:.6f}" for idx, pixel in enumerate(image, 1)]))
fh.write(f"{line}\n".encode())
return num_images
class SBDatasetTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.SBDataset
FEATURE_TYPES = (PIL.Image.Image, (np.ndarray, PIL.Image.Image))
REQUIRED_PACKAGES = ("scipy.io", "scipy.sparse")
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(
image_set=("train", "val", "train_noval"), mode=("boundaries", "segmentation")
)
_NUM_CLASSES = 20
def inject_fake_data(self, tmpdir, config):
num_images, num_images_per_image_set = self._create_split_files(tmpdir)
sizes = self._create_target_folder(tmpdir, "cls", num_images)
datasets_utils.create_image_folder(
tmpdir, "img", lambda idx: f"{self._file_stem(idx)}.jpg", num_images, size=lambda idx: sizes[idx]
)
return num_images_per_image_set[config["image_set"]]
def _create_split_files(self, root):
root = pathlib.Path(root)
splits = dict(train=(0, 1, 2), train_noval=(0, 2), val=(3,))
for split, idcs in splits.items():
self._create_split_file(root, split, idcs)
num_images = max(itertools.chain(*splits.values())) + 1
num_images_per_split = dict([(split, len(idcs)) for split, idcs in splits.items()])
return num_images, num_images_per_split
def _create_split_file(self, root, name, idcs):
with open(root / f"{name}.txt", "w") as fh:
fh.writelines(f"{self._file_stem(idx)}\n" for idx in idcs)
def _create_target_folder(self, root, name, num_images):
io = datasets_utils.lazy_importer.scipy.io
target_folder = pathlib.Path(root) / name
os.makedirs(target_folder)
sizes = [torch.randint(1, 4, size=(2,)).tolist() for _ in range(num_images)]
for idx, size in enumerate(sizes):
content = dict(
GTcls=dict(Boundaries=self._create_boundaries(size), Segmentation=self._create_segmentation(size))
)
io.savemat(target_folder / f"{self._file_stem(idx)}.mat", content)
return sizes
def _create_boundaries(self, size):
sparse = datasets_utils.lazy_importer.scipy.sparse
return [
[sparse.csc_matrix(torch.randint(0, 2, size=size, dtype=torch.uint8).numpy())]
for _ in range(self._NUM_CLASSES)
]
def _create_segmentation(self, size):
return torch.randint(0, self._NUM_CLASSES + 1, size=size, dtype=torch.uint8).numpy()
def _file_stem(self, idx):
return f"2008_{idx:06d}"
class FakeDataTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.FakeData
FEATURE_TYPES = (PIL.Image.Image, int)
def dataset_args(self, tmpdir, config):
return ()
def inject_fake_data(self, tmpdir, config):
return config["size"]
def test_not_found_or_corrupted(self):
self.skipTest("The data is generated at creation and thus cannot be non-existent or corrupted.")
class PhotoTourTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.PhotoTour
# The PhotoTour dataset returns examples with different features with respect to the 'train' parameter. Thus,
# we overwrite 'FEATURE_TYPES' with a dummy value to satisfy the initial checks of the base class. Furthermore, we
# overwrite the 'test_feature_types()' method to select the correct feature types before the test is run.
FEATURE_TYPES = ()
_TRAIN_FEATURE_TYPES = (torch.Tensor,)
_TEST_FEATURE_TYPES = (torch.Tensor, torch.Tensor, torch.Tensor)
    ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))
_NAME = "liberty"
def dataset_args(self, tmpdir, config):
return tmpdir, self._NAME
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
# In contrast to the original data, the fake images injected here comprise only a single patch. Thus,
# num_images == num_patches.
num_patches = 5
image_files = self._create_images(tmpdir, self._NAME, num_patches)
point_ids, info_file = self._create_info_file(tmpdir / self._NAME, num_patches)
num_matches, matches_file = self._create_matches_file(tmpdir / self._NAME, num_patches, point_ids)
self._create_archive(tmpdir, self._NAME, *image_files, info_file, matches_file)
return num_patches if config["train"] else num_matches
def _create_images(self, root, name, num_images):
        # The images in the PhotoTour dataset comprise multiple grayscale patches of 64 x 64 pixels. Thus, the
        # smallest fake image is 64 x 64 pixels and comprises a single patch.
return datasets_utils.create_image_folder(
root, name, lambda idx: f"patches{idx:04d}.bmp", num_images, size=(1, 64, 64)
)
def _create_info_file(self, root, num_images):
point_ids = torch.randint(num_images, size=(num_images,)).tolist()
file = root / "info.txt"
with open(file, "w") as fh:
fh.writelines([f"{point_id} 0\n" for point_id in point_ids])
return point_ids, file
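    # m50_100000_100000_0.txt lists candidate patch pairs as "<patch_id1> <point_id1> 0 <patch_id2> <point_id2> 0";
    # the number of lines is what the dataset is expected to return in test mode.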
def _create_matches_file(self, root, num_patches, point_ids):
lines = [
f"{patch_id1} {point_ids[patch_id1]} 0 {patch_id2} {point_ids[patch_id2]} 0\n"
for patch_id1, patch_id2 in itertools.combinations(range(num_patches), 2)
]
file = root / "m50_100000_100000_0.txt"
with open(file, "w") as fh:
fh.writelines(lines)
return len(lines), file
def _create_archive(self, root, name, *files):
archive = root / f"{name}.zip"
with zipfile.ZipFile(archive, "w") as zip:
for file in files:
zip.write(file, arcname=file.relative_to(root))
return archive
@datasets_utils.test_all_configs
def test_feature_types(self, config):
feature_types = self.FEATURE_TYPES
self.FEATURE_TYPES = self._TRAIN_FEATURE_TYPES if config["train"] else self._TEST_FEATURE_TYPES
try:
super().test_feature_types.__wrapped__(self, config)
finally:
self.FEATURE_TYPES = feature_types
class Flickr8kTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.Flickr8k
FEATURE_TYPES = (PIL.Image.Image, list)
_IMAGES_FOLDER = "images"
_ANNOTATIONS_FILE = "captions.html"
def dataset_args(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
root = tmpdir / self._IMAGES_FOLDER
ann_file = tmpdir / self._ANNOTATIONS_FILE
return str(root), str(ann_file)
def inject_fake_data(self, tmpdir, config):
num_images = 3
num_captions_per_image = 3
tmpdir = pathlib.Path(tmpdir)
images = self._create_images(tmpdir, self._IMAGES_FOLDER, num_images)
self._create_annotations_file(tmpdir, self._ANNOTATIONS_FILE, images, num_captions_per_image)
return dict(num_examples=num_images, captions=self._create_captions(num_captions_per_image))
def _create_images(self, root, name, num_images):
return datasets_utils.create_image_folder(root, name, self._image_file_name, num_images)
def _image_file_name(self, idx):
id = datasets_utils.create_random_string(10, string.digits)
checksum = datasets_utils.create_random_string(10, string.digits, string.ascii_lowercase[:6])
size = datasets_utils.create_random_string(1, "qwcko")
return f"{id}_{checksum}_{size}.jpg"
def _create_annotations_file(self, root, name, images, num_captions_per_image):
with open(root / name, "w") as fh:
fh.write("<table>")
for image in (None, *images):
self._add_image(fh, image, num_captions_per_image)
fh.write("</table>")
def _add_image(self, fh, image, num_captions_per_image):
fh.write("<tr>")
self._add_image_header(fh, image)
fh.write("</tr><tr><td><ul>")
self._add_image_captions(fh, num_captions_per_image)
fh.write("</ul></td></tr>")
def _add_image_header(self, fh, image=None):
if image:
url = f"http://www.flickr.com/photos/user/{image.name.split('_')[0]}/"
data = f'<a href="{url}">{url}</a>'
else:
data = "Image Not Found"
fh.write(f"<td>{data}</td>")
def _add_image_captions(self, fh, num_captions_per_image):
for caption in self._create_captions(num_captions_per_image):
fh.write(f"<li>{caption}")
def _create_captions(self, num_captions_per_image):
return [str(idx) for idx in range(num_captions_per_image)]
def test_captions(self):
with self.create_dataset() as (dataset, info):
_, captions = dataset[0]
self.assertSequenceEqual(captions, info["captions"])
class Flickr30kTestCase(Flickr8kTestCase):
DATASET_CLASS = datasets.Flickr30k
FEATURE_TYPES = (PIL.Image.Image, list)
_ANNOTATIONS_FILE = "captions.token"
def _image_file_name(self, idx):
return f"{idx}.jpg"
def _create_annotations_file(self, root, name, images, num_captions_per_image):
with open(root / name, "w") as fh:
for image, (idx, caption) in itertools.product(
images, enumerate(self._create_captions(num_captions_per_image))
):
fh.write(f"{image.name}#{idx}\t{caption}\n")
class MNISTTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.MNIST
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))
_MAGIC_DTYPES = {
torch.uint8: 8,
torch.int8: 9,
torch.int16: 11,
torch.int32: 12,
torch.float32: 13,
torch.float64: 14,
}
_IMAGES_SIZE = (28, 28)
_IMAGES_DTYPE = torch.uint8
_LABELS_SIZE = ()
_LABELS_DTYPE = torch.uint8
def inject_fake_data(self, tmpdir, config):
raw_dir = pathlib.Path(tmpdir) / self.DATASET_CLASS.__name__ / "raw"
os.makedirs(raw_dir, exist_ok=True)
num_images = self._num_images(config)
self._create_binary_file(
raw_dir, self._images_file(config), (num_images, *self._IMAGES_SIZE), self._IMAGES_DTYPE
)
self._create_binary_file(
raw_dir, self._labels_file(config), (num_images, *self._LABELS_SIZE), self._LABELS_DTYPE
)
return num_images
def _num_images(self, config):
return 2 if config["train"] else 1
def _images_file(self, config):
return f"{self._prefix(config)}-images-idx3-ubyte"
def _labels_file(self, config):
return f"{self._prefix(config)}-labels-idx1-ubyte"
def _prefix(self, config):
return "train" if config["train"] else "t10k"
def _create_binary_file(self, root, filename, size, dtype):
with open(pathlib.Path(root) / filename, "wb") as fh:
for meta in (self._magic(dtype, len(size)), *size):
fh.write(self._encode(meta))
# If ever an MNIST variant is added that uses floating point data, this should be adapted.
data = torch.randint(0, torch.iinfo(dtype).max + 1, size, dtype=dtype)
fh.write(data.numpy().tobytes())
def _magic(self, dtype, dims):
return self._MAGIC_DTYPES[dtype] * 256 + dims
def _encode(self, v):
return torch.tensor(v, dtype=torch.int32).numpy().tobytes()[::-1]
class FashionMNISTTestCase(MNISTTestCase):
DATASET_CLASS = datasets.FashionMNIST
class KMNISTTestCase(MNISTTestCase):
DATASET_CLASS = datasets.KMNIST
class EMNISTTestCase(MNISTTestCase):
DATASET_CLASS = datasets.EMNIST
DEFAULT_CONFIG = dict(split="byclass")
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(
split=("byclass", "bymerge", "balanced", "letters", "digits", "mnist"), train=(True, False)
)
def _prefix(self, config):
return f"emnist-{config['split']}-{'train' if config['train'] else 'test'}"
class QMNISTTestCase(MNISTTestCase):
DATASET_CLASS = datasets.QMNIST
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(what=("train", "test", "test10k", "nist"))
_LABELS_SIZE = (8,)
_LABELS_DTYPE = torch.int32
def _num_images(self, config):
if config["what"] == "nist":
return 3
elif config["what"] == "train":
return 2
elif config["what"] == "test50k":
# The split 'test50k' is defined as the last 50k images beginning at index 10000. Thus, we need to create
# more than 10000 images for the dataset to not be empty. Since this takes significantly longer than the
# creation of all other splits, this is excluded from the 'ADDITIONAL_CONFIGS' and is tested only once in
# 'test_num_examples_test50k'.
return 10001
else:
return 1
def _labels_file(self, config):
return f"{self._prefix(config)}-labels-idx2-int"
def _prefix(self, config):
if config["what"] == "nist":
return "xnist"
if config["what"] is None:
what = "train" if config["train"] else "test"
elif config["what"].startswith("test"):
what = "test"
else:
what = config["what"]
return f"qmnist-{what}"
def test_num_examples_test50k(self):
with self.create_dataset(what="test50k") as (dataset, info):
            # Since the split 'test50k' selects all images beginning at index 10000, we subtract 10000 from the
            # number of created examples.
self.assertEqual(len(dataset), info["num_examples"] - 10000)
class DatasetFolderTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.DatasetFolder
# The dataset has no fixed return type since it is defined by the loader parameter. For testing, we use a loader
# that simply returns the path as type 'str' instead of loading anything. See the 'dataset_args()' method.
FEATURE_TYPES = (str, int)
_IMAGE_EXTENSIONS = ("jpg", "png")
_VIDEO_EXTENSIONS = ("avi", "mp4")
_EXTENSIONS = (*_IMAGE_EXTENSIONS, *_VIDEO_EXTENSIONS)
# DatasetFolder has two mutually exclusive parameters: 'extensions' and 'is_valid_file'. One of both is required.
# We only iterate over different 'extensions' here and handle the tests for 'is_valid_file' in the
# 'test_is_valid_file()' method.
DEFAULT_CONFIG = dict(extensions=_EXTENSIONS)
ADDITIONAL_CONFIGS = (
*datasets_utils.combinations_grid(extensions=[(ext,) for ext in _IMAGE_EXTENSIONS]),
dict(extensions=_IMAGE_EXTENSIONS),
*datasets_utils.combinations_grid(extensions=[(ext,) for ext in _VIDEO_EXTENSIONS]),
dict(extensions=_VIDEO_EXTENSIONS),
)
def dataset_args(self, tmpdir, config):
return tmpdir, lambda x: x
def inject_fake_data(self, tmpdir, config):
extensions = config["extensions"] or self._is_valid_file_to_extensions(config["is_valid_file"])
num_examples_total = 0
classes = []
for ext, cls in zip(self._EXTENSIONS, string.ascii_letters):
if ext not in extensions:
continue
create_example_folder = (
datasets_utils.create_image_folder
if ext in self._IMAGE_EXTENSIONS
else datasets_utils.create_video_folder
)
num_examples = torch.randint(1, 3, size=()).item()
create_example_folder(tmpdir, cls, lambda idx: self._file_name_fn(cls, ext, idx), num_examples)
num_examples_total += num_examples
classes.append(cls)
return dict(num_examples=num_examples_total, classes=classes)
def _file_name_fn(self, cls, ext, idx):
return f"{cls}_{idx}.{ext}"
def _is_valid_file_to_extensions(self, is_valid_file):
return {ext for ext in self._EXTENSIONS if is_valid_file(f"foo.{ext}")}
@datasets_utils.test_all_configs
def test_is_valid_file(self, config):
extensions = config.pop("extensions")
# We need to explicitly pass extensions=None here or otherwise it would be filled by the value from the
# DEFAULT_CONFIG.
with self.create_dataset(
config, extensions=None, is_valid_file=lambda file: pathlib.Path(file).suffix[1:] in extensions
) as (dataset, info):
self.assertEqual(len(dataset), info["num_examples"])
@datasets_utils.test_all_configs
def test_classes(self, config):
with self.create_dataset(config) as (dataset, info):
self.assertSequenceEqual(dataset.classes, info["classes"])
class ImageFolderTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.ImageFolder
def inject_fake_data(self, tmpdir, config):
num_examples_total = 0
classes = ("a", "b")
for cls in classes:
num_examples = torch.randint(1, 3, size=()).item()
num_examples_total += num_examples
datasets_utils.create_image_folder(tmpdir, cls, lambda idx: f"{cls}_{idx}.png", num_examples)
return dict(num_examples=num_examples_total, classes=classes)
@datasets_utils.test_all_configs
def test_classes(self, config):
with self.create_dataset(config) as (dataset, info):
self.assertSequenceEqual(dataset.classes, info["classes"])
class KittiTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.Kitti
FEATURE_TYPES = (PIL.Image.Image, (list, type(None))) # test split returns None as target
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))
def inject_fake_data(self, tmpdir, config):
kitti_dir = os.path.join(tmpdir, "Kitti", "raw")
os.makedirs(kitti_dir)
split_to_num_examples = {
True: 1,
False: 2,
}
        # We need to create all folders (training and testing), regardless of the split in config.
for is_training in (True, False):
num_examples = split_to_num_examples[is_training]
datasets_utils.create_image_folder(
root=kitti_dir,
name=os.path.join("training" if is_training else "testing", "image_2"),
file_name_fn=lambda image_idx: f"{image_idx:06d}.png",
num_examples=num_examples,
)
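            # Training images additionally get a label_2/<idx>.txt file holding a single KITTI-style label line
            # (class name followed by 14 numeric fields).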
if is_training:
for image_idx in range(num_examples):
target_file_dir = os.path.join(kitti_dir, "training", "label_2")
os.makedirs(target_file_dir)
target_file_name = os.path.join(target_file_dir, f"{image_idx:06d}.txt")
target_contents = "Pedestrian 0.00 0 -0.20 712.40 143.00 810.73 307.92 1.89 0.48 1.20 1.84 1.47 8.41 0.01\n" # noqa
with open(target_file_name, "w") as target_file:
target_file.write(target_contents)
return split_to_num_examples[config["train"]]
if __name__ == "__main__":
unittest.main()
import contextlib
import sys
import os
import unittest
from unittest import mock
import numpy as np
import PIL
from PIL import Image
from torch._utils_internal import get_file_path_2
import torchvision
from torchvision.datasets import utils
from common_utils import get_tmp_dir
from fakedata_generation import svhn_root, places365_root, widerface_root, stl10_root
import xml.etree.ElementTree as ET
from urllib.request import Request, urlopen
import itertools
import datasets_utils
import pathlib
import pickle
from torchvision import datasets
import torch
import shutil
import json
import random
import bz2
import torch.nn.functional as F
import string
import io
import zipfile
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
try:
import av
HAS_PYAV = True
except ImportError:
HAS_PYAV = False
class DatasetTestcase(unittest.TestCase):
def generic_classification_dataset_test(self, dataset, num_images=1):
self.assertEqual(len(dataset), num_images)
img, target = dataset[0]
self.assertTrue(isinstance(img, PIL.Image.Image))
self.assertTrue(isinstance(target, int))
def generic_segmentation_dataset_test(self, dataset, num_images=1):
self.assertEqual(len(dataset), num_images)
img, target = dataset[0]
self.assertTrue(isinstance(img, PIL.Image.Image))
self.assertTrue(isinstance(target, PIL.Image.Image))
class Tester(DatasetTestcase):
@mock.patch('torchvision.datasets.SVHN._check_integrity')
@unittest.skipIf(not HAS_SCIPY, "scipy unavailable")
def test_svhn(self, mock_check):
mock_check.return_value = True
with svhn_root() as root:
dataset = torchvision.datasets.SVHN(root, split="train")
self.generic_classification_dataset_test(dataset, num_images=2)
dataset = torchvision.datasets.SVHN(root, split="test")
self.generic_classification_dataset_test(dataset, num_images=2)
dataset = torchvision.datasets.SVHN(root, split="extra")
self.generic_classification_dataset_test(dataset, num_images=2)
def test_places365(self):
for split, small in itertools.product(("train-standard", "train-challenge", "val"), (False, True)):
with places365_root(split=split, small=small) as places365:
root, data = places365
dataset = torchvision.datasets.Places365(root, split=split, small=small, download=True)
self.generic_classification_dataset_test(dataset, num_images=len(data["imgs"]))
def test_places365_transforms(self):
expected_image = "image"
expected_target = "target"
def transform(image):
return expected_image
def target_transform(target):
return expected_target
with places365_root() as places365:
root, data = places365
dataset = torchvision.datasets.Places365(
root, transform=transform, target_transform=target_transform, download=True
)
actual_image, actual_target = dataset[0]
self.assertEqual(actual_image, expected_image)
self.assertEqual(actual_target, expected_target)
def test_places365_devkit_download(self):
for split in ("train-standard", "train-challenge", "val"):
with self.subTest(split=split):
with places365_root(split=split) as places365:
root, data = places365
dataset = torchvision.datasets.Places365(root, split=split, download=True)
with self.subTest("classes"):
self.assertSequenceEqual(dataset.classes, data["classes"])
with self.subTest("class_to_idx"):
self.assertDictEqual(dataset.class_to_idx, data["class_to_idx"])
with self.subTest("imgs"):
self.assertSequenceEqual(dataset.imgs, data["imgs"])
def test_places365_devkit_no_download(self):
for split in ("train-standard", "train-challenge", "val"):
with self.subTest(split=split):
with places365_root(split=split) as places365:
root, data = places365
with self.assertRaises(RuntimeError):
torchvision.datasets.Places365(root, split=split, download=False)
def test_places365_images_download(self):
for split, small in itertools.product(("train-standard", "train-challenge", "val"), (False, True)):
with self.subTest(split=split, small=small):
with places365_root(split=split, small=small) as places365:
root, data = places365
dataset = torchvision.datasets.Places365(root, split=split, small=small, download=True)
assert all(os.path.exists(item[0]) for item in dataset.imgs)
def test_places365_images_download_preexisting(self):
split = "train-standard"
small = False
images_dir = "data_large_standard"
with places365_root(split=split, small=small) as places365:
root, data = places365
os.mkdir(os.path.join(root, images_dir))
with self.assertRaises(RuntimeError):
torchvision.datasets.Places365(root, split=split, small=small, download=True)
def test_places365_repr_smoke(self):
with places365_root() as places365:
root, data = places365
dataset = torchvision.datasets.Places365(root, download=True)
self.assertIsInstance(repr(dataset), str)
class STL10Tester(DatasetTestcase):
@contextlib.contextmanager
def mocked_root(self):
with stl10_root() as (root, data):
yield root, data
@contextlib.contextmanager
def mocked_dataset(self, pre_extract=False, download=True, **kwargs):
with self.mocked_root() as (root, data):
if pre_extract:
utils.extract_archive(os.path.join(root, data["archive"]))
dataset = torchvision.datasets.STL10(root, download=download, **kwargs)
yield dataset, data
def test_not_found(self):
with self.assertRaises(RuntimeError):
with self.mocked_dataset(download=False):
pass
def test_splits(self):
for split in ('train', 'train+unlabeled', 'unlabeled', 'test'):
with self.mocked_dataset(split=split) as (dataset, data):
num_images = sum([data["num_images_in_split"][part] for part in split.split("+")])
self.generic_classification_dataset_test(dataset, num_images=num_images)
def test_folds(self):
for fold in range(10):
with self.mocked_dataset(split="train", folds=fold) as (dataset, data):
num_images = data["num_images_in_folds"][fold]
self.assertEqual(len(dataset), num_images)
def test_invalid_folds1(self):
with self.assertRaises(ValueError):
with self.mocked_dataset(folds=10):
pass
def test_invalid_folds2(self):
with self.assertRaises(ValueError):
with self.mocked_dataset(folds="0"):
pass
def test_transforms(self):
expected_image = "image"
expected_target = "target"
def transform(image):
return expected_image
def target_transform(target):
return expected_target
with self.mocked_dataset(transform=transform, target_transform=target_transform) as (dataset, _):
actual_image, actual_target = dataset[0]
self.assertEqual(actual_image, expected_image)
self.assertEqual(actual_target, expected_target)
def test_unlabeled(self):
with self.mocked_dataset(split="unlabeled") as (dataset, _):
labels = [dataset[idx][1] for idx in range(len(dataset))]
self.assertTrue(all([label == -1 for label in labels]))
@unittest.mock.patch("torchvision.datasets.stl10.download_and_extract_archive")
def test_download_preexisting(self, mock):
with self.mocked_dataset(pre_extract=True) as (dataset, data):
mock.assert_not_called()
def test_repr_smoke(self):
with self.mocked_dataset() as (dataset, _):
self.assertIsInstance(repr(dataset), str)
class Caltech101TestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.Caltech101
FEATURE_TYPES = (PIL.Image.Image, (int, np.ndarray, tuple))
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(
target_type=("category", "annotation", ["category", "annotation"])
)
REQUIRED_PACKAGES = ("scipy",)
def inject_fake_data(self, tmpdir, config):
root = pathlib.Path(tmpdir) / "caltech101"
images = root / "101_ObjectCategories"
annotations = root / "Annotations"
categories = (("Faces", "Faces_2"), ("helicopter", "helicopter"), ("ying_yang", "ying_yang"))
num_images_per_category = 2
for image_category, annotation_category in categories:
datasets_utils.create_image_folder(
root=images,
name=image_category,
file_name_fn=lambda idx: f"image_{idx + 1:04d}.jpg",
num_examples=num_images_per_category,
)
self._create_annotation_folder(
root=annotations,
name=annotation_category,
file_name_fn=lambda idx: f"annotation_{idx + 1:04d}.mat",
num_examples=num_images_per_category,
)
os.makedirs(images / "BACKGROUND_Google")
return num_images_per_category * len(categories)
def _create_annotation_folder(self, root, name, file_name_fn, num_examples):
root = pathlib.Path(root) / name
os.makedirs(root)
for idx in range(num_examples):
self._create_annotation_file(root, file_name_fn(idx))
def _create_annotation_file(self, root, name):
mdict = dict(obj_contour=torch.rand((2, torch.randint(3, 6, size=())), dtype=torch.float64).numpy())
datasets_utils.lazy_importer.scipy.io.savemat(str(pathlib.Path(root) / name), mdict)
def test_combined_targets(self):
target_types = ["category", "annotation"]
individual_targets = []
for target_type in target_types:
with self.create_dataset(target_type=target_type) as (dataset, _):
_, target = dataset[0]
individual_targets.append(target)
with self.create_dataset(target_type=target_types) as (dataset, _):
_, combined_targets = dataset[0]
actual = len(individual_targets)
expected = len(combined_targets)
self.assertEqual(
actual,
expected,
f"The number of the returned combined targets does not match the the number targets if requested "
f"individually: {actual} != {expected}",
)
for target_type, combined_target, individual_target in zip(target_types, combined_targets, individual_targets):
with self.subTest(target_type=target_type):
actual = type(combined_target)
expected = type(individual_target)
self.assertIs(
actual,
expected,
f"Type of the combined target does not match the type of the corresponding individual target: "
f"{actual} is not {expected}",
)
class Caltech256TestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.Caltech256
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir) / "caltech256" / "256_ObjectCategories"
categories = ((1, "ak47"), (127, "laptop-101"), (257, "clutter"))
num_images_per_category = 2
for idx, category in categories:
datasets_utils.create_image_folder(
tmpdir,
name=f"{idx:03d}.{category}",
file_name_fn=lambda image_idx: f"{idx:03d}_{image_idx + 1:04d}.jpg",
num_examples=num_images_per_category,
)
return num_images_per_category * len(categories)
class WIDERFaceTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.WIDERFace
FEATURE_TYPES = (PIL.Image.Image, (dict, type(None)))
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(split=('train', 'val', 'test'))
def inject_fake_data(self, tmpdir, config):
widerface_dir = pathlib.Path(tmpdir) / 'widerface'
annotations_dir = widerface_dir / 'wider_face_split'
os.makedirs(annotations_dir)
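        # The same small mapping does double duty: it serves both as the per-split filename index and as the number of fake examples created for that split.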
split_to_idx = split_to_num_examples = {
"train": 1,
"val": 2,
"test": 3,
}
for split in ('train', 'val', 'test'):
split_idx = split_to_idx[split]
num_examples = split_to_num_examples[split]
datasets_utils.create_image_folder(
root=tmpdir,
name=widerface_dir / f'WIDER_{split}' / 'images' / '0--Parade',
file_name_fn=lambda image_idx: f"0_Parade_marchingband_1_{split_idx + image_idx}.jpg",
num_examples=num_examples,
)
annotation_file_name = {
'train': annotations_dir / 'wider_face_train_bbx_gt.txt',
'val': annotations_dir / 'wider_face_val_bbx_gt.txt',
'test': annotations_dir / 'wider_face_test_filelist.txt',
}[split]
annotation_content = {
"train": "".join(
f"0--Parade/0_Parade_marchingband_1_{split_idx + image_idx}.jpg\n1\n449 330 122 149 0 0 0 0 0 0\n"
for image_idx in range(num_examples)
),
"val": "".join(
f"0--Parade/0_Parade_marchingband_1_{split_idx + image_idx}.jpg\n1\n501 160 285 443 0 0 0 0 0 0\n"
for image_idx in range(num_examples)
),
"test": "".join(
f"0--Parade/0_Parade_marchingband_1_{split_idx + image_idx}.jpg\n"
for image_idx in range(num_examples)
),
}[split]
with open(annotation_file_name, "w") as annotation_file:
annotation_file.write(annotation_content)
return split_to_num_examples[config["split"]]
class CityScapesTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.Cityscapes
TARGET_TYPES = (
"instance",
"semantic",
"polygon",
"color",
)
ADDITIONAL_CONFIGS = (
*datasets_utils.combinations_grid(
mode=("fine",), split=("train", "test", "val"), target_type=TARGET_TYPES
),
*datasets_utils.combinations_grid(
mode=("coarse",),
split=("train", "train_extra", "val"),
target_type=TARGET_TYPES,
),
)
FEATURE_TYPES = (PIL.Image.Image, (dict, PIL.Image.Image))
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
mode_to_splits = {
"Coarse": ["train", "train_extra", "val"],
"Fine": ["train", "test", "val"],
}
if config["split"] == "train":
cities = ["bochum", "bremen"]
else:
cities = ["bochum"]
polygon_target = {
"imgHeight": 1024,
"imgWidth": 2048,
"objects": [
{
"label": "sky",
"polygon": [
[1241, 0],
[1234, 156],
[1478, 197],
[1611, 172],
[1606, 0],
],
},
{
"label": "road",
"polygon": [
[0, 448],
[1331, 274],
[1473, 265],
[2047, 605],
[2047, 1023],
[0, 1023],
],
},
],
}
for mode in ["Coarse", "Fine"]:
gt_dir = tmpdir / f"gt{mode}"
for split in mode_to_splits[mode]:
for city in cities:
def make_image(name, size=10):
datasets_utils.create_image_folder(
root=gt_dir / split,
name=city,
file_name_fn=lambda _: name,
size=size,
num_examples=1,
)
make_image(f"{city}_000000_000000_gt{mode}_instanceIds.png")
make_image(f"{city}_000000_000000_gt{mode}_labelIds.png")
make_image(f"{city}_000000_000000_gt{mode}_color.png", size=(4, 10, 10))
polygon_target_name = gt_dir / split / city / f"{city}_000000_000000_gt{mode}_polygons.json"
with open(polygon_target_name, "w") as outfile:
json.dump(polygon_target, outfile)
for split in ['test', 'train_extra', 'train', 'val']:
for city in cities:
datasets_utils.create_image_folder(
root=tmpdir / "leftImg8bit" / split,
name=city,
file_name_fn=lambda _: f"{city}_000000_000000_leftImg8bit.png",
num_examples=1,
)
info = {'num_examples': len(cities)}
if config['target_type'] == 'polygon':
info['expected_polygon_target'] = polygon_target
return info
def test_combined_targets(self):
target_types = ['semantic', 'polygon', 'color']
with self.create_dataset(target_type=target_types) as (dataset, _):
output = dataset[0]
            self.assertIsInstance(output, tuple)
            self.assertEqual(len(output), 2)
            self.assertIsInstance(output[0], PIL.Image.Image)
            self.assertIsInstance(output[1], tuple)
            self.assertEqual(len(output[1]), 3)
            self.assertIsInstance(output[1][0], PIL.Image.Image)
            self.assertIsInstance(output[1][1], dict)
            self.assertIsInstance(output[1][2], PIL.Image.Image)
def test_feature_types_target_color(self):
with self.create_dataset(target_type='color') as (dataset, _):
color_img, color_target = dataset[0]
            self.assertIsInstance(color_img, PIL.Image.Image)
            self.assertEqual(np.array(color_target).shape[2], 4)
def test_feature_types_target_polygon(self):
with self.create_dataset(target_type='polygon') as (dataset, info):
polygon_img, polygon_target = dataset[0]
            self.assertIsInstance(polygon_img, PIL.Image.Image)
self.assertEqual(polygon_target, info['expected_polygon_target'])
class ImageNetTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.ImageNet
REQUIRED_PACKAGES = ('scipy',)
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(split=('train', 'val'))
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
wnid = 'n01234567'
if config['split'] == 'train':
num_examples = 3
datasets_utils.create_image_folder(
root=tmpdir,
name=tmpdir / 'train' / wnid / wnid,
file_name_fn=lambda image_idx: f"{wnid}_{image_idx}.JPEG",
num_examples=num_examples,
)
else:
num_examples = 1
datasets_utils.create_image_folder(
root=tmpdir,
name=tmpdir / 'val' / wnid,
                file_name_fn=lambda image_idx: f"ILSVRC2012_val_0000000{image_idx}.JPEG",
num_examples=num_examples,
)
wnid_to_classes = {wnid: [1]}
torch.save((wnid_to_classes, None), tmpdir / 'meta.bin')
return num_examples
class CIFAR10TestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.CIFAR10
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))
_VERSION_CONFIG = dict(
base_folder="cifar-10-batches-py",
train_files=tuple(f"data_batch_{idx}" for idx in range(1, 6)),
test_files=("test_batch",),
labels_key="labels",
meta_file="batches.meta",
num_categories=10,
categories_key="label_names",
)
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir) / self._VERSION_CONFIG["base_folder"]
os.makedirs(tmpdir)
num_images_per_file = 1
for name in itertools.chain(self._VERSION_CONFIG["train_files"], self._VERSION_CONFIG["test_files"]):
self._create_batch_file(tmpdir, name, num_images_per_file)
categories = self._create_meta_file(tmpdir)
return dict(
num_examples=num_images_per_file
* len(self._VERSION_CONFIG["train_files"] if config["train"] else self._VERSION_CONFIG["test_files"]),
categories=categories,
)
def _create_batch_file(self, root, name, num_images):
data = datasets_utils.create_image_or_video_tensor((num_images, 32 * 32 * 3))
labels = np.random.randint(0, self._VERSION_CONFIG["num_categories"], size=num_images).tolist()
self._create_binary_file(root, name, {"data": data, self._VERSION_CONFIG["labels_key"]: labels})
def _create_meta_file(self, root):
categories = [
f"{idx:0{len(str(self._VERSION_CONFIG['num_categories'] - 1))}d}"
for idx in range(self._VERSION_CONFIG["num_categories"])
]
self._create_binary_file(
root, self._VERSION_CONFIG["meta_file"], {self._VERSION_CONFIG["categories_key"]: categories}
)
return categories
def _create_binary_file(self, root, name, content):
with open(pathlib.Path(root) / name, "wb") as fh:
pickle.dump(content, fh)
def test_class_to_idx(self):
with self.create_dataset() as (dataset, info):
expected = {category: label for label, category in enumerate(info["categories"])}
actual = dataset.class_to_idx
self.assertEqual(actual, expected)
class CIFAR100(CIFAR10TestCase):
DATASET_CLASS = datasets.CIFAR100
_VERSION_CONFIG = dict(
base_folder="cifar-100-python",
train_files=("train",),
test_files=("test",),
labels_key="fine_labels",
meta_file="meta",
num_categories=100,
categories_key="fine_label_names",
)
class CelebATestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.CelebA
FEATURE_TYPES = (PIL.Image.Image, (torch.Tensor, int, tuple, type(None)))
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(
split=("train", "valid", "test", "all"),
target_type=("attr", "identity", "bbox", "landmarks", ["attr", "identity"]),
)
REQUIRED_PACKAGES = ("pandas",)
_SPLIT_TO_IDX = dict(train=0, valid=1, test=2)
def inject_fake_data(self, tmpdir, config):
base_folder = pathlib.Path(tmpdir) / "celeba"
os.makedirs(base_folder)
num_images, num_images_per_split = self._create_split_txt(base_folder)
datasets_utils.create_image_folder(
base_folder, "img_align_celeba", lambda idx: f"{idx + 1:06d}.jpg", num_images
)
attr_names = self._create_attr_txt(base_folder, num_images)
self._create_identity_txt(base_folder, num_images)
self._create_bbox_txt(base_folder, num_images)
self._create_landmarks_txt(base_folder, num_images)
return dict(num_examples=num_images_per_split[config["split"]], attr_names=attr_names)
def _create_split_txt(self, root):
num_images_per_split = dict(train=3, valid=2, test=1)
data = [
[self._SPLIT_TO_IDX[split]] for split, num_images in num_images_per_split.items() for _ in range(num_images)
]
self._create_txt(root, "list_eval_partition.txt", data)
num_images_per_split["all"] = num_images = sum(num_images_per_split.values())
return num_images, num_images_per_split
def _create_attr_txt(self, root, num_images):
header = ("5_o_Clock_Shadow", "Young")
data = torch.rand((num_images, len(header))).ge(0.5).int().mul(2).sub(1).tolist()
self._create_txt(root, "list_attr_celeba.txt", data, header=header, add_num_examples=True)
return header
def _create_identity_txt(self, root, num_images):
data = torch.randint(1, 4, size=(num_images, 1)).tolist()
self._create_txt(root, "identity_CelebA.txt", data)
def _create_bbox_txt(self, root, num_images):
header = ("x_1", "y_1", "width", "height")
data = torch.randint(10, size=(num_images, len(header))).tolist()
self._create_txt(
root, "list_bbox_celeba.txt", data, header=header, add_num_examples=True, add_image_id_to_header=True
)
def _create_landmarks_txt(self, root, num_images):
header = ("lefteye_x", "rightmouth_y")
data = torch.randint(10, size=(num_images, len(header))).tolist()
self._create_txt(root, "list_landmarks_align_celeba.txt", data, header=header, add_num_examples=True)
def _create_txt(self, root, name, data, header=None, add_num_examples=False, add_image_id_to_header=False):
with open(pathlib.Path(root) / name, "w") as fh:
if add_num_examples:
fh.write(f"{len(data)}\n")
if header:
if add_image_id_to_header:
header = ("image_id", *header)
fh.write(f"{' '.join(header)}\n")
for idx, line in enumerate(data, 1):
fh.write(f"{' '.join((f'{idx:06d}.jpg', *[str(value) for value in line]))}\n")
def test_combined_targets(self):
target_types = ["attr", "identity", "bbox", "landmarks"]
individual_targets = []
for target_type in target_types:
with self.create_dataset(target_type=target_type) as (dataset, _):
_, target = dataset[0]
individual_targets.append(target)
with self.create_dataset(target_type=target_types) as (dataset, _):
_, combined_targets = dataset[0]
actual = len(individual_targets)
expected = len(combined_targets)
self.assertEqual(
actual,
expected,
f"The number of the returned combined targets does not match the the number targets if requested "
f"individually: {actual} != {expected}",
)
for target_type, combined_target, individual_target in zip(target_types, combined_targets, individual_targets):
with self.subTest(target_type=target_type):
actual = type(combined_target)
expected = type(individual_target)
self.assertIs(
actual,
expected,
f"Type of the combined target does not match the type of the corresponding individual target: "
f"{actual} is not {expected}",
)
def test_no_target(self):
with self.create_dataset(target_type=[]) as (dataset, _):
_, target = dataset[0]
self.assertIsNone(target)
def test_attr_names(self):
with self.create_dataset() as (dataset, info):
self.assertEqual(tuple(dataset.attr_names), info["attr_names"])
class VOCSegmentationTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.VOCSegmentation
FEATURE_TYPES = (PIL.Image.Image, PIL.Image.Image)
ADDITIONAL_CONFIGS = (
*datasets_utils.combinations_grid(
year=[f"20{year:02d}" for year in range(7, 13)], image_set=("train", "val", "trainval")
),
dict(year="2007", image_set="test"),
dict(year="2007-test", image_set="test"),
)
def inject_fake_data(self, tmpdir, config):
year, is_test_set = (
("2007", True)
if config["year"] == "2007-test" or config["image_set"] == "test"
else (config["year"], False)
)
image_set = config["image_set"]
base_dir = pathlib.Path(tmpdir)
if year == "2011":
base_dir /= "TrainVal"
base_dir = base_dir / "VOCdevkit" / f"VOC{year}"
os.makedirs(base_dir)
num_images, num_images_per_image_set = self._create_image_set_files(base_dir, "ImageSets", is_test_set)
datasets_utils.create_image_folder(base_dir, "JPEGImages", lambda idx: f"{idx:06d}.jpg", num_images)
datasets_utils.create_image_folder(base_dir, "SegmentationClass", lambda idx: f"{idx:06d}.png", num_images)
annotation = self._create_annotation_files(base_dir, "Annotations", num_images)
return dict(num_examples=num_images_per_image_set[image_set], annotation=annotation)
def _create_image_set_files(self, root, name, is_test_set):
root = pathlib.Path(root) / name
src = pathlib.Path(root) / "Main"
os.makedirs(src, exist_ok=True)
idcs = dict(train=(0, 1, 2), val=(3, 4), test=(5,))
idcs["trainval"] = (*idcs["train"], *idcs["val"])
for image_set in ("test",) if is_test_set else ("train", "val", "trainval"):
self._create_image_set_file(src, image_set, idcs[image_set])
shutil.copytree(src, root / "Segmentation")
num_images = max(itertools.chain(*idcs.values())) + 1
num_images_per_image_set = dict([(image_set, len(idcs_)) for image_set, idcs_ in idcs.items()])
return num_images, num_images_per_image_set
def _create_image_set_file(self, root, image_set, idcs):
with open(pathlib.Path(root) / f"{image_set}.txt", "w") as fh:
fh.writelines([f"{idx:06d}\n" for idx in idcs])
def _create_annotation_files(self, root, name, num_images):
root = pathlib.Path(root) / name
os.makedirs(root)
for idx in range(num_images):
annotation = self._create_annotation_file(root, f"{idx:06d}.xml")
return annotation
def _create_annotation_file(self, root, name):
def add_child(parent, name, text=None):
child = ET.SubElement(parent, name)
child.text = text
return child
def add_name(obj, name="dog"):
add_child(obj, "name", name)
return name
def add_bndbox(obj, bndbox=None):
if bndbox is None:
bndbox = {"xmin": "1", "xmax": "2", "ymin": "3", "ymax": "4"}
obj = add_child(obj, "bndbox")
for name, text in bndbox.items():
add_child(obj, name, text)
return bndbox
annotation = ET.Element("annotation")
obj = add_child(annotation, "object")
data = dict(name=add_name(obj), bndbox=add_bndbox(obj))
with open(pathlib.Path(root) / name, "wb") as fh:
fh.write(ET.tostring(annotation))
return data
class VOCDetectionTestCase(VOCSegmentationTestCase):
DATASET_CLASS = datasets.VOCDetection
FEATURE_TYPES = (PIL.Image.Image, dict)
def test_annotations(self):
with self.create_dataset() as (dataset, info):
_, target = dataset[0]
self.assertIn("annotation", target)
annotation = target["annotation"]
self.assertIn("object", annotation)
objects = annotation["object"]
self.assertEqual(len(objects), 1)
object = objects[0]
self.assertEqual(object, info["annotation"])
class CocoDetectionTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.CocoDetection
FEATURE_TYPES = (PIL.Image.Image, list)
REQUIRED_PACKAGES = ("pycocotools",)
_IMAGE_FOLDER = "images"
_ANNOTATIONS_FOLDER = "annotations"
_ANNOTATIONS_FILE = "annotations.json"
def dataset_args(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
root = tmpdir / self._IMAGE_FOLDER
annotation_file = tmpdir / self._ANNOTATIONS_FOLDER / self._ANNOTATIONS_FILE
return root, annotation_file
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
num_images = 3
num_annotations_per_image = 2
files = datasets_utils.create_image_folder(
tmpdir, name=self._IMAGE_FOLDER, file_name_fn=lambda idx: f"{idx:012d}.jpg", num_examples=num_images
)
file_names = [file.relative_to(tmpdir / self._IMAGE_FOLDER) for file in files]
annotation_folder = tmpdir / self._ANNOTATIONS_FOLDER
os.makedirs(annotation_folder)
info = self._create_annotation_file(
annotation_folder, self._ANNOTATIONS_FILE, file_names, num_annotations_per_image
)
info["num_examples"] = num_images
return info
def _create_annotation_file(self, root, name, file_names, num_annotations_per_image):
image_ids = [int(file_name.stem) for file_name in file_names]
images = [dict(file_name=str(file_name), id=id) for file_name, id in zip(file_names, image_ids)]
annotations, info = self._create_annotations(image_ids, num_annotations_per_image)
self._create_json(root, name, dict(images=images, annotations=annotations))
return info
def _create_annotations(self, image_ids, num_annotations_per_image):
annotations = datasets_utils.combinations_grid(
image_id=image_ids, bbox=([1.0, 2.0, 3.0, 4.0],) * num_annotations_per_image
)
for id, annotation in enumerate(annotations):
annotation["id"] = id
return annotations, dict()
def _create_json(self, root, name, content):
file = pathlib.Path(root) / name
with open(file, "w") as fh:
json.dump(content, fh)
return file
class CocoCaptionsTestCase(CocoDetectionTestCase):
DATASET_CLASS = datasets.CocoCaptions
def _create_annotations(self, image_ids, num_annotations_per_image):
captions = [str(idx) for idx in range(num_annotations_per_image)]
annotations = datasets_utils.combinations_grid(image_id=image_ids, caption=captions)
for id, annotation in enumerate(annotations):
annotation["id"] = id
return annotations, dict(captions=captions)
def test_captions(self):
with self.create_dataset() as (dataset, info):
_, captions = dataset[0]
self.assertEqual(tuple(captions), tuple(info["captions"]))
class UCF101TestCase(datasets_utils.VideoDatasetTestCase):
DATASET_CLASS = datasets.UCF101
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(fold=(1, 2, 3), train=(True, False))
_VIDEO_FOLDER = "videos"
_ANNOTATIONS_FOLDER = "annotations"
def dataset_args(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
root = tmpdir / self._VIDEO_FOLDER
annotation_path = tmpdir / self._ANNOTATIONS_FOLDER
return root, annotation_path
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
video_folder = tmpdir / self._VIDEO_FOLDER
os.makedirs(video_folder)
video_files = self._create_videos(video_folder)
annotations_folder = tmpdir / self._ANNOTATIONS_FOLDER
os.makedirs(annotations_folder)
num_examples = self._create_annotation_files(annotations_folder, video_files, config["fold"], config["train"])
return num_examples
def _create_videos(self, root, num_examples_per_class=3):
def file_name_fn(cls, idx, clips_per_group=2):
return f"v_{cls}_g{(idx // clips_per_group) + 1:02d}_c{(idx % clips_per_group) + 1:02d}.avi"
video_files = [
datasets_utils.create_video_folder(root, cls, lambda idx: file_name_fn(cls, idx), num_examples_per_class)
for cls in ("ApplyEyeMakeup", "YoYo")
]
return [path.relative_to(root) for path in itertools.chain(*video_files)]
def _create_annotation_files(self, root, video_files, fold, train):
current_videos = random.sample(video_files, random.randrange(1, len(video_files) - 1))
current_annotation = self._annotation_file_name(fold, train)
self._create_annotation_file(root, current_annotation, current_videos)
other_videos = set(video_files) - set(current_videos)
other_annotations = [
self._annotation_file_name(fold, train) for fold, train in itertools.product((1, 2, 3), (True, False))
]
other_annotations.remove(current_annotation)
for name in other_annotations:
self._create_annotation_file(root, name, other_videos)
return len(current_videos)
def _annotation_file_name(self, fold, train):
return f"{'train' if train else 'test'}list{fold:02d}.txt"
def _create_annotation_file(self, root, name, video_files):
with open(pathlib.Path(root) / name, "w") as fh:
fh.writelines(f"{file}\n" for file in sorted(video_files))
class LSUNTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.LSUN
REQUIRED_PACKAGES = ("lmdb",)
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(
classes=("train", "test", "val", ["bedroom_train", "church_outdoor_train"])
)
_CATEGORIES = (
"bedroom",
"bridge",
"church_outdoor",
"classroom",
"conference_room",
"dining_room",
"kitchen",
"living_room",
"restaurant",
"tower",
)
def inject_fake_data(self, tmpdir, config):
root = pathlib.Path(tmpdir)
num_images = 0
for cls in self._parse_classes(config["classes"]):
num_images += self._create_lmdb(root, cls)
return num_images
@contextlib.contextmanager
def create_dataset(
self,
*args, **kwargs
):
with super().create_dataset(*args, **kwargs) as output:
yield output
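        # Remove the "_cache_*" key files that the LSUN dataset writes into the current working directory, so test runs leave no state behind.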
for file in os.listdir(os.getcwd()):
if file.startswith("_cache_"):
os.remove(file)
def _parse_classes(self, classes):
if not isinstance(classes, str):
return classes
split = classes
if split == "test":
return [split]
return [f"{category}_{split}" for category in self._CATEGORIES]
def _create_lmdb(self, root, cls):
lmdb = datasets_utils.lazy_importer.lmdb
hexdigits_lowercase = string.digits + string.ascii_lowercase[:6]
folder = f"{cls}_lmdb"
num_images = torch.randint(1, 4, size=()).item()
format = "png"
files = datasets_utils.create_image_folder(root, folder, lambda idx: f"{idx}.{format}", num_images)
with lmdb.open(str(root / folder)) as env, env.begin(write=True) as txn:
for file in files:
key = "".join(random.choice(hexdigits_lowercase) for _ in range(40)).encode()
buffer = io.BytesIO()
Image.open(file).save(buffer, format)
buffer.seek(0)
value = buffer.read()
txn.put(key, value)
os.remove(file)
return num_images
def test_not_found_or_corrupted(self):
with self.assertRaises(datasets_utils.lazy_importer.lmdb.Error):
super().test_not_found_or_corrupted()
class Kinetics400TestCase(datasets_utils.VideoDatasetTestCase):
DATASET_CLASS = datasets.Kinetics400
def inject_fake_data(self, tmpdir, config):
classes = ("Abseiling", "Zumba")
num_videos_per_class = 2
digits = string.ascii_letters + string.digits + "-_"
for cls in classes:
datasets_utils.create_video_folder(
tmpdir,
cls,
lambda _: f"{datasets_utils.create_random_string(11, digits)}.avi",
num_videos_per_class,
)
return num_videos_per_class * len(classes)
class HMDB51TestCase(datasets_utils.VideoDatasetTestCase):
DATASET_CLASS = datasets.HMDB51
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(fold=(1, 2, 3), train=(True, False))
_VIDEO_FOLDER = "videos"
_SPLITS_FOLDER = "splits"
_CLASSES = ("brush_hair", "wave")
def dataset_args(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
root = tmpdir / self._VIDEO_FOLDER
annotation_path = tmpdir / self._SPLITS_FOLDER
return root, annotation_path
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
video_folder = tmpdir / self._VIDEO_FOLDER
os.makedirs(video_folder)
video_files = self._create_videos(video_folder)
splits_folder = tmpdir / self._SPLITS_FOLDER
os.makedirs(splits_folder)
num_examples = self._create_split_files(splits_folder, video_files, config["fold"], config["train"])
return num_examples
def _create_videos(self, root, num_examples_per_class=3):
def file_name_fn(cls, idx, clips_per_group=2):
return f"{cls}_{(idx // clips_per_group) + 1:d}_{(idx % clips_per_group) + 1:d}.avi"
return [
(
cls,
datasets_utils.create_video_folder(
root,
cls,
lambda idx: file_name_fn(cls, idx),
num_examples_per_class,
),
)
for cls in self._CLASSES
]
def _create_split_files(self, root, video_files, fold, train):
num_videos = num_train_videos = 0
for cls, videos in video_files:
num_videos += len(videos)
train_videos = set(random.sample(videos, random.randrange(1, len(videos) - 1)))
num_train_videos += len(train_videos)
with open(pathlib.Path(root) / f"{cls}_test_split{fold}.txt", "w") as fh:
fh.writelines(f"{file.name} {1 if file in train_videos else 2}\n" for file in videos)
return num_train_videos if train else (num_videos - num_train_videos)
class OmniglotTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.Omniglot
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(background=(True, False))
def inject_fake_data(self, tmpdir, config):
target_folder = (
pathlib.Path(tmpdir) / "omniglot-py" / f"images_{'background' if config['background'] else 'evaluation'}"
)
os.makedirs(target_folder)
num_images = 0
for name in ("Alphabet_of_the_Magi", "Tifinagh"):
num_images += self._create_alphabet_folder(target_folder, name)
return num_images
def _create_alphabet_folder(self, root, name):
num_images_total = 0
for idx in range(torch.randint(1, 4, size=()).item()):
num_images = torch.randint(1, 4, size=()).item()
num_images_total += num_images
datasets_utils.create_image_folder(
root / name, f"character{idx:02d}", lambda image_idx: f"{image_idx:02d}.png", num_images
)
return num_images_total
class SBUTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.SBU
FEATURE_TYPES = (PIL.Image.Image, str)
def inject_fake_data(self, tmpdir, config):
num_images = 3
dataset_folder = pathlib.Path(tmpdir) / "dataset"
images = datasets_utils.create_image_folder(tmpdir, "dataset", self._create_file_name, num_images)
self._create_urls_txt(dataset_folder, images)
self._create_captions_txt(dataset_folder, num_images)
return num_images
def _create_file_name(self, idx):
part1 = datasets_utils.create_random_string(10, string.digits)
part2 = datasets_utils.create_random_string(10, string.ascii_lowercase, string.digits[:6])
return f"{part1}_{part2}.jpg"
def _create_urls_txt(self, root, images):
with open(root / "SBU_captioned_photo_dataset_urls.txt", "w") as fh:
for image in images:
fh.write(
f"http://static.flickr.com/{datasets_utils.create_random_string(4, string.digits)}/{image.name}\n"
)
def _create_captions_txt(self, root, num_images):
with open(root / "SBU_captioned_photo_dataset_captions.txt", "w") as fh:
for _ in range(num_images):
fh.write(f"{datasets_utils.create_random_string(10)}\n")
class SEMEIONTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.SEMEION
def inject_fake_data(self, tmpdir, config):
num_images = 3
images = torch.rand(num_images, 256)
labels = F.one_hot(torch.randint(10, size=(num_images,)))
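        # Each line of semeion.data is 256 grayscale pixel values followed by a 10-element one-hot label vector.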
with open(pathlib.Path(tmpdir) / "semeion.data", "w") as fh:
for image, one_hot_labels in zip(images, labels):
image_columns = " ".join([f"{pixel.item():.4f}" for pixel in image])
labels_columns = " ".join([str(label.item()) for label in one_hot_labels])
fh.write(f"{image_columns} {labels_columns}\n")
return num_images
class USPSTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.USPS
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))
def inject_fake_data(self, tmpdir, config):
num_images = 2 if config["train"] else 1
images = torch.rand(num_images, 256) * 2 - 1
labels = torch.randint(1, 11, size=(num_images,))
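        # USPS is distributed as bzip2-compressed text in LIBSVM format: "<label> <index>:<value> ...", with labels 1-10 and pixel values in [-1, 1].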
with bz2.open(pathlib.Path(tmpdir) / f"usps{'.t' if not config['train'] else ''}.bz2", "w") as fh:
for image, label in zip(images, labels):
line = " ".join((str(label.item()), *[f"{idx}:{pixel:.6f}" for idx, pixel in enumerate(image, 1)]))
fh.write(f"{line}\n".encode())
return num_images
class SBDatasetTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.SBDataset
FEATURE_TYPES = (PIL.Image.Image, (np.ndarray, PIL.Image.Image))
REQUIRED_PACKAGES = ("scipy.io", "scipy.sparse")
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(
image_set=("train", "val", "train_noval"), mode=("boundaries", "segmentation")
)
_NUM_CLASSES = 20
def inject_fake_data(self, tmpdir, config):
num_images, num_images_per_image_set = self._create_split_files(tmpdir)
sizes = self._create_target_folder(tmpdir, "cls", num_images)
datasets_utils.create_image_folder(
tmpdir, "img", lambda idx: f"{self._file_stem(idx)}.jpg", num_images, size=lambda idx: sizes[idx]
)
return num_images_per_image_set[config["image_set"]]
def _create_split_files(self, root):
root = pathlib.Path(root)
splits = dict(train=(0, 1, 2), train_noval=(0, 2), val=(3,))
for split, idcs in splits.items():
self._create_split_file(root, split, idcs)
num_images = max(itertools.chain(*splits.values())) + 1
num_images_per_split = dict([(split, len(idcs)) for split, idcs in splits.items()])
return num_images, num_images_per_split
def _create_split_file(self, root, name, idcs):
with open(root / f"{name}.txt", "w") as fh:
fh.writelines(f"{self._file_stem(idx)}\n" for idx in idcs)
def _create_target_folder(self, root, name, num_images):
io = datasets_utils.lazy_importer.scipy.io
target_folder = pathlib.Path(root) / name
os.makedirs(target_folder)
sizes = [torch.randint(1, 4, size=(2,)).tolist() for _ in range(num_images)]
for idx, size in enumerate(sizes):
content = dict(
GTcls=dict(Boundaries=self._create_boundaries(size), Segmentation=self._create_segmentation(size))
)
io.savemat(target_folder / f"{self._file_stem(idx)}.mat", content)
return sizes
def _create_boundaries(self, size):
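        # SBD stores per-class boundary maps as sparse matrices wrapped in MATLAB cell arrays; nested lists of csc matrices make scipy.io.savemat emit that layout.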
sparse = datasets_utils.lazy_importer.scipy.sparse
return [
[sparse.csc_matrix(torch.randint(0, 2, size=size, dtype=torch.uint8).numpy())]
for _ in range(self._NUM_CLASSES)
]
def _create_segmentation(self, size):
return torch.randint(0, self._NUM_CLASSES + 1, size=size, dtype=torch.uint8).numpy()
def _file_stem(self, idx):
return f"2008_{idx:06d}"
class FakeDataTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.FakeData
FEATURE_TYPES = (PIL.Image.Image, int)
def dataset_args(self, tmpdir, config):
return ()
def inject_fake_data(self, tmpdir, config):
return config["size"]
def test_not_found_or_corrupted(self):
self.skipTest("The data is generated at creation and thus cannot be non-existent or corrupted.")
class PhotoTourTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.PhotoTour
FEATURE_TYPES = ()
_TRAIN_FEATURE_TYPES = (torch.Tensor,)
_TEST_FEATURE_TYPES = (torch.Tensor, torch.Tensor, torch.Tensor)
    ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))
_NAME = "liberty"
def dataset_args(self, tmpdir, config):
return tmpdir, self._NAME
def inject_fake_data(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
num_patches = 5
image_files = self._create_images(tmpdir, self._NAME, num_patches)
point_ids, info_file = self._create_info_file(tmpdir / self._NAME, num_patches)
num_matches, matches_file = self._create_matches_file(tmpdir / self._NAME, num_patches, point_ids)
self._create_archive(tmpdir, self._NAME, *image_files, info_file, matches_file)
return num_patches if config["train"] else num_matches
def _create_images(self, root, name, num_images):
return datasets_utils.create_image_folder(
root, name, lambda idx: f"patches{idx:04d}.bmp", num_images, size=(1, 64, 64)
)
def _create_info_file(self, root, num_images):
point_ids = torch.randint(num_images, size=(num_images,)).tolist()
file = root / "info.txt"
with open(file, "w") as fh:
fh.writelines([f"{point_id} 0\n" for point_id in point_ids])
return point_ids, file
def _create_matches_file(self, root, num_patches, point_ids):
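        # Each matches line is "patchID1 3DpointID1 unused patchID2 3DpointID2 unused"; two patches match when their 3D point IDs agree.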
lines = [
f"{patch_id1} {point_ids[patch_id1]} 0 {patch_id2} {point_ids[patch_id2]} 0\n"
for patch_id1, patch_id2 in itertools.combinations(range(num_patches), 2)
]
file = root / "m50_100000_100000_0.txt"
with open(file, "w") as fh:
fh.writelines(lines)
return len(lines), file
def _create_archive(self, root, name, *files):
archive = root / f"{name}.zip"
with zipfile.ZipFile(archive, "w") as zip:
for file in files:
zip.write(file, arcname=file.relative_to(root))
return archive
@datasets_utils.test_all_configs
def test_feature_types(self, config):
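        # PhotoTour yields a single patch tensor per item in train mode but a (patch_1, patch_2, is_match) triple in test mode, so the expected feature types depend on the config.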
feature_types = self.FEATURE_TYPES
self.FEATURE_TYPES = self._TRAIN_FEATURE_TYPES if config["train"] else self._TEST_FEATURE_TYPES
try:
super().test_feature_types.__wrapped__(self, config)
finally:
self.FEATURE_TYPES = feature_types
class Flickr8kTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.Flickr8k
FEATURE_TYPES = (PIL.Image.Image, list)
_IMAGES_FOLDER = "images"
_ANNOTATIONS_FILE = "captions.html"
def dataset_args(self, tmpdir, config):
tmpdir = pathlib.Path(tmpdir)
root = tmpdir / self._IMAGES_FOLDER
ann_file = tmpdir / self._ANNOTATIONS_FILE
return str(root), str(ann_file)
def inject_fake_data(self, tmpdir, config):
num_images = 3
num_captions_per_image = 3
tmpdir = pathlib.Path(tmpdir)
images = self._create_images(tmpdir, self._IMAGES_FOLDER, num_images)
self._create_annotations_file(tmpdir, self._ANNOTATIONS_FILE, images, num_captions_per_image)
return dict(num_examples=num_images, captions=self._create_captions(num_captions_per_image))
def _create_images(self, root, name, num_images):
return datasets_utils.create_image_folder(root, name, self._image_file_name, num_images)
def _image_file_name(self, idx):
id = datasets_utils.create_random_string(10, string.digits)
checksum = datasets_utils.create_random_string(10, string.digits, string.ascii_lowercase[:6])
size = datasets_utils.create_random_string(1, "qwcko")
return f"{id}_{checksum}_{size}.jpg"
def _create_annotations_file(self, root, name, images, num_captions_per_image):
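        # The caption source is an HTML table: each image (plus one leading "Image Not Found" placeholder) contributes a header row followed by a row holding a <ul> of captions.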
with open(root / name, "w") as fh:
fh.write("<table>")
for image in (None, *images):
self._add_image(fh, image, num_captions_per_image)
fh.write("</table>")
def _add_image(self, fh, image, num_captions_per_image):
fh.write("<tr>")
self._add_image_header(fh, image)
fh.write("</tr><tr><td><ul>")
self._add_image_captions(fh, num_captions_per_image)
fh.write("</ul></td></tr>")
def _add_image_header(self, fh, image=None):
if image:
url = f"http://www.flickr.com/photos/user/{image.name.split('_')[0]}/"
data = f'<a href="{url}">{url}</a>'
else:
data = "Image Not Found"
fh.write(f"<td>{data}</td>")
def _add_image_captions(self, fh, num_captions_per_image):
for caption in self._create_captions(num_captions_per_image):
fh.write(f"<li>{caption}")
def _create_captions(self, num_captions_per_image):
return [str(idx) for idx in range(num_captions_per_image)]
def test_captions(self):
with self.create_dataset() as (dataset, info):
_, captions = dataset[0]
self.assertSequenceEqual(captions, info["captions"])
class Flickr30kTestCase(Flickr8kTestCase):
DATASET_CLASS = datasets.Flickr30k
FEATURE_TYPES = (PIL.Image.Image, list)
_ANNOTATIONS_FILE = "captions.token"
def _image_file_name(self, idx):
return f"{idx}.jpg"
def _create_annotations_file(self, root, name, images, num_captions_per_image):
with open(root / name, "w") as fh:
for image, (idx, caption) in itertools.product(
images, enumerate(self._create_captions(num_captions_per_image))
):
fh.write(f"{image.name}#{idx}\t{caption}\n")
class MNISTTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.MNIST
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))
_MAGIC_DTYPES = {
torch.uint8: 8,
torch.int8: 9,
torch.int16: 11,
torch.int32: 12,
torch.float32: 13,
torch.float64: 14,
}
_IMAGES_SIZE = (28, 28)
_IMAGES_DTYPE = torch.uint8
_LABELS_SIZE = ()
_LABELS_DTYPE = torch.uint8
def inject_fake_data(self, tmpdir, config):
raw_dir = pathlib.Path(tmpdir) / self.DATASET_CLASS.__name__ / "raw"
os.makedirs(raw_dir, exist_ok=True)
num_images = self._num_images(config)
self._create_binary_file(
raw_dir, self._images_file(config), (num_images, *self._IMAGES_SIZE), self._IMAGES_DTYPE
)
self._create_binary_file(
raw_dir, self._labels_file(config), (num_images, *self._LABELS_SIZE), self._LABELS_DTYPE
)
return num_images
def _num_images(self, config):
return 2 if config["train"] else 1
def _images_file(self, config):
return f"{self._prefix(config)}-images-idx3-ubyte"
def _labels_file(self, config):
return f"{self._prefix(config)}-labels-idx1-ubyte"
def _prefix(self, config):
return "train" if config["train"] else "t10k"
def _create_binary_file(self, root, filename, size, dtype):
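        # Write a minimal IDX-format file: a magic number encoding dtype and dimensionality, one big-endian int32 per dimension size, then the raw data.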
with open(pathlib.Path(root) / filename, "wb") as fh:
for meta in (self._magic(dtype, len(size)), *size):
fh.write(self._encode(meta))
data = torch.randint(0, torch.iinfo(dtype).max + 1, size, dtype=dtype)
fh.write(data.numpy().tobytes())
def _magic(self, dtype, dims):
return self._MAGIC_DTYPES[dtype] * 256 + dims
def _encode(self, v):
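        # Reversing the (little-endian) bytes of an int32 yields the big-endian representation the IDX header expects; this assumes a little-endian host.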
return torch.tensor(v, dtype=torch.int32).numpy().tobytes()[::-1]
class FashionMNISTTestCase(MNISTTestCase):
DATASET_CLASS = datasets.FashionMNIST
class KMNISTTestCase(MNISTTestCase):
DATASET_CLASS = datasets.KMNIST
class EMNISTTestCase(MNISTTestCase):
DATASET_CLASS = datasets.EMNIST
DEFAULT_CONFIG = dict(split="byclass")
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(
split=("byclass", "bymerge", "balanced", "letters", "digits", "mnist"), train=(True, False)
)
def _prefix(self, config):
return f"emnist-{config['split']}-{'train' if config['train'] else 'test'}"
class QMNISTTestCase(MNISTTestCase):
DATASET_CLASS = datasets.QMNIST
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(what=("train", "test", "test10k", "nist"))
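    # QMNIST labels carry extended metadata: eight int32 fields per image instead of MNIST's single uint8 class label.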
_LABELS_SIZE = (8,)
_LABELS_DTYPE = torch.int32
def _num_images(self, config):
if config["what"] == "nist":
return 3
elif config["what"] == "train":
return 2
elif config["what"] == "test50k":
return 10001
else:
return 1
def _labels_file(self, config):
return f"{self._prefix(config)}-labels-idx2-int"
def _prefix(self, config):
if config["what"] == "nist":
return "xnist"
if config["what"] is None:
what = "train" if config["train"] else "test"
elif config["what"].startswith("test"):
what = "test"
else:
what = config["what"]
return f"qmnist-{what}"
def test_num_examples_test50k(self):
with self.create_dataset(what="test50k") as (dataset, info):
self.assertEqual(len(dataset), info["num_examples"] - 10000)
class DatasetFolderTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.DatasetFolder
FEATURE_TYPES = (str, int)
_IMAGE_EXTENSIONS = ("jpg", "png")
_VIDEO_EXTENSIONS = ("avi", "mp4")
_EXTENSIONS = (*_IMAGE_EXTENSIONS, *_VIDEO_EXTENSIONS)
DEFAULT_CONFIG = dict(extensions=_EXTENSIONS)
ADDITIONAL_CONFIGS = (
*datasets_utils.combinations_grid(extensions=[(ext,) for ext in _IMAGE_EXTENSIONS]),
dict(extensions=_IMAGE_EXTENSIONS),
*datasets_utils.combinations_grid(extensions=[(ext,) for ext in _VIDEO_EXTENSIONS]),
dict(extensions=_VIDEO_EXTENSIONS),
)
def dataset_args(self, tmpdir, config):
return tmpdir, lambda x: x
def inject_fake_data(self, tmpdir, config):
extensions = config["extensions"] or self._is_valid_file_to_extensions(config["is_valid_file"])
num_examples_total = 0
classes = []
for ext, cls in zip(self._EXTENSIONS, string.ascii_letters):
if ext not in extensions:
continue
create_example_folder = (
datasets_utils.create_image_folder
if ext in self._IMAGE_EXTENSIONS
else datasets_utils.create_video_folder
)
num_examples = torch.randint(1, 3, size=()).item()
create_example_folder(tmpdir, cls, lambda idx: self._file_name_fn(cls, ext, idx), num_examples)
num_examples_total += num_examples
classes.append(cls)
return dict(num_examples=num_examples_total, classes=classes)
def _file_name_fn(self, cls, ext, idx):
return f"{cls}_{idx}.{ext}"
def _is_valid_file_to_extensions(self, is_valid_file):
return {ext for ext in self._EXTENSIONS if is_valid_file(f"foo.{ext}")}
@datasets_utils.test_all_configs
def test_is_valid_file(self, config):
extensions = config.pop("extensions")
with self.create_dataset(
config, extensions=None, is_valid_file=lambda file: pathlib.Path(file).suffix[1:] in extensions
) as (dataset, info):
self.assertEqual(len(dataset), info["num_examples"])
@datasets_utils.test_all_configs
def test_classes(self, config):
with self.create_dataset(config) as (dataset, info):
self.assertSequenceEqual(dataset.classes, info["classes"])
class ImageFolderTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.ImageFolder
def inject_fake_data(self, tmpdir, config):
num_examples_total = 0
classes = ("a", "b")
for cls in classes:
num_examples = torch.randint(1, 3, size=()).item()
num_examples_total += num_examples
datasets_utils.create_image_folder(tmpdir, cls, lambda idx: f"{cls}_{idx}.png", num_examples)
return dict(num_examples=num_examples_total, classes=classes)
@datasets_utils.test_all_configs
def test_classes(self, config):
with self.create_dataset(config) as (dataset, info):
self.assertSequenceEqual(dataset.classes, info["classes"])
class KittiTestCase(datasets_utils.ImageDatasetTestCase):
DATASET_CLASS = datasets.Kitti
FEATURE_TYPES = (PIL.Image.Image, (list, type(None)))
ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))
def inject_fake_data(self, tmpdir, config):
kitti_dir = os.path.join(tmpdir, "Kitti", "raw")
os.makedirs(kitti_dir)
split_to_num_examples = {
True: 1,
False: 2,
}
for is_training in (True, False):
num_examples = split_to_num_examples[is_training]
datasets_utils.create_image_folder(
root=kitti_dir,
name=os.path.join("training" if is_training else "testing", "image_2"),
file_name_fn=lambda image_idx: f"{image_idx:06d}.png",
num_examples=num_examples,
)
if is_training:
for image_idx in range(num_examples):
target_file_dir = os.path.join(kitti_dir, "training", "label_2")
                    os.makedirs(target_file_dir, exist_ok=True)
target_file_name = os.path.join(target_file_dir, f"{image_idx:06d}.txt")
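                    # One KITTI label line: type, truncation, occlusion, alpha, 2D bbox (4 values), 3D dimensions (3), location (3), rotation_y.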
target_contents = "Pedestrian 0.00 0 -0.20 712.40 143.00 810.73 307.92 1.89 0.48 1.20 1.84 1.47 8.41 0.01\n"
with open(target_file_name, "w") as target_file:
target_file.write(target_contents)
return split_to_num_examples[config["train"]]
if __name__ == "__main__":
unittest.main()
| true
| true
|
1c45af2d6128c89098abeaec9ca933517547a304
| 2,864
|
py
|
Python
|
tests/functional/test_email_address.py
|
AutumnalDream/tartiflette-plugin-scalars
|
2c73b20eac93b364a97b2192956e5fd4034ec35a
|
[
"MIT"
] | 8
|
2019-10-02T12:47:15.000Z
|
2021-12-15T14:29:37.000Z
|
tests/functional/test_email_address.py
|
AutumnalDream/tartiflette-plugin-scalars
|
2c73b20eac93b364a97b2192956e5fd4034ec35a
|
[
"MIT"
] | 109
|
2019-09-19T13:37:43.000Z
|
2022-03-28T07:08:50.000Z
|
tests/functional/test_email_address.py
|
AutumnalDream/tartiflette-plugin-scalars
|
2c73b20eac93b364a97b2192956e5fd4034ec35a
|
[
"MIT"
] | 4
|
2019-10-26T19:57:20.000Z
|
2021-06-24T14:32:37.000Z
|
import pytest
from tartiflette import Resolver, create_engine
@pytest.mark.asyncio
async def test_email_address_ok():
@Resolver("Query.email", schema_name="test_email_address_ok")
async def email_resolver(*_args, **_kwargs):
return "alice.girardguittard@dm.com"
sdl = """
type Query {
email: EmailAddress
}
"""
engine = await create_engine(
sdl=sdl,
modules=[{"name": "tartiflette_plugin_scalars", "config": {}}],
schema_name="test_email_address_ok",
)
assert await engine.execute("query email { email }") == {
"data": {"email": "alice.girardguittard@dm.com"}
}
@pytest.mark.asyncio
async def test_email_address_nok():
@Resolver("Query.email", schema_name="test_email_address_nok")
async def email_resolver(*_args, **_kwargs):
return "nope"
sdl = """
type Query {
email: EmailAddress
}
"""
engine = await create_engine(
sdl=sdl,
modules=[{"name": "tartiflette_plugin_scalars", "config": {}}],
schema_name="test_email_address_nok",
)
result = await engine.execute("query email { email }")
assert result["data"]["email"] is None
assert len(result["errors"]) == 1
assert (
result["errors"][0]["message"]
== "Value is not a valid email address: < nope >"
)
@pytest.mark.asyncio
async def test_email_address_mutation_ok():
@Resolver("Mutation.email", schema_name="test_email_address_mutation_ok")
async def email_resolver(*_args, **_kwargs):
return True
sdl = """
type Query {
email: EmailAddress
}
type Mutation {
email(input: EmailAddress): Boolean
}
"""
engine = await create_engine(
sdl=sdl,
modules=[{"name": "tartiflette_plugin_scalars", "config": {}}],
schema_name="test_email_address_mutation_ok",
)
assert await engine.execute(
'mutation email { email(input:"alice.girardguittard@dailymotion.com") }'
) == {"data": {"email": True}}
@pytest.mark.asyncio
async def test_email_address_mutation_nok():
@Resolver("Mutation.email", schema_name="test_email_address_mutation_nok")
async def email_resolver(*_args, **_kwargs):
return True
sdl = """
type Query {
email: EmailAddress
}
type Mutation {
email(input: EmailAddress): Boolean
}
"""
engine = await create_engine(
sdl=sdl,
modules=[{"name": "tartiflette_plugin_scalars", "config": {}}],
schema_name="test_email_address_mutation_nok",
)
result = await engine.execute('mutation email { email(input:"nok") }')
assert result["data"] is None
assert len(result["errors"]) == 1
assert (
result["errors"][0]["message"]
== "Value nok is not of correct type EmailAddress"
)
| 25.571429
| 80
| 0.623953
|
import pytest
from tartiflette import Resolver, create_engine
@pytest.mark.asyncio
async def test_email_address_ok():
@Resolver("Query.email", schema_name="test_email_address_ok")
async def email_resolver(*_args, **_kwargs):
return "alice.girardguittard@dm.com"
sdl = """
type Query {
email: EmailAddress
}
"""
engine = await create_engine(
sdl=sdl,
modules=[{"name": "tartiflette_plugin_scalars", "config": {}}],
schema_name="test_email_address_ok",
)
assert await engine.execute("query email { email }") == {
"data": {"email": "alice.girardguittard@dm.com"}
}
@pytest.mark.asyncio
async def test_email_address_nok():
@Resolver("Query.email", schema_name="test_email_address_nok")
async def email_resolver(*_args, **_kwargs):
return "nope"
sdl = """
type Query {
email: EmailAddress
}
"""
engine = await create_engine(
sdl=sdl,
modules=[{"name": "tartiflette_plugin_scalars", "config": {}}],
schema_name="test_email_address_nok",
)
result = await engine.execute("query email { email }")
assert result["data"]["email"] is None
assert len(result["errors"]) == 1
assert (
result["errors"][0]["message"]
== "Value is not a valid email address: < nope >"
)
@pytest.mark.asyncio
async def test_email_address_mutation_ok():
@Resolver("Mutation.email", schema_name="test_email_address_mutation_ok")
async def email_resolver(*_args, **_kwargs):
return True
sdl = """
type Query {
email: EmailAddress
}
type Mutation {
email(input: EmailAddress): Boolean
}
"""
engine = await create_engine(
sdl=sdl,
modules=[{"name": "tartiflette_plugin_scalars", "config": {}}],
schema_name="test_email_address_mutation_ok",
)
assert await engine.execute(
'mutation email { email(input:"alice.girardguittard@dailymotion.com") }'
) == {"data": {"email": True}}
@pytest.mark.asyncio
async def test_email_address_mutation_nok():
@Resolver("Mutation.email", schema_name="test_email_address_mutation_nok")
async def email_resolver(*_args, **_kwargs):
return True
sdl = """
type Query {
email: EmailAddress
}
type Mutation {
email(input: EmailAddress): Boolean
}
"""
engine = await create_engine(
sdl=sdl,
modules=[{"name": "tartiflette_plugin_scalars", "config": {}}],
schema_name="test_email_address_mutation_nok",
)
result = await engine.execute('mutation email { email(input:"nok") }')
assert result["data"] is None
assert len(result["errors"]) == 1
assert (
result["errors"][0]["message"]
== "Value nok is not of correct type EmailAddress"
)
| true
| true
|
1c45af5f2860e383958cbd656df2e212b922f313
| 3,327
|
py
|
Python
|
tests/parse/test_parse_reference.py
|
wbknez/breakdb
|
f783820425c8cb70d8caedc6f5839a72de7c945e
|
[
"Apache-2.0"
] | 1
|
2020-02-03T18:31:20.000Z
|
2020-02-03T18:31:20.000Z
|
tests/parse/test_parse_reference.py
|
wbknez/breakdb
|
f783820425c8cb70d8caedc6f5839a72de7c945e
|
[
"Apache-2.0"
] | null | null | null |
tests/parse/test_parse_reference.py
|
wbknez/breakdb
|
f783820425c8cb70d8caedc6f5839a72de7c945e
|
[
"Apache-2.0"
] | null | null | null |
"""
Contains unit tests to ensure that all functions involved in parsing DICOM
references work as intended.
"""
import pytest
from breakdb.parse import has_reference, parse_reference
from breakdb.tag import ReferenceTag, get_tag_at, MalformedSequence, \
MissingSequence, MissingTag
from tests.helpers.tag import match
class TestParseReference:
"""
Test suite for :function: 'has_reference' and :function: 'parse_reference'.
"""
def test_has_reference_is_false_when_reference_is_missing(self,
create_dataset):
ds = create_dataset(excludes=[ReferenceTag.SEQUENCE])
assert not has_reference(ds)
def test_has_reference_is_false_when_no_references_exist(self,
create_dataset):
ds = create_dataset()
del ds[ReferenceTag.SEQUENCE.value].value[0]
assert not has_reference(ds)
def test_has_reference_succeeds(self, create_dataset):
ds = create_dataset()
assert has_reference(ds)
def test_parse_reference_succeeds(self, create_dataset):
ds = create_dataset()
seq = get_tag_at(ds, 0, ReferenceTag.SEQUENCE)
obj = get_tag_at(seq, 0, ReferenceTag.OBJECT)
parsed = parse_reference(ds)
match(obj, parsed[ReferenceTag.SEQUENCE.value], ReferenceTag.SOP_CLASS)
match(obj, parsed[ReferenceTag.SEQUENCE.value], ReferenceTag.SOP_INSTANCE)
match(seq, parsed[ReferenceTag.SEQUENCE.value], ReferenceTag.SERIES)
def test_parse_reference_throws_when_sequence_is_missing(self,
create_dataset):
ds = create_dataset()
del ds[ReferenceTag.SEQUENCE.value]
with pytest.raises(MissingSequence):
parse_reference(ds)
def test_parse_reference_throws_when_object_is_missing(self,
create_dataset):
ds = create_dataset()
del ds[ReferenceTag.SEQUENCE.value].value[0]
with pytest.raises(MalformedSequence):
parse_reference(ds)
def test_parse_reference_throws_when_class_is_missing(self,
create_dataset):
ds = create_dataset()
seq = get_tag_at(ds, 0, ReferenceTag.SEQUENCE)
obj = get_tag_at(seq, 0, ReferenceTag.OBJECT)
del obj[ReferenceTag.SOP_CLASS.value]
with pytest.raises(MissingTag):
parse_reference(ds)
def test_parse_reference_throws_when_instance_is_missing(self,
create_dataset):
ds = create_dataset()
seq = get_tag_at(ds, 0, ReferenceTag.SEQUENCE)
obj = get_tag_at(seq, 0, ReferenceTag.OBJECT)
del obj[ReferenceTag.SOP_INSTANCE.value]
with pytest.raises(MissingTag):
parse_reference(ds)
def test_parse_reference_throws_when_series_is_missing(self,
create_dataset):
ds = create_dataset()
seq = get_tag_at(ds, 0, ReferenceTag.SEQUENCE)
del seq[ReferenceTag.SERIES.value]
with pytest.raises(MissingTag):
parse_reference(ds)
| 32.940594
| 82
| 0.62098
|
import pytest
from breakdb.parse import has_reference, parse_reference
from breakdb.tag import ReferenceTag, get_tag_at, MalformedSequence, \
MissingSequence, MissingTag
from tests.helpers.tag import match
class TestParseReference:
def test_has_reference_is_false_when_reference_is_missing(self,
create_dataset):
ds = create_dataset(excludes=[ReferenceTag.SEQUENCE])
assert not has_reference(ds)
def test_has_reference_is_false_when_no_references_exist(self,
create_dataset):
ds = create_dataset()
del ds[ReferenceTag.SEQUENCE.value].value[0]
assert not has_reference(ds)
def test_has_reference_succeeds(self, create_dataset):
ds = create_dataset()
assert has_reference(ds)
def test_parse_reference_succeeds(self, create_dataset):
ds = create_dataset()
seq = get_tag_at(ds, 0, ReferenceTag.SEQUENCE)
obj = get_tag_at(seq, 0, ReferenceTag.OBJECT)
parsed = parse_reference(ds)
match(obj, parsed[ReferenceTag.SEQUENCE.value], ReferenceTag.SOP_CLASS)
match(obj, parsed[ReferenceTag.SEQUENCE.value], ReferenceTag.SOP_INSTANCE)
match(seq, parsed[ReferenceTag.SEQUENCE.value], ReferenceTag.SERIES)
def test_parse_reference_throws_when_sequence_is_missing(self,
create_dataset):
ds = create_dataset()
del ds[ReferenceTag.SEQUENCE.value]
with pytest.raises(MissingSequence):
parse_reference(ds)
def test_parse_reference_throws_when_object_is_missing(self,
create_dataset):
ds = create_dataset()
del ds[ReferenceTag.SEQUENCE.value].value[0]
with pytest.raises(MalformedSequence):
parse_reference(ds)
def test_parse_reference_throws_when_class_is_missing(self,
create_dataset):
ds = create_dataset()
seq = get_tag_at(ds, 0, ReferenceTag.SEQUENCE)
obj = get_tag_at(seq, 0, ReferenceTag.OBJECT)
del obj[ReferenceTag.SOP_CLASS.value]
with pytest.raises(MissingTag):
parse_reference(ds)
def test_parse_reference_throws_when_instance_is_missing(self,
create_dataset):
ds = create_dataset()
seq = get_tag_at(ds, 0, ReferenceTag.SEQUENCE)
obj = get_tag_at(seq, 0, ReferenceTag.OBJECT)
del obj[ReferenceTag.SOP_INSTANCE.value]
with pytest.raises(MissingTag):
parse_reference(ds)
def test_parse_reference_throws_when_series_is_missing(self,
create_dataset):
ds = create_dataset()
seq = get_tag_at(ds, 0, ReferenceTag.SEQUENCE)
del seq[ReferenceTag.SERIES.value]
with pytest.raises(MissingTag):
parse_reference(ds)
| true
| true
|
1c45b05c5d250ea77c37d28b3bab75d2b9cf9824
| 143,725
|
py
|
Python
|
corehq/apps/accounting/models.py
|
satyaakam/commcare-hq
|
233f255ff20ab3a16013e9fdfdb9c1dcf632e415
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/accounting/models.py
|
satyaakam/commcare-hq
|
233f255ff20ab3a16013e9fdfdb9c1dcf632e415
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/accounting/models.py
|
satyaakam/commcare-hq
|
233f255ff20ab3a16013e9fdfdb9c1dcf632e415
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import itertools
from decimal import Decimal
from io import BytesIO
from tempfile import NamedTemporaryFile
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models, transaction
from django.db.models import F, Q
from django.db.models.manager import Manager
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.utils.translation import ugettext_lazy as _
import jsonfield
import stripe
from django_prbac.models import Role
from memoized import memoized
from corehq.apps.domain.shortcuts import publish_domain_saved
from dimagi.ext.couchdbkit import (
BooleanProperty,
DateTimeProperty,
SafeSaveDocument,
StringProperty,
)
from dimagi.utils.web import get_site_domain
from corehq.apps.accounting.emails import send_subscription_change_alert
from corehq.apps.accounting.exceptions import (
AccountingError,
CreditLineError,
InvoiceEmailThrottledError,
NewSubscriptionError,
ProductPlanNotFoundError,
SubscriptionAdjustmentError,
SubscriptionChangeError,
SubscriptionReminderError,
SubscriptionRenewalError,
)
from corehq.apps.accounting.invoice_pdf import InvoiceTemplate
from corehq.apps.accounting.signals import subscription_upgrade_or_downgrade
from corehq.apps.accounting.subscription_changes import (
DomainDowngradeActionHandler,
DomainUpgradeActionHandler,
)
from corehq.apps.accounting.utils import (
EXCHANGE_RATE_DECIMAL_PLACES,
ensure_domain_instance,
fmt_dollar_amount,
get_account_name_from_default_name,
get_address_from_invoice,
get_change_status,
get_dimagi_from_email,
get_privileges,
is_active_subscription,
log_accounting_error,
log_accounting_info,
quantize_accounting_decimal,
)
from corehq.apps.domain import UNKNOWN_DOMAIN
from corehq.apps.domain.models import Domain
from corehq.apps.hqwebapp.tasks import send_html_email_async
from corehq.apps.users.models import WebUser
from corehq.blobs.mixin import CODES, BlobMixin
from corehq.const import USER_DATE_FORMAT
from corehq.privileges import REPORT_BUILDER_ADD_ON_PRIVS
from corehq.util.dates import get_first_last_days
from corehq.util.mixin import ValidateModelMixin
from corehq.util.quickcache import quickcache
from corehq.util.soft_assert import soft_assert
from corehq.util.view_utils import absolute_reverse
integer_field_validators = [MaxValueValidator(2147483647), MinValueValidator(-2147483648)]
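# The bounds above are those of a signed 32-bit integer, i.e. the range of a PostgreSQL "integer" column.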
MAX_INVOICE_COMMUNICATIONS = 5
SMALL_INVOICE_THRESHOLD = 100
UNLIMITED_FEATURE_USAGE = -1
MINIMUM_SUBSCRIPTION_LENGTH = 30
_soft_assert_contact_emails_missing = soft_assert(
to=['{}@{}'.format(email, 'dimagi.com') for email in [
'accounts',
'billing-dev',
]],
exponential_backoff=False,
)
class BillingAccountType(object):
CONTRACT = "CONTRACT"
USER_CREATED = "USER_CREATED"
GLOBAL_SERVICES = "GLOBAL_SERVICES"
INVOICE_GENERATED = "INVOICE_GENERATED"
TRIAL = "TRIAL"
CHOICES = (
(CONTRACT, "Created by contract"),
(USER_CREATED, "Created by user"),
(GLOBAL_SERVICES, "Created by Global Services"),
(INVOICE_GENERATED, "Generated by an invoice"),
(TRIAL, "Is trial account"),
)
class InvoicingPlan(object):
MONTHLY = "MONTHLY"
QUARTERLY = "QUARTERLY"
YEARLY = "YEARLY"
CHOICES = (
(MONTHLY, "Monthly"),
(QUARTERLY, "Quarterly"),
(YEARLY, "Yearly")
)
class FeatureType(object):
USER = "User"
SMS = "SMS"
CHOICES = (
(USER, USER),
(SMS, SMS),
)
class SoftwarePlanEdition(object):
COMMUNITY = "Community"
STANDARD = "Standard"
PRO = "Pro"
ADVANCED = "Advanced"
ENTERPRISE = "Enterprise"
RESELLER = "Reseller"
MANAGED_HOSTING = "Managed Hosting"
PAUSED = "Paused"
CHOICES = (
(COMMUNITY, COMMUNITY),
(STANDARD, STANDARD),
(PRO, PRO),
(ADVANCED, ADVANCED),
(ENTERPRISE, ENTERPRISE),
(PAUSED, PAUSED),
(RESELLER, RESELLER),
(MANAGED_HOSTING, MANAGED_HOSTING),
)
SELF_SERVICE_ORDER = [
PAUSED,
COMMUNITY,
STANDARD,
PRO,
ADVANCED,
]
class SoftwarePlanVisibility(object):
PUBLIC = "PUBLIC"
INTERNAL = "INTERNAL"
TRIAL = "TRIAL"
CHOICES = (
(PUBLIC, "Anyone can subscribe"),
(INTERNAL, "Dimagi must create subscription"),
(TRIAL, "This is a Trial Plan"),
)
class CreditAdjustmentReason(object):
DIRECT_PAYMENT = "DIRECT_PAYMENT"
SALESFORCE = "SALESFORCE"
INVOICE = "INVOICE"
LINE_ITEM = "LINE_ITEM"
TRANSFER = "TRANSFER"
MANUAL = "MANUAL"
CHOICES = (
(MANUAL, "manual"),
(SALESFORCE, "via Salesforce"),
(INVOICE, "invoice generated"),
(LINE_ITEM, "line item generated"),
(TRANSFER, "transfer from another credit line"),
(DIRECT_PAYMENT, "payment from client received"),
)
class SubscriptionAdjustmentReason(object):
CREATE = "CREATE"
MODIFY = "MODIFY"
CANCEL = "CANCEL"
UPGRADE = "UPGRADE"
DOWNGRADE = "DOWNGRADE"
SWITCH = "SWITCH"
REACTIVATE = "REACTIVATE"
RENEW = "RENEW"
CHOICES = (
(CREATE, "A new subscription created from scratch."),
(MODIFY, "Some part of the subscription was modified...likely a date."),
(CANCEL, "The subscription was cancelled with no followup subscription."),
(UPGRADE, "The subscription was upgraded to the related subscription."),
(DOWNGRADE, "The subscription was downgraded to the related subscription."),
(SWITCH, "The plan was changed to the related subscription and "
"was neither an upgrade or downgrade."),
(REACTIVATE, "The subscription was reactivated."),
(RENEW, "The subscription was renewed."),
)
class SubscriptionAdjustmentMethod(object):
USER = "USER"
INTERNAL = "INTERNAL"
TASK = "TASK"
TRIAL = "TRIAL"
AUTOMATIC_DOWNGRADE = 'AUTOMATIC_DOWNGRADE'
DEFAULT_COMMUNITY = 'DEFAULT_COMMUNITY'
INVOICING = 'INVOICING'
CHOICES = (
(USER, "User"),
(INTERNAL, "Ops"),
(TASK, "[Deprecated] Task (Invoicing)"),
(TRIAL, "30 Day Trial"),
(AUTOMATIC_DOWNGRADE, "Automatic Downgrade"),
(DEFAULT_COMMUNITY, 'Default to Community'),
(INVOICING, 'Invoicing')
)
class PaymentMethodType(object):
STRIPE = "Stripe"
CHOICES = (
(STRIPE, STRIPE),
)
class SubscriptionType(object):
IMPLEMENTATION = "IMPLEMENTATION"
PRODUCT = "PRODUCT"
TRIAL = "TRIAL"
EXTENDED_TRIAL = "EXTENDED_TRIAL"
SANDBOX = "SANDBOX"
INTERNAL = "INTERNAL"
NOT_SET = "NOT_SET"
CHOICES = (
(IMPLEMENTATION, "Implementation"),
(PRODUCT, "Product"),
(TRIAL, "Trial"),
(EXTENDED_TRIAL, "Extended Trial"),
(SANDBOX, "Sandbox"),
(INTERNAL, "Internal"),
)
class ProBonoStatus(object):
YES = "PRO_BONO"
NO = "FULL_PRICE"
DISCOUNTED = "DISCOUNTED"
CHOICES = (
(NO, "Full Price"),
(DISCOUNTED, "Discounted"),
(YES, "Pro Bono"),
)
class FundingSource(object):
DIMAGI = "DIMAGI"
CLIENT = "CLIENT"
EXTERNAL = "EXTERNAL"
CHOICES = (
(DIMAGI, "Dimagi"),
(CLIENT, "Client Funding"),
(EXTERNAL, "External Funding"),
)
class EntryPoint(object):
CONTRACTED = "CONTRACTED"
SELF_STARTED = "SELF_STARTED"
NOT_SET = "NOT_SET"
CHOICES = (
(CONTRACTED, "Contracted"),
(SELF_STARTED, "Self-started"),
(NOT_SET, "Not Set"),
)
class LastPayment(object):
CC_ONE_TIME = "CC_ONE_TIME"
CC_AUTO = "CC_AUTO"
WIRE = "WIRE"
ACH = "ACH"
OTHER = "OTHER"
BU_PAYMENT = "BU_PAYMENT"
NONE = "NONE"
CHOICES = (
(CC_ONE_TIME, "Credit Card - One Time"),
(CC_AUTO, "Credit Card - Autopay"),
(WIRE, "Wire"),
(ACH, "ACH"),
(OTHER, "Other"),
(BU_PAYMENT, "Payment to local BU"),
(NONE, "None"),
)
class PreOrPostPay(object):
PREPAY = "PREPAY"
POSTPAY = "POSTPAY"
NOT_SET = "NOT_SET"
CHOICES = (
(PREPAY, "Prepay"),
(POSTPAY, "Postpay"),
(NOT_SET, "Not Set"),
)
class Currency(models.Model):
"""
    Keeps track of the current conversion rates so that we don't have to poll the free but rate-limited API
from Open Exchange Rates. Necessary for billing things like MACH SMS.
"""
code = models.CharField(max_length=3, unique=True)
name = models.CharField(max_length=25, db_index=True)
symbol = models.CharField(max_length=10)
rate_to_default = models.DecimalField(
default=Decimal('1.0'), max_digits=20,
decimal_places=EXCHANGE_RATE_DECIMAL_PLACES,
)
date_updated = models.DateField(auto_now=True)
class Meta(object):
app_label = 'accounting'
@classmethod
def get_default(cls):
default, _ = cls.objects.get_or_create(code=settings.DEFAULT_CURRENCY)
return default
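# Illustrative sketch, not part of the original source: converting an amount
# quoted in another currency into the default currency. The 'EUR' code and the
# amount are hypothetical, and this assumes rate_to_default stores units of
# this currency per one unit of the default currency (the Open Exchange Rates
# convention):
#
#   eur = Currency.objects.get(code='EUR')
#   amount_in_default = Decimal('10.00') / eur.rate_to_default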
DEFAULT_ACCOUNT_FORMAT = 'Account for Project %s'
class BillingAccount(ValidateModelMixin, models.Model):
"""
The key model that links a Subscription to its financial source and methods of payment.
"""
name = models.CharField(max_length=200, db_index=True, unique=True)
salesforce_account_id = models.CharField(
db_index=True,
max_length=80,
blank=True,
null=True,
help_text="This is how we link to the salesforce account",
)
created_by = models.CharField(max_length=80, blank=True)
created_by_domain = models.CharField(max_length=256, null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True)
dimagi_contact = models.EmailField(blank=True)
currency = models.ForeignKey(Currency, on_delete=models.PROTECT)
is_auto_invoiceable = models.BooleanField(default=False)
date_confirmed_extra_charges = models.DateTimeField(null=True, blank=True)
account_type = models.CharField(
max_length=25,
default=BillingAccountType.CONTRACT,
choices=BillingAccountType.CHOICES,
)
is_active = models.BooleanField(default=True)
is_customer_billing_account = models.BooleanField(default=False, db_index=True)
enterprise_admin_emails = ArrayField(models.EmailField(), default=list, blank=True)
enterprise_restricted_signup_domains = ArrayField(models.CharField(max_length=128), default=list, blank=True)
invoicing_plan = models.CharField(
max_length=25,
default=InvoicingPlan.MONTHLY,
choices=InvoicingPlan.CHOICES
)
entry_point = models.CharField(
max_length=25,
default=EntryPoint.NOT_SET,
choices=EntryPoint.CHOICES,
)
auto_pay_user = models.CharField(max_length=80, null=True, blank=True)
last_modified = models.DateTimeField(auto_now=True)
last_payment_method = models.CharField(
max_length=25,
default=LastPayment.NONE,
choices=LastPayment.CHOICES,
)
pre_or_post_pay = models.CharField(
max_length=25,
default=PreOrPostPay.NOT_SET,
choices=PreOrPostPay.CHOICES,
)
# Settings visible to external users
restrict_domain_creation = models.BooleanField(default=False)
restrict_signup = models.BooleanField(default=False, db_index=True)
restrict_signup_message = models.CharField(max_length=512, null=True, blank=True)
class Meta(object):
app_label = 'accounting'
@property
def auto_pay_enabled(self):
return self.auto_pay_user is not None
@classmethod
def create_account_for_domain(cls, domain,
created_by=None, account_type=None,
entry_point=None, last_payment_method=None,
pre_or_post_pay=None):
account_type = account_type or BillingAccountType.INVOICE_GENERATED
entry_point = entry_point or EntryPoint.NOT_SET
last_payment_method = last_payment_method or LastPayment.NONE
pre_or_post_pay = pre_or_post_pay or PreOrPostPay.POSTPAY
default_name = DEFAULT_ACCOUNT_FORMAT % domain
name = get_account_name_from_default_name(default_name)
return BillingAccount.objects.create(
name=name,
created_by=created_by,
created_by_domain=domain,
currency=Currency.get_default(),
account_type=account_type,
entry_point=entry_point,
last_payment_method=last_payment_method,
pre_or_post_pay=pre_or_post_pay
)
@classmethod
def get_or_create_account_by_domain(cls, domain,
created_by=None, account_type=None,
entry_point=None, last_payment_method=None,
pre_or_post_pay=None):
"""
First try to grab the account used for the last subscription.
If an account is not found, create it.
"""
account = cls.get_account_by_domain(domain)
if account:
return account, False
return cls.create_account_for_domain(
domain,
created_by=created_by,
account_type=account_type,
entry_point=entry_point,
last_payment_method=last_payment_method,
pre_or_post_pay=pre_or_post_pay,
), True
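    # Illustrative usage sketch, not part of the original source; the domain
    # name is hypothetical:
    #
    #   account, is_new = BillingAccount.get_or_create_account_by_domain(
    #       'example-project', created_by='admin@example.com')
    #
    # is_new is True only when no account was found via the domain's active
    # subscription or created_by_domain, and a new one was created instead.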
@classmethod
def get_account_by_domain(cls, domain):
current_subscription = Subscription.get_active_subscription_by_domain(domain)
if current_subscription is not None:
return current_subscription.account
else:
return cls._get_account_by_created_by_domain(domain)
@classmethod
def _get_account_by_created_by_domain(cls, domain):
try:
return cls.objects.get(created_by_domain=domain)
except cls.DoesNotExist:
return None
except cls.MultipleObjectsReturned:
log_accounting_error(
f"Multiple billing accounts showed up for the domain '{domain}'. The "
"latest one was served, but you should reconcile very soon.",
show_stack_trace=True,
)
return cls.objects.filter(created_by_domain=domain).latest('date_created')
@classmethod
@quickcache([], timeout=60 * 60)
def get_enterprise_restricted_signup_accounts(cls):
return BillingAccount.objects.filter(is_customer_billing_account=True, restrict_signup=True)
@property
def autopay_card(self):
if not self.auto_pay_enabled:
return None
return StripePaymentMethod.objects.get(web_user=self.auto_pay_user).get_autopay_card(self)
def has_enterprise_admin(self, email):
return self.is_customer_billing_account and email in self.enterprise_admin_emails
def update_autopay_user(self, new_user, domain):
if self.auto_pay_enabled and new_user != self.auto_pay_user:
self._send_autopay_card_removed_email(new_user=new_user, domain=domain)
self.auto_pay_user = new_user
self.save()
self._send_autopay_card_added_email(domain)
def remove_autopay_user(self):
self.auto_pay_user = None
self.save()
def _send_autopay_card_removed_email(self, new_user, domain):
"""Sends an email to the old autopayer for this account telling them {new_user} is now the autopayer"""
from corehq.apps.domain.views.accounting import EditExistingBillingAccountView
old_user = self.auto_pay_user
subject = _("Your card is no longer being used to auto-pay for {billing_account}").format(
billing_account=self.name)
old_web_user = WebUser.get_by_username(old_user)
if old_web_user:
old_user_name = old_web_user.first_name
else:
old_user_name = old_user
context = {
'new_user': new_user,
'old_user_name': old_user_name,
'billing_account_name': self.name,
'billing_info_url': absolute_reverse(EditExistingBillingAccountView.urlname,
args=[domain]),
'invoicing_contact_email': settings.INVOICING_CONTACT_EMAIL,
}
send_html_email_async(
subject,
old_user,
render_to_string('accounting/email/autopay_card_removed.html', context),
text_content=strip_tags(render_to_string('accounting/email/autopay_card_removed.html', context)),
)
def _send_autopay_card_added_email(self, domain):
"""Sends an email to the new autopayer for this account telling them they are now the autopayer"""
from corehq.apps.domain.views.accounting import EditExistingBillingAccountView
subject = _("Your card is being used to auto-pay for {billing_account}").format(
billing_account=self.name)
web_user = WebUser.get_by_username(self.auto_pay_user)
new_user_name = web_user.first_name if web_user else self.auto_pay_user
try:
last_4 = self.autopay_card.last4
except StripePaymentMethod.DoesNotExist:
last_4 = None
context = {
'name': new_user_name,
'email': self.auto_pay_user,
'domain': domain,
'last_4': last_4,
'billing_account_name': self.name,
'billing_info_url': absolute_reverse(EditExistingBillingAccountView.urlname,
args=[domain]),
'invoicing_contact_email': settings.INVOICING_CONTACT_EMAIL,
}
send_html_email_async(
subject,
self.auto_pay_user,
render_to_string('accounting/email/invoice_autopay_setup.html', context),
text_content=strip_tags(render_to_string('accounting/email/invoice_autopay_setup.html', context)),
)
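    # Illustrative usage sketch, not part of the original source; the user and
    # domain names are hypothetical:
    #
    #   account.update_autopay_user('payer@example.com', 'example-project')
    #
    # This emails the previous autopayer (if any) that their card will no
    # longer be charged, saves the new autopay user, and emails them that
    # their card is now used for auto-payment.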
class BillingContactInfo(models.Model):
account = models.OneToOneField(BillingAccount, primary_key=True, null=False, on_delete=models.CASCADE)
first_name = models.CharField(
max_length=50, null=True, blank=True, verbose_name=_("First Name")
)
last_name = models.CharField(
max_length=50, null=True, blank=True, verbose_name=_("Last Name")
)
    # TODO - replace with django.contrib.postgres.fields.ArrayField now that django >= 1.9
email_list = jsonfield.JSONField(
default=list,
verbose_name=_("Contact Emails"),
help_text=_("We will email communications regarding your account "
"to the emails specified here.")
)
phone_number = models.CharField(
max_length=20, null=True, blank=True, verbose_name=_("Phone Number")
)
company_name = models.CharField(
max_length=50, null=True, blank=True,
verbose_name=_("Company / Organization")
)
first_line = models.CharField(
max_length=50, null=False,
verbose_name=_("Address First Line")
)
second_line = models.CharField(
max_length=50, null=True, blank=True,
verbose_name=_("Address Second Line")
)
city = models.CharField(
max_length=50, null=False, verbose_name=_("City")
)
state_province_region = models.CharField(
max_length=50, null=False,
verbose_name=_("State / Province / Region"),
)
postal_code = models.CharField(
max_length=20, null=False, verbose_name=_("Postal Code")
)
country = models.CharField(
max_length=50, null=False, verbose_name=_("Country")
)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def __init__(self, *args, **kwargs):
super(BillingContactInfo, self).__init__(*args, **kwargs)
if self.email_list == '[]':
self.email_list = []
@property
def full_name(self):
if not self.first_name:
return self.last_name
elif not self.last_name:
return self.first_name
else:
return "%s %s" % (self.first_name, self.last_name)
class SoftwareProductRate(models.Model):
"""
Represents the monthly fixed fee for a software product.
Once created, SoftwareProductRates cannot be modified. Instead, a new SoftwareProductRate must be created.
"""
name = models.CharField(max_length=40)
monthly_fee = models.DecimalField(default=Decimal('0.00'), max_digits=10, decimal_places=2)
date_created = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def __str__(self):
return '%s @ $%s /month' % (self.name, self.monthly_fee)
    def __eq__(self, other):
        if not isinstance(other, self.__class__) or self.name != other.name:
            return False
        for field in ['monthly_fee', 'is_active']:
            if getattr(self, field) != getattr(other, field):
                return False
        return True
@classmethod
def new_rate(cls, product_name, monthly_fee, save=True):
rate = SoftwareProductRate(name=product_name, monthly_fee=monthly_fee)
if save:
rate.save()
return rate
class Feature(models.Model):
"""
This is what will link a feature type (USER, API, etc.) to a name (Users Pro, API Standard, etc.)
and will be what the FeatureRate references to provide a monthly fee, limit and per-excess fee.
"""
name = models.CharField(max_length=40, unique=True)
feature_type = models.CharField(max_length=10, db_index=True, choices=FeatureType.CHOICES)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def __str__(self):
return "Feature '%s' of type '%s'" % (self.name, self.feature_type)
def get_rate(self, default_instance=True):
try:
return self.featurerate_set.filter(is_active=True).latest('date_created')
except FeatureRate.DoesNotExist:
            return FeatureRate() if default_instance else None  # unsaved instance with default values
class FeatureRate(models.Model):
"""
Links a feature to a monthly fee, monthly limit, and a per-excess fee for exceeding the monthly limit.
Once created, Feature Rates cannot be modified. Instead, a new Feature Rate must be created.
"""
feature = models.ForeignKey(Feature, on_delete=models.PROTECT)
monthly_fee = models.DecimalField(default=Decimal('0.00'), max_digits=10, decimal_places=2,
verbose_name="Monthly Fee")
monthly_limit = models.IntegerField(default=0,
verbose_name="Monthly Included Limit",
validators=integer_field_validators)
per_excess_fee = models.DecimalField(default=Decimal('0.00'), max_digits=10, decimal_places=2,
verbose_name="Fee Per Excess of Limit")
date_created = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def __str__(self):
return '%s @ $%s /month, $%s /excess, limit: %d' % (
self.feature.name, self.monthly_fee, self.per_excess_fee, self.monthly_limit
)
    def __eq__(self, other):
        if not isinstance(other, self.__class__) or self.feature.pk != other.feature.pk:
            return False
        for field in ['monthly_fee', 'monthly_limit', 'per_excess_fee', 'is_active']:
            if getattr(self, field) != getattr(other, field):
                return False
        return True
@classmethod
def new_rate(cls, feature_name, feature_type,
monthly_fee=None, monthly_limit=None, per_excess_fee=None, save=True):
feature, _ = Feature.objects.get_or_create(name=feature_name, feature_type=feature_type)
rate = FeatureRate(feature=feature)
if monthly_fee is not None:
rate.monthly_fee = monthly_fee
if monthly_limit is not None:
rate.monthly_limit = monthly_limit
if per_excess_fee is not None:
rate.per_excess_fee = per_excess_fee
if save:
rate.save()
return rate
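    # Illustrative sketch, not part of the original source; the name, fee and
    # limit values are hypothetical:
    #
    #   rate = FeatureRate.new_rate(
    #       'Users Pro', FeatureType.USER,
    #       monthly_fee=Decimal('1.00'), monthly_limit=100, save=False)
    #
    # With monthly_limit=100, usage beyond 100 in a month is billed at
    # per_excess_fee per unit over the limit.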
class SoftwarePlan(models.Model):
"""
Subscriptions are created for Software Plans. Software Plans can have many Software Plan Versions, which
link the Software Plan to a set of permissions roles.
"""
name = models.CharField(max_length=80, unique=True)
description = models.TextField(blank=True,
help_text="If the visibility is INTERNAL, this description field will be used.")
edition = models.CharField(
max_length=25,
default=SoftwarePlanEdition.ENTERPRISE,
choices=SoftwarePlanEdition.CHOICES,
)
visibility = models.CharField(
max_length=10,
default=SoftwarePlanVisibility.INTERNAL,
choices=SoftwarePlanVisibility.CHOICES,
)
last_modified = models.DateTimeField(auto_now=True)
is_customer_software_plan = models.BooleanField(default=False)
max_domains = models.IntegerField(blank=True, null=True)
is_annual_plan = models.BooleanField(default=False)
class Meta(object):
app_label = 'accounting'
@quickcache(vary_on=['self.pk'], timeout=10)
def get_version(self):
try:
return self.softwareplanversion_set.filter(is_active=True).latest('date_created')
except SoftwarePlanVersion.DoesNotExist:
return None
def at_max_domains(self):
if not self.max_domains:
return False
subscription_count = 0
for version in self.softwareplanversion_set.all():
subscription_count += Subscription.visible_objects.filter(plan_version=version, is_active=True).count()
return subscription_count >= self.max_domains
class DefaultProductPlan(models.Model):
"""
This links a product type to its default SoftwarePlan (i.e. the Community Plan).
The latest SoftwarePlanVersion that's linked to this plan will be the one used to create a new subscription if
nothing is found for that domain.
"""
edition = models.CharField(
default=SoftwarePlanEdition.COMMUNITY,
choices=SoftwarePlanEdition.CHOICES,
max_length=25,
)
plan = models.ForeignKey(SoftwarePlan, on_delete=models.PROTECT)
is_trial = models.BooleanField(default=False)
is_report_builder_enabled = models.BooleanField(default=False)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
unique_together = ('edition', 'is_trial', 'is_report_builder_enabled')
@classmethod
@quickcache(['edition', 'is_trial', 'is_report_builder_enabled'],
skip_arg=lambda *args, **kwargs: not settings.ENTERPRISE_MODE or settings.UNIT_TESTING)
def get_default_plan_version(cls, edition=None, is_trial=False,
is_report_builder_enabled=False):
if not edition:
edition = (SoftwarePlanEdition.ENTERPRISE if settings.ENTERPRISE_MODE
else SoftwarePlanEdition.COMMUNITY)
try:
default_product_plan = DefaultProductPlan.objects.select_related('plan').get(
edition=edition, is_trial=is_trial,
is_report_builder_enabled=is_report_builder_enabled
)
return default_product_plan.plan.get_version()
except DefaultProductPlan.DoesNotExist:
raise AccountingError(
"No default product plan was set up, did you forget to run migrations?"
)
@classmethod
def get_lowest_edition(cls, requested_privileges, return_plan=False):
for edition in SoftwarePlanEdition.SELF_SERVICE_ORDER:
plan_version = cls.get_default_plan_version(edition)
privileges = get_privileges(plan_version) - REPORT_BUILDER_ADD_ON_PRIVS
if privileges.issuperset(requested_privileges):
return (plan_version if return_plan
else plan_version.plan.edition)
return None if return_plan else SoftwarePlanEdition.ENTERPRISE
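    # Illustrative sketch, not part of the original source: get_lowest_edition
    # walks SELF_SERVICE_ORDER (Paused, Community, Standard, Pro, Advanced) and
    # returns the first edition whose privileges, minus the report builder
    # add-ons, cover everything requested; the privilege set below is
    # hypothetical:
    #
    #   edition = DefaultProductPlan.get_lowest_edition(requested_privileges)
    #
    # If no self-service edition covers the request, ENTERPRISE is returned
    # (or None when return_plan=True).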
class SoftwarePlanVersion(models.Model):
"""
Links a plan to its rates and provides versioning information.
Once a new SoftwarePlanVersion is created, it cannot be modified. Instead, a new SoftwarePlanVersion
must be created.
"""
plan = models.ForeignKey(SoftwarePlan, on_delete=models.PROTECT)
product_rate = models.ForeignKey(SoftwareProductRate, on_delete=models.CASCADE)
feature_rates = models.ManyToManyField(FeatureRate, blank=True)
date_created = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True)
role = models.ForeignKey(Role, on_delete=models.CASCADE)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def __str__(self):
return "%(plan_name)s (v%(version_num)d)" % {
'plan_name': self.plan.name,
'version_num': self.version,
}
def save(self, *args, **kwargs):
super(SoftwarePlanVersion, self).save(*args, **kwargs)
SoftwarePlan.get_version.clear(self.plan)
@property
def version(self):
return (self.plan.softwareplanversion_set.count() -
self.plan.softwareplanversion_set.filter(
date_created__gt=self.date_created).count())
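    # Worked example, not part of the original source: if the plan has 5
    # versions and 2 of them were created after this one, this property
    # returns 5 - 2 = 3, i.e. the version's 1-based position in creation
    # order.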
@property
def user_facing_description(self):
from corehq.apps.accounting.user_text import DESC_BY_EDITION, FEATURE_TYPE_TO_NAME
def _default_description(plan, monthly_limit):
if plan.edition in [
SoftwarePlanEdition.COMMUNITY,
SoftwarePlanEdition.STANDARD,
SoftwarePlanEdition.PRO,
SoftwarePlanEdition.ADVANCED,
]:
return DESC_BY_EDITION[plan.edition]['description'].format(monthly_limit)
else:
return DESC_BY_EDITION[plan.edition]['description']
desc = {
'name': self.plan.name,
}
if (
self.plan.visibility == SoftwarePlanVisibility.PUBLIC
or self.plan.visibility == SoftwarePlanVisibility.TRIAL
) or not self.plan.description:
desc['description'] = _default_description(self.plan, self.user_feature.monthly_limit)
else:
desc['description'] = self.plan.description
desc.update({
'monthly_fee': 'USD %s' % self.product_rate.monthly_fee,
'rates': [{'name': FEATURE_TYPE_TO_NAME[r.feature.feature_type],
'included': 'Infinite' if r.monthly_limit == UNLIMITED_FEATURE_USAGE else r.monthly_limit}
for r in self.feature_rates.all()],
'edition': self.plan.edition,
})
return desc
@property
@memoized
def user_feature(self):
user_features = self.feature_rates.filter(feature__feature_type=FeatureType.USER)
try:
user_feature = user_features.order_by('monthly_limit')[0]
            if user_feature.monthly_limit != UNLIMITED_FEATURE_USAGE:
user_feature = user_features.order_by('-monthly_limit')[0]
return user_feature
except IndexError:
pass
@property
def user_limit(self):
if self.user_feature is not None:
return self.user_feature.monthly_limit
return UNLIMITED_FEATURE_USAGE
@property
def user_fee(self):
if self.user_feature is not None:
return "USD %d" % self.user_feature.per_excess_fee
def feature_charges_exist_for_domain(self, domain, start_date=None, end_date=None):
domain_obj = ensure_domain_instance(domain)
if domain_obj is None:
return False
from corehq.apps.accounting.usage import FeatureUsageCalculator
for feature_rate in self.feature_rates.all():
if feature_rate.monthly_limit != UNLIMITED_FEATURE_USAGE:
calc = FeatureUsageCalculator(
feature_rate, domain_obj.name, start_date=start_date,
end_date=end_date
)
if calc.get_usage() > feature_rate.monthly_limit:
return True
return False
@property
def is_paused(self):
return self.plan.edition == SoftwarePlanEdition.PAUSED
class SubscriberManager(models.Manager):
def safe_get(self, *args, **kwargs):
try:
return self.get(*args, **kwargs)
except Subscriber.DoesNotExist:
return None
class Subscriber(models.Model):
"""
The objects that can be subscribed to a Subscription.
"""
domain = models.CharField(max_length=256, unique=True, db_index=True)
last_modified = models.DateTimeField(auto_now=True)
objects = SubscriberManager()
class Meta(object):
app_label = 'accounting'
def __str__(self):
return "DOMAIN %s" % self.domain
def create_subscription(self, new_plan_version, new_subscription, is_internal_change):
assert new_plan_version
assert new_subscription
return self._apply_upgrades_and_downgrades(
new_plan_version=new_plan_version,
new_subscription=new_subscription,
internal_change=is_internal_change,
)
def change_subscription(self, downgraded_privileges, upgraded_privileges, new_plan_version,
old_subscription, new_subscription, internal_change):
return self._apply_upgrades_and_downgrades(
downgraded_privileges=downgraded_privileges,
upgraded_privileges=upgraded_privileges,
new_plan_version=new_plan_version,
old_subscription=old_subscription,
new_subscription=new_subscription,
internal_change=internal_change,
)
def activate_subscription(self, upgraded_privileges, subscription):
return self._apply_upgrades_and_downgrades(
upgraded_privileges=upgraded_privileges,
new_subscription=subscription,
)
def deactivate_subscription(self, downgraded_privileges, upgraded_privileges,
old_subscription, new_subscription):
return self._apply_upgrades_and_downgrades(
downgraded_privileges=downgraded_privileges,
upgraded_privileges=upgraded_privileges,
old_subscription=old_subscription,
new_subscription=new_subscription,
)
def reactivate_subscription(self, new_plan_version, subscription):
return self._apply_upgrades_and_downgrades(
new_plan_version=new_plan_version,
old_subscription=subscription,
new_subscription=subscription,
)
def _apply_upgrades_and_downgrades(self, new_plan_version=None,
downgraded_privileges=None,
upgraded_privileges=None,
old_subscription=None,
new_subscription=None,
internal_change=False):
"""
downgraded_privileges is the list of privileges that should be removed
upgraded_privileges is the list of privileges that should be added
"""
if new_plan_version is None:
new_plan_version = DefaultProductPlan.get_default_plan_version()
if downgraded_privileges is None or upgraded_privileges is None:
change_status_result = get_change_status(None, new_plan_version)
downgraded_privileges = downgraded_privileges or change_status_result.downgraded_privs
upgraded_privileges = upgraded_privileges or change_status_result.upgraded_privs
if downgraded_privileges:
Subscriber._process_downgrade(self.domain, downgraded_privileges, new_plan_version)
if upgraded_privileges:
Subscriber._process_upgrade(self.domain, upgraded_privileges, new_plan_version)
if Subscriber.should_send_subscription_notification(old_subscription, new_subscription):
send_subscription_change_alert(self.domain, new_subscription, old_subscription, internal_change)
subscription_upgrade_or_downgrade.send_robust(None, domain=self.domain)
@staticmethod
def should_send_subscription_notification(old_subscription, new_subscription):
if not old_subscription:
return False
is_new_trial = new_subscription and new_subscription.is_trial
expired_trial = old_subscription.is_trial and not new_subscription
return not is_new_trial and not expired_trial
@staticmethod
def _process_downgrade(domain, downgraded_privileges, new_plan_version):
downgrade_handler = DomainDowngradeActionHandler(
domain, new_plan_version, downgraded_privileges,
)
if not downgrade_handler.get_response():
raise SubscriptionChangeError("The downgrade was not successful.")
@staticmethod
def _process_upgrade(domain, upgraded_privileges, new_plan_version):
upgrade_handler = DomainUpgradeActionHandler(
domain, new_plan_version, upgraded_privileges,
)
if not upgrade_handler.get_response():
raise SubscriptionChangeError("The upgrade was not successful.")
class VisibleSubscriptionManager(models.Manager):
use_in_migrations = True
def get_queryset(self):
return super(VisibleSubscriptionManager, self).get_queryset().filter(is_hidden_to_ops=False)
class DisabledManager(models.Manager):
def get_queryset(self):
raise NotImplementedError
class Subscription(models.Model):
"""
Links a Subscriber to a SoftwarePlan and BillingAccount, necessary for invoicing.
"""
account = models.ForeignKey(BillingAccount, on_delete=models.PROTECT)
plan_version = models.ForeignKey(SoftwarePlanVersion, on_delete=models.PROTECT)
subscriber = models.ForeignKey(Subscriber, on_delete=models.PROTECT)
salesforce_contract_id = models.CharField(blank=True, max_length=80)
date_start = models.DateField()
date_end = models.DateField(blank=True, null=True)
date_created = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=False)
do_not_invoice = models.BooleanField(default=False)
no_invoice_reason = models.CharField(blank=True, max_length=256)
do_not_email_invoice = models.BooleanField(default=False)
do_not_email_reminder = models.BooleanField(default=False)
auto_generate_credits = models.BooleanField(default=False)
is_trial = models.BooleanField(default=False)
skip_invoicing_if_no_feature_charges = models.BooleanField(default=False)
service_type = models.CharField(
max_length=25,
choices=SubscriptionType.CHOICES,
default=SubscriptionType.NOT_SET
)
pro_bono_status = models.CharField(
max_length=25,
choices=ProBonoStatus.CHOICES,
default=ProBonoStatus.NO,
)
funding_source = models.CharField(
max_length=25,
choices=FundingSource.CHOICES,
default=FundingSource.CLIENT
)
last_modified = models.DateTimeField(auto_now=True)
is_hidden_to_ops = models.BooleanField(default=False)
skip_auto_downgrade = models.BooleanField(default=False)
skip_auto_downgrade_reason = models.CharField(blank=True, max_length=256)
visible_objects = VisibleSubscriptionManager()
visible_and_suppressed_objects = models.Manager()
objects = DisabledManager()
class Meta(object):
app_label = 'accounting'
def __str__(self):
return ("Subscription to %(plan_version)s for %(subscriber)s. "
"[%(date_start)s - %(date_end)s]" % {
'plan_version': self.plan_version,
'subscriber': self.subscriber,
'date_start': self.date_start.strftime(USER_DATE_FORMAT),
'date_end': (self.date_end.strftime(USER_DATE_FORMAT)
if self.date_end is not None else "--"),
})
def __eq__(self, other):
return (
other is not None
and other.__class__.__name__ == self.__class__.__name__
and other.plan_version.pk == self.plan_version.pk
and other.date_start == self.date_start
and other.date_end == self.date_end
and other.subscriber.pk == self.subscriber.pk
and other.account.pk == self.account.pk
)
def save(self, *args, **kwargs):
"""
        Overridden to update the domain pillow with subscription information
"""
from corehq.apps.accounting.mixins import get_overdue_invoice
super(Subscription, self).save(*args, **kwargs)
Subscription._get_active_subscription_by_domain.clear(Subscription, self.subscriber.domain)
get_overdue_invoice.clear(self.subscriber.domain)
domain = Domain.get_by_name(self.subscriber.domain)
        # If a subscriber doesn't have a valid domain associated with it,
        # we don't care that the pillow won't be updated
if domain:
publish_domain_saved(domain)
def delete(self, *args, **kwargs):
super(Subscription, self).delete(*args, **kwargs)
Subscription._get_active_subscription_by_domain.clear(Subscription, self.subscriber.domain)
@property
def is_community(self):
return self.plan_version.plan.edition == SoftwarePlanEdition.COMMUNITY
@property
def allowed_attr_changes(self):
"""
These are the attributes of a Subscription that can always be
changed while the subscription is active (or reactivated)
"""
return ['do_not_invoice', 'no_invoice_reason',
'salesforce_contract_id', 'skip_auto_downgrade']
@property
def next_subscription_filter(self):
return (Subscription.visible_objects.
filter(subscriber=self.subscriber, date_start__gt=self.date_start).
exclude(pk=self.pk).
filter(Q(date_end__isnull=True) | ~Q(date_start=F('date_end'))))
@property
def previous_subscription_filter(self):
return Subscription.visible_objects.filter(
subscriber=self.subscriber,
date_start__lt=self.date_start - datetime.timedelta(days=1)
).exclude(pk=self.pk)
@property
def is_renewed(self):
"""
Checks to see if there's another Subscription for this subscriber
that starts after this subscription.
"""
return self.next_subscription_filter.exists()
@property
def next_subscription(self):
try:
return self.next_subscription_filter.order_by('date_start')[0]
except (Subscription.DoesNotExist, IndexError):
return None
@property
def previous_subscription(self):
try:
return self.previous_subscription_filter.order_by('-date_end')[0]
except (Subscription.DoesNotExist, IndexError):
return None
def raise_conflicting_dates(self, date_start, date_end):
"""Raises a subscription Adjustment error if the specified date range
conflicts with other subscriptions related to this subscriber.
"""
assert date_start is not None
for sub in Subscription.visible_objects.filter(
Q(date_end__isnull=True) | Q(date_end__gt=F('date_start')),
subscriber=self.subscriber,
).exclude(
id=self.id,
):
related_has_no_end = sub.date_end is None
current_has_no_end = date_end is None
start_before_related_end = sub.date_end is not None and date_start < sub.date_end
start_before_related_start = date_start < sub.date_start
start_after_related_start = date_start > sub.date_start
end_before_related_end = (
date_end is not None and sub.date_end is not None
and date_end < sub.date_end
)
end_after_related_end = (
date_end is not None and sub.date_end is not None
and date_end > sub.date_end
)
end_after_related_start = date_end is not None and date_end > sub.date_start
if (
(start_before_related_end and start_after_related_start)
or (start_after_related_start and related_has_no_end)
or (end_after_related_start and end_before_related_end)
or (end_after_related_start and related_has_no_end)
or (start_before_related_start and end_after_related_end)
or (start_before_related_end and current_has_no_end)
or (current_has_no_end and related_has_no_end)
):
raise SubscriptionAdjustmentError(
"The start date of %(start_date)s conflicts with the "
"subscription dates to %(related_sub)s." % {
                        'start_date': date_start.strftime(USER_DATE_FORMAT),
'related_sub': sub,
}
)
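    # Worked example, not part of the original source (dates hypothetical):
    # against an existing subscription spanning 2020-01-01 to 2020-03-01, a
    # new range starting 2020-02-01 satisfies start_before_related_end and
    # start_after_related_start, so SubscriptionAdjustmentError is raised; a
    # new range starting exactly on 2020-03-01 triggers none of the clauses
    # and passes.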
def update_subscription(self, date_start, date_end,
do_not_invoice=None,
no_invoice_reason=None, do_not_email_invoice=None,
do_not_email_reminder=None, salesforce_contract_id=None,
auto_generate_credits=None,
web_user=None, note=None, adjustment_method=None,
service_type=None, pro_bono_status=None, funding_source=None,
skip_invoicing_if_no_feature_charges=None, skip_auto_downgrade=None,
skip_auto_downgrade_reason=None):
adjustment_method = adjustment_method or SubscriptionAdjustmentMethod.INTERNAL
self._update_dates(date_start, date_end)
self._update_properties(
do_not_invoice=do_not_invoice,
no_invoice_reason=no_invoice_reason,
skip_invoicing_if_no_feature_charges=skip_invoicing_if_no_feature_charges,
do_not_email_invoice=do_not_email_invoice,
do_not_email_reminder=do_not_email_reminder,
auto_generate_credits=auto_generate_credits,
salesforce_contract_id=salesforce_contract_id,
service_type=service_type,
pro_bono_status=pro_bono_status,
funding_source=funding_source,
skip_auto_downgrade=skip_auto_downgrade,
skip_auto_downgrade_reason=skip_auto_downgrade_reason,
)
self.save()
SubscriptionAdjustment.record_adjustment(
self, method=adjustment_method, note=note, web_user=web_user,
reason=SubscriptionAdjustmentReason.MODIFY
)
def _update_dates(self, date_start, date_end):
if not date_start:
raise SubscriptionAdjustmentError('Start date must be provided')
if date_end is not None and date_start > date_end:
raise SubscriptionAdjustmentError(
"Can't have a subscription start after the end date."
)
self.raise_conflicting_dates(date_start, date_end)
self.date_start = date_start
self.date_end = date_end
is_active_dates = is_active_subscription(self.date_start, self.date_end)
if self.is_active != is_active_dates:
if is_active_dates:
self.is_active = True
self.subscriber.activate_subscription(get_privileges(self.plan_version), self)
else:
raise SubscriptionAdjustmentError(
'Cannot deactivate a subscription here. Cancel subscription instead.'
)
def _update_properties(self, **kwargs):
property_names = {
'do_not_invoice',
'no_invoice_reason',
'skip_invoicing_if_no_feature_charges',
'do_not_email_invoice',
'do_not_email_reminder',
'auto_generate_credits',
'salesforce_contract_id',
'service_type',
'pro_bono_status',
'funding_source',
'skip_auto_downgrade',
'skip_auto_downgrade_reason',
}
assert property_names >= set(kwargs.keys())
for property_name, property_value in kwargs.items():
if property_value is not None:
setattr(self, property_name, property_value)
@transaction.atomic
def change_plan(self, new_plan_version, date_end=None,
note=None, web_user=None, adjustment_method=None,
service_type=None, pro_bono_status=None, funding_source=None,
transfer_credits=True, internal_change=False, account=None,
do_not_invoice=None, no_invoice_reason=None,
auto_generate_credits=False, is_trial=False):
"""
Changing a plan TERMINATES the current subscription and
creates a NEW SUBSCRIPTION where the old plan left off.
This is not the same thing as simply updating the subscription.
"""
from corehq.apps.analytics.tasks import track_workflow
adjustment_method = adjustment_method or SubscriptionAdjustmentMethod.INTERNAL
today = datetime.date.today()
assert self.is_active
assert date_end is None or date_end >= today
if new_plan_version.plan.at_max_domains() and self.plan_version.plan != new_plan_version.plan:
raise SubscriptionAdjustmentError(
'The maximum number of project spaces has been reached for %(new_plan_version)s. ' % {
'new_plan_version': new_plan_version,
}
)
self.date_end = today
self.is_active = False
self.save()
new_subscription = Subscription(
account=account if account else self.account,
plan_version=new_plan_version,
subscriber=self.subscriber,
salesforce_contract_id=self.salesforce_contract_id,
date_start=today,
date_end=date_end,
is_active=True,
do_not_invoice=do_not_invoice if do_not_invoice is not None else self.do_not_invoice,
no_invoice_reason=no_invoice_reason if no_invoice_reason is not None else self.no_invoice_reason,
auto_generate_credits=auto_generate_credits,
is_trial=is_trial,
service_type=(service_type or SubscriptionType.NOT_SET),
pro_bono_status=(pro_bono_status or ProBonoStatus.NO),
funding_source=(funding_source or FundingSource.CLIENT),
skip_auto_downgrade=False,
skip_auto_downgrade_reason='',
)
new_subscription.save()
new_subscription.raise_conflicting_dates(new_subscription.date_start, new_subscription.date_end)
new_subscription.set_billing_account_entry_point()
change_status_result = get_change_status(self.plan_version, new_plan_version)
self.subscriber.change_subscription(
downgraded_privileges=change_status_result.downgraded_privs,
upgraded_privileges=change_status_result.upgraded_privs,
new_plan_version=new_plan_version,
old_subscription=self,
new_subscription=new_subscription,
internal_change=internal_change,
)
# transfer existing credit lines to the new subscription
if transfer_credits:
self.transfer_credits(new_subscription)
# record transfer from old subscription
SubscriptionAdjustment.record_adjustment(
self, method=adjustment_method, note=note, web_user=web_user,
reason=change_status_result.adjustment_reason, related_subscription=new_subscription
)
SubscriptionAdjustment.record_adjustment(
new_subscription, method=adjustment_method, note=note, web_user=web_user,
reason=SubscriptionAdjustmentReason.CREATE
)
upgrade_reasons = [SubscriptionAdjustmentReason.UPGRADE, SubscriptionAdjustmentReason.CREATE]
if web_user and adjustment_method == SubscriptionAdjustmentMethod.USER:
if change_status_result.adjustment_reason in upgrade_reasons:
track_workflow(web_user, 'Changed Plan: Upgrade')
if change_status_result.adjustment_reason == SubscriptionAdjustmentReason.DOWNGRADE:
track_workflow(web_user, 'Changed Plan: Downgrade')
return new_subscription
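    # Illustrative usage sketch, not part of the original source; the plan
    # lookup and web user are hypothetical:
    #
    #   new_version = DefaultProductPlan.get_default_plan_version(
    #       edition=SoftwarePlanEdition.PRO)
    #   new_sub = old_sub.change_plan(new_version, web_user='admin@example.com')
    #
    # old_sub is terminated today (date_end=today, is_active=False) and
    # new_sub starts today on the new plan version, with credits transferred
    # by default.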
def reactivate_subscription(self, date_end=None, note=None, web_user=None,
adjustment_method=None, **kwargs):
"""
This assumes that a subscription was cancelled then recreated with the
same date_start as the last subscription's date_end (with no other subscriptions
created in between).
"""
adjustment_method = adjustment_method or SubscriptionAdjustmentMethod.INTERNAL
self.date_end = date_end
self.is_active = True
for allowed_attr in self.allowed_attr_changes:
if allowed_attr in kwargs:
setattr(self, allowed_attr, kwargs[allowed_attr])
self.save()
self.subscriber.reactivate_subscription(
new_plan_version=self.plan_version,
subscription=self,
)
SubscriptionAdjustment.record_adjustment(
self, reason=SubscriptionAdjustmentReason.REACTIVATE,
method=adjustment_method, note=note, web_user=web_user,
)
def renew_subscription(self, note=None, web_user=None,
adjustment_method=None,
service_type=None, pro_bono_status=None,
funding_source=None, new_version=None):
"""
This creates a new subscription with a date_start that is
equivalent to the current subscription's date_end.
- The date_end is left None.
- The plan_version is the cheapest self-subscribable plan with the
same set of privileges that the current plan has.
"""
adjustment_method = adjustment_method or SubscriptionAdjustmentMethod.INTERNAL
if self.date_end is None:
raise SubscriptionRenewalError(
"Cannot renew a subscription with no date_end set."
)
if new_version is None:
current_privileges = get_privileges(self.plan_version)
new_version = DefaultProductPlan.get_lowest_edition(
current_privileges, return_plan=True,
)
if new_version is None:
# this should NEVER happen, but on the off-chance that it does...
raise SubscriptionRenewalError(
"There was an issue renewing your subscription. Someone "
"from Dimagi will get back to you shortly."
)
renewed_subscription = Subscription(
account=self.account,
plan_version=new_version,
subscriber=self.subscriber,
salesforce_contract_id=self.salesforce_contract_id,
date_start=self.date_end,
date_end=None,
)
if service_type is not None:
renewed_subscription.service_type = service_type
if pro_bono_status is not None:
renewed_subscription.pro_bono_status = pro_bono_status
if funding_source is not None:
renewed_subscription.funding_source = funding_source
if datetime.date.today() == self.date_end:
renewed_subscription.is_active = True
renewed_subscription.save()
# record renewal from old subscription
SubscriptionAdjustment.record_adjustment(
self, method=adjustment_method, note=note, web_user=web_user,
reason=SubscriptionAdjustmentReason.RENEW,
)
return renewed_subscription
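    # Illustrative sketch, not part of the original source: a renewal starts
    # exactly where the old subscription ends and is open-ended.
    #
    #   renewed = sub.renew_subscription(web_user='admin@example.com')
    #   assert renewed.date_start == sub.date_end
    #   assert renewed.date_end is None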
def transfer_credits(self, subscription=None):
"""Transfers all credit balances related to an account or subscription
(if specified).
"""
if subscription is not None and self.account.pk != subscription.account.pk:
raise CreditLineError(
"Can only transfer subscription credits under the same "
"Billing Account."
)
source_credits = CreditLine.objects.filter(
account=self.account,
subscription=self,
).all()
for credit_line in source_credits:
transferred_credit = CreditLine.add_credit(
credit_line.balance,
account=self.account,
subscription=subscription,
feature_type=credit_line.feature_type,
is_product=credit_line.is_product,
related_credit=credit_line
)
credit_line.is_active = False
credit_line.adjust_credit_balance(
credit_line.balance * Decimal('-1'),
related_credit=transferred_credit,
)
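    # Worked example, not part of the original source: a source credit line
    # with a balance of 50 produces a new credit line of +50 against the
    # target subscription; the source line is then deactivated and adjusted by
    # 50 * -1 = -50, leaving its balance at zero.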
def send_ending_reminder_email(self):
"""
Sends a reminder email to the emails specified in the accounting
contacts that the subscription will end on the specified end date.
"""
if self.date_end is None:
raise SubscriptionReminderError(
"This subscription has no end date."
)
today = datetime.date.today()
num_days_left = (self.date_end - today).days
domain_name = self.subscriber.domain
context = self.ending_reminder_context
subject = context['subject']
template = self.ending_reminder_email_html
template_plaintext = self.ending_reminder_email_text
email_html = render_to_string(template, context)
email_plaintext = render_to_string(template_plaintext, context)
bcc = [settings.ACCOUNTS_EMAIL] if not self.is_trial else []
        if self.account.dimagi_contact:
            bcc.append(self.account.dimagi_contact)
for email in self._reminder_email_contacts(domain_name):
send_html_email_async.delay(
subject, email, email_html,
text_content=email_plaintext,
email_from=get_dimagi_from_email(),
bcc=bcc,
)
log_accounting_info(
"Sent %(days_left)s-day subscription reminder "
"email for %(domain)s to %(email)s." % {
'days_left': num_days_left,
'domain': domain_name,
'email': email,
}
)
@property
def ending_reminder_email_html(self):
if self.account.is_customer_billing_account:
return 'accounting/email/customer_subscription_ending_reminder.html'
elif self.is_trial:
return 'accounting/email/trial_ending_reminder.html'
else:
return 'accounting/email/subscription_ending_reminder.html'
@property
def ending_reminder_email_text(self):
if self.account.is_customer_billing_account:
return 'accounting/email/customer_subscription_ending_reminder.txt'
elif self.is_trial:
return 'accounting/email/trial_ending_reminder.txt'
else:
return 'accounting/email/subscription_ending_reminder.txt'
@property
def ending_reminder_context(self):
from corehq.apps.domain.views.accounting import DomainSubscriptionView
today = datetime.date.today()
num_days_left = (self.date_end - today).days
if num_days_left == 1:
ending_on = _("tomorrow!")
else:
ending_on = _("on %s." % self.date_end.strftime(USER_DATE_FORMAT))
user_desc = self.plan_version.user_facing_description
plan_name = user_desc['name']
domain_name = self.subscriber.domain
context = {
'domain': domain_name,
'plan_name': plan_name,
'account': self.account.name,
'ending_on': ending_on,
'subscription_url': absolute_reverse(
DomainSubscriptionView.urlname, args=[self.subscriber.domain]),
'base_url': get_site_domain(),
'invoicing_contact_email': settings.INVOICING_CONTACT_EMAIL,
'sales_email': settings.SALES_EMAIL,
}
if self.account.is_customer_billing_account:
subject = _(
"CommCare Alert: %(account_name)s's subscription to "
"%(plan_name)s ends %(ending_on)s"
) % {
'account_name': self.account.name,
'plan_name': plan_name,
'ending_on': ending_on,
}
elif self.is_trial:
subject = _("CommCare Alert: 30 day trial for '%(domain)s' "
"ends %(ending_on)s") % {
'domain': domain_name,
'ending_on': ending_on,
}
else:
subject = _(
"CommCare Alert: %(domain)s's subscription to "
"%(plan_name)s ends %(ending_on)s"
) % {
'plan_name': plan_name,
'domain': domain_name,
'ending_on': ending_on,
}
context.update({'subject': subject})
return context
def send_dimagi_ending_reminder_email(self):
if self.date_end is None:
raise SubscriptionReminderError(
"This subscription has no end date."
)
        if not self.account.dimagi_contact:
raise SubscriptionReminderError(
"This subscription has no Dimagi contact."
)
subject = self.dimagi_ending_reminder_subject
context = self.dimagi_ending_reminder_context
email_html = render_to_string(self.dimagi_ending_reminder_email_html, context)
email_plaintext = render_to_string(self.dimagi_ending_reminder_email_text, context)
send_html_email_async.delay(
subject, self.account.dimagi_contact, email_html,
text_content=email_plaintext,
email_from=settings.DEFAULT_FROM_EMAIL,
)
@property
def dimagi_ending_reminder_email_html(self):
if self.account.is_customer_billing_account:
return 'accounting/email/customer_subscription_ending_reminder_dimagi.html'
else:
return 'accounting/email/subscription_ending_reminder_dimagi.html'
@property
def dimagi_ending_reminder_email_text(self):
if self.account.is_customer_billing_account:
return 'accounting/email/customer_subscription_ending_reminder_dimagi.txt'
else:
return 'accounting/email/subscription_ending_reminder_dimagi.txt'
@property
def dimagi_ending_reminder_subject(self):
if self.account.is_customer_billing_account:
return "Alert: {account}'s subscriptions are ending on {end_date}".format(
account=self.account.name,
end_date=self.date_end.strftime(USER_DATE_FORMAT))
else:
return "Alert: {domain}'s subscription is ending on {end_date}".format(
domain=self.subscriber.domain,
end_date=self.date_end.strftime(USER_DATE_FORMAT))
@property
def dimagi_ending_reminder_context(self):
end_date = self.date_end.strftime(USER_DATE_FORMAT)
email = self.account.dimagi_contact
if self.account.is_customer_billing_account:
account = self.account.name
plan = self.plan_version.plan.edition
context = {
'account': account,
'plan': plan,
'end_date': end_date,
'client_reminder_email_date': (self.date_end - datetime.timedelta(days=30)).strftime(
USER_DATE_FORMAT),
'contacts': ', '.join(self._reminder_email_contacts(self.subscriber.domain)),
'dimagi_contact': email,
'accounts_email': settings.ACCOUNTS_EMAIL
}
else:
domain = self.subscriber.domain
context = {
'domain': domain,
'end_date': end_date,
'client_reminder_email_date': (self.date_end - datetime.timedelta(days=30)).strftime(
USER_DATE_FORMAT),
'contacts': ', '.join(self._reminder_email_contacts(domain)),
'dimagi_contact': email,
}
return context
def _reminder_email_contacts(self, domain_name):
emails = {a.username for a in WebUser.get_admins_by_domain(domain_name)}
        emails |= set(WebUser.get_dimagi_emails_by_domain(domain_name))
if not self.is_trial:
billing_contact_emails = (
self.account.billingcontactinfo.email_list
if BillingContactInfo.objects.filter(account=self.account).exists() else []
)
if not billing_contact_emails:
from corehq.apps.accounting.views import ManageBillingAccountView
_soft_assert_contact_emails_missing(
False,
'Billing Account for project %s is missing client contact emails: %s' % (
domain_name,
absolute_reverse(ManageBillingAccountView.urlname, args=[self.account.id])
)
)
            emails |= set(billing_contact_emails)
if self.account.is_customer_billing_account:
            emails |= set(self.account.enterprise_admin_emails)
return emails
def set_billing_account_entry_point(self):
no_current_entry_point = self.account.entry_point == EntryPoint.NOT_SET
self_serve = self.service_type == SubscriptionType.PRODUCT
if no_current_entry_point and self_serve and not self.is_trial:
self.account.entry_point = EntryPoint.SELF_STARTED
self.account.save()
@classmethod
def get_active_subscription_by_domain(cls, domain_name_or_obj):
if settings.ENTERPRISE_MODE:
# Use the default plan, which is Enterprise when in ENTERPRISE_MODE
return None
if isinstance(domain_name_or_obj, Domain):
return cls._get_active_subscription_by_domain(domain_name_or_obj.name)
return cls._get_active_subscription_by_domain(domain_name_or_obj)
@classmethod
@quickcache(['domain_name'], timeout=60 * 60)
def _get_active_subscription_by_domain(cls, domain_name):
try:
return cls.visible_objects.select_related(
'plan_version__role'
).get(
is_active=True,
subscriber__domain=domain_name,
)
except cls.DoesNotExist:
return None
@classmethod
def get_subscribed_plan_by_domain(cls, domain):
"""
Returns SoftwarePlanVersion for the given domain.
"""
domain_obj = ensure_domain_instance(domain)
if domain_obj is None:
try:
return DefaultProductPlan.get_default_plan_version()
except DefaultProductPlan.DoesNotExist:
raise ProductPlanNotFoundError
else:
active_subscription = cls.get_active_subscription_by_domain(domain_obj.name)
if active_subscription is not None:
return active_subscription.plan_version
else:
return DefaultProductPlan.get_default_plan_version()
@classmethod
def new_domain_subscription(cls, account, domain, plan_version,
date_start=None, date_end=None, note=None,
web_user=None, adjustment_method=None, internal_change=False,
**kwargs):
if plan_version.plan.at_max_domains():
raise NewSubscriptionError(
'The maximum number of project spaces has been reached for %(plan_version)s. ' % {
'plan_version': plan_version,
}
)
if plan_version.plan.is_customer_software_plan != account.is_customer_billing_account:
if plan_version.plan.is_customer_software_plan:
raise NewSubscriptionError(
'You are trying to add a Customer Software Plan to a regular Billing Account. '
'Both or neither must be customer-level.'
)
else:
raise NewSubscriptionError(
'You are trying to add a regular Software Plan to a Customer Billing Account. '
'Both or neither must be customer-level.'
)
subscriber = Subscriber.objects.get_or_create(domain=domain)[0]
today = datetime.date.today()
date_start = date_start or today
# find subscriptions that end in the future / after this subscription
available_subs = Subscription.visible_objects.filter(
subscriber=subscriber,
)
future_subscription_no_end = available_subs.filter(
date_end__exact=None,
)
if date_end is not None:
future_subscription_no_end = future_subscription_no_end.filter(date_start__lt=date_end)
        if future_subscription_no_end.exists():
raise NewSubscriptionError(_(
"There is already a subscription '%s' with no end date "
"that conflicts with the start and end dates of this "
"subscription.") %
future_subscription_no_end.latest('date_created')
)
future_subscriptions = available_subs.filter(
date_end__gt=date_start
)
if date_end is not None:
future_subscriptions = future_subscriptions.filter(date_start__lt=date_end)
        if future_subscriptions.exists():
raise NewSubscriptionError(str(
_(
"There is already a subscription '%(sub)s' that has an end date "
"that conflicts with the start and end dates of this "
"subscription %(start)s - %(end)s."
) % {
'sub': future_subscriptions.latest('date_created'),
'start': date_start,
'end': date_end
}
))
can_reactivate, last_subscription = cls.can_reactivate_domain_subscription(
account, domain, plan_version, date_start=date_start
)
if can_reactivate:
last_subscription.reactivate_subscription(
date_end=date_end, note=note, web_user=web_user,
adjustment_method=adjustment_method,
**kwargs
)
return last_subscription
adjustment_method = adjustment_method or SubscriptionAdjustmentMethod.INTERNAL
subscription = Subscription.visible_objects.create(
account=account,
plan_version=plan_version,
subscriber=subscriber,
date_start=date_start,
date_end=date_end,
**kwargs
)
subscription.is_active = is_active_subscription(date_start, date_end)
if subscription.is_active:
subscriber.create_subscription(
new_plan_version=plan_version,
new_subscription=subscription,
is_internal_change=internal_change,
)
SubscriptionAdjustment.record_adjustment(
subscription, method=adjustment_method, note=note,
web_user=web_user
)
subscription.save()
subscription.set_billing_account_entry_point()
return subscription
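    # Illustrative usage sketch, not part of the original source; the domain
    # and web user are hypothetical:
    #
    #   sub = Subscription.new_domain_subscription(
    #       account, 'example-project', plan_version,
    #       web_user='admin@example.com')
    #
    # If the domain's previous subscription ended exactly on the new start
    # date under the same account and plan version, it is reactivated instead
    # of creating a new row.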
@classmethod
def can_reactivate_domain_subscription(cls, account, domain, plan_version,
date_start=None):
subscriber = Subscriber.objects.get_or_create(domain=domain)[0]
date_start = date_start or datetime.date.today()
last_subscription = Subscription.visible_objects.filter(
subscriber=subscriber, date_end=date_start
)
if not last_subscription.exists():
return False, None
last_subscription = last_subscription.latest('date_created')
return (
last_subscription.account.pk == account.pk and
last_subscription.plan_version.pk == plan_version.pk
), last_subscription
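    # Note (not part of the original source): reactivation applies only when
    # the previous subscription's date_end equals the new date_start and both
    # the account and the plan version match; anything else results in a brand
    # new Subscription row via new_domain_subscription above.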
@property
def is_below_minimum_subscription(self):
if self.is_trial:
return False
elif self.date_start < datetime.date(2018, 9, 5):
# Only block upgrades for subscriptions created after the date we launched the 30-Day Minimum
return False
elif self.date_start + datetime.timedelta(days=MINIMUM_SUBSCRIPTION_LENGTH) >= datetime.date.today():
return True
else:
return False
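    # Worked example, not part of the original source (dates hypothetical):
    # with MINIMUM_SUBSCRIPTION_LENGTH = 30, a non-trial subscription started
    # on 2020-01-10 is below the minimum through 2020-02-09 (start + 30 days)
    # and no longer below it from 2020-02-10 onward.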
def user_can_change_subscription(self, user):
if user.is_superuser:
return True
elif self.account.is_customer_billing_account:
return self.account.has_enterprise_admin(user.email)
else:
return True
class InvoiceBaseManager(models.Manager):
def get_queryset(self):
return super(InvoiceBaseManager, self).get_queryset().filter(is_hidden_to_ops=False)
class InvoiceBase(models.Model):
date_created = models.DateTimeField(auto_now_add=True)
is_hidden = models.BooleanField(default=False)
tax_rate = models.DecimalField(default=Decimal('0.0000'), max_digits=10, decimal_places=4)
balance = models.DecimalField(default=Decimal('0.0000'), max_digits=10, decimal_places=4)
date_due = models.DateField(db_index=True, null=True)
date_paid = models.DateField(blank=True, null=True)
date_start = models.DateField()
date_end = models.DateField()
    # If set to True, the invoice will not appear in the invoice report.
    # There is no UI to control this flag.
last_modified = models.DateTimeField(auto_now=True)
objects = InvoiceBaseManager()
api_objects = Manager()
class Meta(object):
abstract = True
@property
def is_customer_invoice(self):
return False
@property
def invoice_number(self):
ops_num = settings.INVOICE_STARTING_NUMBER + self.id
return "%s%d" % (settings.INVOICE_PREFIX, ops_num)
@property
def is_wire(self):
return False
def get_domain(self):
raise NotImplementedError()
@property
def account(self):
raise NotImplementedError()
@property
def is_paid(self):
return bool(self.date_paid)
@property
def email_recipients(self):
        raise NotImplementedError()
class WireInvoice(InvoiceBase):
# WireInvoice is tied to a domain, rather than a subscription
domain = models.CharField(max_length=100)
class Meta(object):
app_label = 'accounting'
@property
@memoized
def account(self):
return BillingAccount.get_account_by_domain(self.domain)
@property
def subtotal(self):
return self.balance
@property
def is_wire(self):
return True
@property
def is_prepayment(self):
return False
def get_domain(self):
return self.domain
def get_total(self):
return self.balance
@property
def email_recipients(self):
try:
original_record = WireBillingRecord.objects.filter(invoice=self).order_by('-date_created')[0]
return original_record.emailed_to_list
except IndexError:
log_accounting_error(
"Strange that WireInvoice %d has no associated WireBillingRecord. "
"Should investigate."
% self.id
)
return []
class WirePrepaymentInvoice(WireInvoice):
class Meta(object):
app_label = 'accounting'
proxy = True
items = []
@property
def is_prepayment(self):
return True
class Invoice(InvoiceBase):
"""
This is what we'll use to calculate the balance on the accounts based on the current balance
held by the Invoice. Balance updates will be tied to CreditAdjustmentTriggers which are tied
to CreditAdjustments.
"""
subscription = models.ForeignKey(Subscription, on_delete=models.PROTECT)
class Meta(object):
app_label = 'accounting'
def save(self, *args, **kwargs):
from corehq.apps.accounting.mixins import get_overdue_invoice
super(Invoice, self).save(*args, **kwargs)
get_overdue_invoice.clear(self.subscription.subscriber.domain)
@property
def email_recipients(self):
if self.subscription.service_type == SubscriptionType.IMPLEMENTATION:
return [settings.ACCOUNTS_EMAIL]
else:
return self.contact_emails
@property
def contact_emails(self):
try:
billing_contact_info = BillingContactInfo.objects.get(account=self.account)
contact_emails = billing_contact_info.email_list
except BillingContactInfo.DoesNotExist:
contact_emails = []
if not contact_emails:
from corehq.apps.accounting.views import ManageBillingAccountView
admins = WebUser.get_admins_by_domain(self.get_domain())
contact_emails = [admin.email if admin.email else admin.username for admin in admins]
if not settings.UNIT_TESTING:
_soft_assert_contact_emails_missing(
False,
"Could not find an email to send the invoice "
"email to for the domain %s. Sending to domain admins instead: %s."
" Add client contact emails here: %s" % (
self.get_domain(),
', '.join(contact_emails),
absolute_reverse(ManageBillingAccountView.urlname, args=[self.account.id]),
)
)
return contact_emails
@property
def subtotal(self):
"""
This will be inserted in the subtotal field on the printed invoice.
"""
if self.lineitem_set.count() == 0:
return Decimal('0.0000')
return sum([line_item.total for line_item in self.lineitem_set.all()])
@property
def applied_tax(self):
return Decimal('%.4f' % round(self.tax_rate * self.subtotal, 4))
@property
@memoized
def account(self):
return self.subscription.account
@property
def applied_credit(self):
if self.creditadjustment_set.count() == 0:
return Decimal('0.0000')
return sum([credit.amount for credit in self.creditadjustment_set.all()])
def get_total(self):
"""
This will be inserted in the total field on the printed invoice.
"""
return self.subtotal + self.applied_tax + self.applied_credit
def update_balance(self):
self.balance = self.get_total()
if self.balance <= 0:
self.date_paid = datetime.date.today()
else:
self.date_paid = None
def calculate_credit_adjustments(self):
"""
This goes through all credit lines that:
- do not have feature/product rates, but specify the related subscription and billing account
- do not have feature/product rates or a subscription, but specify the related billing account
"""
# first apply credits to all the line items
for line_item in self.lineitem_set.all():
line_item.calculate_credit_adjustments()
# finally, apply credits to the leftover invoice balance
current_total = self.get_total()
credit_lines = CreditLine.get_credits_for_invoice(self)
CreditLine.apply_credits_toward_balance(credit_lines, current_total, invoice=self)
@classmethod
def exists_for_domain(cls, domain):
return cls.objects.filter(
subscription__subscriber__domain=domain, is_hidden=False
).count() > 0
def get_domain(self):
return self.subscription.subscriber.domain
@classmethod
def autopayable_invoices(cls, date_due):
""" Invoices that can be auto paid on date_due """
invoices = cls.objects.select_related('subscription__account').filter(
date_due=date_due,
is_hidden=False,
subscription__account__auto_pay_user__isnull=False,
)
return invoices
def pay_invoice(self, payment_record):
CreditLine.make_payment_towards_invoice(
invoice=self,
payment_record=payment_record,
)
self.update_balance()
self.save()
class CustomerInvoice(InvoiceBase):
# CustomerInvoice is tied to a customer level account, instead of a subscription
account = models.ForeignKey(BillingAccount, on_delete=models.PROTECT)
subscriptions = models.ManyToManyField(Subscription, default=list, blank=True)
class Meta(object):
app_label = 'accounting'
@property
def is_customer_invoice(self):
return True
def get_domain(self):
return None
@property
def email_recipients(self):
try:
billing_contact_info = BillingContactInfo.objects.get(account=self.account)
contact_emails = billing_contact_info.email_list
except BillingContactInfo.DoesNotExist:
contact_emails = []
return contact_emails
@property
def contact_emails(self):
return self.account.enterprise_admin_emails
@property
def subtotal(self):
"""
This will be inserted in the subtotal field on the printed invoice.
"""
if self.lineitem_set.count() == 0:
return Decimal('0.0000')
return sum([line_item.total for line_item in self.lineitem_set.all()])
@property
def applied_tax(self):
return Decimal('%.4f' % round(self.tax_rate * self.subtotal, 4))
@property
def applied_credit(self):
if self.creditadjustment_set.count() == 0:
return Decimal('0.0000')
return sum([credit.amount for credit in self.creditadjustment_set.all()])
def get_total(self):
"""
This will be inserted in the total field on the printed invoice.
"""
return self.subtotal + self.applied_tax + self.applied_credit
def update_balance(self):
self.balance = self.get_total()
if self.balance <= 0:
self.date_paid = datetime.date.today()
else:
self.date_paid = None
def calculate_credit_adjustments(self):
for line_item in self.lineitem_set.all():
line_item.calculate_credit_adjustments()
current_total = self.get_total()
credit_lines = CreditLine.get_credits_for_customer_invoice(self)
CreditLine.apply_credits_toward_balance(credit_lines, current_total, customer_invoice=self)
def pay_invoice(self, payment_record):
CreditLine.make_payment_towards_invoice(
invoice=self,
payment_record=payment_record,
)
self.update_balance()
self.save()
@classmethod
def exists_for_domain(cls, domain):
        invoices = cls.objects.filter(is_hidden=False)
        for invoice in invoices:
            if invoice.subscriptions.filter(subscriber__domain=domain).exists():
                return True
        return False
@classmethod
def autopayable_invoices(cls, date_due):
""" Invoices that can be auto paid on date_due """
invoices = cls.objects.select_related('account').filter(
date_due=date_due,
is_hidden=False,
account__auto_pay_user__isnull=False
)
return invoices
class SubscriptionAdjustment(models.Model):
"""
A record of any adjustments made to a subscription, so we always have a paper trail.
Things that cannot be modified after a subscription is created:
- account
- plan
- subscriber
Things that have limited modification abilities:
- dates if the current date is today or earlier
All other modifications require cancelling the current subscription and creating a new one.
Note: related_subscription is the subscription to be filled in when the subscription is upgraded / downgraded.
"""
subscription = models.ForeignKey(Subscription, on_delete=models.PROTECT)
reason = models.CharField(max_length=50, default=SubscriptionAdjustmentReason.CREATE,
choices=SubscriptionAdjustmentReason.CHOICES)
method = models.CharField(max_length=50, default=SubscriptionAdjustmentMethod.INTERNAL,
choices=SubscriptionAdjustmentMethod.CHOICES)
note = models.TextField(null=True)
web_user = models.CharField(max_length=80, null=True)
invoice = models.ForeignKey(Invoice, on_delete=models.PROTECT, null=True)
related_subscription = models.ForeignKey(Subscription, on_delete=models.PROTECT, null=True,
related_name='subscriptionadjustment_related')
date_created = models.DateTimeField(auto_now_add=True)
new_date_start = models.DateField()
new_date_end = models.DateField(blank=True, null=True)
new_date_delay_invoicing = models.DateField(blank=True, null=True)
new_salesforce_contract_id = models.CharField(blank=True, null=True, max_length=80)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
@classmethod
def record_adjustment(cls, subscription, **kwargs):
adjustment = SubscriptionAdjustment(
subscription=subscription,
new_date_start=subscription.date_start,
new_date_end=subscription.date_end,
new_salesforce_contract_id=subscription.salesforce_contract_id,
**kwargs
)
adjustment.save()
return adjustment
class BillingRecordBase(models.Model):
"""
This stores any interaction we have with the client in sending a physical / pdf invoice to their contact email.
"""
date_created = models.DateTimeField(auto_now_add=True, db_index=True)
emailed_to_list = ArrayField(models.EmailField(), default=list)
skipped_email = models.BooleanField(default=False)
pdf_data_id = models.CharField(max_length=48)
last_modified = models.DateTimeField(auto_now=True)
INVOICE_HTML_TEMPLATE = 'accounting/email/invoice.html'
INVOICE_TEXT_TEMPLATE = 'accounting/email/invoice.txt'
class Meta(object):
abstract = True
_pdf = None
@property
def pdf(self):
if self._pdf is None:
return InvoicePdf.get(self.pdf_data_id)
return self._pdf
@property
def html_template(self):
return self.INVOICE_HTML_TEMPLATE
@property
def text_template(self):
return self.INVOICE_TEXT_TEMPLATE
@property
def should_send_email(self):
raise NotImplementedError("should_send_email is required")
@classmethod
def generate_record(cls, invoice):
record = cls(invoice=invoice)
invoice_pdf = InvoicePdf()
invoice_pdf.generate_pdf(record.invoice)
record.pdf_data_id = invoice_pdf._id
record._pdf = invoice_pdf
record.save()
return record
def handle_throttled_email(self, contact_emails):
self.skipped_email = True
month_name = self.invoice.date_start.strftime("%B")
self.save()
log_accounting_info(
"Throttled billing statements for domain %(domain)s "
"to %(emails)s." % {
'domain': self.invoice.get_domain(),
'emails': ', '.join(contact_emails),
}
)
raise InvoiceEmailThrottledError(
"Invoice communications exceeded the maximum limit of "
"%(max_limit)d for domain %(domain)s for the month of "
"%(month_name)s." % {
'max_limit': MAX_INVOICE_COMMUNICATIONS,
'domain': self.invoice.get_domain(),
'month_name': month_name,
})
def email_context(self):
from corehq.apps.domain.views.accounting import DomainBillingStatementsView
from corehq.apps.domain.views.settings import DefaultProjectSettingsView
month_name = self.invoice.date_start.strftime("%B")
domain = self.invoice.get_domain()
context = {
'month_name': month_name,
'domain': domain,
'domain_url': absolute_reverse(DefaultProjectSettingsView.urlname,
args=[domain]),
'statement_number': self.invoice.invoice_number,
'payment_status': (_("Paid") if self.invoice.is_paid
else _("Payment Required")),
'amount_due': fmt_dollar_amount(self.invoice.balance),
'statements_url': absolute_reverse(
DomainBillingStatementsView.urlname, args=[domain]),
'invoicing_contact_email': settings.INVOICING_CONTACT_EMAIL,
'accounts_email': settings.ACCOUNTS_EMAIL,
}
return context
def email_subject(self):
raise NotImplementedError()
def can_view_statement(self, web_user):
raise NotImplementedError()
def send_email(self, contact_email=None, cc_emails=None):
pdf_attachment = {
'title': self.pdf.get_filename(self.invoice),
'file_obj': BytesIO(self.pdf.get_data(self.invoice)),
'mimetype': 'application/pdf',
}
domain = self.invoice.get_domain()
subject = self.email_subject()
context = self.email_context()
email_from = self.email_from()
greeting = _("Hello,")
can_view_statement = False
web_user = WebUser.get_by_username(contact_email)
if web_user is not None:
if web_user.first_name:
greeting = _("Dear %s,") % web_user.first_name
can_view_statement = self.can_view_statement(web_user)
context['greeting'] = greeting
context['can_view_statement'] = can_view_statement
email_html = render_to_string(self.html_template, context)
email_plaintext = render_to_string(self.text_template, context)
send_html_email_async.delay(
subject, contact_email, email_html,
text_content=email_plaintext,
email_from=email_from,
file_attachments=[pdf_attachment],
cc=cc_emails
)
        self.emailed_to_list.append(contact_email)
if cc_emails:
self.emailed_to_list.extend(cc_emails)
self.save()
if self.invoice.is_customer_invoice:
log_message = "Sent billing statements for account %(account)s to %(emails)s." % {
'account': self.invoice.account,
'emails': contact_email,
}
else:
log_message = "Sent billing statements for domain %(domain)s to %(emails)s." % {
'domain': domain,
'emails': contact_email,
}
log_accounting_info(log_message)
class WireBillingRecord(BillingRecordBase):
invoice = models.ForeignKey(WireInvoice, on_delete=models.PROTECT)
INVOICE_HTML_TEMPLATE = 'accounting/email/wire_invoice.html'
INVOICE_TEXT_TEMPLATE = 'accounting/email/wire_invoice.txt'
class Meta(object):
app_label = 'accounting'
@property
def should_send_email(self):
hidden = self.invoice.is_hidden
return not hidden
@staticmethod
def is_email_throttled():
return False
def email_subject(self):
month_name = self.invoice.date_start.strftime("%B")
return "Your %(month)s Bulk Billing Statement for Project Space %(domain)s" % {
'month': month_name,
'domain': self.invoice.get_domain(),
}
@staticmethod
def email_from():
return "Dimagi Accounting <{email}>".format(email=settings.INVOICING_CONTACT_EMAIL)
def can_view_statement(self, web_user):
return web_user.is_domain_admin(self.invoice.get_domain())
class WirePrepaymentBillingRecord(WireBillingRecord):
class Meta(object):
app_label = 'accounting'
proxy = True
def email_subject(self):
return _("Your prepayment invoice")
def can_view_statement(self, web_user):
return web_user.is_domain_admin(self.invoice.get_domain())
class BillingRecord(BillingRecordBase):
invoice = models.ForeignKey(Invoice, on_delete=models.PROTECT)
INVOICE_CONTRACTED_HTML_TEMPLATE = 'accounting/email/invoice_contracted.html'
INVOICE_CONTRACTED_TEXT_TEMPLATE = 'accounting/email/invoice_contracted.txt'
INVOICE_AUTOPAY_HTML_TEMPLATE = 'accounting/email/invoice_autopayment.html'
INVOICE_AUTOPAY_TEXT_TEMPLATE = 'accounting/email/invoice_autopayment.txt'
class Meta(object):
app_label = 'accounting'
@property
def html_template(self):
if self.invoice.subscription.service_type == SubscriptionType.IMPLEMENTATION:
return self.INVOICE_CONTRACTED_HTML_TEMPLATE
if self.invoice.subscription.account.auto_pay_enabled:
return self.INVOICE_AUTOPAY_HTML_TEMPLATE
return self.INVOICE_HTML_TEMPLATE
@property
def text_template(self):
if self.invoice.subscription.service_type == SubscriptionType.IMPLEMENTATION:
return self.INVOICE_CONTRACTED_TEXT_TEMPLATE
if self.invoice.subscription.account.auto_pay_enabled:
return self.INVOICE_AUTOPAY_TEXT_TEMPLATE
return self.INVOICE_TEXT_TEMPLATE
@property
def should_send_email(self):
subscription = self.invoice.subscription
autogenerate = (subscription.auto_generate_credits and not self.invoice.balance)
small_contracted = (self.invoice.balance <= SMALL_INVOICE_THRESHOLD and
subscription.service_type == SubscriptionType.IMPLEMENTATION)
hidden = self.invoice.is_hidden
do_not_email_invoice = self.invoice.subscription.do_not_email_invoice
return not (autogenerate or small_contracted or hidden or do_not_email_invoice)
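    # Throttling: is_email_throttled counts billing records whose invoice
    # period overlaps the calendar month of this invoice's start date for
    # the same subscriber; beyond MAX_INVOICE_COMMUNICATIONS such records,
    # sending is treated as throttled (see handle_throttled_email).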
def is_email_throttled(self):
month = self.invoice.date_start.month
year = self.invoice.date_start.year
date_start, date_end = get_first_last_days(year, month)
return self.__class__.objects.filter(
invoice__date_start__lte=date_end, invoice__date_end__gte=date_start,
invoice__subscription__subscriber=self.invoice.subscription.subscriber,
invoice__is_hidden_to_ops=False,
).count() > MAX_INVOICE_COMMUNICATIONS
def email_context(self):
context = super(BillingRecord, self).email_context()
total_balance = sum(invoice.balance for invoice in Invoice.objects.filter(
is_hidden=False,
subscription__subscriber__domain=self.invoice.get_domain(),
))
is_small_invoice = self.invoice.balance < SMALL_INVOICE_THRESHOLD
payment_status = (_("Paid")
if self.invoice.is_paid or total_balance == 0
else _("Payment Required"))
context.update({
'plan_name': self.invoice.subscription.plan_version.plan.name,
'date_due': self.invoice.date_due,
'is_small_invoice': is_small_invoice,
'total_balance': total_balance,
'is_total_balance_due': total_balance >= SMALL_INVOICE_THRESHOLD,
'payment_status': payment_status,
})
if self.invoice.subscription.service_type == SubscriptionType.IMPLEMENTATION:
from corehq.apps.accounting.dispatcher import AccountingAdminInterfaceDispatcher
context.update({
'salesforce_contract_id': self.invoice.subscription.salesforce_contract_id,
'billing_account': self.invoice.subscription.account.name,
'billing_contacts': self.invoice.contact_emails,
'admin_invoices_url': "{url}?subscriber={domain}".format(
url=absolute_reverse(AccountingAdminInterfaceDispatcher.name(), args=['invoices']),
domain=self.invoice.get_domain()
)
})
if self.invoice.subscription.account.auto_pay_enabled:
try:
last_4 = getattr(self.invoice.subscription.account.autopay_card, 'last4', None)
except StripePaymentMethod.DoesNotExist:
last_4 = None
context.update({
'auto_pay_user': self.invoice.subscription.account.auto_pay_user,
'last_4': last_4,
})
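        # 'credits' below is the bound method; Django's template engine
        # calls zero-argument callables automatically when resolving
        # {{ credits }}, so templates receive the computed dict.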
context.update({
'credits': self.credits,
})
return context
def credits(self):
credits = {
'account': {},
'subscription': {},
}
self._add_product_credits(credits)
self._add_user_credits(credits)
self._add_sms_credits(credits)
self._add_general_credits(credits)
return credits
def _add_product_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
invoice=self.invoice,
line_item__product_rate__isnull=False,
)
subscription_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_by_subscription_and_features(
self.invoice.subscription,
is_product=True,
)
)
if subscription_credits or credit_adjustments.filter(
credit_line__subscription=self.invoice.subscription,
):
credits['subscription'].update({
'product': {
'amount': quantize_accounting_decimal(subscription_credits),
}
})
account_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.subscription.account,
is_product=True,
)
)
if account_credits or credit_adjustments.filter(
credit_line__subscription=None,
):
credits['account'].update({
'product': {
'amount': quantize_accounting_decimal(account_credits),
}
})
return credits
def _add_user_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
invoice=self.invoice,
line_item__feature_rate__feature__feature_type=FeatureType.USER,
)
subscription_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_by_subscription_and_features(
self.invoice.subscription,
feature_type=FeatureType.USER,
)
)
if subscription_credits or credit_adjustments.filter(
credit_line__subscription=self.invoice.subscription,
):
credits['subscription'].update({
'user': {
'amount': quantize_accounting_decimal(subscription_credits),
}
})
account_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.subscription.account,
feature_type=FeatureType.USER,
)
)
if account_credits or credit_adjustments.filter(
credit_line__subscription=None,
):
credits['account'].update({
'user': {
'amount': quantize_accounting_decimal(account_credits),
}
})
return credits
def _add_sms_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
invoice=self.invoice,
line_item__feature_rate__feature__feature_type=FeatureType.SMS,
)
subscription_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_by_subscription_and_features(
self.invoice.subscription,
feature_type=FeatureType.SMS,
)
)
if subscription_credits or credit_adjustments.filter(
credit_line__subscription=self.invoice.subscription,
):
credits['subscription'].update({
'sms': {
'amount': quantize_accounting_decimal(subscription_credits),
}
})
account_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.subscription.account,
feature_type=FeatureType.SMS,
)
)
if account_credits or credit_adjustments.filter(
credit_line__subscription=None,
):
credits['account'].update({
'sms': {
'amount': quantize_accounting_decimal(account_credits),
}
})
return credits
def _add_general_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
invoice=self.invoice,
line_item__feature_rate=None,
line_item__product_rate=None,
)
subscription_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_by_subscription_and_features(
self.invoice.subscription,
)
)
if subscription_credits or credit_adjustments.filter(
credit_line__subscription=self.invoice.subscription,
):
credits['subscription'].update({
'general': {
'amount': quantize_accounting_decimal(subscription_credits),
}
})
account_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.subscription.account,
)
)
if account_credits or credit_adjustments.filter(
credit_line__subscription=None,
):
credits['account'].update({
'general': {
'amount': quantize_accounting_decimal(account_credits),
}
})
return credits
def email_subject(self):
month_name = self.invoice.date_start.strftime("%B")
return "Your %(month)s CommCare Billing Statement for Project Space %(domain)s" % {
'month': month_name,
'domain': self.invoice.subscription.subscriber.domain,
}
def email_from(self):
return get_dimagi_from_email()
@staticmethod
def _get_total_balance(credit_lines):
return (
sum([credit_line.balance for credit_line in credit_lines])
if credit_lines else Decimal('0.0')
)
def can_view_statement(self, web_user):
return web_user.is_domain_admin(self.invoice.get_domain())
class CustomerBillingRecord(BillingRecordBase):
invoice = models.ForeignKey(CustomerInvoice, on_delete=models.PROTECT)
INVOICE_AUTOPAY_HTML_TEMPLATE = 'accounting/email/invoice_autopayment.html'
INVOICE_AUTOPAY_TEXT_TEMPLATE = 'accounting/email/invoice_autopayment.txt'
INVOICE_HTML_TEMPLATE = 'accounting/email/customer_invoice.html'
INVOICE_TEXT_TEMPLATE = 'accounting/email/customer_invoice.txt'
class Meta(object):
app_label = 'accounting'
@property
def html_template(self):
if self.invoice.account.auto_pay_enabled:
return self.INVOICE_AUTOPAY_HTML_TEMPLATE
return self.INVOICE_HTML_TEMPLATE
@property
def text_template(self):
if self.invoice.account.auto_pay_enabled:
return self.INVOICE_AUTOPAY_TEXT_TEMPLATE
return self.INVOICE_TEXT_TEMPLATE
@property
def should_send_email(self):
return not self.invoice.is_hidden
def email_context(self):
from corehq.apps.accounting.views import EnterpriseBillingStatementsView
context = super(CustomerBillingRecord, self).email_context()
is_small_invoice = self.invoice.balance < SMALL_INVOICE_THRESHOLD
payment_status = (_("Paid")
if self.invoice.is_paid or self.invoice.balance == 0
else _("Payment Required"))
        # Any subscription's domain works here, because all subscriptions on
        # a customer account link to the same Enterprise Dashboard.
domain = self.invoice.subscriptions.first().subscriber.domain
context.update({
'account_name': self.invoice.account.name,
'date_due': self.invoice.date_due,
'is_small_invoice': is_small_invoice,
'total_balance': '{:.2f}'.format(self.invoice.balance),
'is_total_balance_due': self.invoice.balance >= SMALL_INVOICE_THRESHOLD,
'payment_status': payment_status,
'statements_url': absolute_reverse(
EnterpriseBillingStatementsView.urlname, args=[domain]),
})
if self.invoice.account.auto_pay_enabled:
try:
last_4 = getattr(self.invoice.account.autopay_card, 'last4', None)
except StripePaymentMethod.DoesNotExist:
last_4 = None
context.update({
'auto_pay_user': self.invoice.account.auto_pay_user,
'last_4': last_4,
})
context.update({
'credits': self.credits,
})
return context
def credits(self):
credits = {
'account': {},
'subscription': {},
}
self._add_product_credits(credits)
self._add_user_credits(credits)
self._add_sms_credits(credits)
self._add_general_credits(credits)
return credits
def _add_product_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
customer_invoice=self.invoice,
line_item__product_rate__isnull=False
)
subscription_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_subscriptions(
self.invoice.subscriptions,
is_product=True
)
)
if subscription_credits or self._subscriptions_in_credit_adjustments(credit_adjustments):
            credits['subscription'].update({
'product': {
'amount': quantize_accounting_decimal(subscription_credits)
}
})
account_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.account,
is_product=True
)
)
if account_credits or credit_adjustments.filter(credit_line__subscription=None):
credits['account'].update({
'product': {
'amount': quantize_accounting_decimal(account_credits)
}
})
return credits
def _add_user_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
customer_invoice=self.invoice,
line_item__feature_rate__feature__feature_type=FeatureType.USER
)
subscription_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_subscriptions(
self.invoice.subscriptions,
feature_type=FeatureType.USER
)
)
if subscription_credits or self._subscriptions_in_credit_adjustments(credit_adjustments):
credits['subscription'].update({
'user': {
'amount': quantize_accounting_decimal(subscription_credits)
}
})
account_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.account,
feature_type=FeatureType.USER
)
)
if account_credits or credit_adjustments.filter(credit_line__subscription=None):
credits['account'].update({
'user': {
'amount': quantize_accounting_decimal(account_credits)
}
})
return credits
def _add_sms_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
customer_invoice=self.invoice,
line_item__feature_rate__feature__feature_type=FeatureType.SMS
)
subscription_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_subscriptions(
self.invoice.subscriptions,
feature_type=FeatureType.SMS
)
)
if subscription_credits or self._subscriptions_in_credit_adjustments(credit_adjustments):
credits['subscription'].update({
'sms': {
'amount': quantize_accounting_decimal(subscription_credits)
}
})
account_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.account,
feature_type=FeatureType.SMS
)
)
if account_credits or credit_adjustments.filter(credit_line__subscription=None):
credits['account'].update({
'sms': {
'amount': quantize_accounting_decimal(account_credits)
}
})
return credits
def _add_general_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
customer_invoice=self.invoice,
line_item__feature_rate=None,
line_item__product_rate=None
)
subscription_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_subscriptions(
self.invoice.subscriptions
)
)
if subscription_credits or self._subscriptions_in_credit_adjustments(credit_adjustments):
credits['subscription'].update({
'general': {
'amount': quantize_accounting_decimal(subscription_credits)
}
})
account_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.account
)
)
if account_credits or credit_adjustments.filter(credit_line__subscription=None):
credits['account'].update({
'general': {
'amount': quantize_accounting_decimal(account_credits)
}
})
return credits
def _subscriptions_in_credit_adjustments(self, credit_adjustments):
for subscription in self.invoice.subscriptions.all():
if credit_adjustments.filter(
credit_line__subscription=subscription
):
return True
return False
def email_subject(self):
month_name = self.invoice.date_start.strftime("%B")
return "Your %(month)s CommCare Billing Statement for Customer Account %(account_name)s" % {
'month': month_name,
'account_name': self.invoice.account.name,
}
def email_from(self):
return get_dimagi_from_email()
@staticmethod
def _get_total_balance(credit_lines):
return (
sum([credit_line.balance for credit_line in credit_lines])
if credit_lines else Decimal('0.0')
)
def can_view_statement(self, web_user):
for subscription in self.invoice.subscriptions.all():
if web_user.is_domain_admin(subscription.subscriber.domain):
return True
return False
class InvoicePdf(BlobMixin, SafeSaveDocument):
invoice_id = StringProperty()
date_created = DateTimeProperty()
is_wire = BooleanProperty(default=False)
is_customer = BooleanProperty(default=False)
_blobdb_type_code = CODES.invoice
def generate_pdf(self, invoice):
self.save()
domain = invoice.get_domain()
pdf_data = NamedTemporaryFile()
account_name = ''
if invoice.is_customer_invoice:
account_name = invoice.account.name
template = InvoiceTemplate(
pdf_data.name,
invoice_number=invoice.invoice_number,
to_address=get_address_from_invoice(invoice),
project_name=domain,
invoice_date=invoice.date_created.date(),
due_date=invoice.date_due,
date_start=invoice.date_start,
date_end=invoice.date_end,
subtotal=invoice.subtotal,
tax_rate=invoice.tax_rate,
            applied_tax=getattr(invoice, 'applied_tax', Decimal('0.0000')),
            applied_credit=getattr(invoice, 'applied_credit', Decimal('0.0000')),
total=invoice.get_total(),
is_wire=invoice.is_wire,
is_customer=invoice.is_customer_invoice,
is_prepayment=invoice.is_wire and invoice.is_prepayment,
account_name=account_name
)
if not invoice.is_wire:
if invoice.is_customer_invoice:
line_items = LineItem.objects.filter(customer_invoice=invoice)
else:
line_items = LineItem.objects.filter(subscription_invoice=invoice)
for line_item in line_items:
is_unit = line_item.unit_description is not None
                is_quarterly = (line_item.invoice.is_customer_invoice and
                                line_item.invoice.account.invoicing_plan != InvoicingPlan.MONTHLY)
unit_cost = line_item.subtotal
if is_unit:
unit_cost = line_item.unit_cost
if is_quarterly and line_item.base_description is not None:
unit_cost = line_item.product_rate.monthly_fee
description = line_item.base_description or line_item.unit_description
if line_item.quantity > 0:
template.add_item(
description,
line_item.quantity if is_unit or is_quarterly else 1,
unit_cost,
line_item.subtotal,
line_item.applied_credit,
line_item.total
)
if invoice.is_wire and invoice.is_prepayment:
unit_cost = 1
applied_credit = 0
for item in invoice.items:
template.add_item(item['type'],
item['amount'],
unit_cost,
item['amount'],
applied_credit,
item['amount'])
template.get_pdf()
filename = self.get_filename(invoice)
blob_domain = domain or UNKNOWN_DOMAIN
# this is slow and not unit tested
# best to just skip during unit tests for speed
if not settings.UNIT_TESTING:
self.put_attachment(pdf_data, filename, 'application/pdf', domain=blob_domain)
else:
self.put_attachment('', filename, 'application/pdf', domain=blob_domain)
pdf_data.close()
self.invoice_id = str(invoice.id)
self.date_created = datetime.datetime.utcnow()
self.is_wire = invoice.is_wire
self.is_customer = invoice.is_customer_invoice
self.save()
@staticmethod
def get_filename(invoice):
return "statement_%(year)d_%(month)d.pdf" % {
'year': invoice.date_start.year,
'month': invoice.date_start.month,
}
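    # An invoice whose period starts in March 2020 yields
    # 'statement_2020_3.pdf'; the month is not zero-padded.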
def get_data(self, invoice):
with self.fetch_attachment(self.get_filename(invoice), stream=True) as fh:
return fh.read()
class LineItemManager(models.Manager):
def get_products(self):
return self.get_queryset().filter(feature_rate__exact=None)
def get_features(self):
return self.get_queryset().filter(product_rate__exact=None)
def get_feature_by_type(self, feature_type):
return self.get_queryset().filter(feature_rate__feature__feature_type=feature_type)
class LineItem(models.Model):
subscription_invoice = models.ForeignKey(Invoice, on_delete=models.PROTECT, null=True)
customer_invoice = models.ForeignKey(CustomerInvoice, on_delete=models.PROTECT, null=True)
feature_rate = models.ForeignKey(FeatureRate, on_delete=models.PROTECT, null=True)
product_rate = models.ForeignKey(SoftwareProductRate, on_delete=models.PROTECT, null=True)
base_description = models.TextField(blank=True, null=True)
base_cost = models.DecimalField(default=Decimal('0.0000'), max_digits=10, decimal_places=4)
unit_description = models.TextField(blank=True, null=True)
unit_cost = models.DecimalField(default=Decimal('0.0000'), max_digits=10, decimal_places=4)
quantity = models.IntegerField(default=1, validators=integer_field_validators)
last_modified = models.DateTimeField(auto_now=True)
objects = LineItemManager()
class Meta(object):
app_label = 'accounting'
@property
def invoice(self):
if self.subscription_invoice:
return self.subscription_invoice
else:
return self.customer_invoice
@invoice.setter
def invoice(self, invoice):
if invoice.is_customer_invoice:
self.customer_invoice = invoice
else:
self.subscription_invoice = invoice
@property
def subtotal(self):
if self.customer_invoice and self.customer_invoice.account.invoicing_plan != InvoicingPlan.MONTHLY:
return self.base_cost * self.quantity + self.unit_cost * self.quantity
return self.base_cost + self.unit_cost * self.quantity
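    # Worked example (hypothetical values): base_cost=10, unit_cost=2,
    # quantity=3 gives 10 + 2 * 3 = 16 on a monthly invoice, but
    # 10 * 3 + 2 * 3 = 36 on a quarterly/yearly customer invoice, where the
    # base fee is presumably repeated for each month covered.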
@property
def applied_credit(self):
"""
The total amount of credit applied specifically to this LineItem.
"""
if self.creditadjustment_set.count() == 0:
return Decimal('0.0000')
return sum([credit.amount for credit in self.creditadjustment_set.all()])
@property
def total(self):
return self.subtotal + self.applied_credit
def calculate_credit_adjustments(self):
"""
This goes through all credit lines that:
- specify the related feature or product rate that generated this line item
"""
current_total = self.total
credit_lines = CreditLine.get_credits_for_line_item(self)
CreditLine.apply_credits_toward_balance(credit_lines, current_total, line_item=self)
class CreditLine(models.Model):
"""
    The amount of money in USD that can be applied toward a specific account,
    a specific subscription, or specific rates in that subscription.
"""
account = models.ForeignKey(BillingAccount, on_delete=models.PROTECT)
subscription = models.ForeignKey(Subscription, on_delete=models.PROTECT, null=True, blank=True)
is_product = models.BooleanField(default=False)
feature_type = models.CharField(max_length=10, null=True, blank=True,
choices=FeatureType.CHOICES)
date_created = models.DateTimeField(auto_now_add=True)
balance = models.DecimalField(default=Decimal('0.0000'), max_digits=10, decimal_places=4)
is_active = models.BooleanField(default=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def __str__(self):
credit_level = ("Account-Level" if self.subscription is None
else "Subscription-Level")
return ("%(level)s credit [Account %(account_id)d]%(feature)s"
"%(product)s, balance %(balance)s" % {
'level': credit_level,
'account_id': self.account.id,
'feature': (' for Feature %s' % self.feature_type
if self.feature_type is not None else ""),
'product': (' for Product'
if self.is_product else ""),
'balance': self.balance,
})
def save(self, *args, **kwargs):
from corehq.apps.accounting.mixins import (
get_credits_available_for_product_in_account,
get_credits_available_for_product_in_subscription,
)
super(CreditLine, self).save(*args, **kwargs)
if self.account:
get_credits_available_for_product_in_account.clear(self.account)
if self.subscription:
get_credits_available_for_product_in_subscription.clear(self.subscription)
def adjust_credit_balance(self, amount, is_new=False, note=None,
line_item=None, invoice=None, customer_invoice=None,
payment_record=None, related_credit=None,
reason=None, web_user=None):
note = note or ""
if line_item is not None and (invoice is not None or customer_invoice is not None):
raise CreditLineError("You may only have an invoice OR a line item making this adjustment.")
if reason is None:
reason = CreditAdjustmentReason.MANUAL
if payment_record is not None:
reason = CreditAdjustmentReason.DIRECT_PAYMENT
elif related_credit is not None:
reason = CreditAdjustmentReason.TRANSFER
elif invoice is not None:
reason = CreditAdjustmentReason.INVOICE
elif customer_invoice is not None:
reason = CreditAdjustmentReason.INVOICE
elif line_item is not None:
reason = CreditAdjustmentReason.LINE_ITEM
if is_new:
note = "Initialization of credit line. %s" % note
credit_adjustment = CreditAdjustment(
credit_line=self,
note=note,
amount=amount,
reason=reason,
payment_record=payment_record,
line_item=line_item,
invoice=invoice,
customer_invoice=customer_invoice,
related_credit=related_credit,
web_user=web_user,
)
credit_adjustment.save()
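        # F('balance') turns the increment into a single atomic SQL UPDATE,
        # so concurrent adjustments cannot overwrite each other;
        # refresh_from_db() then reloads the computed balance onto this
        # instance.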
self.balance = F('balance') + amount
self.save()
self.refresh_from_db()
@classmethod
def get_credits_for_line_item(cls, line_item):
is_product = line_item.product_rate is not None
feature_type = (
line_item.feature_rate.feature.feature_type
if line_item.feature_rate is not None else None
)
assert is_product or feature_type
assert not (is_product and feature_type)
if line_item.invoice.is_customer_invoice:
return cls.get_credits_for_line_item_in_customer_invoice(line_item, feature_type, is_product)
else:
return cls.get_credits_for_line_item_in_invoice(line_item, feature_type, is_product)
@classmethod
def get_credits_for_line_item_in_invoice(cls, line_item, feature_type, is_product):
if feature_type:
return itertools.chain(
cls.get_credits_by_subscription_and_features(
line_item.invoice.subscription,
feature_type=feature_type,
),
cls.get_credits_for_account(
line_item.invoice.subscription.account,
feature_type=feature_type,
)
)
if is_product:
return itertools.chain(
cls.get_credits_by_subscription_and_features(
line_item.invoice.subscription,
is_product=True,
),
cls.get_credits_for_account(
line_item.invoice.subscription.account,
is_product=True,
)
)
@classmethod
def get_credits_for_line_item_in_customer_invoice(cls, line_item, feature_type, is_product):
if feature_type:
return itertools.chain(
cls.get_credits_for_subscriptions(
subscriptions=line_item.invoice.subscriptions.all(),
feature_type=feature_type
),
cls.get_credits_for_account(
account=line_item.invoice.account,
feature_type=feature_type
)
)
if is_product:
return itertools.chain(
cls.get_credits_for_subscriptions(
subscriptions=line_item.invoice.subscriptions.all(),
is_product=is_product
),
cls.get_credits_for_account(
account=line_item.invoice.account,
is_product=is_product
)
)
@classmethod
def get_credits_for_invoice(cls, invoice):
relevant_credits = [
cls.get_credits_by_subscription_and_features(invoice.subscription),
cls.get_credits_for_account(invoice.subscription.account)
]
if invoice.subscription.next_subscription:
# check for a transfer of subscription credits due to upgrades by
# looking first at the active subscription or the "next" subscription
# if the accounts don't match with the active subscription.
active_sub = Subscription.get_active_subscription_by_domain(
invoice.subscription.subscriber.domain
)
if active_sub.account == invoice.subscription.account:
relevant_credits.append(
cls.get_credits_by_subscription_and_features(active_sub)
)
elif (invoice.subscription.next_subscription.account
== invoice.subscription.account):
relevant_credits.append(
cls.get_credits_by_subscription_and_features(
invoice.subscription.next_subscription
)
)
return itertools.chain(*relevant_credits)
@classmethod
def get_credits_for_customer_invoice(cls, invoice):
return itertools.chain(
cls.get_credits_for_subscriptions(invoice.subscriptions.all()),
cls.get_credits_for_account(invoice.account)
)
@classmethod
def get_credits_for_subscriptions(cls, subscriptions, feature_type=None, is_product=False):
credit_list = cls.objects.none()
for subscription in subscriptions.all():
credit_list = credit_list.union(cls.get_credits_by_subscription_and_features(
subscription,
feature_type=feature_type,
is_product=is_product
))
return credit_list
@classmethod
def get_credits_for_account(cls, account, feature_type=None, is_product=False):
assert not (feature_type and is_product)
return cls.objects.filter(
account=account, subscription__exact=None, is_active=True
).filter(
is_product=is_product, feature_type__exact=feature_type
).all()
@classmethod
def get_credits_by_subscription_and_features(cls, subscription,
feature_type=None,
is_product=False):
assert not (feature_type and is_product)
return cls.objects.filter(
subscription=subscription,
feature_type__exact=feature_type,
is_product=is_product,
is_active=True
).all()
@classmethod
def get_non_general_credits_by_subscription(cls, subscription):
return cls.objects.filter(subscription=subscription, is_active=True).filter(
Q(is_product=True) |
Q(feature_type__in=[f[0] for f in FeatureType.CHOICES])
).all()
@classmethod
def add_credit(cls, amount, account=None, subscription=None,
is_product=False, feature_type=None, payment_record=None,
invoice=None, customer_invoice=None, line_item=None, related_credit=None,
note=None, reason=None, web_user=None, permit_inactive=False):
if account is None and subscription is None:
raise CreditLineError(
"You must specify either a subscription "
"or account to add this credit to."
)
if feature_type is not None and is_product:
raise CreditLineError(
"Can only add credit for a product OR a feature, but not both."
)
account = account or subscription.account
try:
credit_line = cls.objects.get(
account__exact=account,
subscription__exact=subscription,
is_product=is_product,
feature_type__exact=feature_type,
is_active=True
)
if not permit_inactive and not credit_line.is_active and not invoice:
raise CreditLineError(
"Could not add credit to CreditLine %s because it is "
"inactive." % str(credit_line)
)
is_new = False
except cls.MultipleObjectsReturned as e:
raise CreditLineError(
"Could not find a unique credit line for %(account)s"
"%(subscription)s%(feature)s%(product)s. %(error)s"
"instead." % {
'account': "Account ID %d" % account.id,
'subscription': (" | Subscription ID %d" % subscription.id
if subscription is not None else ""),
'feature': (" | Feature %s" % feature_type
if feature_type is not None else ""),
'product': (" | Product" if is_product else ""),
'error': str(e),
}
)
except cls.DoesNotExist:
credit_line = cls.objects.create(
account=account,
subscription=subscription,
is_product=is_product,
feature_type=feature_type,
)
is_new = True
credit_line.adjust_credit_balance(amount, is_new=is_new, note=note,
payment_record=payment_record,
invoice=invoice, customer_invoice=customer_invoice, line_item=line_item,
related_credit=related_credit,
reason=reason, web_user=web_user)
return credit_line
@classmethod
def apply_credits_toward_balance(cls, credit_lines, balance, **kwargs):
for credit_line in credit_lines:
if balance == Decimal('0.0000'):
return
if balance <= Decimal('0.0000'):
raise CreditLineError(
"A balance went below zero dollars when applying credits "
"to credit line %d." % credit_line.pk
)
adjustment_amount = min(credit_line.balance, balance)
if adjustment_amount > Decimal('0.0000'):
credit_line.adjust_credit_balance(-adjustment_amount, **kwargs)
balance -= adjustment_amount
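    # Worked example (hypothetical amounts): credit lines with balances 30
    # and 50 applied to a balance of 60 drain the first line to 0, deduct
    # the remaining 30 from the second (leaving it at 20), and return once
    # the balance reaches zero.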
@classmethod
def make_payment_towards_invoice(cls, invoice, payment_record):
""" Make a payment for a billing account towards an invoice """
if invoice.is_customer_invoice:
billing_account = invoice.account
else:
billing_account = invoice.subscription.account
cls.add_credit(
payment_record.amount,
account=billing_account,
payment_record=payment_record,
)
cls.add_credit(
-payment_record.amount,
account=billing_account,
invoice=invoice,
)
class PaymentMethod(models.Model):
"""A link to a particular payment method for an account.
Right now the only payment methods are via Stripe, but leaving that
open for future changes.
    :customer_id: the identifier used by the payment method's API to
    uniquely identify the payer on their end.
"""
web_user = models.CharField(max_length=80, db_index=True)
method_type = models.CharField(max_length=50,
default=PaymentMethodType.STRIPE,
choices=PaymentMethodType.CHOICES,
db_index=True)
customer_id = models.CharField(max_length=255, null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
unique_together = ('web_user', 'method_type')
class StripePaymentMethod(PaymentMethod):
""" Do stuff with Stripe """
class Meta(object):
proxy = True
app_label = 'accounting'
STRIPE_GENERIC_ERROR = (stripe.error.AuthenticationError,
stripe.error.InvalidRequestError,
stripe.error.APIConnectionError,
stripe.error.StripeError,)
@property
def customer(self):
return self._get_or_create_stripe_customer()
def _get_or_create_stripe_customer(self):
customer = None
if self.customer_id is not None:
try:
customer = self._get_stripe_customer()
            except stripe.error.InvalidRequestError:
pass
if customer is None:
customer = self._create_stripe_customer()
return customer
def _create_stripe_customer(self):
customer = stripe.Customer.create(
description="{}'s cards".format(self.web_user),
email=self.web_user,
)
self.customer_id = customer.id
self.save()
return customer
def _get_stripe_customer(self):
return stripe.Customer.retrieve(self.customer_id)
@property
def all_cards(self):
try:
return [card for card in self.customer.cards.data if card is not None]
except stripe.error.AuthenticationError:
if not settings.STRIPE_PRIVATE_KEY:
log_accounting_info("Private key is not defined in settings")
return []
else:
raise
def all_cards_serialized(self, billing_account):
return [{
'brand': card.brand,
'last4': card.last4,
'exp_month': card.exp_month,
'exp_year': card.exp_year,
'token': card.id,
'is_autopay': self._is_autopay(card, billing_account),
} for card in self.all_cards]
def get_card(self, card_token):
return self.customer.cards.retrieve(card_token)
def get_autopay_card(self, billing_account):
return next((
card for card in self.all_cards
if self._is_autopay(card, billing_account)
), None)
def remove_card(self, card_token):
card = self.get_card(card_token)
self._remove_card_from_all_accounts(card)
card.delete()
def _remove_card_from_all_accounts(self, card):
accounts = BillingAccount.objects.filter(auto_pay_user=self.web_user)
for account in accounts:
if account.autopay_card == card:
account.remove_autopay_user()
def create_card(self, stripe_token, billing_account, domain, autopay=False):
customer = self.customer
card = customer.cards.create(card=stripe_token)
self.set_default_card(card)
if autopay:
self.set_autopay(card, billing_account, domain)
return card
    def set_default_card(self, card):
        # self.customer re-fetches the Stripe customer on every access, so
        # hold a single reference while mutating and saving it.
        customer = self.customer
        customer.default_card = card
        customer.save()
        return card
def set_autopay(self, card, billing_account, domain):
"""
        Sets the auto_pay status on the card for a billing account.
        If other cards already auto-pay for that billing account, they are
        unset first.
"""
if billing_account.auto_pay_enabled:
self._remove_other_auto_pay_cards(billing_account)
self._update_autopay_status(card, billing_account, autopay=True)
billing_account.update_autopay_user(self.web_user, domain)
def unset_autopay(self, card, billing_account):
"""
Unsets the auto_pay status for this card, and removes it from the billing account
"""
if self._is_autopay(card, billing_account):
self._update_autopay_status(card, billing_account, autopay=False)
billing_account.remove_autopay_user()
def _update_autopay_status(self, card, billing_account, autopay):
metadata = card.metadata.copy()
metadata.update({self._auto_pay_card_metadata_key(billing_account): autopay})
card.metadata = metadata
card.save()
def _remove_autopay_card(self, billing_account):
autopay_card = self.get_autopay_card(billing_account)
if autopay_card is not None:
self._update_autopay_status(autopay_card, billing_account, autopay=False)
@staticmethod
def _remove_other_auto_pay_cards(billing_account):
user = billing_account.auto_pay_user
try:
other_payment_method = StripePaymentMethod.objects.get(web_user=user)
other_payment_method._remove_autopay_card(billing_account)
except StripePaymentMethod.DoesNotExist:
pass
@staticmethod
def _is_autopay(card, billing_account):
return card.metadata.get(StripePaymentMethod._auto_pay_card_metadata_key(billing_account)) == 'True'
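    # Stripe stores metadata values as strings, so the boolean written by
    # _update_autopay_status round-trips as the string 'True'.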
@staticmethod
def _auto_pay_card_metadata_key(billing_account):
"""
Returns the autopay key for the billing account
Cards can be used to autopay for multiple billing accounts. This is stored in the `metadata` property
on the card: {metadata: {auto_pay_{billing_account_id_1}: True, auto_pay_{billing_account_id_2}: False}}
"""
return 'auto_pay_{billing_account_id}'.format(billing_account_id=billing_account.id)
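        # e.g. a billing account with id 7 is flagged under the key
        # 'auto_pay_7' in the card's metadata.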
def create_charge(self, card, amount_in_dollars, description):
""" Charges a stripe card and returns a transaction id """
        amount_in_cents = int((amount_in_dollars * Decimal('100')).quantize(Decimal('1')))
transaction_record = stripe.Charge.create(
card=card,
customer=self.customer,
amount=amount_in_cents,
currency=settings.DEFAULT_CURRENCY,
description=description,
)
return transaction_record.id
class PaymentRecord(models.Model):
"""Records the transaction with external payment APIs.
"""
payment_method = models.ForeignKey(PaymentMethod, on_delete=models.PROTECT,
db_index=True)
date_created = models.DateTimeField(auto_now_add=True)
transaction_id = models.CharField(max_length=255, unique=True)
amount = models.DecimalField(default=Decimal('0.0000'),
max_digits=10, decimal_places=4)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
@property
def public_transaction_id(self):
ops_num = settings.INVOICE_STARTING_NUMBER + self.id
return "%sP-%d" % (settings.INVOICE_PREFIX, ops_num)
@classmethod
def create_record(cls, payment_method, transaction_id, amount):
return cls.objects.create(
payment_method=payment_method,
transaction_id=transaction_id,
amount=amount,
)
class CreditAdjustment(ValidateModelMixin, models.Model):
"""
A record of any additions (positive amounts) or deductions (negative amounts) that contributed to the
current balance of the associated CreditLine.
"""
credit_line = models.ForeignKey(CreditLine, on_delete=models.PROTECT)
reason = models.CharField(max_length=25, default=CreditAdjustmentReason.MANUAL,
choices=CreditAdjustmentReason.CHOICES)
note = models.TextField(blank=True)
amount = models.DecimalField(default=Decimal('0.0000'), max_digits=10, decimal_places=4)
line_item = models.ForeignKey(LineItem, on_delete=models.PROTECT, null=True, blank=True)
invoice = models.ForeignKey(Invoice, on_delete=models.PROTECT, null=True, blank=True)
customer_invoice = models.ForeignKey(CustomerInvoice, on_delete=models.PROTECT, null=True, blank=True)
payment_record = models.ForeignKey(PaymentRecord,
on_delete=models.PROTECT, null=True, blank=True)
related_credit = models.ForeignKey(CreditLine, on_delete=models.PROTECT,
null=True, blank=True, related_name='creditadjustment_related')
date_created = models.DateTimeField(auto_now_add=True)
web_user = models.CharField(max_length=80, null=True, blank=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def clean(self):
"""
        Only a line item or an invoice may be specified as the adjuster,
        not both.
"""
if self.line_item and self.invoice:
raise ValidationError(_("You can't specify both an invoice and a line item."))
class DomainUserHistory(models.Model):
"""
    A record of the number of users in a domain on record_date.
    Created by the task calculate_users_and_sms_in_all_domains on the first
    of every month, and used to bill clients for the appropriate number of
    users.
"""
domain = models.CharField(max_length=256)
record_date = models.DateField()
num_users = models.IntegerField(default=0)
    class Meta(object):
unique_together = ('domain', 'record_date')
| 38.552843
| 115
| 0.641621
|
import datetime
import itertools
from decimal import Decimal
from io import BytesIO
from tempfile import NamedTemporaryFile
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models, transaction
from django.db.models import F, Q
from django.db.models.manager import Manager
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.utils.translation import ugettext_lazy as _
import jsonfield
import stripe
from django_prbac.models import Role
from memoized import memoized
from corehq.apps.domain.shortcuts import publish_domain_saved
from dimagi.ext.couchdbkit import (
BooleanProperty,
DateTimeProperty,
SafeSaveDocument,
StringProperty,
)
from dimagi.utils.web import get_site_domain
from corehq.apps.accounting.emails import send_subscription_change_alert
from corehq.apps.accounting.exceptions import (
AccountingError,
CreditLineError,
InvoiceEmailThrottledError,
NewSubscriptionError,
ProductPlanNotFoundError,
SubscriptionAdjustmentError,
SubscriptionChangeError,
SubscriptionReminderError,
SubscriptionRenewalError,
)
from corehq.apps.accounting.invoice_pdf import InvoiceTemplate
from corehq.apps.accounting.signals import subscription_upgrade_or_downgrade
from corehq.apps.accounting.subscription_changes import (
DomainDowngradeActionHandler,
DomainUpgradeActionHandler,
)
from corehq.apps.accounting.utils import (
EXCHANGE_RATE_DECIMAL_PLACES,
ensure_domain_instance,
fmt_dollar_amount,
get_account_name_from_default_name,
get_address_from_invoice,
get_change_status,
get_dimagi_from_email,
get_privileges,
is_active_subscription,
log_accounting_error,
log_accounting_info,
quantize_accounting_decimal,
)
from corehq.apps.domain import UNKNOWN_DOMAIN
from corehq.apps.domain.models import Domain
from corehq.apps.hqwebapp.tasks import send_html_email_async
from corehq.apps.users.models import WebUser
from corehq.blobs.mixin import CODES, BlobMixin
from corehq.const import USER_DATE_FORMAT
from corehq.privileges import REPORT_BUILDER_ADD_ON_PRIVS
from corehq.util.dates import get_first_last_days
from corehq.util.mixin import ValidateModelMixin
from corehq.util.quickcache import quickcache
from corehq.util.soft_assert import soft_assert
from corehq.util.view_utils import absolute_reverse
integer_field_validators = [MaxValueValidator(2147483647), MinValueValidator(-2147483648)]
MAX_INVOICE_COMMUNICATIONS = 5
SMALL_INVOICE_THRESHOLD = 100
UNLIMITED_FEATURE_USAGE = -1
MINIMUM_SUBSCRIPTION_LENGTH = 30
_soft_assert_contact_emails_missing = soft_assert(
to=['{}@{}'.format(email, 'dimagi.com') for email in [
'accounts',
'billing-dev',
]],
exponential_backoff=False,
)
class BillingAccountType(object):
CONTRACT = "CONTRACT"
USER_CREATED = "USER_CREATED"
GLOBAL_SERVICES = "GLOBAL_SERVICES"
INVOICE_GENERATED = "INVOICE_GENERATED"
TRIAL = "TRIAL"
CHOICES = (
(CONTRACT, "Created by contract"),
(USER_CREATED, "Created by user"),
(GLOBAL_SERVICES, "Created by Global Services"),
(INVOICE_GENERATED, "Generated by an invoice"),
(TRIAL, "Is trial account"),
)
class InvoicingPlan(object):
MONTHLY = "MONTHLY"
QUARTERLY = "QUARTERLY"
YEARLY = "YEARLY"
CHOICES = (
(MONTHLY, "Monthly"),
(QUARTERLY, "Quarterly"),
(YEARLY, "Yearly")
)
class FeatureType(object):
USER = "User"
SMS = "SMS"
CHOICES = (
(USER, USER),
(SMS, SMS),
)
class SoftwarePlanEdition(object):
COMMUNITY = "Community"
STANDARD = "Standard"
PRO = "Pro"
ADVANCED = "Advanced"
ENTERPRISE = "Enterprise"
RESELLER = "Reseller"
MANAGED_HOSTING = "Managed Hosting"
PAUSED = "Paused"
CHOICES = (
(COMMUNITY, COMMUNITY),
(STANDARD, STANDARD),
(PRO, PRO),
(ADVANCED, ADVANCED),
(ENTERPRISE, ENTERPRISE),
(PAUSED, PAUSED),
(RESELLER, RESELLER),
(MANAGED_HOSTING, MANAGED_HOSTING),
)
SELF_SERVICE_ORDER = [
PAUSED,
COMMUNITY,
STANDARD,
PRO,
ADVANCED,
]
class SoftwarePlanVisibility(object):
PUBLIC = "PUBLIC"
INTERNAL = "INTERNAL"
TRIAL = "TRIAL"
CHOICES = (
(PUBLIC, "Anyone can subscribe"),
(INTERNAL, "Dimagi must create subscription"),
(TRIAL, "This is a Trial Plan"),
)
class CreditAdjustmentReason(object):
DIRECT_PAYMENT = "DIRECT_PAYMENT"
SALESFORCE = "SALESFORCE"
INVOICE = "INVOICE"
LINE_ITEM = "LINE_ITEM"
TRANSFER = "TRANSFER"
MANUAL = "MANUAL"
CHOICES = (
(MANUAL, "manual"),
(SALESFORCE, "via Salesforce"),
(INVOICE, "invoice generated"),
(LINE_ITEM, "line item generated"),
(TRANSFER, "transfer from another credit line"),
(DIRECT_PAYMENT, "payment from client received"),
)
class SubscriptionAdjustmentReason(object):
CREATE = "CREATE"
MODIFY = "MODIFY"
CANCEL = "CANCEL"
UPGRADE = "UPGRADE"
DOWNGRADE = "DOWNGRADE"
SWITCH = "SWITCH"
REACTIVATE = "REACTIVATE"
RENEW = "RENEW"
CHOICES = (
(CREATE, "A new subscription created from scratch."),
(MODIFY, "Some part of the subscription was modified...likely a date."),
(CANCEL, "The subscription was cancelled with no followup subscription."),
(UPGRADE, "The subscription was upgraded to the related subscription."),
(DOWNGRADE, "The subscription was downgraded to the related subscription."),
(SWITCH, "The plan was changed to the related subscription and "
"was neither an upgrade or downgrade."),
(REACTIVATE, "The subscription was reactivated."),
(RENEW, "The subscription was renewed."),
)
class SubscriptionAdjustmentMethod(object):
USER = "USER"
INTERNAL = "INTERNAL"
TASK = "TASK"
TRIAL = "TRIAL"
AUTOMATIC_DOWNGRADE = 'AUTOMATIC_DOWNGRADE'
DEFAULT_COMMUNITY = 'DEFAULT_COMMUNITY'
INVOICING = 'INVOICING'
CHOICES = (
(USER, "User"),
(INTERNAL, "Ops"),
(TASK, "[Deprecated] Task (Invoicing)"),
(TRIAL, "30 Day Trial"),
(AUTOMATIC_DOWNGRADE, "Automatic Downgrade"),
(DEFAULT_COMMUNITY, 'Default to Community'),
(INVOICING, 'Invoicing')
)
class PaymentMethodType(object):
STRIPE = "Stripe"
CHOICES = (
(STRIPE, STRIPE),
)
class SubscriptionType(object):
IMPLEMENTATION = "IMPLEMENTATION"
PRODUCT = "PRODUCT"
TRIAL = "TRIAL"
EXTENDED_TRIAL = "EXTENDED_TRIAL"
SANDBOX = "SANDBOX"
INTERNAL = "INTERNAL"
NOT_SET = "NOT_SET"
CHOICES = (
(IMPLEMENTATION, "Implementation"),
(PRODUCT, "Product"),
(TRIAL, "Trial"),
(EXTENDED_TRIAL, "Extended Trial"),
(SANDBOX, "Sandbox"),
(INTERNAL, "Internal"),
)
class ProBonoStatus(object):
YES = "PRO_BONO"
NO = "FULL_PRICE"
DISCOUNTED = "DISCOUNTED"
CHOICES = (
(NO, "Full Price"),
(DISCOUNTED, "Discounted"),
(YES, "Pro Bono"),
)
class FundingSource(object):
DIMAGI = "DIMAGI"
CLIENT = "CLIENT"
EXTERNAL = "EXTERNAL"
CHOICES = (
(DIMAGI, "Dimagi"),
(CLIENT, "Client Funding"),
(EXTERNAL, "External Funding"),
)
class EntryPoint(object):
CONTRACTED = "CONTRACTED"
SELF_STARTED = "SELF_STARTED"
NOT_SET = "NOT_SET"
CHOICES = (
(CONTRACTED, "Contracted"),
(SELF_STARTED, "Self-started"),
(NOT_SET, "Not Set"),
)
class LastPayment(object):
CC_ONE_TIME = "CC_ONE_TIME"
CC_AUTO = "CC_AUTO"
WIRE = "WIRE"
ACH = "ACH"
OTHER = "OTHER"
BU_PAYMENT = "BU_PAYMENT"
NONE = "NONE"
CHOICES = (
(CC_ONE_TIME, "Credit Card - One Time"),
(CC_AUTO, "Credit Card - Autopay"),
(WIRE, "Wire"),
(ACH, "ACH"),
(OTHER, "Other"),
(BU_PAYMENT, "Payment to local BU"),
(NONE, "None"),
)
class PreOrPostPay(object):
PREPAY = "PREPAY"
POSTPAY = "POSTPAY"
NOT_SET = "NOT_SET"
CHOICES = (
(PREPAY, "Prepay"),
(POSTPAY, "Postpay"),
(NOT_SET, "Not Set"),
)
class Currency(models.Model):
code = models.CharField(max_length=3, unique=True)
name = models.CharField(max_length=25, db_index=True)
symbol = models.CharField(max_length=10)
rate_to_default = models.DecimalField(
default=Decimal('1.0'), max_digits=20,
decimal_places=EXCHANGE_RATE_DECIMAL_PLACES,
)
date_updated = models.DateField(auto_now=True)
class Meta(object):
app_label = 'accounting'
@classmethod
def get_default(cls):
default, _ = cls.objects.get_or_create(code=settings.DEFAULT_CURRENCY)
return default
DEFAULT_ACCOUNT_FORMAT = 'Account for Project %s'
class BillingAccount(ValidateModelMixin, models.Model):
name = models.CharField(max_length=200, db_index=True, unique=True)
salesforce_account_id = models.CharField(
db_index=True,
max_length=80,
blank=True,
null=True,
help_text="This is how we link to the salesforce account",
)
created_by = models.CharField(max_length=80, blank=True)
created_by_domain = models.CharField(max_length=256, null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True)
dimagi_contact = models.EmailField(blank=True)
currency = models.ForeignKey(Currency, on_delete=models.PROTECT)
is_auto_invoiceable = models.BooleanField(default=False)
date_confirmed_extra_charges = models.DateTimeField(null=True, blank=True)
account_type = models.CharField(
max_length=25,
default=BillingAccountType.CONTRACT,
choices=BillingAccountType.CHOICES,
)
is_active = models.BooleanField(default=True)
is_customer_billing_account = models.BooleanField(default=False, db_index=True)
enterprise_admin_emails = ArrayField(models.EmailField(), default=list, blank=True)
enterprise_restricted_signup_domains = ArrayField(models.CharField(max_length=128), default=list, blank=True)
invoicing_plan = models.CharField(
max_length=25,
default=InvoicingPlan.MONTHLY,
choices=InvoicingPlan.CHOICES
)
entry_point = models.CharField(
max_length=25,
default=EntryPoint.NOT_SET,
choices=EntryPoint.CHOICES,
)
auto_pay_user = models.CharField(max_length=80, null=True, blank=True)
last_modified = models.DateTimeField(auto_now=True)
last_payment_method = models.CharField(
max_length=25,
default=LastPayment.NONE,
choices=LastPayment.CHOICES,
)
pre_or_post_pay = models.CharField(
max_length=25,
default=PreOrPostPay.NOT_SET,
choices=PreOrPostPay.CHOICES,
)
restrict_domain_creation = models.BooleanField(default=False)
restrict_signup = models.BooleanField(default=False, db_index=True)
restrict_signup_message = models.CharField(max_length=512, null=True, blank=True)
class Meta(object):
app_label = 'accounting'
@property
def auto_pay_enabled(self):
return self.auto_pay_user is not None
@classmethod
def create_account_for_domain(cls, domain,
created_by=None, account_type=None,
entry_point=None, last_payment_method=None,
pre_or_post_pay=None):
account_type = account_type or BillingAccountType.INVOICE_GENERATED
entry_point = entry_point or EntryPoint.NOT_SET
last_payment_method = last_payment_method or LastPayment.NONE
pre_or_post_pay = pre_or_post_pay or PreOrPostPay.POSTPAY
default_name = DEFAULT_ACCOUNT_FORMAT % domain
name = get_account_name_from_default_name(default_name)
return BillingAccount.objects.create(
name=name,
created_by=created_by,
created_by_domain=domain,
currency=Currency.get_default(),
account_type=account_type,
entry_point=entry_point,
last_payment_method=last_payment_method,
pre_or_post_pay=pre_or_post_pay
)
@classmethod
def get_or_create_account_by_domain(cls, domain,
created_by=None, account_type=None,
entry_point=None, last_payment_method=None,
pre_or_post_pay=None):
account = cls.get_account_by_domain(domain)
if account:
return account, False
return cls.create_account_for_domain(
domain,
created_by=created_by,
account_type=account_type,
entry_point=entry_point,
last_payment_method=last_payment_method,
pre_or_post_pay=pre_or_post_pay,
), True
@classmethod
def get_account_by_domain(cls, domain):
current_subscription = Subscription.get_active_subscription_by_domain(domain)
if current_subscription is not None:
return current_subscription.account
else:
return cls._get_account_by_created_by_domain(domain)
@classmethod
def _get_account_by_created_by_domain(cls, domain):
try:
return cls.objects.get(created_by_domain=domain)
except cls.DoesNotExist:
return None
except cls.MultipleObjectsReturned:
log_accounting_error(
f"Multiple billing accounts showed up for the domain '{domain}'. The "
"latest one was served, but you should reconcile very soon.",
show_stack_trace=True,
)
return cls.objects.filter(created_by_domain=domain).latest('date_created')
@classmethod
@quickcache([], timeout=60 * 60)
def get_enterprise_restricted_signup_accounts(cls):
return BillingAccount.objects.filter(is_customer_billing_account=True, restrict_signup=True)
@property
def autopay_card(self):
if not self.auto_pay_enabled:
return None
return StripePaymentMethod.objects.get(web_user=self.auto_pay_user).get_autopay_card(self)
def has_enterprise_admin(self, email):
return self.is_customer_billing_account and email in self.enterprise_admin_emails
def update_autopay_user(self, new_user, domain):
if self.auto_pay_enabled and new_user != self.auto_pay_user:
self._send_autopay_card_removed_email(new_user=new_user, domain=domain)
self.auto_pay_user = new_user
self.save()
self._send_autopay_card_added_email(domain)
def remove_autopay_user(self):
self.auto_pay_user = None
self.save()
def _send_autopay_card_removed_email(self, new_user, domain):
from corehq.apps.domain.views.accounting import EditExistingBillingAccountView
old_user = self.auto_pay_user
subject = _("Your card is no longer being used to auto-pay for {billing_account}").format(
billing_account=self.name)
old_web_user = WebUser.get_by_username(old_user)
if old_web_user:
old_user_name = old_web_user.first_name
else:
old_user_name = old_user
context = {
'new_user': new_user,
'old_user_name': old_user_name,
'billing_account_name': self.name,
'billing_info_url': absolute_reverse(EditExistingBillingAccountView.urlname,
args=[domain]),
'invoicing_contact_email': settings.INVOICING_CONTACT_EMAIL,
}
send_html_email_async(
subject,
old_user,
render_to_string('accounting/email/autopay_card_removed.html', context),
text_content=strip_tags(render_to_string('accounting/email/autopay_card_removed.html', context)),
)
def _send_autopay_card_added_email(self, domain):
from corehq.apps.domain.views.accounting import EditExistingBillingAccountView
subject = _("Your card is being used to auto-pay for {billing_account}").format(
billing_account=self.name)
web_user = WebUser.get_by_username(self.auto_pay_user)
new_user_name = web_user.first_name if web_user else self.auto_pay_user
try:
last_4 = self.autopay_card.last4
except StripePaymentMethod.DoesNotExist:
last_4 = None
context = {
'name': new_user_name,
'email': self.auto_pay_user,
'domain': domain,
'last_4': last_4,
'billing_account_name': self.name,
'billing_info_url': absolute_reverse(EditExistingBillingAccountView.urlname,
args=[domain]),
'invoicing_contact_email': settings.INVOICING_CONTACT_EMAIL,
}
send_html_email_async(
subject,
self.auto_pay_user,
render_to_string('accounting/email/invoice_autopay_setup.html', context),
text_content=strip_tags(render_to_string('accounting/email/invoice_autopay_setup.html', context)),
)
class BillingContactInfo(models.Model):
account = models.OneToOneField(BillingAccount, primary_key=True, null=False, on_delete=models.CASCADE)
first_name = models.CharField(
max_length=50, null=True, blank=True, verbose_name=_("First Name")
)
last_name = models.CharField(
max_length=50, null=True, blank=True, verbose_name=_("Last Name")
)
email_list = jsonfield.JSONField(
default=list,
verbose_name=_("Contact Emails"),
help_text=_("We will email communications regarding your account "
"to the emails specified here.")
)
phone_number = models.CharField(
max_length=20, null=True, blank=True, verbose_name=_("Phone Number")
)
company_name = models.CharField(
max_length=50, null=True, blank=True,
verbose_name=_("Company / Organization")
)
first_line = models.CharField(
max_length=50, null=False,
verbose_name=_("Address First Line")
)
second_line = models.CharField(
max_length=50, null=True, blank=True,
verbose_name=_("Address Second Line")
)
city = models.CharField(
max_length=50, null=False, verbose_name=_("City")
)
state_province_region = models.CharField(
max_length=50, null=False,
verbose_name=_("State / Province / Region"),
)
postal_code = models.CharField(
max_length=20, null=False, verbose_name=_("Postal Code")
)
country = models.CharField(
max_length=50, null=False, verbose_name=_("Country")
)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
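    # __init__ below normalizes legacy data: older rows created through
    # jsonfield may have stored an empty contact list as the literal string
    # '[]', which is converted back to a real empty list.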
def __init__(self, *args, **kwargs):
super(BillingContactInfo, self).__init__(*args, **kwargs)
if self.email_list == '[]':
self.email_list = []
@property
def full_name(self):
if not self.first_name:
return self.last_name
elif not self.last_name:
return self.first_name
else:
return "%s %s" % (self.first_name, self.last_name)
class SoftwareProductRate(models.Model):
name = models.CharField(max_length=40)
monthly_fee = models.DecimalField(default=Decimal('0.00'), max_digits=10, decimal_places=2)
date_created = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def __str__(self):
return '%s @ $%s /month' % (self.name, self.monthly_fee)
def __eq__(self, other):
if not isinstance(other, self.__class__) or not self.name == other.name:
return False
for field in ['monthly_fee', 'is_active']:
if not getattr(self, field) == getattr(other, field):
return False
return True
@classmethod
def new_rate(cls, product_name, monthly_fee, save=True):
rate = SoftwareProductRate(name=product_name, monthly_fee=monthly_fee)
if save:
rate.save()
return rate
class Feature(models.Model):
name = models.CharField(max_length=40, unique=True)
feature_type = models.CharField(max_length=10, db_index=True, choices=FeatureType.CHOICES)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def __str__(self):
return "Feature '%s' of type '%s'" % (self.name, self.feature_type)
def get_rate(self, default_instance=True):
try:
return self.featurerate_set.filter(is_active=True).latest('date_created')
except FeatureRate.DoesNotExist:
return FeatureRate() if default_instance else None
class FeatureRate(models.Model):
feature = models.ForeignKey(Feature, on_delete=models.PROTECT)
monthly_fee = models.DecimalField(default=Decimal('0.00'), max_digits=10, decimal_places=2,
verbose_name="Monthly Fee")
monthly_limit = models.IntegerField(default=0,
verbose_name="Monthly Included Limit",
validators=integer_field_validators)
per_excess_fee = models.DecimalField(default=Decimal('0.00'), max_digits=10, decimal_places=2,
verbose_name="Fee Per Excess of Limit")
date_created = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def __str__(self):
return '%s @ $%s /month, $%s /excess, limit: %d' % (
self.feature.name, self.monthly_fee, self.per_excess_fee, self.monthly_limit
)
def __eq__(self, other):
if not isinstance(other, self.__class__) or not self.feature.pk == other.feature.pk:
return False
for field in ['monthly_fee', 'monthly_limit', 'per_excess_fee', 'is_active']:
if not getattr(self, field) == getattr(other, field):
return False
return True
@classmethod
def new_rate(cls, feature_name, feature_type,
monthly_fee=None, monthly_limit=None, per_excess_fee=None, save=True):
feature, _ = Feature.objects.get_or_create(name=feature_name, feature_type=feature_type)
rate = FeatureRate(feature=feature)
if monthly_fee is not None:
rate.monthly_fee = monthly_fee
if monthly_limit is not None:
rate.monthly_limit = monthly_limit
if per_excess_fee is not None:
rate.per_excess_fee = per_excess_fee
if save:
rate.save()
return rate
class SoftwarePlan(models.Model):
name = models.CharField(max_length=80, unique=True)
description = models.TextField(blank=True,
help_text="If the visibility is INTERNAL, this description field will be used.")
edition = models.CharField(
max_length=25,
default=SoftwarePlanEdition.ENTERPRISE,
choices=SoftwarePlanEdition.CHOICES,
)
visibility = models.CharField(
max_length=10,
default=SoftwarePlanVisibility.INTERNAL,
choices=SoftwarePlanVisibility.CHOICES,
)
last_modified = models.DateTimeField(auto_now=True)
is_customer_software_plan = models.BooleanField(default=False)
max_domains = models.IntegerField(blank=True, null=True)
is_annual_plan = models.BooleanField(default=False)
class Meta(object):
app_label = 'accounting'
@quickcache(vary_on=['self.pk'], timeout=10)
def get_version(self):
try:
return self.softwareplanversion_set.filter(is_active=True).latest('date_created')
except SoftwarePlanVersion.DoesNotExist:
return None
def at_max_domains(self):
if not self.max_domains:
return False
subscription_count = 0
for version in self.softwareplanversion_set.all():
subscription_count += Subscription.visible_objects.filter(plan_version=version, is_active=True).count()
return subscription_count >= self.max_domains
class DefaultProductPlan(models.Model):
edition = models.CharField(
default=SoftwarePlanEdition.COMMUNITY,
choices=SoftwarePlanEdition.CHOICES,
max_length=25,
)
plan = models.ForeignKey(SoftwarePlan, on_delete=models.PROTECT)
is_trial = models.BooleanField(default=False)
is_report_builder_enabled = models.BooleanField(default=False)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
unique_together = ('edition', 'is_trial', 'is_report_builder_enabled')
@classmethod
@quickcache(['edition', 'is_trial', 'is_report_builder_enabled'],
skip_arg=lambda *args, **kwargs: not settings.ENTERPRISE_MODE or settings.UNIT_TESTING)
def get_default_plan_version(cls, edition=None, is_trial=False,
is_report_builder_enabled=False):
if not edition:
edition = (SoftwarePlanEdition.ENTERPRISE if settings.ENTERPRISE_MODE
else SoftwarePlanEdition.COMMUNITY)
try:
default_product_plan = DefaultProductPlan.objects.select_related('plan').get(
edition=edition, is_trial=is_trial,
is_report_builder_enabled=is_report_builder_enabled
)
return default_product_plan.plan.get_version()
except DefaultProductPlan.DoesNotExist:
raise AccountingError(
"No default product plan was set up, did you forget to run migrations?"
)
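    # get_lowest_edition walks SELF_SERVICE_ORDER from the cheapest edition up
    # and returns the first default plan whose privileges (ignoring report
    # builder add-ons) cover everything in requested_privileges; if none do,
    # it falls back to Enterprise (or None when return_plan=True).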
@classmethod
def get_lowest_edition(cls, requested_privileges, return_plan=False):
for edition in SoftwarePlanEdition.SELF_SERVICE_ORDER:
plan_version = cls.get_default_plan_version(edition)
privileges = get_privileges(plan_version) - REPORT_BUILDER_ADD_ON_PRIVS
if privileges.issuperset(requested_privileges):
return (plan_version if return_plan
else plan_version.plan.edition)
return None if return_plan else SoftwarePlanEdition.ENTERPRISE
class SoftwarePlanVersion(models.Model):
plan = models.ForeignKey(SoftwarePlan, on_delete=models.PROTECT)
product_rate = models.ForeignKey(SoftwareProductRate, on_delete=models.CASCADE)
feature_rates = models.ManyToManyField(FeatureRate, blank=True)
date_created = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True)
role = models.ForeignKey(Role, on_delete=models.CASCADE)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def __str__(self):
return "%(plan_name)s (v%(version_num)d)" % {
'plan_name': self.plan.name,
'version_num': self.version,
}
def save(self, *args, **kwargs):
super(SoftwarePlanVersion, self).save(*args, **kwargs)
SoftwarePlan.get_version.clear(self.plan)
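    # The version number is not stored; it is derived from creation order:
    # (total versions for this plan) minus (versions created after this one)
    # gives this version's 1-based position.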
@property
def version(self):
return (self.plan.softwareplanversion_set.count() -
self.plan.softwareplanversion_set.filter(
date_created__gt=self.date_created).count())
@property
def user_facing_description(self):
from corehq.apps.accounting.user_text import DESC_BY_EDITION, FEATURE_TYPE_TO_NAME
def _default_description(plan, monthly_limit):
if plan.edition in [
SoftwarePlanEdition.COMMUNITY,
SoftwarePlanEdition.STANDARD,
SoftwarePlanEdition.PRO,
SoftwarePlanEdition.ADVANCED,
]:
return DESC_BY_EDITION[plan.edition]['description'].format(monthly_limit)
else:
return DESC_BY_EDITION[plan.edition]['description']
desc = {
'name': self.plan.name,
}
        if (self.plan.visibility in (SoftwarePlanVisibility.PUBLIC, SoftwarePlanVisibility.TRIAL)
                or not self.plan.description):
desc['description'] = _default_description(self.plan, self.user_feature.monthly_limit)
else:
desc['description'] = self.plan.description
desc.update({
'monthly_fee': 'USD %s' % self.product_rate.monthly_fee,
'rates': [{'name': FEATURE_TYPE_TO_NAME[r.feature.feature_type],
'included': 'Infinite' if r.monthly_limit == UNLIMITED_FEATURE_USAGE else r.monthly_limit}
for r in self.feature_rates.all()],
'edition': self.plan.edition,
})
return desc
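    # user_feature picks the USER-type feature rate for this version: the rate
    # with the smallest monthly_limit is kept only when that limit is
    # UNLIMITED_FEATURE_USAGE (-1 sorts first); otherwise the rate with the
    # largest monthly_limit wins. Returns None when no USER rate exists.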
@property
@memoized
def user_feature(self):
user_features = self.feature_rates.filter(feature__feature_type=FeatureType.USER)
try:
user_feature = user_features.order_by('monthly_limit')[0]
if not user_feature.monthly_limit == UNLIMITED_FEATURE_USAGE:
user_feature = user_features.order_by('-monthly_limit')[0]
return user_feature
except IndexError:
pass
@property
def user_limit(self):
if self.user_feature is not None:
return self.user_feature.monthly_limit
return UNLIMITED_FEATURE_USAGE
@property
def user_fee(self):
if self.user_feature is not None:
return "USD %d" % self.user_feature.per_excess_fee
def feature_charges_exist_for_domain(self, domain, start_date=None, end_date=None):
domain_obj = ensure_domain_instance(domain)
if domain_obj is None:
return False
from corehq.apps.accounting.usage import FeatureUsageCalculator
for feature_rate in self.feature_rates.all():
if feature_rate.monthly_limit != UNLIMITED_FEATURE_USAGE:
calc = FeatureUsageCalculator(
feature_rate, domain_obj.name, start_date=start_date,
end_date=end_date
)
if calc.get_usage() > feature_rate.monthly_limit:
return True
return False
@property
def is_paused(self):
return self.plan.edition == SoftwarePlanEdition.PAUSED
class SubscriberManager(models.Manager):
def safe_get(self, *args, **kwargs):
try:
return self.get(*args, **kwargs)
except Subscriber.DoesNotExist:
return None
class Subscriber(models.Model):
domain = models.CharField(max_length=256, unique=True, db_index=True)
last_modified = models.DateTimeField(auto_now=True)
objects = SubscriberManager()
class Meta(object):
app_label = 'accounting'
def __str__(self):
return "DOMAIN %s" % self.domain
def create_subscription(self, new_plan_version, new_subscription, is_internal_change):
assert new_plan_version
assert new_subscription
return self._apply_upgrades_and_downgrades(
new_plan_version=new_plan_version,
new_subscription=new_subscription,
internal_change=is_internal_change,
)
def change_subscription(self, downgraded_privileges, upgraded_privileges, new_plan_version,
old_subscription, new_subscription, internal_change):
return self._apply_upgrades_and_downgrades(
downgraded_privileges=downgraded_privileges,
upgraded_privileges=upgraded_privileges,
new_plan_version=new_plan_version,
old_subscription=old_subscription,
new_subscription=new_subscription,
internal_change=internal_change,
)
def activate_subscription(self, upgraded_privileges, subscription):
return self._apply_upgrades_and_downgrades(
upgraded_privileges=upgraded_privileges,
new_subscription=subscription,
)
def deactivate_subscription(self, downgraded_privileges, upgraded_privileges,
old_subscription, new_subscription):
return self._apply_upgrades_and_downgrades(
downgraded_privileges=downgraded_privileges,
upgraded_privileges=upgraded_privileges,
old_subscription=old_subscription,
new_subscription=new_subscription,
)
def reactivate_subscription(self, new_plan_version, subscription):
return self._apply_upgrades_and_downgrades(
new_plan_version=new_plan_version,
old_subscription=subscription,
new_subscription=subscription,
)
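    # _apply_upgrades_and_downgrades is the single entry point behind the
    # create/change/activate/deactivate/reactivate helpers above: it falls back
    # to the default plan when none is given, computes the privilege diff via
    # get_change_status when not supplied, runs the domain downgrade/upgrade
    # handlers, optionally sends a subscription change alert, and fires the
    # subscription_upgrade_or_downgrade signal.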
def _apply_upgrades_and_downgrades(self, new_plan_version=None,
downgraded_privileges=None,
upgraded_privileges=None,
old_subscription=None,
new_subscription=None,
internal_change=False):
if new_plan_version is None:
new_plan_version = DefaultProductPlan.get_default_plan_version()
if downgraded_privileges is None or upgraded_privileges is None:
change_status_result = get_change_status(None, new_plan_version)
downgraded_privileges = downgraded_privileges or change_status_result.downgraded_privs
upgraded_privileges = upgraded_privileges or change_status_result.upgraded_privs
if downgraded_privileges:
Subscriber._process_downgrade(self.domain, downgraded_privileges, new_plan_version)
if upgraded_privileges:
Subscriber._process_upgrade(self.domain, upgraded_privileges, new_plan_version)
if Subscriber.should_send_subscription_notification(old_subscription, new_subscription):
send_subscription_change_alert(self.domain, new_subscription, old_subscription, internal_change)
subscription_upgrade_or_downgrade.send_robust(None, domain=self.domain)
@staticmethod
def should_send_subscription_notification(old_subscription, new_subscription):
if not old_subscription:
return False
is_new_trial = new_subscription and new_subscription.is_trial
expired_trial = old_subscription.is_trial and not new_subscription
return not is_new_trial and not expired_trial
@staticmethod
def _process_downgrade(domain, downgraded_privileges, new_plan_version):
downgrade_handler = DomainDowngradeActionHandler(
domain, new_plan_version, downgraded_privileges,
)
if not downgrade_handler.get_response():
raise SubscriptionChangeError("The downgrade was not successful.")
@staticmethod
def _process_upgrade(domain, upgraded_privileges, new_plan_version):
upgrade_handler = DomainUpgradeActionHandler(
domain, new_plan_version, upgraded_privileges,
)
if not upgrade_handler.get_response():
raise SubscriptionChangeError("The upgrade was not successful.")
class VisibleSubscriptionManager(models.Manager):
use_in_migrations = True
def get_queryset(self):
return super(VisibleSubscriptionManager, self).get_queryset().filter(is_hidden_to_ops=False)
class DisabledManager(models.Manager):
def get_queryset(self):
raise NotImplementedError
class Subscription(models.Model):
account = models.ForeignKey(BillingAccount, on_delete=models.PROTECT)
plan_version = models.ForeignKey(SoftwarePlanVersion, on_delete=models.PROTECT)
subscriber = models.ForeignKey(Subscriber, on_delete=models.PROTECT)
salesforce_contract_id = models.CharField(blank=True, max_length=80)
date_start = models.DateField()
date_end = models.DateField(blank=True, null=True)
date_created = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=False)
do_not_invoice = models.BooleanField(default=False)
no_invoice_reason = models.CharField(blank=True, max_length=256)
do_not_email_invoice = models.BooleanField(default=False)
do_not_email_reminder = models.BooleanField(default=False)
auto_generate_credits = models.BooleanField(default=False)
is_trial = models.BooleanField(default=False)
skip_invoicing_if_no_feature_charges = models.BooleanField(default=False)
service_type = models.CharField(
max_length=25,
choices=SubscriptionType.CHOICES,
default=SubscriptionType.NOT_SET
)
pro_bono_status = models.CharField(
max_length=25,
choices=ProBonoStatus.CHOICES,
default=ProBonoStatus.NO,
)
funding_source = models.CharField(
max_length=25,
choices=FundingSource.CHOICES,
default=FundingSource.CLIENT
)
last_modified = models.DateTimeField(auto_now=True)
is_hidden_to_ops = models.BooleanField(default=False)
skip_auto_downgrade = models.BooleanField(default=False)
skip_auto_downgrade_reason = models.CharField(blank=True, max_length=256)
visible_objects = VisibleSubscriptionManager()
visible_and_suppressed_objects = models.Manager()
objects = DisabledManager()
class Meta(object):
app_label = 'accounting'
def __str__(self):
return ("Subscription to %(plan_version)s for %(subscriber)s. "
"[%(date_start)s - %(date_end)s]" % {
'plan_version': self.plan_version,
'subscriber': self.subscriber,
'date_start': self.date_start.strftime(USER_DATE_FORMAT),
'date_end': (self.date_end.strftime(USER_DATE_FORMAT)
if self.date_end is not None else "--"),
})
def __eq__(self, other):
return (
other is not None
and other.__class__.__name__ == self.__class__.__name__
and other.plan_version.pk == self.plan_version.pk
and other.date_start == self.date_start
and other.date_end == self.date_end
and other.subscriber.pk == self.subscriber.pk
and other.account.pk == self.account.pk
)
def save(self, *args, **kwargs):
from corehq.apps.accounting.mixins import get_overdue_invoice
super(Subscription, self).save(*args, **kwargs)
Subscription._get_active_subscription_by_domain.clear(Subscription, self.subscriber.domain)
get_overdue_invoice.clear(self.subscriber.domain)
domain = Domain.get_by_name(self.subscriber.domain)
        # if the domain document no longer exists, we don't care that the pillow won't be updated
if domain:
publish_domain_saved(domain)
def delete(self, *args, **kwargs):
super(Subscription, self).delete(*args, **kwargs)
Subscription._get_active_subscription_by_domain.clear(Subscription, self.subscriber.domain)
@property
def is_community(self):
return self.plan_version.plan.edition == SoftwarePlanEdition.COMMUNITY
@property
def allowed_attr_changes(self):
return ['do_not_invoice', 'no_invoice_reason',
'salesforce_contract_id', 'skip_auto_downgrade']
@property
def next_subscription_filter(self):
return (Subscription.visible_objects.
filter(subscriber=self.subscriber, date_start__gt=self.date_start).
exclude(pk=self.pk).
filter(Q(date_end__isnull=True) | ~Q(date_start=F('date_end'))))
@property
def previous_subscription_filter(self):
return Subscription.visible_objects.filter(
subscriber=self.subscriber,
date_start__lt=self.date_start - datetime.timedelta(days=1)
).exclude(pk=self.pk)
@property
def is_renewed(self):
return self.next_subscription_filter.exists()
@property
def next_subscription(self):
try:
return self.next_subscription_filter.order_by('date_start')[0]
except (Subscription.DoesNotExist, IndexError):
return None
@property
def previous_subscription(self):
try:
return self.previous_subscription_filter.order_by('-date_end')[0]
except (Subscription.DoesNotExist, IndexError):
return None
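    # raise_conflicting_dates raises SubscriptionAdjustmentError when the
    # proposed [date_start, date_end) window overlaps any other visible,
    # non-zero-length subscription for the same subscriber; open-ended
    # subscriptions (no date_end) are treated as extending indefinitely.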
def raise_conflicting_dates(self, date_start, date_end):
assert date_start is not None
for sub in Subscription.visible_objects.filter(
Q(date_end__isnull=True) | Q(date_end__gt=F('date_start')),
subscriber=self.subscriber,
).exclude(
id=self.id,
):
related_has_no_end = sub.date_end is None
current_has_no_end = date_end is None
start_before_related_end = sub.date_end is not None and date_start < sub.date_end
start_before_related_start = date_start < sub.date_start
start_after_related_start = date_start > sub.date_start
end_before_related_end = (
date_end is not None and sub.date_end is not None
and date_end < sub.date_end
)
end_after_related_end = (
date_end is not None and sub.date_end is not None
and date_end > sub.date_end
)
end_after_related_start = date_end is not None and date_end > sub.date_start
if (
(start_before_related_end and start_after_related_start)
or (start_after_related_start and related_has_no_end)
or (end_after_related_start and end_before_related_end)
or (end_after_related_start and related_has_no_end)
or (start_before_related_start and end_after_related_end)
or (start_before_related_end and current_has_no_end)
or (current_has_no_end and related_has_no_end)
):
raise SubscriptionAdjustmentError(
"The start date of %(start_date)s conflicts with the "
"subscription dates to %(related_sub)s." % {
'start_date': self.date_start.strftime(USER_DATE_FORMAT),
'related_sub': sub,
}
)
def update_subscription(self, date_start, date_end,
do_not_invoice=None,
no_invoice_reason=None, do_not_email_invoice=None,
do_not_email_reminder=None, salesforce_contract_id=None,
auto_generate_credits=None,
web_user=None, note=None, adjustment_method=None,
service_type=None, pro_bono_status=None, funding_source=None,
skip_invoicing_if_no_feature_charges=None, skip_auto_downgrade=None,
skip_auto_downgrade_reason=None):
adjustment_method = adjustment_method or SubscriptionAdjustmentMethod.INTERNAL
self._update_dates(date_start, date_end)
self._update_properties(
do_not_invoice=do_not_invoice,
no_invoice_reason=no_invoice_reason,
skip_invoicing_if_no_feature_charges=skip_invoicing_if_no_feature_charges,
do_not_email_invoice=do_not_email_invoice,
do_not_email_reminder=do_not_email_reminder,
auto_generate_credits=auto_generate_credits,
salesforce_contract_id=salesforce_contract_id,
service_type=service_type,
pro_bono_status=pro_bono_status,
funding_source=funding_source,
skip_auto_downgrade=skip_auto_downgrade,
skip_auto_downgrade_reason=skip_auto_downgrade_reason,
)
self.save()
SubscriptionAdjustment.record_adjustment(
self, method=adjustment_method, note=note, web_user=web_user,
reason=SubscriptionAdjustmentReason.MODIFY
)
def _update_dates(self, date_start, date_end):
if not date_start:
raise SubscriptionAdjustmentError('Start date must be provided')
if date_end is not None and date_start > date_end:
raise SubscriptionAdjustmentError(
"Can't have a subscription start after the end date."
)
self.raise_conflicting_dates(date_start, date_end)
self.date_start = date_start
self.date_end = date_end
is_active_dates = is_active_subscription(self.date_start, self.date_end)
if self.is_active != is_active_dates:
if is_active_dates:
self.is_active = True
self.subscriber.activate_subscription(get_privileges(self.plan_version), self)
else:
raise SubscriptionAdjustmentError(
'Cannot deactivate a subscription here. Cancel subscription instead.'
)
def _update_properties(self, **kwargs):
property_names = {
'do_not_invoice',
'no_invoice_reason',
'skip_invoicing_if_no_feature_charges',
'do_not_email_invoice',
'do_not_email_reminder',
'auto_generate_credits',
'salesforce_contract_id',
'service_type',
'pro_bono_status',
'funding_source',
'skip_auto_downgrade',
'skip_auto_downgrade_reason',
}
assert property_names >= set(kwargs.keys())
for property_name, property_value in kwargs.items():
if property_value is not None:
setattr(self, property_name, property_value)
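    # change_plan atomically ends this subscription today, creates a new active
    # subscription on the new plan version starting today (inheriting invoicing
    # flags unless overridden), applies the privilege upgrades/downgrades via
    # the subscriber, optionally transfers remaining credit lines, and records
    # adjustments against both the old and the new subscription.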
@transaction.atomic
def change_plan(self, new_plan_version, date_end=None,
note=None, web_user=None, adjustment_method=None,
service_type=None, pro_bono_status=None, funding_source=None,
transfer_credits=True, internal_change=False, account=None,
do_not_invoice=None, no_invoice_reason=None,
auto_generate_credits=False, is_trial=False):
from corehq.apps.analytics.tasks import track_workflow
adjustment_method = adjustment_method or SubscriptionAdjustmentMethod.INTERNAL
today = datetime.date.today()
assert self.is_active
assert date_end is None or date_end >= today
if new_plan_version.plan.at_max_domains() and self.plan_version.plan != new_plan_version.plan:
raise SubscriptionAdjustmentError(
'The maximum number of project spaces has been reached for %(new_plan_version)s. ' % {
'new_plan_version': new_plan_version,
}
)
self.date_end = today
self.is_active = False
self.save()
new_subscription = Subscription(
account=account if account else self.account,
plan_version=new_plan_version,
subscriber=self.subscriber,
salesforce_contract_id=self.salesforce_contract_id,
date_start=today,
date_end=date_end,
is_active=True,
do_not_invoice=do_not_invoice if do_not_invoice is not None else self.do_not_invoice,
no_invoice_reason=no_invoice_reason if no_invoice_reason is not None else self.no_invoice_reason,
auto_generate_credits=auto_generate_credits,
is_trial=is_trial,
service_type=(service_type or SubscriptionType.NOT_SET),
pro_bono_status=(pro_bono_status or ProBonoStatus.NO),
funding_source=(funding_source or FundingSource.CLIENT),
skip_auto_downgrade=False,
skip_auto_downgrade_reason='',
)
new_subscription.save()
new_subscription.raise_conflicting_dates(new_subscription.date_start, new_subscription.date_end)
new_subscription.set_billing_account_entry_point()
change_status_result = get_change_status(self.plan_version, new_plan_version)
self.subscriber.change_subscription(
downgraded_privileges=change_status_result.downgraded_privs,
upgraded_privileges=change_status_result.upgraded_privs,
new_plan_version=new_plan_version,
old_subscription=self,
new_subscription=new_subscription,
internal_change=internal_change,
)
if transfer_credits:
self.transfer_credits(new_subscription)
SubscriptionAdjustment.record_adjustment(
self, method=adjustment_method, note=note, web_user=web_user,
reason=change_status_result.adjustment_reason, related_subscription=new_subscription
)
SubscriptionAdjustment.record_adjustment(
new_subscription, method=adjustment_method, note=note, web_user=web_user,
reason=SubscriptionAdjustmentReason.CREATE
)
upgrade_reasons = [SubscriptionAdjustmentReason.UPGRADE, SubscriptionAdjustmentReason.CREATE]
if web_user and adjustment_method == SubscriptionAdjustmentMethod.USER:
if change_status_result.adjustment_reason in upgrade_reasons:
track_workflow(web_user, 'Changed Plan: Upgrade')
if change_status_result.adjustment_reason == SubscriptionAdjustmentReason.DOWNGRADE:
track_workflow(web_user, 'Changed Plan: Downgrade')
return new_subscription
def reactivate_subscription(self, date_end=None, note=None, web_user=None,
adjustment_method=None, **kwargs):
adjustment_method = adjustment_method or SubscriptionAdjustmentMethod.INTERNAL
self.date_end = date_end
self.is_active = True
for allowed_attr in self.allowed_attr_changes:
if allowed_attr in kwargs:
setattr(self, allowed_attr, kwargs[allowed_attr])
self.save()
self.subscriber.reactivate_subscription(
new_plan_version=self.plan_version,
subscription=self,
)
SubscriptionAdjustment.record_adjustment(
self, reason=SubscriptionAdjustmentReason.REACTIVATE,
method=adjustment_method, note=note, web_user=web_user,
)
def renew_subscription(self, note=None, web_user=None,
adjustment_method=None,
service_type=None, pro_bono_status=None,
funding_source=None, new_version=None):
adjustment_method = adjustment_method or SubscriptionAdjustmentMethod.INTERNAL
if self.date_end is None:
raise SubscriptionRenewalError(
"Cannot renew a subscription with no date_end set."
)
if new_version is None:
current_privileges = get_privileges(self.plan_version)
new_version = DefaultProductPlan.get_lowest_edition(
current_privileges, return_plan=True,
)
if new_version is None:
raise SubscriptionRenewalError(
"There was an issue renewing your subscription. Someone "
"from Dimagi will get back to you shortly."
)
renewed_subscription = Subscription(
account=self.account,
plan_version=new_version,
subscriber=self.subscriber,
salesforce_contract_id=self.salesforce_contract_id,
date_start=self.date_end,
date_end=None,
)
if service_type is not None:
renewed_subscription.service_type = service_type
if pro_bono_status is not None:
renewed_subscription.pro_bono_status = pro_bono_status
if funding_source is not None:
renewed_subscription.funding_source = funding_source
if datetime.date.today() == self.date_end:
renewed_subscription.is_active = True
renewed_subscription.save()
SubscriptionAdjustment.record_adjustment(
self, method=adjustment_method, note=note, web_user=web_user,
reason=SubscriptionAdjustmentReason.RENEW,
)
return renewed_subscription
def transfer_credits(self, subscription=None):
if subscription is not None and self.account.pk != subscription.account.pk:
raise CreditLineError(
"Can only transfer subscription credits under the same "
"Billing Account."
)
source_credits = CreditLine.objects.filter(
account=self.account,
subscription=self,
).all()
for credit_line in source_credits:
transferred_credit = CreditLine.add_credit(
credit_line.balance,
account=self.account,
subscription=subscription,
feature_type=credit_line.feature_type,
is_product=credit_line.is_product,
related_credit=credit_line
)
credit_line.is_active = False
credit_line.adjust_credit_balance(
credit_line.balance * Decimal('-1'),
related_credit=transferred_credit,
)
def send_ending_reminder_email(self):
if self.date_end is None:
raise SubscriptionReminderError(
"This subscription has no end date."
)
today = datetime.date.today()
num_days_left = (self.date_end - today).days
domain_name = self.subscriber.domain
context = self.ending_reminder_context
subject = context['subject']
template = self.ending_reminder_email_html
template_plaintext = self.ending_reminder_email_text
email_html = render_to_string(template, context)
email_plaintext = render_to_string(template_plaintext, context)
bcc = [settings.ACCOUNTS_EMAIL] if not self.is_trial else []
if self.account.dimagi_contact is not None:
bcc.append(self.account.dimagi_contact)
for email in self._reminder_email_contacts(domain_name):
send_html_email_async.delay(
subject, email, email_html,
text_content=email_plaintext,
email_from=get_dimagi_from_email(),
bcc=bcc,
)
log_accounting_info(
"Sent %(days_left)s-day subscription reminder "
"email for %(domain)s to %(email)s." % {
'days_left': num_days_left,
'domain': domain_name,
'email': email,
}
)
@property
def ending_reminder_email_html(self):
if self.account.is_customer_billing_account:
return 'accounting/email/customer_subscription_ending_reminder.html'
elif self.is_trial:
return 'accounting/email/trial_ending_reminder.html'
else:
return 'accounting/email/subscription_ending_reminder.html'
@property
def ending_reminder_email_text(self):
if self.account.is_customer_billing_account:
return 'accounting/email/customer_subscription_ending_reminder.txt'
elif self.is_trial:
return 'accounting/email/trial_ending_reminder.txt'
else:
return 'accounting/email/subscription_ending_reminder.txt'
@property
def ending_reminder_context(self):
from corehq.apps.domain.views.accounting import DomainSubscriptionView
today = datetime.date.today()
num_days_left = (self.date_end - today).days
if num_days_left == 1:
ending_on = _("tomorrow!")
else:
ending_on = _("on %s." % self.date_end.strftime(USER_DATE_FORMAT))
user_desc = self.plan_version.user_facing_description
plan_name = user_desc['name']
domain_name = self.subscriber.domain
context = {
'domain': domain_name,
'plan_name': plan_name,
'account': self.account.name,
'ending_on': ending_on,
'subscription_url': absolute_reverse(
DomainSubscriptionView.urlname, args=[self.subscriber.domain]),
'base_url': get_site_domain(),
'invoicing_contact_email': settings.INVOICING_CONTACT_EMAIL,
'sales_email': settings.SALES_EMAIL,
}
if self.account.is_customer_billing_account:
subject = _(
"CommCare Alert: %(account_name)s's subscription to "
"%(plan_name)s ends %(ending_on)s"
) % {
'account_name': self.account.name,
'plan_name': plan_name,
'ending_on': ending_on,
}
elif self.is_trial:
subject = _("CommCare Alert: 30 day trial for '%(domain)s' "
"ends %(ending_on)s") % {
'domain': domain_name,
'ending_on': ending_on,
}
else:
subject = _(
"CommCare Alert: %(domain)s's subscription to "
"%(plan_name)s ends %(ending_on)s"
) % {
'plan_name': plan_name,
'domain': domain_name,
'ending_on': ending_on,
}
context.update({'subject': subject})
return context
def send_dimagi_ending_reminder_email(self):
if self.date_end is None:
raise SubscriptionReminderError(
"This subscription has no end date."
)
        if not self.account.dimagi_contact:
raise SubscriptionReminderError(
"This subscription has no Dimagi contact."
)
subject = self.dimagi_ending_reminder_subject
context = self.dimagi_ending_reminder_context
email_html = render_to_string(self.dimagi_ending_reminder_email_html, context)
email_plaintext = render_to_string(self.dimagi_ending_reminder_email_text, context)
send_html_email_async.delay(
subject, self.account.dimagi_contact, email_html,
text_content=email_plaintext,
email_from=settings.DEFAULT_FROM_EMAIL,
)
@property
def dimagi_ending_reminder_email_html(self):
if self.account.is_customer_billing_account:
return 'accounting/email/customer_subscription_ending_reminder_dimagi.html'
else:
return 'accounting/email/subscription_ending_reminder_dimagi.html'
@property
def dimagi_ending_reminder_email_text(self):
if self.account.is_customer_billing_account:
return 'accounting/email/customer_subscription_ending_reminder_dimagi.txt'
else:
return 'accounting/email/subscription_ending_reminder_dimagi.txt'
@property
def dimagi_ending_reminder_subject(self):
if self.account.is_customer_billing_account:
return "Alert: {account}'s subscriptions are ending on {end_date}".format(
account=self.account.name,
end_date=self.date_end.strftime(USER_DATE_FORMAT))
else:
return "Alert: {domain}'s subscription is ending on {end_date}".format(
domain=self.subscriber.domain,
end_date=self.date_end.strftime(USER_DATE_FORMAT))
@property
def dimagi_ending_reminder_context(self):
end_date = self.date_end.strftime(USER_DATE_FORMAT)
email = self.account.dimagi_contact
if self.account.is_customer_billing_account:
account = self.account.name
plan = self.plan_version.plan.edition
context = {
'account': account,
'plan': plan,
'end_date': end_date,
'client_reminder_email_date': (self.date_end - datetime.timedelta(days=30)).strftime(
USER_DATE_FORMAT),
'contacts': ', '.join(self._reminder_email_contacts(self.subscriber.domain)),
'dimagi_contact': email,
'accounts_email': settings.ACCOUNTS_EMAIL
}
else:
domain = self.subscriber.domain
context = {
'domain': domain,
'end_date': end_date,
'client_reminder_email_date': (self.date_end - datetime.timedelta(days=30)).strftime(
USER_DATE_FORMAT),
'contacts': ', '.join(self._reminder_email_contacts(domain)),
'dimagi_contact': email,
}
return context
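    # Reminder emails go to the domain's web admins and the Dimagi users on the
    # domain; for non-trial subscriptions the account's billing contact emails
    # are added as well (soft-asserting when none are on file), plus the
    # enterprise admin emails when this is a customer billing account.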
def _reminder_email_contacts(self, domain_name):
emails = {a.username for a in WebUser.get_admins_by_domain(domain_name)}
emails |= {e for e in WebUser.get_dimagi_emails_by_domain(domain_name)}
if not self.is_trial:
billing_contact_emails = (
self.account.billingcontactinfo.email_list
if BillingContactInfo.objects.filter(account=self.account).exists() else []
)
if not billing_contact_emails:
from corehq.apps.accounting.views import ManageBillingAccountView
_soft_assert_contact_emails_missing(
False,
'Billing Account for project %s is missing client contact emails: %s' % (
domain_name,
absolute_reverse(ManageBillingAccountView.urlname, args=[self.account.id])
)
)
emails |= {billing_contact_email for billing_contact_email in billing_contact_emails}
if self.account.is_customer_billing_account:
enterprise_admin_emails = self.account.enterprise_admin_emails
emails |= {enterprise_admin_email for enterprise_admin_email in enterprise_admin_emails}
return emails
def set_billing_account_entry_point(self):
no_current_entry_point = self.account.entry_point == EntryPoint.NOT_SET
self_serve = self.service_type == SubscriptionType.PRODUCT
if no_current_entry_point and self_serve and not self.is_trial:
self.account.entry_point = EntryPoint.SELF_STARTED
self.account.save()
@classmethod
def get_active_subscription_by_domain(cls, domain_name_or_obj):
if settings.ENTERPRISE_MODE:
return None
if isinstance(domain_name_or_obj, Domain):
return cls._get_active_subscription_by_domain(domain_name_or_obj.name)
return cls._get_active_subscription_by_domain(domain_name_or_obj)
@classmethod
@quickcache(['domain_name'], timeout=60 * 60)
def _get_active_subscription_by_domain(cls, domain_name):
try:
return cls.visible_objects.select_related(
'plan_version__role'
).get(
is_active=True,
subscriber__domain=domain_name,
)
except cls.DoesNotExist:
return None
@classmethod
def get_subscribed_plan_by_domain(cls, domain):
domain_obj = ensure_domain_instance(domain)
if domain_obj is None:
try:
return DefaultProductPlan.get_default_plan_version()
except DefaultProductPlan.DoesNotExist:
raise ProductPlanNotFoundError
else:
active_subscription = cls.get_active_subscription_by_domain(domain_obj.name)
if active_subscription is not None:
return active_subscription.plan_version
else:
return DefaultProductPlan.get_default_plan_version()
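    # new_domain_subscription validates that the plan has room for another
    # project space, that the plan and account agree on being customer-level,
    # and that no existing subscription for the subscriber overlaps the
    # requested dates; if an identical subscription ended exactly on date_start
    # it is reactivated instead of creating a new row.
    #
    # A minimal usage sketch (the domain name and edition below are only
    # illustrative, not taken from this module):
    #
    #   account, _ = BillingAccount.get_or_create_account_by_domain('example-project')
    #   plan_version = DefaultProductPlan.get_default_plan_version(
    #       edition=SoftwarePlanEdition.STANDARD)
    #   Subscription.new_domain_subscription(account, 'example-project', plan_version)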
@classmethod
def new_domain_subscription(cls, account, domain, plan_version,
date_start=None, date_end=None, note=None,
web_user=None, adjustment_method=None, internal_change=False,
**kwargs):
if plan_version.plan.at_max_domains():
raise NewSubscriptionError(
'The maximum number of project spaces has been reached for %(plan_version)s. ' % {
'plan_version': plan_version,
}
)
if plan_version.plan.is_customer_software_plan != account.is_customer_billing_account:
if plan_version.plan.is_customer_software_plan:
raise NewSubscriptionError(
'You are trying to add a Customer Software Plan to a regular Billing Account. '
'Both or neither must be customer-level.'
)
else:
raise NewSubscriptionError(
'You are trying to add a regular Software Plan to a Customer Billing Account. '
'Both or neither must be customer-level.'
)
subscriber = Subscriber.objects.get_or_create(domain=domain)[0]
today = datetime.date.today()
date_start = date_start or today
available_subs = Subscription.visible_objects.filter(
subscriber=subscriber,
)
future_subscription_no_end = available_subs.filter(
date_end__exact=None,
)
if date_end is not None:
future_subscription_no_end = future_subscription_no_end.filter(date_start__lt=date_end)
if future_subscription_no_end.count() > 0:
raise NewSubscriptionError(_(
"There is already a subscription '%s' with no end date "
"that conflicts with the start and end dates of this "
"subscription.") %
future_subscription_no_end.latest('date_created')
)
future_subscriptions = available_subs.filter(
date_end__gt=date_start
)
if date_end is not None:
future_subscriptions = future_subscriptions.filter(date_start__lt=date_end)
if future_subscriptions.count() > 0:
raise NewSubscriptionError(str(
_(
"There is already a subscription '%(sub)s' that has an end date "
"that conflicts with the start and end dates of this "
"subscription %(start)s - %(end)s."
) % {
'sub': future_subscriptions.latest('date_created'),
'start': date_start,
'end': date_end
}
))
can_reactivate, last_subscription = cls.can_reactivate_domain_subscription(
account, domain, plan_version, date_start=date_start
)
if can_reactivate:
last_subscription.reactivate_subscription(
date_end=date_end, note=note, web_user=web_user,
adjustment_method=adjustment_method,
**kwargs
)
return last_subscription
adjustment_method = adjustment_method or SubscriptionAdjustmentMethod.INTERNAL
subscription = Subscription.visible_objects.create(
account=account,
plan_version=plan_version,
subscriber=subscriber,
date_start=date_start,
date_end=date_end,
**kwargs
)
subscription.is_active = is_active_subscription(date_start, date_end)
if subscription.is_active:
subscriber.create_subscription(
new_plan_version=plan_version,
new_subscription=subscription,
is_internal_change=internal_change,
)
SubscriptionAdjustment.record_adjustment(
subscription, method=adjustment_method, note=note,
web_user=web_user
)
subscription.save()
subscription.set_billing_account_entry_point()
return subscription
@classmethod
def can_reactivate_domain_subscription(cls, account, domain, plan_version,
date_start=None):
subscriber = Subscriber.objects.get_or_create(domain=domain)[0]
date_start = date_start or datetime.date.today()
last_subscription = Subscription.visible_objects.filter(
subscriber=subscriber, date_end=date_start
)
if not last_subscription.exists():
return False, None
last_subscription = last_subscription.latest('date_created')
return (
last_subscription.account.pk == account.pk and
last_subscription.plan_version.pk == plan_version.pk
), last_subscription
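    # A paid (non-trial) subscription started on or after 2018-09-05 counts as
    # "below minimum" until MINIMUM_SUBSCRIPTION_LENGTH (30) days have elapsed;
    # trials and older subscriptions are exempt.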
@property
def is_below_minimum_subscription(self):
if self.is_trial:
return False
elif self.date_start < datetime.date(2018, 9, 5):
return False
elif self.date_start + datetime.timedelta(days=MINIMUM_SUBSCRIPTION_LENGTH) >= datetime.date.today():
return True
else:
return False
def user_can_change_subscription(self, user):
if user.is_superuser:
return True
elif self.account.is_customer_billing_account:
return self.account.has_enterprise_admin(user.email)
else:
return True
class InvoiceBaseManager(models.Manager):
def get_queryset(self):
return super(InvoiceBaseManager, self).get_queryset().filter(is_hidden_to_ops=False)
class InvoiceBase(models.Model):
date_created = models.DateTimeField(auto_now_add=True)
is_hidden = models.BooleanField(default=False)
tax_rate = models.DecimalField(default=Decimal('0.0000'), max_digits=10, decimal_places=4)
balance = models.DecimalField(default=Decimal('0.0000'), max_digits=10, decimal_places=4)
date_due = models.DateField(db_index=True, null=True)
date_paid = models.DateField(blank=True, null=True)
date_start = models.DateField()
date_end = models.DateField()
is_hidden_to_ops = models.BooleanField(default=False)
last_modified = models.DateTimeField(auto_now=True)
objects = InvoiceBaseManager()
api_objects = Manager()
class Meta(object):
abstract = True
@property
def is_customer_invoice(self):
return False
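    # The human-facing invoice number is the configured prefix plus the primary
    # key offset by INVOICE_STARTING_NUMBER, so numbering can start above 1
    # without renumbering existing rows.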
@property
def invoice_number(self):
ops_num = settings.INVOICE_STARTING_NUMBER + self.id
return "%s%d" % (settings.INVOICE_PREFIX, ops_num)
@property
def is_wire(self):
return False
def get_domain(self):
raise NotImplementedError()
@property
def account(self):
raise NotImplementedError()
@property
def is_paid(self):
return bool(self.date_paid)
@property
def email_recipients(self):
raise NotImplementedError
class WireInvoice(InvoiceBase):
domain = models.CharField(max_length=100)
class Meta(object):
app_label = 'accounting'
@property
@memoized
def account(self):
return BillingAccount.get_account_by_domain(self.domain)
@property
def subtotal(self):
return self.balance
@property
def is_wire(self):
return True
@property
def is_prepayment(self):
return False
def get_domain(self):
return self.domain
def get_total(self):
return self.balance
@property
def email_recipients(self):
try:
original_record = WireBillingRecord.objects.filter(invoice=self).order_by('-date_created')[0]
return original_record.emailed_to_list
except IndexError:
log_accounting_error(
"Strange that WireInvoice %d has no associated WireBillingRecord. "
"Should investigate."
% self.id
)
return []
class WirePrepaymentInvoice(WireInvoice):
class Meta(object):
app_label = 'accounting'
proxy = True
items = []
@property
def is_prepayment(self):
return True
class Invoice(InvoiceBase):
subscription = models.ForeignKey(Subscription, on_delete=models.PROTECT)
class Meta(object):
app_label = 'accounting'
def save(self, *args, **kwargs):
from corehq.apps.accounting.mixins import get_overdue_invoice
super(Invoice, self).save(*args, **kwargs)
get_overdue_invoice.clear(self.subscription.subscriber.domain)
@property
def email_recipients(self):
if self.subscription.service_type == SubscriptionType.IMPLEMENTATION:
return [settings.ACCOUNTS_EMAIL]
else:
return self.contact_emails
@property
def contact_emails(self):
try:
billing_contact_info = BillingContactInfo.objects.get(account=self.account)
contact_emails = billing_contact_info.email_list
except BillingContactInfo.DoesNotExist:
contact_emails = []
if not contact_emails:
from corehq.apps.accounting.views import ManageBillingAccountView
admins = WebUser.get_admins_by_domain(self.get_domain())
contact_emails = [admin.email if admin.email else admin.username for admin in admins]
if not settings.UNIT_TESTING:
_soft_assert_contact_emails_missing(
False,
"Could not find an email to send the invoice "
"email to for the domain %s. Sending to domain admins instead: %s."
" Add client contact emails here: %s" % (
self.get_domain(),
', '.join(contact_emails),
absolute_reverse(ManageBillingAccountView.urlname, args=[self.account.id]),
)
)
return contact_emails
@property
def subtotal(self):
if self.lineitem_set.count() == 0:
return Decimal('0.0000')
return sum([line_item.total for line_item in self.lineitem_set.all()])
@property
def applied_tax(self):
return Decimal('%.4f' % round(self.tax_rate * self.subtotal, 4))
@property
@memoized
def account(self):
return self.subscription.account
@property
def applied_credit(self):
if self.creditadjustment_set.count() == 0:
return Decimal('0.0000')
return sum([credit.amount for credit in self.creditadjustment_set.all()])
def get_total(self):
return self.subtotal + self.applied_tax + self.applied_credit
def update_balance(self):
self.balance = self.get_total()
if self.balance <= 0:
self.date_paid = datetime.date.today()
else:
self.date_paid = None
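    # calculate_credit_adjustments first has every line item apply its own
    # credit adjustments, then applies the credit lines available to this
    # invoice toward the resulting total.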
def calculate_credit_adjustments(self):
for line_item in self.lineitem_set.all():
line_item.calculate_credit_adjustments()
current_total = self.get_total()
credit_lines = CreditLine.get_credits_for_invoice(self)
CreditLine.apply_credits_toward_balance(credit_lines, current_total, invoice=self)
@classmethod
def exists_for_domain(cls, domain):
return cls.objects.filter(
subscription__subscriber__domain=domain, is_hidden=False
).count() > 0
def get_domain(self):
return self.subscription.subscriber.domain
@classmethod
def autopayable_invoices(cls, date_due):
invoices = cls.objects.select_related('subscription__account').filter(
date_due=date_due,
is_hidden=False,
subscription__account__auto_pay_user__isnull=False,
)
return invoices
def pay_invoice(self, payment_record):
CreditLine.make_payment_towards_invoice(
invoice=self,
payment_record=payment_record,
)
self.update_balance()
self.save()
class CustomerInvoice(InvoiceBase):
account = models.ForeignKey(BillingAccount, on_delete=models.PROTECT)
subscriptions = models.ManyToManyField(Subscription, default=list, blank=True)
class Meta(object):
app_label = 'accounting'
@property
def is_customer_invoice(self):
return True
def get_domain(self):
return None
@property
def email_recipients(self):
try:
billing_contact_info = BillingContactInfo.objects.get(account=self.account)
contact_emails = billing_contact_info.email_list
except BillingContactInfo.DoesNotExist:
contact_emails = []
return contact_emails
@property
def contact_emails(self):
return self.account.enterprise_admin_emails
@property
def subtotal(self):
if self.lineitem_set.count() == 0:
return Decimal('0.0000')
return sum([line_item.total for line_item in self.lineitem_set.all()])
@property
def applied_tax(self):
return Decimal('%.4f' % round(self.tax_rate * self.subtotal, 4))
@property
def applied_credit(self):
if self.creditadjustment_set.count() == 0:
return Decimal('0.0000')
return sum([credit.amount for credit in self.creditadjustment_set.all()])
def get_total(self):
return self.subtotal + self.applied_tax + self.applied_credit
def update_balance(self):
self.balance = self.get_total()
if self.balance <= 0:
self.date_paid = datetime.date.today()
else:
self.date_paid = None
def calculate_credit_adjustments(self):
for line_item in self.lineitem_set.all():
line_item.calculate_credit_adjustments()
current_total = self.get_total()
credit_lines = CreditLine.get_credits_for_customer_invoice(self)
CreditLine.apply_credits_toward_balance(credit_lines, current_total, customer_invoice=self)
def pay_invoice(self, payment_record):
CreditLine.make_payment_towards_invoice(
invoice=self,
payment_record=payment_record,
)
self.update_balance()
self.save()
@classmethod
def exists_for_domain(cls, domain):
        for invoice in cls.objects.filter(is_hidden=False):
            # A queryset has no `subscriptions` attribute; check each
            # invoice's linked subscriptions for the domain instead.
            if invoice.subscriptions.filter(subscriber__domain=domain).exists():
                return True
        return False
@classmethod
def autopayable_invoices(cls, date_due):
invoices = cls.objects.select_related('account').filter(
date_due=date_due,
is_hidden=False,
account__auto_pay_user__isnull=False
)
return invoices
class SubscriptionAdjustment(models.Model):
subscription = models.ForeignKey(Subscription, on_delete=models.PROTECT)
reason = models.CharField(max_length=50, default=SubscriptionAdjustmentReason.CREATE,
choices=SubscriptionAdjustmentReason.CHOICES)
method = models.CharField(max_length=50, default=SubscriptionAdjustmentMethod.INTERNAL,
choices=SubscriptionAdjustmentMethod.CHOICES)
note = models.TextField(null=True)
web_user = models.CharField(max_length=80, null=True)
invoice = models.ForeignKey(Invoice, on_delete=models.PROTECT, null=True)
related_subscription = models.ForeignKey(Subscription, on_delete=models.PROTECT, null=True,
related_name='subscriptionadjustment_related')
date_created = models.DateTimeField(auto_now_add=True)
new_date_start = models.DateField()
new_date_end = models.DateField(blank=True, null=True)
new_date_delay_invoicing = models.DateField(blank=True, null=True)
new_salesforce_contract_id = models.CharField(blank=True, null=True, max_length=80)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
@classmethod
def record_adjustment(cls, subscription, **kwargs):
adjustment = SubscriptionAdjustment(
subscription=subscription,
new_date_start=subscription.date_start,
new_date_end=subscription.date_end,
new_salesforce_contract_id=subscription.salesforce_contract_id,
**kwargs
)
adjustment.save()
return adjustment
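    # Illustrative usage sketch: callers snapshot the subscription's dates
    # whenever it changes, using the constants this model defaults to, e.g.
    #
    #     SubscriptionAdjustment.record_adjustment(
    #         subscription,
    #         reason=SubscriptionAdjustmentReason.CREATE,
    #         method=SubscriptionAdjustmentMethod.INTERNAL,
    #         note="Subscription created via internal tooling",
    #         web_user="admin@example.com",  # hypothetical user
    #     )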
class BillingRecordBase(models.Model):
date_created = models.DateTimeField(auto_now_add=True, db_index=True)
emailed_to_list = ArrayField(models.EmailField(), default=list)
skipped_email = models.BooleanField(default=False)
pdf_data_id = models.CharField(max_length=48)
last_modified = models.DateTimeField(auto_now=True)
INVOICE_HTML_TEMPLATE = 'accounting/email/invoice.html'
INVOICE_TEXT_TEMPLATE = 'accounting/email/invoice.txt'
class Meta(object):
abstract = True
_pdf = None
@property
def pdf(self):
if self._pdf is None:
return InvoicePdf.get(self.pdf_data_id)
return self._pdf
@property
def html_template(self):
return self.INVOICE_HTML_TEMPLATE
@property
def text_template(self):
return self.INVOICE_TEXT_TEMPLATE
@property
def should_send_email(self):
raise NotImplementedError("should_send_email is required")
@classmethod
def generate_record(cls, invoice):
record = cls(invoice=invoice)
invoice_pdf = InvoicePdf()
invoice_pdf.generate_pdf(record.invoice)
record.pdf_data_id = invoice_pdf._id
record._pdf = invoice_pdf
record.save()
return record
def handle_throttled_email(self, contact_emails):
self.skipped_email = True
month_name = self.invoice.date_start.strftime("%B")
self.save()
log_accounting_info(
"Throttled billing statements for domain %(domain)s "
"to %(emails)s." % {
'domain': self.invoice.get_domain(),
'emails': ', '.join(contact_emails),
}
)
raise InvoiceEmailThrottledError(
"Invoice communications exceeded the maximum limit of "
"%(max_limit)d for domain %(domain)s for the month of "
"%(month_name)s." % {
'max_limit': MAX_INVOICE_COMMUNICATIONS,
'domain': self.invoice.get_domain(),
'month_name': month_name,
})
def email_context(self):
from corehq.apps.domain.views.accounting import DomainBillingStatementsView
from corehq.apps.domain.views.settings import DefaultProjectSettingsView
month_name = self.invoice.date_start.strftime("%B")
domain = self.invoice.get_domain()
context = {
'month_name': month_name,
'domain': domain,
'domain_url': absolute_reverse(DefaultProjectSettingsView.urlname,
args=[domain]),
'statement_number': self.invoice.invoice_number,
'payment_status': (_("Paid") if self.invoice.is_paid
else _("Payment Required")),
'amount_due': fmt_dollar_amount(self.invoice.balance),
'statements_url': absolute_reverse(
DomainBillingStatementsView.urlname, args=[domain]),
'invoicing_contact_email': settings.INVOICING_CONTACT_EMAIL,
'accounts_email': settings.ACCOUNTS_EMAIL,
}
return context
def email_subject(self):
raise NotImplementedError()
def can_view_statement(self, web_user):
raise NotImplementedError()
def send_email(self, contact_email=None, cc_emails=None):
pdf_attachment = {
'title': self.pdf.get_filename(self.invoice),
'file_obj': BytesIO(self.pdf.get_data(self.invoice)),
'mimetype': 'application/pdf',
}
domain = self.invoice.get_domain()
subject = self.email_subject()
context = self.email_context()
email_from = self.email_from()
greeting = _("Hello,")
can_view_statement = False
web_user = WebUser.get_by_username(contact_email)
if web_user is not None:
if web_user.first_name:
greeting = _("Dear %s,") % web_user.first_name
can_view_statement = self.can_view_statement(web_user)
context['greeting'] = greeting
context['can_view_statement'] = can_view_statement
email_html = render_to_string(self.html_template, context)
email_plaintext = render_to_string(self.text_template, context)
send_html_email_async.delay(
subject, contact_email, email_html,
text_content=email_plaintext,
email_from=email_from,
file_attachments=[pdf_attachment],
cc=cc_emails
)
        self.emailed_to_list.append(contact_email)
if cc_emails:
self.emailed_to_list.extend(cc_emails)
self.save()
if self.invoice.is_customer_invoice:
log_message = "Sent billing statements for account %(account)s to %(emails)s." % {
'account': self.invoice.account,
'emails': contact_email,
}
else:
log_message = "Sent billing statements for domain %(domain)s to %(emails)s." % {
'domain': domain,
'emails': contact_email,
}
log_accounting_info(log_message)
class WireBillingRecord(BillingRecordBase):
invoice = models.ForeignKey(WireInvoice, on_delete=models.PROTECT)
INVOICE_HTML_TEMPLATE = 'accounting/email/wire_invoice.html'
INVOICE_TEXT_TEMPLATE = 'accounting/email/wire_invoice.txt'
class Meta(object):
app_label = 'accounting'
@property
def should_send_email(self):
hidden = self.invoice.is_hidden
return not hidden
@staticmethod
def is_email_throttled():
return False
def email_subject(self):
month_name = self.invoice.date_start.strftime("%B")
return "Your %(month)s Bulk Billing Statement for Project Space %(domain)s" % {
'month': month_name,
'domain': self.invoice.get_domain(),
}
@staticmethod
def email_from():
return "Dimagi Accounting <{email}>".format(email=settings.INVOICING_CONTACT_EMAIL)
def can_view_statement(self, web_user):
return web_user.is_domain_admin(self.invoice.get_domain())
class WirePrepaymentBillingRecord(WireBillingRecord):
class Meta(object):
app_label = 'accounting'
proxy = True
def email_subject(self):
return _("Your prepayment invoice")
def can_view_statement(self, web_user):
return web_user.is_domain_admin(self.invoice.get_domain())
class BillingRecord(BillingRecordBase):
invoice = models.ForeignKey(Invoice, on_delete=models.PROTECT)
INVOICE_CONTRACTED_HTML_TEMPLATE = 'accounting/email/invoice_contracted.html'
INVOICE_CONTRACTED_TEXT_TEMPLATE = 'accounting/email/invoice_contracted.txt'
INVOICE_AUTOPAY_HTML_TEMPLATE = 'accounting/email/invoice_autopayment.html'
INVOICE_AUTOPAY_TEXT_TEMPLATE = 'accounting/email/invoice_autopayment.txt'
class Meta(object):
app_label = 'accounting'
@property
def html_template(self):
if self.invoice.subscription.service_type == SubscriptionType.IMPLEMENTATION:
return self.INVOICE_CONTRACTED_HTML_TEMPLATE
if self.invoice.subscription.account.auto_pay_enabled:
return self.INVOICE_AUTOPAY_HTML_TEMPLATE
return self.INVOICE_HTML_TEMPLATE
@property
def text_template(self):
if self.invoice.subscription.service_type == SubscriptionType.IMPLEMENTATION:
return self.INVOICE_CONTRACTED_TEXT_TEMPLATE
if self.invoice.subscription.account.auto_pay_enabled:
return self.INVOICE_AUTOPAY_TEXT_TEMPLATE
return self.INVOICE_TEXT_TEMPLATE
@property
def should_send_email(self):
subscription = self.invoice.subscription
autogenerate = (subscription.auto_generate_credits and not self.invoice.balance)
small_contracted = (self.invoice.balance <= SMALL_INVOICE_THRESHOLD and
subscription.service_type == SubscriptionType.IMPLEMENTATION)
hidden = self.invoice.is_hidden
do_not_email_invoice = self.invoice.subscription.do_not_email_invoice
return not (autogenerate or small_contracted or hidden or do_not_email_invoice)
def is_email_throttled(self):
month = self.invoice.date_start.month
year = self.invoice.date_start.year
date_start, date_end = get_first_last_days(year, month)
return self.__class__.objects.filter(
invoice__date_start__lte=date_end, invoice__date_end__gte=date_start,
invoice__subscription__subscriber=self.invoice.subscription.subscriber,
invoice__is_hidden_to_ops=False,
).count() > MAX_INVOICE_COMMUNICATIONS
def email_context(self):
context = super(BillingRecord, self).email_context()
total_balance = sum(invoice.balance for invoice in Invoice.objects.filter(
is_hidden=False,
subscription__subscriber__domain=self.invoice.get_domain(),
))
is_small_invoice = self.invoice.balance < SMALL_INVOICE_THRESHOLD
payment_status = (_("Paid")
if self.invoice.is_paid or total_balance == 0
else _("Payment Required"))
context.update({
'plan_name': self.invoice.subscription.plan_version.plan.name,
'date_due': self.invoice.date_due,
'is_small_invoice': is_small_invoice,
'total_balance': total_balance,
'is_total_balance_due': total_balance >= SMALL_INVOICE_THRESHOLD,
'payment_status': payment_status,
})
if self.invoice.subscription.service_type == SubscriptionType.IMPLEMENTATION:
from corehq.apps.accounting.dispatcher import AccountingAdminInterfaceDispatcher
context.update({
'salesforce_contract_id': self.invoice.subscription.salesforce_contract_id,
'billing_account': self.invoice.subscription.account.name,
'billing_contacts': self.invoice.contact_emails,
'admin_invoices_url': "{url}?subscriber={domain}".format(
url=absolute_reverse(AccountingAdminInterfaceDispatcher.name(), args=['invoices']),
domain=self.invoice.get_domain()
)
})
if self.invoice.subscription.account.auto_pay_enabled:
try:
last_4 = getattr(self.invoice.subscription.account.autopay_card, 'last4', None)
except StripePaymentMethod.DoesNotExist:
last_4 = None
context.update({
'auto_pay_user': self.invoice.subscription.account.auto_pay_user,
'last_4': last_4,
})
context.update({
'credits': self.credits,
})
return context
def credits(self):
credits = {
'account': {},
'subscription': {},
}
self._add_product_credits(credits)
self._add_user_credits(credits)
self._add_sms_credits(credits)
self._add_general_credits(credits)
return credits
def _add_product_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
invoice=self.invoice,
line_item__product_rate__isnull=False,
)
subscription_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_by_subscription_and_features(
self.invoice.subscription,
is_product=True,
)
)
if subscription_credits or credit_adjustments.filter(
credit_line__subscription=self.invoice.subscription,
):
credits['subscription'].update({
'product': {
'amount': quantize_accounting_decimal(subscription_credits),
}
})
account_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.subscription.account,
is_product=True,
)
)
if account_credits or credit_adjustments.filter(
credit_line__subscription=None,
):
credits['account'].update({
'product': {
'amount': quantize_accounting_decimal(account_credits),
}
})
return credits
def _add_user_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
invoice=self.invoice,
line_item__feature_rate__feature__feature_type=FeatureType.USER,
)
subscription_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_by_subscription_and_features(
self.invoice.subscription,
feature_type=FeatureType.USER,
)
)
if subscription_credits or credit_adjustments.filter(
credit_line__subscription=self.invoice.subscription,
):
credits['subscription'].update({
'user': {
'amount': quantize_accounting_decimal(subscription_credits),
}
})
account_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.subscription.account,
feature_type=FeatureType.USER,
)
)
if account_credits or credit_adjustments.filter(
credit_line__subscription=None,
):
credits['account'].update({
'user': {
'amount': quantize_accounting_decimal(account_credits),
}
})
return credits
def _add_sms_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
invoice=self.invoice,
line_item__feature_rate__feature__feature_type=FeatureType.SMS,
)
subscription_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_by_subscription_and_features(
self.invoice.subscription,
feature_type=FeatureType.SMS,
)
)
if subscription_credits or credit_adjustments.filter(
credit_line__subscription=self.invoice.subscription,
):
credits['subscription'].update({
'sms': {
'amount': quantize_accounting_decimal(subscription_credits),
}
})
account_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.subscription.account,
feature_type=FeatureType.SMS,
)
)
if account_credits or credit_adjustments.filter(
credit_line__subscription=None,
):
credits['account'].update({
'sms': {
'amount': quantize_accounting_decimal(account_credits),
}
})
return credits
def _add_general_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
invoice=self.invoice,
line_item__feature_rate=None,
line_item__product_rate=None,
)
subscription_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_by_subscription_and_features(
self.invoice.subscription,
)
)
if subscription_credits or credit_adjustments.filter(
credit_line__subscription=self.invoice.subscription,
):
credits['subscription'].update({
'general': {
'amount': quantize_accounting_decimal(subscription_credits),
}
})
account_credits = BillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.subscription.account,
)
)
if account_credits or credit_adjustments.filter(
credit_line__subscription=None,
):
credits['account'].update({
'general': {
'amount': quantize_accounting_decimal(account_credits),
}
})
return credits
def email_subject(self):
month_name = self.invoice.date_start.strftime("%B")
return "Your %(month)s CommCare Billing Statement for Project Space %(domain)s" % {
'month': month_name,
'domain': self.invoice.subscription.subscriber.domain,
}
def email_from(self):
return get_dimagi_from_email()
@staticmethod
def _get_total_balance(credit_lines):
return (
sum([credit_line.balance for credit_line in credit_lines])
if credit_lines else Decimal('0.0')
)
def can_view_statement(self, web_user):
return web_user.is_domain_admin(self.invoice.get_domain())
class CustomerBillingRecord(BillingRecordBase):
invoice = models.ForeignKey(CustomerInvoice, on_delete=models.PROTECT)
INVOICE_AUTOPAY_HTML_TEMPLATE = 'accounting/email/invoice_autopayment.html'
INVOICE_AUTOPAY_TEXT_TEMPLATE = 'accounting/email/invoice_autopayment.txt'
INVOICE_HTML_TEMPLATE = 'accounting/email/customer_invoice.html'
INVOICE_TEXT_TEMPLATE = 'accounting/email/customer_invoice.txt'
class Meta(object):
app_label = 'accounting'
@property
def html_template(self):
if self.invoice.account.auto_pay_enabled:
return self.INVOICE_AUTOPAY_HTML_TEMPLATE
return self.INVOICE_HTML_TEMPLATE
@property
def text_template(self):
if self.invoice.account.auto_pay_enabled:
return self.INVOICE_AUTOPAY_TEXT_TEMPLATE
return self.INVOICE_TEXT_TEMPLATE
@property
def should_send_email(self):
return not self.invoice.is_hidden
def email_context(self):
from corehq.apps.accounting.views import EnterpriseBillingStatementsView
context = super(CustomerBillingRecord, self).email_context()
is_small_invoice = self.invoice.balance < SMALL_INVOICE_THRESHOLD
payment_status = (_("Paid")
if self.invoice.is_paid or self.invoice.balance == 0
else _("Payment Required"))
domain = self.invoice.subscriptions.first().subscriber.domain
context.update({
'account_name': self.invoice.account.name,
'date_due': self.invoice.date_due,
'is_small_invoice': is_small_invoice,
'total_balance': '{:.2f}'.format(self.invoice.balance),
'is_total_balance_due': self.invoice.balance >= SMALL_INVOICE_THRESHOLD,
'payment_status': payment_status,
'statements_url': absolute_reverse(
EnterpriseBillingStatementsView.urlname, args=[domain]),
})
if self.invoice.account.auto_pay_enabled:
try:
last_4 = getattr(self.invoice.account.autopay_card, 'last4', None)
except StripePaymentMethod.DoesNotExist:
last_4 = None
context.update({
'auto_pay_user': self.invoice.account.auto_pay_user,
'last_4': last_4,
})
context.update({
'credits': self.credits,
})
return context
def credits(self):
credits = {
'account': {},
'subscription': {},
}
self._add_product_credits(credits)
self._add_user_credits(credits)
self._add_sms_credits(credits)
self._add_general_credits(credits)
return credits
def _add_product_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
customer_invoice=self.invoice,
line_item__product_rate__isnull=False
)
subscription_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_subscriptions(
self.invoice.subscriptions,
is_product=True
)
)
if subscription_credits or self._subscriptions_in_credit_adjustments(credit_adjustments):
            credits['subscription'].update({
'product': {
'amount': quantize_accounting_decimal(subscription_credits)
}
})
account_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.account,
is_product=True
)
)
if account_credits or credit_adjustments.filter(credit_line__subscription=None):
credits['account'].update({
'product': {
'amount': quantize_accounting_decimal(account_credits)
}
})
return credits
def _add_user_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
customer_invoice=self.invoice,
line_item__feature_rate__feature__feature_type=FeatureType.USER
)
subscription_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_subscriptions(
self.invoice.subscriptions,
feature_type=FeatureType.USER
)
)
if subscription_credits or self._subscriptions_in_credit_adjustments(credit_adjustments):
credits['subscription'].update({
'user': {
'amount': quantize_accounting_decimal(subscription_credits)
}
})
account_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.account,
feature_type=FeatureType.USER
)
)
if account_credits or credit_adjustments.filter(credit_line__subscription=None):
credits['account'].update({
'user': {
'amount': quantize_accounting_decimal(account_credits)
}
})
return credits
def _add_sms_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
customer_invoice=self.invoice,
line_item__feature_rate__feature__feature_type=FeatureType.SMS
)
subscription_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_subscriptions(
self.invoice.subscriptions,
feature_type=FeatureType.SMS
)
)
if subscription_credits or self._subscriptions_in_credit_adjustments(credit_adjustments):
credits['subscription'].update({
'sms': {
'amount': quantize_accounting_decimal(subscription_credits)
}
})
account_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.account,
feature_type=FeatureType.SMS
)
)
if account_credits or credit_adjustments.filter(credit_line__subscription=None):
credits['account'].update({
'sms': {
'amount': quantize_accounting_decimal(account_credits)
}
})
return credits
def _add_general_credits(self, credits):
credit_adjustments = CreditAdjustment.objects.filter(
customer_invoice=self.invoice,
line_item__feature_rate=None,
line_item__product_rate=None
)
subscription_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_subscriptions(
self.invoice.subscriptions
)
)
if subscription_credits or self._subscriptions_in_credit_adjustments(credit_adjustments):
credits['subscription'].update({
'general': {
'amount': quantize_accounting_decimal(subscription_credits)
}
})
account_credits = CustomerBillingRecord._get_total_balance(
CreditLine.get_credits_for_account(
self.invoice.account
)
)
if account_credits or credit_adjustments.filter(credit_line__subscription=None):
credits['account'].update({
'general': {
'amount': quantize_accounting_decimal(account_credits)
}
})
return credits
def _subscriptions_in_credit_adjustments(self, credit_adjustments):
for subscription in self.invoice.subscriptions.all():
if credit_adjustments.filter(
credit_line__subscription=subscription
):
return True
return False
def email_subject(self):
month_name = self.invoice.date_start.strftime("%B")
return "Your %(month)s CommCare Billing Statement for Customer Account %(account_name)s" % {
'month': month_name,
'account_name': self.invoice.account.name,
}
def email_from(self):
return get_dimagi_from_email()
@staticmethod
def _get_total_balance(credit_lines):
return (
sum([credit_line.balance for credit_line in credit_lines])
if credit_lines else Decimal('0.0')
)
def can_view_statement(self, web_user):
for subscription in self.invoice.subscriptions.all():
if web_user.is_domain_admin(subscription.subscriber.domain):
return True
return False
class InvoicePdf(BlobMixin, SafeSaveDocument):
invoice_id = StringProperty()
date_created = DateTimeProperty()
is_wire = BooleanProperty(default=False)
is_customer = BooleanProperty(default=False)
_blobdb_type_code = CODES.invoice
def generate_pdf(self, invoice):
self.save()
domain = invoice.get_domain()
pdf_data = NamedTemporaryFile()
account_name = ''
if invoice.is_customer_invoice:
account_name = invoice.account.name
template = InvoiceTemplate(
pdf_data.name,
invoice_number=invoice.invoice_number,
to_address=get_address_from_invoice(invoice),
project_name=domain,
invoice_date=invoice.date_created.date(),
due_date=invoice.date_due,
date_start=invoice.date_start,
date_end=invoice.date_end,
subtotal=invoice.subtotal,
tax_rate=invoice.tax_rate,
applied_tax=getattr(invoice, 'applied_tax', Decimal('0.000')),
applied_credit=getattr(invoice, 'applied_credit', Decimal('0.000')),
total=invoice.get_total(),
is_wire=invoice.is_wire,
is_customer=invoice.is_customer_invoice,
is_prepayment=invoice.is_wire and invoice.is_prepayment,
account_name=account_name
)
if not invoice.is_wire:
if invoice.is_customer_invoice:
line_items = LineItem.objects.filter(customer_invoice=invoice)
else:
line_items = LineItem.objects.filter(subscription_invoice=invoice)
for line_item in line_items:
is_unit = line_item.unit_description is not None
is_quarterly = line_item.invoice.is_customer_invoice and \
line_item.invoice.account.invoicing_plan != InvoicingPlan.MONTHLY
unit_cost = line_item.subtotal
if is_unit:
unit_cost = line_item.unit_cost
if is_quarterly and line_item.base_description is not None:
unit_cost = line_item.product_rate.monthly_fee
description = line_item.base_description or line_item.unit_description
if line_item.quantity > 0:
template.add_item(
description,
line_item.quantity if is_unit or is_quarterly else 1,
unit_cost,
line_item.subtotal,
line_item.applied_credit,
line_item.total
)
if invoice.is_wire and invoice.is_prepayment:
unit_cost = 1
applied_credit = 0
for item in invoice.items:
template.add_item(item['type'],
item['amount'],
unit_cost,
item['amount'],
applied_credit,
item['amount'])
template.get_pdf()
filename = self.get_filename(invoice)
blob_domain = domain or UNKNOWN_DOMAIN
if not settings.UNIT_TESTING:
self.put_attachment(pdf_data, filename, 'application/pdf', domain=blob_domain)
else:
self.put_attachment('', filename, 'application/pdf', domain=blob_domain)
pdf_data.close()
self.invoice_id = str(invoice.id)
self.date_created = datetime.datetime.utcnow()
self.is_wire = invoice.is_wire
self.is_customer = invoice.is_customer_invoice
self.save()
@staticmethod
def get_filename(invoice):
return "statement_%(year)d_%(month)d.pdf" % {
'year': invoice.date_start.year,
'month': invoice.date_start.month,
}
def get_data(self, invoice):
with self.fetch_attachment(self.get_filename(invoice), stream=True) as fh:
return fh.read()
class LineItemManager(models.Manager):
def get_products(self):
return self.get_queryset().filter(feature_rate__exact=None)
def get_features(self):
return self.get_queryset().filter(product_rate__exact=None)
def get_feature_by_type(self, feature_type):
return self.get_queryset().filter(feature_rate__feature__feature_type=feature_type)
class LineItem(models.Model):
subscription_invoice = models.ForeignKey(Invoice, on_delete=models.PROTECT, null=True)
customer_invoice = models.ForeignKey(CustomerInvoice, on_delete=models.PROTECT, null=True)
feature_rate = models.ForeignKey(FeatureRate, on_delete=models.PROTECT, null=True)
product_rate = models.ForeignKey(SoftwareProductRate, on_delete=models.PROTECT, null=True)
base_description = models.TextField(blank=True, null=True)
base_cost = models.DecimalField(default=Decimal('0.0000'), max_digits=10, decimal_places=4)
unit_description = models.TextField(blank=True, null=True)
unit_cost = models.DecimalField(default=Decimal('0.0000'), max_digits=10, decimal_places=4)
quantity = models.IntegerField(default=1, validators=integer_field_validators)
last_modified = models.DateTimeField(auto_now=True)
objects = LineItemManager()
class Meta(object):
app_label = 'accounting'
@property
def invoice(self):
if self.subscription_invoice:
return self.subscription_invoice
else:
return self.customer_invoice
@invoice.setter
def invoice(self, invoice):
if invoice.is_customer_invoice:
self.customer_invoice = invoice
else:
self.subscription_invoice = invoice
@property
def subtotal(self):
if self.customer_invoice and self.customer_invoice.account.invoicing_plan != InvoicingPlan.MONTHLY:
return self.base_cost * self.quantity + self.unit_cost * self.quantity
return self.base_cost + self.unit_cost * self.quantity
@property
def applied_credit(self):
if self.creditadjustment_set.count() == 0:
return Decimal('0.0000')
return sum([credit.amount for credit in self.creditadjustment_set.all()])
@property
def total(self):
return self.subtotal + self.applied_credit
def calculate_credit_adjustments(self):
current_total = self.total
credit_lines = CreditLine.get_credits_for_line_item(self)
CreditLine.apply_credits_toward_balance(credit_lines, current_total, line_item=self)
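    # Worked example (illustrative numbers only): for a monthly line item
    # with base_cost=100.0000, unit_cost=1.0000 and quantity=25, subtotal is
    # 100 + 1 * 25 = 125.0000; a -25.0000 credit adjustment then yields
    # total = 100.0000. Non-monthly customer invoices multiply base_cost by
    # quantity as well (see the subtotal property above).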
class CreditLine(models.Model):
account = models.ForeignKey(BillingAccount, on_delete=models.PROTECT)
subscription = models.ForeignKey(Subscription, on_delete=models.PROTECT, null=True, blank=True)
is_product = models.BooleanField(default=False)
feature_type = models.CharField(max_length=10, null=True, blank=True,
choices=FeatureType.CHOICES)
date_created = models.DateTimeField(auto_now_add=True)
balance = models.DecimalField(default=Decimal('0.0000'), max_digits=10, decimal_places=4)
is_active = models.BooleanField(default=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def __str__(self):
credit_level = ("Account-Level" if self.subscription is None
else "Subscription-Level")
return ("%(level)s credit [Account %(account_id)d]%(feature)s"
"%(product)s, balance %(balance)s" % {
'level': credit_level,
'account_id': self.account.id,
'feature': (' for Feature %s' % self.feature_type
if self.feature_type is not None else ""),
'product': (' for Product'
if self.is_product else ""),
'balance': self.balance,
})
def save(self, *args, **kwargs):
from corehq.apps.accounting.mixins import (
get_credits_available_for_product_in_account,
get_credits_available_for_product_in_subscription,
)
super(CreditLine, self).save(*args, **kwargs)
if self.account:
get_credits_available_for_product_in_account.clear(self.account)
if self.subscription:
get_credits_available_for_product_in_subscription.clear(self.subscription)
def adjust_credit_balance(self, amount, is_new=False, note=None,
line_item=None, invoice=None, customer_invoice=None,
payment_record=None, related_credit=None,
reason=None, web_user=None):
note = note or ""
if line_item is not None and (invoice is not None or customer_invoice is not None):
raise CreditLineError("You may only have an invoice OR a line item making this adjustment.")
if reason is None:
reason = CreditAdjustmentReason.MANUAL
if payment_record is not None:
reason = CreditAdjustmentReason.DIRECT_PAYMENT
elif related_credit is not None:
reason = CreditAdjustmentReason.TRANSFER
elif invoice is not None:
reason = CreditAdjustmentReason.INVOICE
elif customer_invoice is not None:
reason = CreditAdjustmentReason.INVOICE
elif line_item is not None:
reason = CreditAdjustmentReason.LINE_ITEM
if is_new:
note = "Initialization of credit line. %s" % note
credit_adjustment = CreditAdjustment(
credit_line=self,
note=note,
amount=amount,
reason=reason,
payment_record=payment_record,
line_item=line_item,
invoice=invoice,
customer_invoice=customer_invoice,
related_credit=related_credit,
web_user=web_user,
)
credit_adjustment.save()
self.balance = F('balance') + amount
self.save()
self.refresh_from_db()
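    # Note (explanatory comment, not upstream code): the balance is updated
    # through an F() expression so the increment runs inside the database,
    # avoiding lost updates when two adjustments race; refresh_from_db() then
    # replaces the unevaluated F('balance') + amount with the stored value.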
@classmethod
def get_credits_for_line_item(cls, line_item):
is_product = line_item.product_rate is not None
feature_type = (
line_item.feature_rate.feature.feature_type
if line_item.feature_rate is not None else None
)
assert is_product or feature_type
assert not (is_product and feature_type)
if line_item.invoice.is_customer_invoice:
return cls.get_credits_for_line_item_in_customer_invoice(line_item, feature_type, is_product)
else:
return cls.get_credits_for_line_item_in_invoice(line_item, feature_type, is_product)
@classmethod
def get_credits_for_line_item_in_invoice(cls, line_item, feature_type, is_product):
if feature_type:
return itertools.chain(
cls.get_credits_by_subscription_and_features(
line_item.invoice.subscription,
feature_type=feature_type,
),
cls.get_credits_for_account(
line_item.invoice.subscription.account,
feature_type=feature_type,
)
)
if is_product:
return itertools.chain(
cls.get_credits_by_subscription_and_features(
line_item.invoice.subscription,
is_product=True,
),
cls.get_credits_for_account(
line_item.invoice.subscription.account,
is_product=True,
)
)
@classmethod
def get_credits_for_line_item_in_customer_invoice(cls, line_item, feature_type, is_product):
if feature_type:
return itertools.chain(
cls.get_credits_for_subscriptions(
subscriptions=line_item.invoice.subscriptions.all(),
feature_type=feature_type
),
cls.get_credits_for_account(
account=line_item.invoice.account,
feature_type=feature_type
)
)
if is_product:
return itertools.chain(
cls.get_credits_for_subscriptions(
subscriptions=line_item.invoice.subscriptions.all(),
is_product=is_product
),
cls.get_credits_for_account(
account=line_item.invoice.account,
is_product=is_product
)
)
@classmethod
def get_credits_for_invoice(cls, invoice):
relevant_credits = [
cls.get_credits_by_subscription_and_features(invoice.subscription),
cls.get_credits_for_account(invoice.subscription.account)
]
if invoice.subscription.next_subscription:
active_sub = Subscription.get_active_subscription_by_domain(
invoice.subscription.subscriber.domain
)
if active_sub.account == invoice.subscription.account:
relevant_credits.append(
cls.get_credits_by_subscription_and_features(active_sub)
)
elif (invoice.subscription.next_subscription.account
== invoice.subscription.account):
relevant_credits.append(
cls.get_credits_by_subscription_and_features(
invoice.subscription.next_subscription
)
)
return itertools.chain(*relevant_credits)
@classmethod
def get_credits_for_customer_invoice(cls, invoice):
return itertools.chain(
cls.get_credits_for_subscriptions(invoice.subscriptions.all()),
cls.get_credits_for_account(invoice.account)
)
@classmethod
def get_credits_for_subscriptions(cls, subscriptions, feature_type=None, is_product=False):
credit_list = cls.objects.none()
for subscription in subscriptions.all():
credit_list = credit_list.union(cls.get_credits_by_subscription_and_features(
subscription,
feature_type=feature_type,
is_product=is_product
))
return credit_list
@classmethod
def get_credits_for_account(cls, account, feature_type=None, is_product=False):
assert not (feature_type and is_product)
return cls.objects.filter(
account=account, subscription__exact=None, is_active=True
).filter(
is_product=is_product, feature_type__exact=feature_type
).all()
@classmethod
def get_credits_by_subscription_and_features(cls, subscription,
feature_type=None,
is_product=False):
assert not (feature_type and is_product)
return cls.objects.filter(
subscription=subscription,
feature_type__exact=feature_type,
is_product=is_product,
is_active=True
).all()
@classmethod
def get_non_general_credits_by_subscription(cls, subscription):
return cls.objects.filter(subscription=subscription, is_active=True).filter(
Q(is_product=True) |
Q(feature_type__in=[f[0] for f in FeatureType.CHOICES])
).all()
@classmethod
def add_credit(cls, amount, account=None, subscription=None,
is_product=False, feature_type=None, payment_record=None,
invoice=None, customer_invoice=None, line_item=None, related_credit=None,
note=None, reason=None, web_user=None, permit_inactive=False):
if account is None and subscription is None:
raise CreditLineError(
"You must specify either a subscription "
"or account to add this credit to."
)
if feature_type is not None and is_product:
raise CreditLineError(
"Can only add credit for a product OR a feature, but not both."
)
account = account or subscription.account
try:
credit_line = cls.objects.get(
account__exact=account,
subscription__exact=subscription,
is_product=is_product,
feature_type__exact=feature_type,
is_active=True
)
if not permit_inactive and not credit_line.is_active and not invoice:
raise CreditLineError(
"Could not add credit to CreditLine %s because it is "
"inactive." % str(credit_line)
)
is_new = False
except cls.MultipleObjectsReturned as e:
raise CreditLineError(
"Could not find a unique credit line for %(account)s"
"%(subscription)s%(feature)s%(product)s. %(error)s"
"instead." % {
'account': "Account ID %d" % account.id,
'subscription': (" | Subscription ID %d" % subscription.id
if subscription is not None else ""),
'feature': (" | Feature %s" % feature_type
if feature_type is not None else ""),
'product': (" | Product" if is_product else ""),
'error': str(e),
}
)
except cls.DoesNotExist:
credit_line = cls.objects.create(
account=account,
subscription=subscription,
is_product=is_product,
feature_type=feature_type,
)
is_new = True
credit_line.adjust_credit_balance(amount, is_new=is_new, note=note,
payment_record=payment_record,
invoice=invoice, customer_invoice=customer_invoice, line_item=line_item,
related_credit=related_credit,
reason=reason, web_user=web_user)
return credit_line
@classmethod
def apply_credits_toward_balance(cls, credit_lines, balance, **kwargs):
for credit_line in credit_lines:
if balance == Decimal('0.0000'):
return
if balance <= Decimal('0.0000'):
raise CreditLineError(
"A balance went below zero dollars when applying credits "
"to credit line %d." % credit_line.pk
)
adjustment_amount = min(credit_line.balance, balance)
if adjustment_amount > Decimal('0.0000'):
credit_line.adjust_credit_balance(-adjustment_amount, **kwargs)
balance -= adjustment_amount
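    # Worked example (illustrative numbers): with credit lines of 30.0000 and
    # 50.0000 against a balance of 60.0000, the first line is drained in full
    # (adjusted by -30.0000), the second is debited 30.0000 of its 50.0000,
    # and the balance reaches 0.0000; any further credit line would hit the
    # early return at the top of the loop.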
@classmethod
def make_payment_towards_invoice(cls, invoice, payment_record):
if invoice.is_customer_invoice:
billing_account = invoice.account
else:
billing_account = invoice.subscription.account
cls.add_credit(
payment_record.amount,
account=billing_account,
payment_record=payment_record,
)
cls.add_credit(
-payment_record.amount,
account=billing_account,
invoice=invoice,
)
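    # Note (explanatory comment, not upstream code): a payment is booked as
    # two adjustments on the account's general credit line -- +amount tied to
    # the PaymentRecord (reason DIRECT_PAYMENT) and -amount tied to the
    # invoice (reason INVOICE) -- so the line nets to zero while both sides
    # of the transaction remain auditable.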
class PaymentMethod(models.Model):
web_user = models.CharField(max_length=80, db_index=True)
method_type = models.CharField(max_length=50,
default=PaymentMethodType.STRIPE,
choices=PaymentMethodType.CHOICES,
db_index=True)
customer_id = models.CharField(max_length=255, null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
unique_together = ('web_user', 'method_type')
class StripePaymentMethod(PaymentMethod):
class Meta(object):
proxy = True
app_label = 'accounting'
STRIPE_GENERIC_ERROR = (stripe.error.AuthenticationError,
stripe.error.InvalidRequestError,
stripe.error.APIConnectionError,
stripe.error.StripeError,)
@property
def customer(self):
return self._get_or_create_stripe_customer()
def _get_or_create_stripe_customer(self):
customer = None
if self.customer_id is not None:
try:
customer = self._get_stripe_customer()
            except stripe.error.InvalidRequestError:
pass
if customer is None:
customer = self._create_stripe_customer()
return customer
def _create_stripe_customer(self):
customer = stripe.Customer.create(
description="{}'s cards".format(self.web_user),
email=self.web_user,
)
self.customer_id = customer.id
self.save()
return customer
def _get_stripe_customer(self):
return stripe.Customer.retrieve(self.customer_id)
@property
def all_cards(self):
try:
return [card for card in self.customer.cards.data if card is not None]
except stripe.error.AuthenticationError:
if not settings.STRIPE_PRIVATE_KEY:
log_accounting_info("Private key is not defined in settings")
return []
else:
raise
def all_cards_serialized(self, billing_account):
return [{
'brand': card.brand,
'last4': card.last4,
'exp_month': card.exp_month,
'exp_year': card.exp_year,
'token': card.id,
'is_autopay': self._is_autopay(card, billing_account),
} for card in self.all_cards]
def get_card(self, card_token):
return self.customer.cards.retrieve(card_token)
def get_autopay_card(self, billing_account):
return next((
card for card in self.all_cards
if self._is_autopay(card, billing_account)
), None)
def remove_card(self, card_token):
card = self.get_card(card_token)
self._remove_card_from_all_accounts(card)
card.delete()
def _remove_card_from_all_accounts(self, card):
accounts = BillingAccount.objects.filter(auto_pay_user=self.web_user)
for account in accounts:
if account.autopay_card == card:
account.remove_autopay_user()
def create_card(self, stripe_token, billing_account, domain, autopay=False):
customer = self.customer
card = customer.cards.create(card=stripe_token)
self.set_default_card(card)
if autopay:
self.set_autopay(card, billing_account, domain)
return card
    def set_default_card(self, card):
        # `self.customer` constructs a fresh Stripe Customer object on each
        # access; fetch it once so the attribute is set and saved on the
        # same object rather than silently dropped.
        customer = self.customer
        customer.default_card = card
        customer.save()
        return card
def set_autopay(self, card, billing_account, domain):
if billing_account.auto_pay_enabled:
self._remove_other_auto_pay_cards(billing_account)
self._update_autopay_status(card, billing_account, autopay=True)
billing_account.update_autopay_user(self.web_user, domain)
def unset_autopay(self, card, billing_account):
if self._is_autopay(card, billing_account):
self._update_autopay_status(card, billing_account, autopay=False)
billing_account.remove_autopay_user()
def _update_autopay_status(self, card, billing_account, autopay):
metadata = card.metadata.copy()
metadata.update({self._auto_pay_card_metadata_key(billing_account): autopay})
card.metadata = metadata
card.save()
def _remove_autopay_card(self, billing_account):
autopay_card = self.get_autopay_card(billing_account)
if autopay_card is not None:
self._update_autopay_status(autopay_card, billing_account, autopay=False)
@staticmethod
def _remove_other_auto_pay_cards(billing_account):
user = billing_account.auto_pay_user
try:
other_payment_method = StripePaymentMethod.objects.get(web_user=user)
other_payment_method._remove_autopay_card(billing_account)
except StripePaymentMethod.DoesNotExist:
pass
@staticmethod
def _is_autopay(card, billing_account):
return card.metadata.get(StripePaymentMethod._auto_pay_card_metadata_key(billing_account)) == 'True'
@staticmethod
def _auto_pay_card_metadata_key(billing_account):
return 'auto_pay_{billing_account_id}'.format(billing_account_id=billing_account.id)
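    # Example (illustrative): for billing account id 42 the metadata key is
    # 'auto_pay_42'. Stripe serializes metadata values as strings, which is
    # why _is_autopay() above compares against the string 'True'.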
def create_charge(self, card, amount_in_dollars, description):
amount_in_cents = int((amount_in_dollars * Decimal('100')).quantize(Decimal(10)))
transaction_record = stripe.Charge.create(
card=card,
customer=self.customer,
amount=amount_in_cents,
currency=settings.DEFAULT_CURRENCY,
description=description,
)
return transaction_record.id
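    # Worked example (illustrative): Decimal('40.50') * Decimal('100') gives
    # Decimal('4050.00'); quantize(Decimal(10)) rounds that to a whole number
    # of cents, so the charge is created with amount=4050 in the smallest
    # currency unit, as the Stripe API expects.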
class PaymentRecord(models.Model):
payment_method = models.ForeignKey(PaymentMethod, on_delete=models.PROTECT,
db_index=True)
date_created = models.DateTimeField(auto_now_add=True)
transaction_id = models.CharField(max_length=255, unique=True)
amount = models.DecimalField(default=Decimal('0.0000'),
max_digits=10, decimal_places=4)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
@property
def public_transaction_id(self):
ops_num = settings.INVOICE_STARTING_NUMBER + self.id
return "%sP-%d" % (settings.INVOICE_PREFIX, ops_num)
@classmethod
def create_record(cls, payment_method, transaction_id, amount):
return cls.objects.create(
payment_method=payment_method,
transaction_id=transaction_id,
amount=amount,
)
class CreditAdjustment(ValidateModelMixin, models.Model):
credit_line = models.ForeignKey(CreditLine, on_delete=models.PROTECT)
reason = models.CharField(max_length=25, default=CreditAdjustmentReason.MANUAL,
choices=CreditAdjustmentReason.CHOICES)
note = models.TextField(blank=True)
amount = models.DecimalField(default=Decimal('0.0000'), max_digits=10, decimal_places=4)
line_item = models.ForeignKey(LineItem, on_delete=models.PROTECT, null=True, blank=True)
invoice = models.ForeignKey(Invoice, on_delete=models.PROTECT, null=True, blank=True)
customer_invoice = models.ForeignKey(CustomerInvoice, on_delete=models.PROTECT, null=True, blank=True)
payment_record = models.ForeignKey(PaymentRecord,
on_delete=models.PROTECT, null=True, blank=True)
related_credit = models.ForeignKey(CreditLine, on_delete=models.PROTECT,
null=True, blank=True, related_name='creditadjustment_related')
date_created = models.DateTimeField(auto_now_add=True)
web_user = models.CharField(max_length=80, null=True, blank=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = 'accounting'
def clean(self):
if self.line_item and self.invoice:
raise ValidationError(_("You can't specify both an invoice and a line item."))
class DomainUserHistory(models.Model):
domain = models.CharField(max_length=256)
record_date = models.DateField()
num_users = models.IntegerField(default=0)
class Meta:
unique_together = ('domain', 'record_date')
# ---------------------------------------------------------------------------
# File: guild/main_bootstrap.py (repo: wheatdog/guildai, license: Apache-2.0)
# ---------------------------------------------------------------------------
# Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bootstraps env for guild.main.
The primary bootstrap task is to configure sys.path with the location
of Guild's external dependencies. This module supports two modes:
distribution and dev.
External dependencies in distribution mode are assumed to be located
in a single `GUILD_PKG_HOME/external` directory where `GUILD_PKG_HOME`
is the `guild` directory within the Guild distribution location.
As the bootstrap process is used for every Guild command, it must
execute as quickly as possible.
"""
from __future__ import absolute_import
from __future__ import division
import os
import sys
def main():
ensure_external_path()
import guild.main
guild.main.main()
def ensure_external_path():
path = _external_libs_path()
if path not in sys.path:
sys.path.insert(0, path)
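# Illustrative effect (explanatory comment, not part of the module): for a
# distribution installed at /usr/lib/python3/site-packages/guild, this
# prepends /usr/lib/python3/site-packages/guild/external to sys.path so the
# bundled third-party dependencies shadow any system-wide copies.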
def _external_libs_path():
guild_pkg_dir = os.path.dirname(__file__)
path = os.path.abspath(os.path.join(guild_pkg_dir, "external"))
if not os.path.exists(path):
import textwrap
sys.stderr.write("guild: {} does not exist\n".format(path))
sys.stderr.write(
textwrap.fill(
"If you're a Guild developer, run 'python setup.py build' "
"in the Guild project directory and try again. Otherwise "
"please report this as a bug at "
"https://github.com/guildai/guildai/issues."
)
)
sys.stderr.write("\n")
sys.exit(1)
return path
if __name__ == "__main__":
main()
# ---------------------------------------------------------------------------
# File: discord/utils.py (repo: rf20008/nextcord, license: MIT)
# ---------------------------------------------------------------------------
"""
The MIT License (MIT)
Copyright (c) 2021-present tag-epic
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
Module to allow for backwards compatibility for existing code and extensions
"""
from nextcord.utils import *
from nextcord.utils import MISSING, DISCORD_EPOCH
| 25,192
|
py
|
Python
|
trac/admin/web_ui.py
|
mikiec84/trac
|
d51a7119b9fcb9061d7fe135c7d648fa671555dd
|
[
"BSD-3-Clause"
] | null | null | null |
trac/admin/web_ui.py
|
mikiec84/trac
|
d51a7119b9fcb9061d7fe135c7d648fa671555dd
|
[
"BSD-3-Clause"
] | null | null | null |
trac/admin/web_ui.py
|
mikiec84/trac
|
d51a7119b9fcb9061d7fe135c7d648fa671555dd
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2020 Edgewall Software
# Copyright (C) 2005 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
import os
import pkg_resources
import re
import shutil
from functools import partial
from trac import log
from trac.admin.api import IAdminPanelProvider
from trac.core import *
from trac.loader import get_plugin_info
from trac.log import LOG_LEVELS, LOG_LEVEL_ALIASES, LOG_LEVEL_ALIASES_MAP
from trac.perm import IPermissionRequestor, PermissionExistsError, \
PermissionSystem
from trac.util.datefmt import all_timezones, pytz
from trac.util.html import tag
from trac.util.text import exception_to_unicode, unicode_from_base64, \
unicode_to_base64
from trac.util.translation import _, Locale, get_available_locales, \
ngettext, tag_
from trac.web.api import HTTPNotFound, IRequestHandler, \
is_valid_default_handler
from trac.web.chrome import Chrome, INavigationContributor, \
ITemplateProvider, add_notice, add_stylesheet, \
add_warning
from trac.wiki.formatter import format_to_html
_valid_log_levels = set()
_valid_log_levels.update(log.LOG_LEVELS)
_valid_log_levels.update(log.LOG_LEVEL_ALIASES)
class AdminModule(Component):
"""Web administration interface provider and panel manager."""
implements(INavigationContributor, IRequestHandler, ITemplateProvider)
panel_providers = ExtensionPoint(IAdminPanelProvider)
# INavigationContributor methods
def get_active_navigation_item(self, req):
return 'admin'
def get_navigation_items(self, req):
# The 'Admin' navigation item is only visible if at least one
# admin panel is available
panels, providers = self._get_panels(req)
if panels:
yield 'mainnav', 'admin', tag.a(_("Admin"), href=req.href.admin())
# IRequestHandler methods
def match_request(self, req):
match = re.match('/admin(?:/([^/]+)(?:/([^/]+)(?:/(.+))?)?)?$',
req.path_info)
if match:
req.args['cat_id'] = match.group(1)
req.args['panel_id'] = match.group(2)
req.args['path_info'] = match.group(3)
return True
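    # Example (illustrative): '/admin/general/basics' matches with
    # cat_id='general', panel_id='basics' and path_info=None; a bare '/admin'
    # matches with all three None, in which case process_request() falls back
    # to the first available panel.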
def process_request(self, req):
panels, providers = self._get_panels(req)
if not panels:
raise HTTPNotFound(_("No administration panels available"))
def _panel_order(panel):
items = panel[::2]
return items[0] != 'general', items != ('general', 'basics'), items
panels.sort(key=_panel_order)
cat_id = req.args.get('cat_id') or panels[0][0]
panel_id = req.args.get('panel_id')
path_info = req.args.get('path_info')
if not panel_id:
try:
                # filter() returns an iterator on Python 3 and cannot be
                # subscripted; a list comprehension keeps the IndexError
                # contract of the except clause below.
                panel_id = [panel[2] for panel in panels
                            if panel[0] == cat_id][0]
except IndexError:
raise HTTPNotFound(_("Unknown administration panel"))
provider = providers.get((cat_id, panel_id))
if not provider:
raise HTTPNotFound(_("Unknown administration panel"))
resp = provider.render_admin_panel(req, cat_id, panel_id, path_info)
template, data = resp[:2]
data.update({
'active_cat': cat_id, 'active_panel': panel_id,
'panel_href': partial(req.href, 'admin', cat_id, panel_id),
'panels': [{
'category': {'id': panel[0], 'label': panel[1]},
'panel': {'id': panel[2], 'label': panel[3]}
} for panel in panels]
})
add_stylesheet(req, 'common/css/admin.css')
return resp
# ITemplateProvider methods
def get_htdocs_dirs(self):
return []
def get_templates_dirs(self):
return [pkg_resources.resource_filename('trac.admin', 'templates')]
# Internal methods
def _get_panels(self, req):
"""Return a list of available admin panels."""
panels = []
providers = {}
for provider in self.panel_providers:
p = list(provider.get_admin_panels(req) or [])
for panel in p:
providers[(panel[0], panel[2])] = provider
panels += p
return panels, providers
def _save_config(config, req, log, notices=None):
"""Try to save the config, and display either a success notice or a
failure warning.
"""
try:
config.save()
if notices is None:
notices = [_("Your changes have been saved.")]
for notice in notices:
add_notice(req, notice)
except Exception as e:
log.error("Error writing to trac.ini: %s", exception_to_unicode(e))
add_warning(req, _("Error writing to trac.ini, make sure it is "
"writable by the web server. Your changes have "
"not been saved."))
class BasicsAdminPanel(Component):
implements(IAdminPanelProvider)
request_handlers = ExtensionPoint(IRequestHandler)
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'TRAC_ADMIN' in req.perm('admin', 'general/basics'):
yield ('general', _("General"), 'basics', _("Basic Settings"))
def render_admin_panel(self, req, cat, page, path_info):
valid_default_handlers = [handler.__class__.__name__
for handler in self.request_handlers
if is_valid_default_handler(handler)]
if Locale:
locale_ids = get_available_locales()
locales = [Locale.parse(locale) for locale in locale_ids]
# don't use str(locale) to prevent storing expanded locale
# identifier, see #11258
languages = sorted((id, locale.display_name)
for id, locale in zip(locale_ids, locales))
else:
locale_ids, locales, languages = [], [], []
if req.method == 'POST':
for option in ('name', 'url', 'descr'):
self.config.set('project', option, req.args.get(option))
default_handler = req.args.get('default_handler')
self.config.set('trac', 'default_handler', default_handler)
default_timezone = req.args.get('default_timezone')
if default_timezone not in all_timezones:
default_timezone = ''
self.config.set('trac', 'default_timezone', default_timezone)
default_language = req.args.get('default_language')
if default_language not in locale_ids:
default_language = ''
self.config.set('trac', 'default_language', default_language)
default_date_format = req.args.get('default_date_format')
if default_date_format != 'iso8601':
default_date_format = ''
self.config.set('trac', 'default_date_format',
default_date_format)
default_dateinfo_format = req.args.get('default_dateinfo_format')
if default_dateinfo_format not in ('relative', 'absolute'):
default_dateinfo_format = 'relative'
self.config.set('trac', 'default_dateinfo_format',
default_dateinfo_format)
_save_config(self.config, req, self.log)
req.redirect(req.href.admin(cat, page))
default_handler = self.config.get('trac', 'default_handler')
default_timezone = self.config.get('trac', 'default_timezone')
default_language = self.config.get('trac', 'default_language')
default_date_format = self.config.get('trac', 'default_date_format')
default_dateinfo_format = self.config.get('trac',
'default_dateinfo_format')
data = {
'default_handler': default_handler,
'valid_default_handlers': sorted(valid_default_handlers),
'default_timezone': default_timezone,
'timezones': all_timezones,
'has_pytz': pytz is not None,
'default_language': default_language.replace('-', '_'),
'languages': languages,
'default_date_format': default_date_format,
'default_dateinfo_format': default_dateinfo_format,
'has_babel': Locale is not None,
}
Chrome(self.env).add_textarea_grips(req)
return 'admin_basics.html', data
class LoggingAdminPanel(Component):
implements(IAdminPanelProvider)
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'TRAC_ADMIN' in req.perm('admin', 'general/logging'):
yield ('general', _("General"), 'logging', _("Logging"))
def render_admin_panel(self, req, cat, page, path_info):
log_type = self.env.log_type
log_level = self.env.log_level
log_file = self.env.log_file
log_dir = self.env.log_dir
log_types = [
dict(name='none', label=_("None"),
selected=log_type == 'none', disabled=False),
dict(name='stderr', label=_("Console"),
selected=log_type == 'stderr', disabled=False),
dict(name='file', label=_("File"),
selected=log_type == 'file', disabled=False),
dict(name='syslog', label=_("Syslog"),
selected=log_type in ('unix', 'syslog'),
disabled=os.name != 'posix'),
dict(name='eventlog', label=_("Windows event log"),
selected=log_type in ('winlog', 'eventlog', 'nteventlog'),
disabled=os.name != 'nt'),
]
if req.method == 'POST':
changed = False
new_type = req.args.get('log_type')
if new_type not in [t['name'] for t in log_types]:
raise TracError(
_("Unknown log type %(type)s", type=new_type),
_("Invalid log type")
)
new_file = req.args.get('log_file', log_file)
if not new_file:
raise TracError(_("You must specify a log file"),
_("Missing field"))
new_level = req.args.get('log_level', log_level)
if new_level not in _valid_log_levels:
raise TracError(
_("Unknown log level %(level)s", level=new_level),
_("Invalid log level"))
# Create logger to be sure the configuration is valid.
new_file_path = new_file
if not os.path.isabs(new_file_path):
new_file_path = os.path.join(self.env.log_dir, new_file)
try:
logger, handler = \
self.env.create_logger(new_type, new_file_path, new_level,
self.env.log_format)
except Exception as e:
add_warning(req,
tag_("Changes not saved. Logger configuration "
"error: %(error)s. Inspect the log for more "
"information.",
error=tag.code(exception_to_unicode(e))))
self.log.error("Logger configuration error: %s",
exception_to_unicode(e, traceback=True))
else:
handler.close()
if new_type != log_type:
self.config.set('logging', 'log_type', new_type)
changed = True
log_type = new_type
if new_level != log_level:
self.config.set('logging', 'log_level', new_level)
changed = True
log_level = new_level
if new_file != log_file:
self.config.set('logging', 'log_file', new_file)
changed = True
log_file = new_file
if changed:
                _save_config(self.config, req, self.log)
req.redirect(req.href.admin(cat, page))
# Order log levels by priority value, with aliases excluded.
all_levels = sorted(log.LOG_LEVEL_MAP, key=log.LOG_LEVEL_MAP.get,
reverse=True)
log_levels = [level for level in all_levels if level in log.LOG_LEVELS]
log_level = LOG_LEVEL_ALIASES_MAP.get(log_level, log_level)
data = {
'type': log_type, 'types': log_types,
'level': log_level, 'levels': log_levels,
'file': log_file, 'dir': log_dir
}
return 'admin_logging.html', {'log': data}
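

# Illustration (hypothetical values, not used by Trac): how the ordered level
# list above is derived. The priorities mirror the stdlib logging module, and
# 'WARN' stands in for an alias that the LOG_LEVELS filter drops.
def _example_level_order():
    level_map = {'CRITICAL': 50, 'ERROR': 40, 'WARNING': 30, 'WARN': 30,
                 'INFO': 20, 'DEBUG': 10}
    levels = ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG')
    # sort by priority, highest first; the stable sort keeps 'WARNING'
    # ahead of its equal-priority alias 'WARN'
    ordered = sorted(level_map, key=level_map.get, reverse=True)
    # -> ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']
    return [level for level in ordered if level in levels]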
class PermissionAdminPanel(Component):
implements(IAdminPanelProvider, IPermissionRequestor)
# IPermissionRequestor methods
def get_permission_actions(self):
actions = ['PERMISSION_GRANT', 'PERMISSION_REVOKE']
return actions + [('PERMISSION_ADMIN', actions)]
# IAdminPanelProvider methods
def get_admin_panels(self, req):
perm = req.perm('admin', 'general/perm')
if 'PERMISSION_GRANT' in perm or 'PERMISSION_REVOKE' in perm:
yield ('general', _("General"), 'perm', _("Permissions"))
def render_admin_panel(self, req, cat, page, path_info):
perm = PermissionSystem(self.env)
all_actions = perm.get_actions()
if req.method == 'POST':
subject = req.args.get('subject', '').strip()
target = req.args.get('target', '').strip()
action = req.args.get('action')
group = req.args.get('group', '').strip()
if subject and subject.isupper() or \
group and group.isupper() or \
target and target.isupper():
raise TracError(_("All upper-cased tokens are reserved for "
"permission names."))
# Grant permission to subject
if 'add' in req.args and subject and action:
req.perm('admin', 'general/perm').require('PERMISSION_GRANT')
if action not in all_actions:
raise TracError(_("Unknown action"))
req.perm.require(action)
try:
perm.grant_permission(subject, action)
except TracError as e:
add_warning(req, e)
else:
add_notice(req, _("The subject %(subject)s has been "
"granted the permission %(action)s.",
subject=subject, action=action))
# Add subject to group
elif 'add' in req.args and subject and group:
req.perm('admin', 'general/perm').require('PERMISSION_GRANT')
for action in sorted(
perm.get_user_permissions(group, expand_meta=False)):
req.perm.require(action,
message=tag_(
"The subject %(subject)s was not added to the "
"group %(group)s. The group has %(perm)s "
"permission and you cannot grant permissions you "
"don't possess.", subject=tag.strong(subject),
group=tag.strong(group), perm=tag.strong(action)))
try:
perm.grant_permission(subject, group)
except TracError as e:
add_warning(req, e)
else:
add_notice(req, _("The subject %(subject)s has been "
"added to the group %(group)s.",
subject=subject, group=group))
# Copy permissions to subject
elif 'copy' in req.args and subject and target:
req.perm('admin', 'general/perm').require('PERMISSION_GRANT')
subject_permissions = perm.get_users_dict().get(subject, [])
if not subject_permissions:
add_warning(req, _("The subject %(subject)s does not "
"have any permissions.",
subject=subject))
for action in subject_permissions:
if action not in all_actions: # plugin disabled?
self.log.warning("Skipped granting %s to %s: "
"permission unavailable.",
action, target)
else:
if action not in req.perm:
add_warning(req,
_("The permission %(action)s was "
"not granted to %(subject)s "
"because users cannot grant "
"permissions they don't possess.",
action=action, subject=subject))
continue
try:
perm.grant_permission(target, action)
except PermissionExistsError:
pass
else:
add_notice(req, _("The subject %(subject)s has "
"been granted the permission "
"%(action)s.",
subject=target, action=action))
req.redirect(req.href.admin(cat, page))
# Remove permissions action
elif 'remove' in req.args and 'sel' in req.args:
req.perm('admin', 'general/perm').require('PERMISSION_REVOKE')
for key in req.args.getlist('sel'):
subject, action = key.split(':', 1)
subject = unicode_from_base64(subject)
action = unicode_from_base64(action)
if (subject, action) in perm.get_all_permissions():
perm.revoke_permission(subject, action)
add_notice(req, _("The selected permissions have been "
"revoked."))
req.redirect(req.href.admin(cat, page))
return 'admin_perms.html', {
'actions': all_actions,
'allowed_actions': [a for a in all_actions if a in req.perm],
'perms': perm.get_users_dict(),
'groups': perm.get_groups_dict(),
'unicode_to_base64': unicode_to_base64
}
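

# Illustration (hypothetical helper, not part of Trac): the checkbox values
# handled by the "remove" branch above encode each (subject, action) pair as
# "<base64(subject)>:<base64(action)>". Base64 output never contains ':', so
# split(':', 1) recovers the pair even when the subject itself contains ':'.
# unicode_to_base64 / unicode_from_base64 are imported at the top of this
# module.
def _example_perm_key(subject, action):
    key = '%s:%s' % (unicode_to_base64(subject), unicode_to_base64(action))
    return tuple(unicode_from_base64(part) for part in key.split(':', 1))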
class PluginAdminPanel(Component):
implements(IAdminPanelProvider)
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'TRAC_ADMIN' in req.perm('admin', 'general/plugin'):
yield ('general', _("General"), 'plugin', _("Plugins"))
def render_admin_panel(self, req, cat, page, path_info):
if req.method == 'POST':
if 'install' in req.args:
self._do_install(req)
elif 'uninstall' in req.args:
self._do_uninstall(req)
else:
self._do_update(req)
anchor = ''
if 'plugin' in req.args:
anchor = '#no%d' % (req.args.getint('plugin') + 1)
req.redirect(req.href.admin(cat, page) + anchor)
return self._render_view(req)
# Internal methods
def _do_install(self, req):
"""Install a plugin."""
if 'plugin_file' not in req.args:
raise TracError(_("No file uploaded"))
upload = req.args['plugin_file']
if isinstance(upload, unicode) or not upload.filename:
raise TracError(_("No file uploaded"))
plugin_filename = upload.filename.replace('\\', '/').replace(':', '/')
plugin_filename = os.path.basename(plugin_filename)
if not plugin_filename:
raise TracError(_("No file uploaded"))
if not plugin_filename.endswith('.egg') and \
not plugin_filename.endswith('.py'):
raise TracError(_("Uploaded file is not a Python source file or "
"egg"))
target_path = os.path.join(self.env.plugins_dir, plugin_filename)
if os.path.isfile(target_path):
raise TracError(_("Plugin %(name)s already installed",
name=plugin_filename))
self.log.info("Installing plugin %s", plugin_filename)
flags = os.O_CREAT + os.O_WRONLY + os.O_EXCL
try:
flags += os.O_BINARY
except AttributeError:
            # os.O_BINARY is Windows-only, not available on every platform
pass
with os.fdopen(os.open(target_path, flags, 0o666), 'w') as target_file:
shutil.copyfileobj(upload.file, target_file)
self.log.info("Plugin %s installed to %s", plugin_filename,
target_path)
# TODO: Validate that the uploaded file is a valid Trac plugin
# Make the environment reset itself on the next request
self.env.config.touch()
def _do_uninstall(self, req):
"""Uninstall a plugin."""
plugin_filename = req.args.get('plugin_filename')
if not plugin_filename:
return
plugin_path = os.path.join(self.env.plugins_dir, plugin_filename)
if not os.path.isfile(plugin_path):
return
self.log.info("Uninstalling plugin %s", plugin_filename)
os.remove(plugin_path)
# Make the environment reset itself on the next request
self.env.config.touch()
def _do_update(self, req):
"""Update component enable state."""
components = req.args.getlist('component')
enabled = req.args.getlist('enable')
added, removed = [], []
# FIXME: this needs to be more intelligent and minimize multiple
# component names to prefix rules
for component in components:
is_enabled = bool(self.env.is_component_enabled(component))
must_enable = component in enabled
if is_enabled != must_enable:
self.config.set('components', component,
'disabled' if is_enabled else 'enabled')
self.log.info("%sabling component %s",
"Dis" if is_enabled else "En", component)
if must_enable:
added.append(component)
else:
removed.append(component)
if added or removed:
def make_list(items):
parts = [item.rsplit('.', 1) for item in items]
return tag.table(tag.tbody(
tag.tr(tag.td(c, class_='trac-name'),
tag.td('(%s.*)' % m, class_='trac-name'))
for m, c in parts), class_='trac-pluglist')
added.sort()
removed.sort()
notices = []
if removed:
msg = ngettext("The following component has been disabled:",
"The following components have been disabled:",
len(removed))
notices.append(tag(msg, make_list(removed)))
if added:
msg = ngettext("The following component has been enabled:",
"The following components have been enabled:",
len(added))
notices.append(tag(msg, make_list(added)))
            # set default option values only for the newly enabled components
for component in added:
self.config.set_defaults(component=component)
_save_config(self.config, req, self.log, notices)
def _render_view(self, req):
plugins = get_plugin_info(self.env, include_core=True)
def safe_wiki_to_html(context, text):
try:
return format_to_html(self.env, context, text)
except Exception as e:
self.log.error("Unable to render component documentation: %s",
exception_to_unicode(e, traceback=True))
return tag.pre(text)
data = {
'plugins': plugins, 'show': req.args.get('show'),
'readonly': not os.access(self.env.plugins_dir,
os.F_OK + os.W_OK),
'safe_wiki_to_html': safe_wiki_to_html,
}
return 'admin_plugins.html', data
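

# Minimal sketch (hypothetical, not part of Trac) of the IAdminPanelProvider
# contract that every panel in this module implements: advertise a
# (category, category label, page, page label) tuple, then render the page.
class _ExamplePanel(Component):
    implements(IAdminPanelProvider)

    def get_admin_panels(self, req):
        if 'TRAC_ADMIN' in req.perm:
            yield ('general', _("General"), 'example', _("Example"))

    def render_admin_panel(self, req, cat, page, path_info):
        return 'admin_example.html', {}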
| 41.230769
| 79
| 0.551286
|
import os
import pkg_resources
import re
import shutil
from functools import partial
from trac import log
from trac.admin.api import IAdminPanelProvider
from trac.core import *
from trac.loader import get_plugin_info
from trac.log import LOG_LEVELS, LOG_LEVEL_ALIASES, LOG_LEVEL_ALIASES_MAP
from trac.perm import IPermissionRequestor, PermissionExistsError, \
PermissionSystem
from trac.util.datefmt import all_timezones, pytz
from trac.util.html import tag
from trac.util.text import exception_to_unicode, unicode_from_base64, \
unicode_to_base64
from trac.util.translation import _, Locale, get_available_locales, \
ngettext, tag_
from trac.web.api import HTTPNotFound, IRequestHandler, \
is_valid_default_handler
from trac.web.chrome import Chrome, INavigationContributor, \
ITemplateProvider, add_notice, add_stylesheet, \
add_warning
from trac.wiki.formatter import format_to_html
_valid_log_levels = set()
_valid_log_levels.update(log.LOG_LEVELS)
_valid_log_levels.update(log.LOG_LEVEL_ALIASES)
class AdminModule(Component):
implements(INavigationContributor, IRequestHandler, ITemplateProvider)
panel_providers = ExtensionPoint(IAdminPanelProvider)
def get_active_navigation_item(self, req):
return 'admin'
def get_navigation_items(self, req):
panels, providers = self._get_panels(req)
if panels:
yield 'mainnav', 'admin', tag.a(_("Admin"), href=req.href.admin())
def match_request(self, req):
match = re.match('/admin(?:/([^/]+)(?:/([^/]+)(?:/(.+))?)?)?$',
req.path_info)
if match:
req.args['cat_id'] = match.group(1)
req.args['panel_id'] = match.group(2)
req.args['path_info'] = match.group(3)
return True
def process_request(self, req):
panels, providers = self._get_panels(req)
if not panels:
raise HTTPNotFound(_("No administration panels available"))
def _panel_order(panel):
items = panel[::2]
return items[0] != 'general', items != ('general', 'basics'), items
panels.sort(key=_panel_order)
cat_id = req.args.get('cat_id') or panels[0][0]
panel_id = req.args.get('panel_id')
path_info = req.args.get('path_info')
if not panel_id:
try:
panel_id = \
filter(lambda panel: panel[0] == cat_id, panels)[0][2]
except IndexError:
raise HTTPNotFound(_("Unknown administration panel"))
provider = providers.get((cat_id, panel_id))
if not provider:
raise HTTPNotFound(_("Unknown administration panel"))
resp = provider.render_admin_panel(req, cat_id, panel_id, path_info)
template, data = resp[:2]
data.update({
'active_cat': cat_id, 'active_panel': panel_id,
'panel_href': partial(req.href, 'admin', cat_id, panel_id),
'panels': [{
'category': {'id': panel[0], 'label': panel[1]},
'panel': {'id': panel[2], 'label': panel[3]}
} for panel in panels]
})
add_stylesheet(req, 'common/css/admin.css')
return resp
def get_htdocs_dirs(self):
return []
def get_templates_dirs(self):
return [pkg_resources.resource_filename('trac.admin', 'templates')]
def _get_panels(self, req):
panels = []
providers = {}
for provider in self.panel_providers:
p = list(provider.get_admin_panels(req) or [])
for panel in p:
providers[(panel[0], panel[2])] = provider
panels += p
return panels, providers
def _save_config(config, req, log, notices=None):
try:
config.save()
if notices is None:
notices = [_("Your changes have been saved.")]
for notice in notices:
add_notice(req, notice)
except Exception as e:
log.error("Error writing to trac.ini: %s", exception_to_unicode(e))
add_warning(req, _("Error writing to trac.ini, make sure it is "
"writable by the web server. Your changes have "
"not been saved."))
class BasicsAdminPanel(Component):
implements(IAdminPanelProvider)
request_handlers = ExtensionPoint(IRequestHandler)
def get_admin_panels(self, req):
if 'TRAC_ADMIN' in req.perm('admin', 'general/basics'):
yield ('general', _("General"), 'basics', _("Basic Settings"))
def render_admin_panel(self, req, cat, page, path_info):
valid_default_handlers = [handler.__class__.__name__
for handler in self.request_handlers
if is_valid_default_handler(handler)]
if Locale:
locale_ids = get_available_locales()
locales = [Locale.parse(locale) for locale in locale_ids]
            # don't use str(locale) to prevent storing expanded locale
            # identifier, see #11258
languages = sorted((id, locale.display_name)
for id, locale in zip(locale_ids, locales))
else:
locale_ids, locales, languages = [], [], []
if req.method == 'POST':
for option in ('name', 'url', 'descr'):
self.config.set('project', option, req.args.get(option))
default_handler = req.args.get('default_handler')
self.config.set('trac', 'default_handler', default_handler)
default_timezone = req.args.get('default_timezone')
if default_timezone not in all_timezones:
default_timezone = ''
self.config.set('trac', 'default_timezone', default_timezone)
default_language = req.args.get('default_language')
if default_language not in locale_ids:
default_language = ''
self.config.set('trac', 'default_language', default_language)
default_date_format = req.args.get('default_date_format')
if default_date_format != 'iso8601':
default_date_format = ''
self.config.set('trac', 'default_date_format',
default_date_format)
default_dateinfo_format = req.args.get('default_dateinfo_format')
if default_dateinfo_format not in ('relative', 'absolute'):
default_dateinfo_format = 'relative'
self.config.set('trac', 'default_dateinfo_format',
default_dateinfo_format)
_save_config(self.config, req, self.log)
req.redirect(req.href.admin(cat, page))
default_handler = self.config.get('trac', 'default_handler')
default_timezone = self.config.get('trac', 'default_timezone')
default_language = self.config.get('trac', 'default_language')
default_date_format = self.config.get('trac', 'default_date_format')
default_dateinfo_format = self.config.get('trac',
'default_dateinfo_format')
data = {
'default_handler': default_handler,
'valid_default_handlers': sorted(valid_default_handlers),
'default_timezone': default_timezone,
'timezones': all_timezones,
'has_pytz': pytz is not None,
'default_language': default_language.replace('-', '_'),
'languages': languages,
'default_date_format': default_date_format,
'default_dateinfo_format': default_dateinfo_format,
'has_babel': Locale is not None,
}
Chrome(self.env).add_textarea_grips(req)
return 'admin_basics.html', data
class LoggingAdminPanel(Component):
implements(IAdminPanelProvider)
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'TRAC_ADMIN' in req.perm('admin', 'general/logging'):
yield ('general', _("General"), 'logging', _("Logging"))
def render_admin_panel(self, req, cat, page, path_info):
log_type = self.env.log_type
log_level = self.env.log_level
log_file = self.env.log_file
log_dir = self.env.log_dir
log_types = [
dict(name='none', label=_("None"),
selected=log_type == 'none', disabled=False),
dict(name='stderr', label=_("Console"),
selected=log_type == 'stderr', disabled=False),
dict(name='file', label=_("File"),
selected=log_type == 'file', disabled=False),
dict(name='syslog', label=_("Syslog"),
selected=log_type in ('unix', 'syslog'),
disabled=os.name != 'posix'),
dict(name='eventlog', label=_("Windows event log"),
selected=log_type in ('winlog', 'eventlog', 'nteventlog'),
disabled=os.name != 'nt'),
]
if req.method == 'POST':
changed = False
new_type = req.args.get('log_type')
if new_type not in [t['name'] for t in log_types]:
raise TracError(
_("Unknown log type %(type)s", type=new_type),
_("Invalid log type")
)
new_file = req.args.get('log_file', log_file)
if not new_file:
raise TracError(_("You must specify a log file"),
_("Missing field"))
new_level = req.args.get('log_level', log_level)
if new_level not in _valid_log_levels:
raise TracError(
_("Unknown log level %(level)s", level=new_level),
_("Invalid log level"))
# Create logger to be sure the configuration is valid.
new_file_path = new_file
if not os.path.isabs(new_file_path):
new_file_path = os.path.join(self.env.log_dir, new_file)
try:
logger, handler = \
self.env.create_logger(new_type, new_file_path, new_level,
self.env.log_format)
except Exception as e:
add_warning(req,
tag_("Changes not saved. Logger configuration "
"error: %(error)s. Inspect the log for more "
"information.",
error=tag.code(exception_to_unicode(e))))
self.log.error("Logger configuration error: %s",
exception_to_unicode(e, traceback=True))
else:
handler.close()
if new_type != log_type:
self.config.set('logging', 'log_type', new_type)
changed = True
log_type = new_type
if new_level != log_level:
self.config.set('logging', 'log_level', new_level)
changed = True
log_level = new_level
if new_file != log_file:
self.config.set('logging', 'log_file', new_file)
changed = True
log_file = new_file
if changed:
                _save_config(self.config, req, self.log)
req.redirect(req.href.admin(cat, page))
# Order log levels by priority value, with aliases excluded.
all_levels = sorted(log.LOG_LEVEL_MAP, key=log.LOG_LEVEL_MAP.get,
reverse=True)
log_levels = [level for level in all_levels if level in log.LOG_LEVELS]
log_level = LOG_LEVEL_ALIASES_MAP.get(log_level, log_level)
data = {
'type': log_type, 'types': log_types,
'level': log_level, 'levels': log_levels,
'file': log_file, 'dir': log_dir
}
return 'admin_logging.html', {'log': data}
class PermissionAdminPanel(Component):
implements(IAdminPanelProvider, IPermissionRequestor)
# IPermissionRequestor methods
def get_permission_actions(self):
actions = ['PERMISSION_GRANT', 'PERMISSION_REVOKE']
return actions + [('PERMISSION_ADMIN', actions)]
# IAdminPanelProvider methods
def get_admin_panels(self, req):
perm = req.perm('admin', 'general/perm')
if 'PERMISSION_GRANT' in perm or 'PERMISSION_REVOKE' in perm:
yield ('general', _("General"), 'perm', _("Permissions"))
def render_admin_panel(self, req, cat, page, path_info):
perm = PermissionSystem(self.env)
all_actions = perm.get_actions()
if req.method == 'POST':
subject = req.args.get('subject', '').strip()
target = req.args.get('target', '').strip()
action = req.args.get('action')
group = req.args.get('group', '').strip()
if subject and subject.isupper() or \
group and group.isupper() or \
target and target.isupper():
raise TracError(_("All upper-cased tokens are reserved for "
"permission names."))
# Grant permission to subject
if 'add' in req.args and subject and action:
req.perm('admin', 'general/perm').require('PERMISSION_GRANT')
if action not in all_actions:
raise TracError(_("Unknown action"))
req.perm.require(action)
try:
perm.grant_permission(subject, action)
except TracError as e:
add_warning(req, e)
else:
add_notice(req, _("The subject %(subject)s has been "
"granted the permission %(action)s.",
subject=subject, action=action))
# Add subject to group
elif 'add' in req.args and subject and group:
req.perm('admin', 'general/perm').require('PERMISSION_GRANT')
for action in sorted(
perm.get_user_permissions(group, expand_meta=False)):
req.perm.require(action,
message=tag_(
"The subject %(subject)s was not added to the "
"group %(group)s. The group has %(perm)s "
"permission and you cannot grant permissions you "
"don't possess.", subject=tag.strong(subject),
group=tag.strong(group), perm=tag.strong(action)))
try:
perm.grant_permission(subject, group)
except TracError as e:
add_warning(req, e)
else:
add_notice(req, _("The subject %(subject)s has been "
"added to the group %(group)s.",
subject=subject, group=group))
elif 'copy' in req.args and subject and target:
req.perm('admin', 'general/perm').require('PERMISSION_GRANT')
subject_permissions = perm.get_users_dict().get(subject, [])
if not subject_permissions:
add_warning(req, _("The subject %(subject)s does not "
"have any permissions.",
subject=subject))
for action in subject_permissions:
if action not in all_actions:
self.log.warning("Skipped granting %s to %s: "
"permission unavailable.",
action, target)
else:
if action not in req.perm:
add_warning(req,
_("The permission %(action)s was "
"not granted to %(subject)s "
"because users cannot grant "
"permissions they don't possess.",
action=action, subject=subject))
continue
try:
perm.grant_permission(target, action)
except PermissionExistsError:
pass
else:
add_notice(req, _("The subject %(subject)s has "
"been granted the permission "
"%(action)s.",
subject=target, action=action))
req.redirect(req.href.admin(cat, page))
# Remove permissions action
elif 'remove' in req.args and 'sel' in req.args:
req.perm('admin', 'general/perm').require('PERMISSION_REVOKE')
for key in req.args.getlist('sel'):
subject, action = key.split(':', 1)
subject = unicode_from_base64(subject)
action = unicode_from_base64(action)
if (subject, action) in perm.get_all_permissions():
perm.revoke_permission(subject, action)
add_notice(req, _("The selected permissions have been "
"revoked."))
req.redirect(req.href.admin(cat, page))
return 'admin_perms.html', {
'actions': all_actions,
'allowed_actions': [a for a in all_actions if a in req.perm],
'perms': perm.get_users_dict(),
'groups': perm.get_groups_dict(),
'unicode_to_base64': unicode_to_base64
}
class PluginAdminPanel(Component):
implements(IAdminPanelProvider)
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'TRAC_ADMIN' in req.perm('admin', 'general/plugin'):
yield ('general', _("General"), 'plugin', _("Plugins"))
def render_admin_panel(self, req, cat, page, path_info):
if req.method == 'POST':
if 'install' in req.args:
self._do_install(req)
elif 'uninstall' in req.args:
self._do_uninstall(req)
else:
self._do_update(req)
anchor = ''
if 'plugin' in req.args:
                anchor = '#no%d' % (req.args.getint('plugin') + 1)
req.redirect(req.href.admin(cat, page) + anchor)
return self._render_view(req)
# Internal methods
def _do_install(self, req):
if 'plugin_file' not in req.args:
raise TracError(_("No file uploaded"))
upload = req.args['plugin_file']
if isinstance(upload, unicode) or not upload.filename:
raise TracError(_("No file uploaded"))
plugin_filename = upload.filename.replace('\\', '/').replace(':', '/')
plugin_filename = os.path.basename(plugin_filename)
if not plugin_filename:
raise TracError(_("No file uploaded"))
if not plugin_filename.endswith('.egg') and \
not plugin_filename.endswith('.py'):
raise TracError(_("Uploaded file is not a Python source file or "
"egg"))
target_path = os.path.join(self.env.plugins_dir, plugin_filename)
if os.path.isfile(target_path):
raise TracError(_("Plugin %(name)s already installed",
name=plugin_filename))
self.log.info("Installing plugin %s", plugin_filename)
flags = os.O_CREAT + os.O_WRONLY + os.O_EXCL
try:
flags += os.O_BINARY
except AttributeError:
            # os.O_BINARY is Windows-only, not available on every platform
pass
with os.fdopen(os.open(target_path, flags, 0o666), 'w') as target_file:
shutil.copyfileobj(upload.file, target_file)
self.log.info("Plugin %s installed to %s", plugin_filename,
target_path)
# TODO: Validate that the uploaded file is a valid Trac plugin
# Make the environment reset itself on the next request
self.env.config.touch()
def _do_uninstall(self, req):
plugin_filename = req.args.get('plugin_filename')
if not plugin_filename:
return
plugin_path = os.path.join(self.env.plugins_dir, plugin_filename)
if not os.path.isfile(plugin_path):
return
self.log.info("Uninstalling plugin %s", plugin_filename)
os.remove(plugin_path)
# Make the environment reset itself on the next request
self.env.config.touch()
def _do_update(self, req):
components = req.args.getlist('component')
enabled = req.args.getlist('enable')
added, removed = [], []
# FIXME: this needs to be more intelligent and minimize multiple
# component names to prefix rules
for component in components:
is_enabled = bool(self.env.is_component_enabled(component))
must_enable = component in enabled
if is_enabled != must_enable:
self.config.set('components', component,
'disabled' if is_enabled else 'enabled')
self.log.info("%sabling component %s",
"Dis" if is_enabled else "En", component)
if must_enable:
added.append(component)
else:
removed.append(component)
if added or removed:
def make_list(items):
parts = [item.rsplit('.', 1) for item in items]
return tag.table(tag.tbody(
tag.tr(tag.td(c, class_='trac-name'),
tag.td('(%s.*)' % m, class_='trac-name'))
for m, c in parts), class_='trac-pluglist')
added.sort()
removed.sort()
notices = []
if removed:
msg = ngettext("The following component has been disabled:",
"The following components have been disabled:",
len(removed))
notices.append(tag(msg, make_list(removed)))
if added:
msg = ngettext("The following component has been enabled:",
"The following components have been enabled:",
len(added))
notices.append(tag(msg, make_list(added)))
            # set default option values only for the newly enabled components
for component in added:
self.config.set_defaults(component=component)
_save_config(self.config, req, self.log, notices)
def _render_view(self, req):
plugins = get_plugin_info(self.env, include_core=True)
def safe_wiki_to_html(context, text):
try:
return format_to_html(self.env, context, text)
except Exception as e:
self.log.error("Unable to render component documentation: %s",
exception_to_unicode(e, traceback=True))
return tag.pre(text)
data = {
'plugins': plugins, 'show': req.args.get('show'),
'readonly': not os.access(self.env.plugins_dir,
os.F_OK + os.W_OK),
'safe_wiki_to_html': safe_wiki_to_html,
}
return 'admin_plugins.html', data
| true
| true
|
1c45b215becc81148e7aeae262a82262f980a51d
| 2,641
|
py
|
Python
|
render.py
|
araistrick/camera_pan_renderer
|
900c6c064ac7d2b460087a16be49204276679e04
|
[
"BSD-3-Clause"
] | 2
|
2021-10-15T22:49:05.000Z
|
2022-02-28T20:26:53.000Z
|
render.py
|
araistrick/camera_pan_renderer
|
900c6c064ac7d2b460087a16be49204276679e04
|
[
"BSD-3-Clause"
] | null | null | null |
render.py
|
araistrick/camera_pan_renderer
|
900c6c064ac7d2b460087a16be49204276679e04
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import argparse
from pathlib import Path
import bpy
import numpy as np
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def use_cuda():
bpy.context.preferences.addons["cycles"].preferences.compute_device_type = "CUDA"
print(bpy.context.preferences.addons["cycles"].preferences.get_devices())
bpy.context.preferences.addons["cycles"].preferences.devices[0].use = True
bpy.context.scene.cycles.device = "GPU"
    bpy.context.scene.render.tile_x = 128
    bpy.context.scene.render.tile_y = 128  # original set tile_x twice; tile_y was almost certainly intended
print('Using GPU device:', bpy.context.preferences.addons["cycles"].preferences.devices[0])
def select_none():
for obj in bpy.data.objects:
obj.select_set(False)
def render_ply(args, ply_path):
ply_name = ply_path.parts[-1]
    ply_id = '_'.join(ply_name.split('_')[1:])  # note: keeps the .ply suffix in the id
# import the requisite ply
select_none()
print(f"Importing {ply_path}")
bpy.ops.import_mesh.ply(filepath=str(ply_path))
imported_ply = bpy.context.selected_objects[0]
# rotate it correctly
imported_ply.rotation_euler = np.radians(np.array(args.override_ply_euler))
# make it colored according to vertex colors
material = next(m for m in bpy.data.materials if m.name == args.template_material_name)
if imported_ply.data.materials:
imported_ply.data.materials[0] = material
else:
imported_ply.data.materials.append(material)
# configure render output location
outpath = Path(args.output_folder)/ply_id
outpath.mkdir(exist_ok=True, parents=True)
bpy.context.scene.render.filepath = str(outpath) + '/'
bpy.ops.render.render(animation=True, write_still=True)
# clean up
select_none()
imported_ply.select_set(True)
bpy.ops.object.delete()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_folder', type=str)
parser.add_argument('output_folder', type=str)
parser.add_argument('--template_file', type=str, default='template.blend')
parser.add_argument('--override_ply_euler', type=int, nargs='+', default=[90, 0, 0])
parser.add_argument('--template_material_name', type=str, default='vertex color')
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()
bpy.ops.wm.open_mainfile(filepath=args.template_file)
if args.cuda:
use_cuda()
input_paths = list(Path(args.input_folder).glob('*.ply'))
print(f"Starting processing of {len(input_paths)} .plys from {args.input_folder}")
for ply_path in input_paths:
render_ply(args, ply_path)
if __name__ == '__main__':
main()
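

# Example invocation (hypothetical paths; assumes an interpreter where the
# "bpy" module is importable, e.g. Blender's bundled Python):
#   python render.py ./plys ./renders --template_file template.blend --cuda
# Each mesh in ./plys is imported, shaded with the template's vertex-color
# material, and its animation frames are written under ./renders/, in a
# subfolder named after everything past the first '_' in the file name.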
| 33.43038
| 95
| 0.710716
|
import os
import argparse
from pathlib import Path
import bpy
import numpy as np
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def use_cuda():
bpy.context.preferences.addons["cycles"].preferences.compute_device_type = "CUDA"
print(bpy.context.preferences.addons["cycles"].preferences.get_devices())
bpy.context.preferences.addons["cycles"].preferences.devices[0].use = True
bpy.context.scene.cycles.device = "GPU"
    bpy.context.scene.render.tile_x = 128
    bpy.context.scene.render.tile_y = 128
print('Using GPU device:', bpy.context.preferences.addons["cycles"].preferences.devices[0])
def select_none():
for obj in bpy.data.objects:
obj.select_set(False)
def render_ply(args, ply_path):
ply_name = ply_path.parts[-1]
    ply_id = '_'.join(ply_name.split('_')[1:])
select_none()
print(f"Importing {ply_path}")
bpy.ops.import_mesh.ply(filepath=str(ply_path))
imported_ply = bpy.context.selected_objects[0]
imported_ply.rotation_euler = np.radians(np.array(args.override_ply_euler))
material = next(m for m in bpy.data.materials if m.name == args.template_material_name)
if imported_ply.data.materials:
imported_ply.data.materials[0] = material
else:
imported_ply.data.materials.append(material)
outpath = Path(args.output_folder)/ply_id
outpath.mkdir(exist_ok=True, parents=True)
bpy.context.scene.render.filepath = str(outpath) + '/'
bpy.ops.render.render(animation=True, write_still=True)
select_none()
imported_ply.select_set(True)
bpy.ops.object.delete()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_folder', type=str)
parser.add_argument('output_folder', type=str)
parser.add_argument('--template_file', type=str, default='template.blend')
parser.add_argument('--override_ply_euler', type=int, nargs='+', default=[90, 0, 0])
parser.add_argument('--template_material_name', type=str, default='vertex color')
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()
bpy.ops.wm.open_mainfile(filepath=args.template_file)
if args.cuda:
use_cuda()
input_paths = list(Path(args.input_folder).glob('*.ply'))
print(f"Starting processing of {len(input_paths)} .plys from {args.input_folder}")
for ply_path in input_paths:
render_ply(args, ply_path)
if __name__ == '__main__':
main()
| true
| true
|
1c45b269ee0360c0a0e853445b9985838bcb82f4
| 1,210
|
py
|
Python
|
examples/tutorials/pong/steps/step4/main.py
|
xinmingzhang/kivy
|
86b6e19d8a02788fe8850b690bcecdff848f3c4e
|
[
"MIT"
] | 9
|
2016-09-03T07:20:01.000Z
|
2020-05-21T14:44:48.000Z
|
examples/tutorials/pong/steps/step4/main.py
|
xinmingzhang/kivy
|
86b6e19d8a02788fe8850b690bcecdff848f3c4e
|
[
"MIT"
] | 1
|
2017-05-30T20:45:15.000Z
|
2017-05-30T20:45:15.000Z
|
examples/tutorials/pong/steps/step4/main.py
|
xinmingzhang/kivy
|
86b6e19d8a02788fe8850b690bcecdff848f3c4e
|
[
"MIT"
] | 4
|
2016-09-10T15:27:54.000Z
|
2020-03-27T22:05:31.000Z
|
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty,\
ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from random import randint
class PongBall(Widget):
velocity_x = NumericProperty(0)
velocity_y = NumericProperty(0)
velocity = ReferenceListProperty(velocity_x, velocity_y)
def move(self):
self.pos = Vector(*self.velocity) + self.pos
class PongGame(Widget):
ball = ObjectProperty(None)
def serve_ball(self):
self.ball.center = self.center
self.ball.velocity = Vector(4, 0).rotate(randint(0, 360))
def update(self, dt):
self.ball.move()
# bounce off top and bottom
if (self.ball.y < 0) or (self.ball.top > self.height):
self.ball.velocity_y *= -1
# bounce off left and right
if (self.ball.x < 0) or (self.ball.right > self.width):
self.ball.velocity_x *= -1
class PongApp(App):
def build(self):
game = PongGame()
game.serve_ball()
Clock.schedule_interval(game.update, 1.0 / 60.0)
return game
if __name__ == '__main__':
PongApp().run()
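
# Note: this tutorial step expects the companion pong.kv file to build the
# PongBall widget and bind it to PongGame.ball; without that rule, `ball`
# stays None and update() fails on self.ball.move().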
| 25.208333
| 68
| 0.65124
|
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty,\
ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from random import randint
class PongBall(Widget):
velocity_x = NumericProperty(0)
velocity_y = NumericProperty(0)
velocity = ReferenceListProperty(velocity_x, velocity_y)
def move(self):
self.pos = Vector(*self.velocity) + self.pos
class PongGame(Widget):
ball = ObjectProperty(None)
def serve_ball(self):
self.ball.center = self.center
self.ball.velocity = Vector(4, 0).rotate(randint(0, 360))
def update(self, dt):
self.ball.move()
if (self.ball.y < 0) or (self.ball.top > self.height):
self.ball.velocity_y *= -1
if (self.ball.x < 0) or (self.ball.right > self.width):
self.ball.velocity_x *= -1
class PongApp(App):
def build(self):
game = PongGame()
game.serve_ball()
Clock.schedule_interval(game.update, 1.0 / 60.0)
return game
if __name__ == '__main__':
PongApp().run()
| true
| true
|
1c45b360ed6e478c667bfe1ca7f4f430632593d1
| 10,978
|
py
|
Python
|
packages/python/plotly/plotly/validators/_candlestick.py
|
c-chaitanya/plotly.py
|
7bda89c77559747e67fb1608bf9309e97505a4f2
|
[
"MIT"
] | 7
|
2021-09-29T09:46:36.000Z
|
2022-03-24T08:30:41.000Z
|
packages/python/plotly/plotly/validators/_candlestick.py
|
c-chaitanya/plotly.py
|
7bda89c77559747e67fb1608bf9309e97505a4f2
|
[
"MIT"
] | 1
|
2021-09-30T16:56:21.000Z
|
2021-10-15T09:14:12.000Z
|
packages/python/plotly/plotly/validators/_candlestick.py
|
c-chaitanya/plotly.py
|
7bda89c77559747e67fb1608bf9309e97505a4f2
|
[
"MIT"
] | 1
|
2021-09-29T22:34:05.000Z
|
2021-09-29T22:34:05.000Z
|
import _plotly_utils.basevalidators
class CandlestickValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="candlestick", parent_name="", **kwargs):
super(CandlestickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Candlestick"),
data_docs=kwargs.pop(
"data_docs",
"""
close
Sets the close values.
closesrc
Sets the source reference on Chart Studio Cloud
for close .
customdata
                Assigns extra data to each datum. This may be
                useful when listening to hover, click and
                selection events. Note that "scatter" traces
                also append customdata items in the markers
                DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud
for customdata .
decreasing
:class:`plotly.graph_objects.candlestick.Decrea
sing` instance or dict with compatible
properties
high
Sets the high values.
highsrc
Sets the source reference on Chart Studio Cloud
for high .
hoverinfo
                Determines which trace information appears on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud
for hoverinfo .
hoverlabel
:class:`plotly.graph_objects.candlestick.Hoverl
abel` instance or dict with compatible
properties
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud
for hovertext .
ids
                Assigns id labels to each datum. These ids
                provide object constancy of data points
                during animation. Should be an array of
                strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud
for ids .
increasing
:class:`plotly.graph_objects.candlestick.Increa
sing` instance or dict with compatible
properties
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
legendrank
Sets the legend rank for this trace. Items and
groups with smaller ranks are presented on
top/left side while with `*reversed*
`legend.traceorder` they are on bottom/right
side. The default legendrank is 1000, so that
you can use ranks less than 1000 to place
certain items before all unranked items, and
ranks greater than 1000 to go after all
unranked items.
line
:class:`plotly.graph_objects.candlestick.Line`
instance or dict with compatible properties
low
Sets the low values.
lowsrc
Sets the source reference on Chart Studio Cloud
for low .
meta
Assigns extra meta information associated with
this trace that can be used in various text
attributes. Attributes such as trace `name`,
graph, axis and colorbar `title.text`,
annotation `text` `rangeselector`,
                `updatemenus` and `sliders` `label` text all
support `meta`. To access the trace `meta`
values in an attribute in the same trace,
simply use `%{meta[i]}` where `i` is the index
or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
                `%{data[n].meta[i]}` where `i` is the index or
key of the `meta` and `n` is the trace index.
metasrc
Sets the source reference on Chart Studio Cloud
for meta .
name
                Sets the trace name. The trace name appears as
the legend item and on hover.
opacity
Sets the opacity of the trace.
open
Sets the open values.
opensrc
Sets the source reference on Chart Studio Cloud
for open .
selectedpoints
Array containing integer indices of selected
points. Has an effect only for traces that
support selections. Note that an empty array
means an empty selection where the `unselected`
                are turned on for all points, whereas any
                other non-array value means no selection at
                all, where the `selected` and `unselected`
                styles have no effect.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
stream
:class:`plotly.graph_objects.candlestick.Stream
` instance or dict with compatible properties
text
Sets hover text elements associated with each
sample point. If a single string, the same
string appears over all the data points. If an
                array of strings, the items are mapped in order
to this trace's sample points.
textsrc
Sets the source reference on Chart Studio Cloud
for text .
uid
Assign an id to this trace, Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
whiskerwidth
Sets the width of the whiskers relative to the
                box's width. For example, with 1, the whiskers
are as wide as the box(es).
x
Sets the x coordinates. If absent, linear
coordinate will be generated.
xaxis
Sets a reference between this trace's x
coordinates and a 2D cartesian x axis. If "x"
(the default value), the x coordinates refer to
`layout.xaxis`. If "x2", the x coordinates
refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date
data.
xhoverformat
Sets the hover text formatting rule for `x`
using d3 formatting mini-languages which are
very similar to those in Python. See:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date".
Sets the period positioning in milliseconds or
"M<n>" on the x axis. Special values in the
form of "M<n>" could be used to declare the
number of months. In this case `n` must be a
positive integer.
xperiod0
Only relevant when the axis `type` is "date".
Sets the base for period positioning in
milliseconds or date string on the x0 axis.
                When `x0period` is a round number of weeks, the
`x0period0` by default would be on a Sunday
i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date".
Sets the alignment of data points on the x
axis.
xsrc
Sets the source reference on Chart Studio Cloud
for x .
yaxis
Sets a reference between this trace's y
coordinates and a 2D cartesian y axis. If "y"
(the default value), the y coordinates refer to
`layout.yaxis`. If "y2", the y coordinates
refer to `layout.yaxis2`, and so on.
yhoverformat
Sets the hover text formatting rule for `y`
using d3 formatting mini-languages which are
very similar to those in Python. See:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format By default the values are
formatted using `yaxis.hoverformat`.
""",
),
**kwargs
)
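
# Usage sketch (illustrative): validators like this one back property
# validation in plotly's graph_objects layer, e.g.
#   import plotly.graph_objects as go
#   go.Figure(go.Candlestick(open=[1], high=[2], low=[0], close=[1.5]))
# validates the candlestick trace through this class's metadata.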
| 46.12605
| 76
| 0.538167
|
import _plotly_utils.basevalidators
class CandlestickValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="candlestick", parent_name="", **kwargs):
super(CandlestickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Candlestick"),
data_docs=kwargs.pop(
"data_docs",
"""
close
Sets the close values.
closesrc
Sets the source reference on Chart Studio Cloud
for close .
customdata
                Assigns extra data to each datum. This may be
                useful when listening to hover, click and
                selection events. Note that "scatter" traces
                also append customdata items in the markers
                DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud
for customdata .
decreasing
:class:`plotly.graph_objects.candlestick.Decrea
sing` instance or dict with compatible
properties
high
Sets the high values.
highsrc
Sets the source reference on Chart Studio Cloud
for high .
hoverinfo
                Determines which trace information appears on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud
for hoverinfo .
hoverlabel
:class:`plotly.graph_objects.candlestick.Hoverl
abel` instance or dict with compatible
properties
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud
for hovertext .
ids
                Assigns id labels to each datum. These ids
                provide object constancy of data points
                during animation. Should be an array of
                strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud
for ids .
increasing
:class:`plotly.graph_objects.candlestick.Increa
sing` instance or dict with compatible
properties
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
legendrank
Sets the legend rank for this trace. Items and
groups with smaller ranks are presented on
top/left side while with `*reversed*
`legend.traceorder` they are on bottom/right
side. The default legendrank is 1000, so that
you can use ranks less than 1000 to place
certain items before all unranked items, and
ranks greater than 1000 to go after all
unranked items.
line
:class:`plotly.graph_objects.candlestick.Line`
instance or dict with compatible properties
low
Sets the low values.
lowsrc
Sets the source reference on Chart Studio Cloud
for low .
meta
Assigns extra meta information associated with
this trace that can be used in various text
attributes. Attributes such as trace `name`,
graph, axis and colorbar `title.text`,
annotation `text` `rangeselector`,
                `updatemenus` and `sliders` `label` text all
support `meta`. To access the trace `meta`
values in an attribute in the same trace,
simply use `%{meta[i]}` where `i` is the index
or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
                `%{data[n].meta[i]}` where `i` is the index or
key of the `meta` and `n` is the trace index.
metasrc
Sets the source reference on Chart Studio Cloud
for meta .
name
                Sets the trace name. The trace name appears as
the legend item and on hover.
opacity
Sets the opacity of the trace.
open
Sets the open values.
opensrc
Sets the source reference on Chart Studio Cloud
for open .
selectedpoints
Array containing integer indices of selected
points. Has an effect only for traces that
support selections. Note that an empty array
means an empty selection where the `unselected`
                are turned on for all points, whereas any
                other non-array value means no selection at
                all, where the `selected` and `unselected`
                styles have no effect.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
stream
:class:`plotly.graph_objects.candlestick.Stream
` instance or dict with compatible properties
text
Sets hover text elements associated with each
sample point. If a single string, the same
string appears over all the data points. If an
                array of strings, the items are mapped in order
to this trace's sample points.
textsrc
Sets the source reference on Chart Studio Cloud
for text .
uid
Assign an id to this trace, Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
whiskerwidth
Sets the width of the whiskers relative to the
                box's width. For example, with 1, the whiskers
are as wide as the box(es).
x
Sets the x coordinates. If absent, linear
coordinate will be generated.
xaxis
Sets a reference between this trace's x
coordinates and a 2D cartesian x axis. If "x"
(the default value), the x coordinates refer to
`layout.xaxis`. If "x2", the x coordinates
refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date
data.
xhoverformat
Sets the hover text formatting rule for `x`
using d3 formatting mini-languages which are
very similar to those in Python. See:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date".
Sets the period positioning in milliseconds or
"M<n>" on the x axis. Special values in the
form of "M<n>" could be used to declare the
number of months. In this case `n` must be a
positive integer.
xperiod0
Only relevant when the axis `type` is "date".
Sets the base for period positioning in
milliseconds or date string on the x0 axis.
                When `x0period` is a round number of weeks, the
`x0period0` by default would be on a Sunday
i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date".
Sets the alignment of data points on the x
axis.
xsrc
Sets the source reference on Chart Studio Cloud
for x .
yaxis
Sets a reference between this trace's y
coordinates and a 2D cartesian y axis. If "y"
(the default value), the y coordinates refer to
`layout.yaxis`. If "y2", the y coordinates
refer to `layout.yaxis2`, and so on.
yhoverformat
Sets the hover text formatting rule for `y`
using d3 formatting mini-languages which are
very similar to those in Python. See:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format By default the values are
formatted using `yaxis.hoverformat`.
""",
),
**kwargs
)
| true
| true
|
1c45b39ba990a7c522df62adb4f9bedffe167392
| 60,426
|
py
|
Python
|
pandas/core/internals/managers.py
|
joybhallaa/pandas
|
1779155552631a30d4bb176dec70b8cc477defd7
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2022-02-02T02:05:28.000Z
|
2022-02-02T02:09:37.000Z
|
pandas/core/internals/managers.py
|
north-star-saj/pandas
|
fc9fdba6592bdb5d0d1147ce4d65639acd897565
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/internals/managers.py
|
north-star-saj/pandas
|
fc9fdba6592bdb5d0d1147ce4d65639acd897565
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-10-28T03:32:40.000Z
|
2020-10-28T03:32:40.000Z
|
from __future__ import annotations
from collections import defaultdict
import itertools
from typing import (
Any,
Callable,
DefaultDict,
Dict,
Hashable,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import warnings
import numpy as np
from pandas._libs import internals as libinternals, lib
from pandas._typing import ArrayLike, Dtype, DtypeObj, Shape
from pandas.errors import PerformanceWarning
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import find_common_type, infer_dtype_from_scalar
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
is_dtype_equal,
is_extension_array_dtype,
is_list_like,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame, ABCPandasArray, ABCSeries
from pandas.core.dtypes.missing import array_equals, isna
import pandas.core.algorithms as algos
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.construction import extract_array
from pandas.core.indexers import maybe_convert_indices
from pandas.core.indexes.api import Float64Index, Index, ensure_index
from pandas.core.internals.base import DataManager
from pandas.core.internals.blocks import (
Block,
CategoricalBlock,
DatetimeTZBlock,
ExtensionBlock,
ObjectValuesExtensionBlock,
extend_blocks,
get_block_type,
make_block,
safe_reshape,
)
from pandas.core.internals.ops import blockwise_all, operate_blockwise
# TODO: flexible with index=None and/or items=None
T = TypeVar("T", bound="BlockManager")
class BlockManager(DataManager):
"""
Core internal data structure to implement DataFrame, Series, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
blocks: Sequence of Block
axes: Sequence of Index
do_integrity_check: bool, default True
Notes
-----
This is *not* a public API class
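
    Examples
    --------
    A minimal sketch (internal API, subject to change). Block values are
    laid out as ``(n_items, n_rows)``, so two columns of three rows look
    like:

    >>> import numpy as np
    >>> from pandas import Index
    >>> from pandas.core.internals.blocks import make_block
    >>> blk = make_block(np.zeros((2, 3)), placement=slice(0, 2), ndim=2)
    >>> mgr = BlockManager([blk], [Index(["a", "b"]), Index(range(3))])
    >>> mgr.shape
    (2, 3)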
"""
__slots__ = [
"axes",
"blocks",
"_known_consolidated",
"_is_consolidated",
"_blknos",
"_blklocs",
]
_blknos: np.ndarray
_blklocs: np.ndarray
def __init__(
self,
blocks: Sequence[Block],
axes: Sequence[Index],
do_integrity_check: bool = True,
):
self.axes = [ensure_index(ax) for ax in axes]
self.blocks: Tuple[Block, ...] = tuple(blocks)
for block in blocks:
if self.ndim != block.ndim:
raise AssertionError(
f"Number of Block dimensions ({block.ndim}) must equal "
f"number of axes ({self.ndim})"
)
if do_integrity_check:
self._verify_integrity()
        # Populate known_consolidated, blknos, and blklocs lazily
self._known_consolidated = False
self._blknos = None
self._blklocs = None
@classmethod
def from_blocks(cls, blocks: List[Block], axes: List[Index]):
"""
Constructor for BlockManager and SingleBlockManager with same signature.
"""
return cls(blocks, axes, do_integrity_check=False)
@property
def blknos(self):
"""
Suppose we want to find the array corresponding to our i'th column.
blknos[i] identifies the block from self.blocks that contains this column.
blklocs[i] identifies the column of interest within
self.blocks[self.blknos[i]]
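
        For example (one possible layout): a frame with columns
        ``[a: int64, b: float64, c: int64]`` consolidates the two int64
        columns into one block, so with that block numbered 0 and the
        float64 block numbered 1, ``blknos == [0, 1, 0]`` and
        ``blklocs == [0, 0, 1]``.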
"""
if self._blknos is None:
# Note: these can be altered by other BlockManager methods.
self._rebuild_blknos_and_blklocs()
return self._blknos
@property
def blklocs(self):
"""
See blknos.__doc__
"""
if self._blklocs is None:
# Note: these can be altered by other BlockManager methods.
self._rebuild_blknos_and_blklocs()
return self._blklocs
def make_empty(self: T, axes=None) -> T:
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [Index([])] + self.axes[1:]
# preserve dtype if possible
if self.ndim == 1:
assert isinstance(self, SingleBlockManager) # for mypy
blk = self.blocks[0]
arr = blk.values[:0]
nb = blk.make_block_same_class(arr, placement=slice(0, 0), ndim=1)
blocks = [nb]
else:
blocks = []
return type(self).from_blocks(blocks, axes)
def __nonzero__(self) -> bool:
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self) -> Shape:
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self) -> int:
return len(self.axes)
def set_axis(self, axis: int, new_labels: Index) -> None:
# Caller is responsible for ensuring we have an Index object.
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError(
f"Length mismatch: Expected axis has {old_len} elements, new "
f"values have {new_len} elements"
)
self.axes[axis] = new_labels
@property
def is_single_block(self) -> bool:
# Assumes we are 2D; overridden by SingleBlockManager
return len(self.blocks) == 1
def _rebuild_blknos_and_blklocs(self) -> None:
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.intp)
new_blklocs = np.empty(self.shape[0], dtype=np.intp)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
# TODO: can we avoid this? it isn't cheap
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
@property
def items(self) -> Index:
return self.axes[0]
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_nd(dtypes, self.blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = list(self.axes)
extra_state = {
"0.14.1": {
"axes": axes_array,
"blocks": [
{"values": b.values, "mgr_locs": b.mgr_locs.indexer}
for b in self.blocks
],
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs, ndim: int):
# TODO(EA2D): ndim would be unnecessary with 2D EAs
return make_block(values, placement=mgr_locs, ndim=ndim)
if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
state = state[3]["0.14.1"]
self.axes = [ensure_index(ax) for ax in state["axes"]]
ndim = len(self.axes)
self.blocks = tuple(
unpickle_block(b["values"], b["mgr_locs"], ndim=ndim)
for b in state["blocks"]
)
else:
raise NotImplementedError("pre-0.14.1 pickles are no longer supported")
self._post_setstate()
def _post_setstate(self) -> None:
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self) -> int:
return len(self.items)
def __repr__(self) -> str:
output = type(self).__name__
for i, ax in enumerate(self.axes):
if i == 0:
output += f"\nItems: {ax}"
else:
output += f"\nAxis {i}: {ax}"
for block in self.blocks:
output += f"\n{block}"
return output
def _verify_integrity(self) -> None:
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block.shape[1:] != mgr_shape[1:]:
raise construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError(
"Number of manager items must equal union of "
f"block items\n# manager items: {len(self.items)}, # "
f"tot_items: {tot_items}"
)
def reduce(
self: T, func: Callable, ignore_failures: bool = False
) -> Tuple[T, np.ndarray]:
"""
Apply reduction function blockwise, returning a single-row BlockManager.
Parameters
----------
func : reduction function
ignore_failures : bool, default False
Whether to drop blocks where func raises TypeError.
Returns
-------
BlockManager
np.ndarray
Indexer of mgr_locs that are retained.
"""
# If 2D, we assume that we're operating column-wise
assert self.ndim == 2
res_blocks: List[Block] = []
for blk in self.blocks:
nbs = blk.reduce(func, ignore_failures)
res_blocks.extend(nbs)
index = Index([None]) # placeholder
if ignore_failures:
if res_blocks:
indexer = np.concatenate([blk.mgr_locs.as_array for blk in res_blocks])
new_mgr = self._combine(res_blocks, copy=False, index=index)
else:
indexer = []
new_mgr = type(self).from_blocks([], [Index([]), index])
else:
indexer = np.arange(self.shape[0])
new_mgr = type(self).from_blocks(res_blocks, [self.items, index])
return new_mgr, indexer
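    # Editor's sketch of the blockwise loop above with plain numpy, assuming
    # blocks are (n_items, n_rows)-shaped and func reduces each row of a
    # block to one scalar per item:
    #
    #     >>> import numpy as np
    #     >>> blocks = [np.array([[1.0, 2.0], [3.0, 4.0]]), np.array([[10, 20]])]
    #     >>> [blk.sum(axis=1) for blk in blocks]    # one reduced value per item
    #     [array([3., 7.]), array([30])]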
def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager:
"""
Apply array_op blockwise with another (aligned) BlockManager.
"""
return operate_blockwise(self, other, array_op)
def apply(
self: T,
f,
align_keys: Optional[List[str]] = None,
ignore_failures: bool = False,
**kwargs,
) -> T:
"""
Iterate over the blocks, collect and create a new BlockManager.
Parameters
----------
f : str or callable
Name of the Block method to apply.
        align_keys : List[str] or None, default None
            Names of keyword arguments that must be re-aligned to each
            block's columns before being passed to ``f``.
        ignore_failures : bool, default False
            Whether to drop blocks where ``f`` raises TypeError or
            NotImplementedError.
**kwargs
Keywords to pass to `f`
Returns
-------
BlockManager
"""
assert "filter" not in kwargs
align_keys = align_keys or []
result_blocks: List[Block] = []
# fillna: Series/DataFrame is responsible for making sure value is aligned
aligned_args = {k: kwargs[k] for k in align_keys}
for b in self.blocks:
if aligned_args:
for k, obj in aligned_args.items():
if isinstance(obj, (ABCSeries, ABCDataFrame)):
# The caller is responsible for ensuring that
# obj.axes[-1].equals(self.items)
if obj.ndim == 1:
kwargs[k] = obj.iloc[b.mgr_locs.indexer]._values
else:
kwargs[k] = obj.iloc[:, b.mgr_locs.indexer]._values
else:
# otherwise we have an ndarray
kwargs[k] = obj[b.mgr_locs.indexer]
try:
if callable(f):
applied = b.apply(f, **kwargs)
else:
applied = getattr(b, f)(**kwargs)
except (TypeError, NotImplementedError):
if not ignore_failures:
raise
continue
result_blocks = extend_blocks(applied, result_blocks)
if ignore_failures:
return self._combine(result_blocks)
if len(result_blocks) == 0:
return self.make_empty(self.axes)
return type(self).from_blocks(result_blocks, self.axes)
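    # Editor's note: callers such as putmask/where below pass align_keys so
    # that, per block, only the rows of the aligned arguments belonging to
    # that block's columns reach the Block method, e.g.
    #
    #     self.apply("putmask", align_keys=["new", "mask"], mask=mask, new=new)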
def quantile(
self,
*,
qs: Float64Index,
axis: int = 0,
transposed: bool = False,
interpolation="linear",
) -> BlockManager:
"""
Iterate over blocks applying quantile reduction.
This routine is intended for reduction type operations and
will do inference on the generated blocks.
Parameters
----------
        qs : Float64Index
            List of the quantiles to be computed.
        axis : int, default 0
            Reduction axis; only axis=1 is ever passed here.
        transposed : bool, default False
            Whether we are holding transposed data.
        interpolation : str, default 'linear'
            Type of interpolation.
Returns
-------
BlockManager
"""
# Series dispatches to DataFrame for quantile, which allows us to
# simplify some of the code here and in the blocks
assert self.ndim >= 2
assert is_list_like(qs) # caller is responsible for this
assert axis == 1 # only ever called this way
new_axes = list(self.axes)
new_axes[1] = Float64Index(qs)
blocks = [
blk.quantile(axis=axis, qs=qs, interpolation=interpolation)
for blk in self.blocks
]
if transposed:
new_axes = new_axes[::-1]
blocks = [
b.make_block(b.values.T, placement=np.arange(b.shape[1]))
for b in blocks
]
return type(self)(blocks, new_axes)
def isna(self, func) -> BlockManager:
return self.apply("apply", func=func)
def where(self, other, cond, align: bool, errors: str, axis: int) -> BlockManager:
if align:
align_keys = ["other", "cond"]
else:
align_keys = ["cond"]
other = extract_array(other, extract_numpy=True)
return self.apply(
"where",
align_keys=align_keys,
other=other,
cond=cond,
errors=errors,
axis=axis,
)
def setitem(self, indexer, value) -> BlockManager:
return self.apply("setitem", indexer=indexer, value=value)
def putmask(self, mask, new, align: bool = True):
if align:
align_keys = ["new", "mask"]
else:
align_keys = ["mask"]
new = extract_array(new, extract_numpy=True)
return self.apply(
"putmask",
align_keys=align_keys,
mask=mask,
new=new,
)
def diff(self, n: int, axis: int) -> BlockManager:
return self.apply("diff", n=n, axis=axis)
def interpolate(self, **kwargs) -> BlockManager:
return self.apply("interpolate", **kwargs)
def shift(self, periods: int, axis: int, fill_value) -> BlockManager:
if fill_value is lib.no_default:
fill_value = None
if axis == 0 and self.ndim == 2 and self.nblocks > 1:
# GH#35488 we need to watch out for multi-block cases
# We only get here with fill_value not-lib.no_default
ncols = self.shape[0]
if periods > 0:
indexer = [-1] * periods + list(range(ncols - periods))
else:
nper = abs(periods)
indexer = list(range(nper, ncols)) + [-1] * nper
result = self.reindex_indexer(
self.items,
indexer,
axis=0,
fill_value=fill_value,
allow_dups=True,
consolidate=False,
)
return result
return self.apply("shift", periods=periods, axis=axis, fill_value=fill_value)
def fillna(self, value, limit, inplace: bool, downcast) -> BlockManager:
return self.apply(
"fillna", value=value, limit=limit, inplace=inplace, downcast=downcast
)
def downcast(self) -> BlockManager:
return self.apply("downcast")
def astype(self, dtype, copy: bool = False, errors: str = "raise") -> BlockManager:
return self.apply("astype", dtype=dtype, copy=copy, errors=errors)
def convert(
self,
copy: bool = True,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
) -> BlockManager:
return self.apply(
"convert",
copy=copy,
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
)
def replace(self, to_replace, value, inplace: bool, regex: bool) -> BlockManager:
assert np.ndim(value) == 0, value
return self.apply(
"replace", to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
def replace_list(
self: T,
src_list: List[Any],
dest_list: List[Any],
inplace: bool = False,
regex: bool = False,
) -> T:
""" do a list replace """
inplace = validate_bool_kwarg(inplace, "inplace")
bm = self.apply(
"_replace_list",
src_list=src_list,
dest_list=dest_list,
inplace=inplace,
regex=regex,
)
bm._consolidate_inplace()
return bm
def to_native_types(self, **kwargs) -> BlockManager:
"""
Convert values to native types (strings / python objects) that are used
in formatting (repr / csv).
"""
return self.apply("to_native_types", **kwargs)
def is_consolidated(self) -> bool:
"""
Return True if more than one block with the same dtype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self) -> None:
dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate]
self._is_consolidated = len(dtypes) == len(set(dtypes))
self._known_consolidated = True
@property
def is_numeric_mixed_type(self) -> bool:
return all(block.is_numeric for block in self.blocks)
@property
def any_extension_types(self) -> bool:
"""Whether any of the blocks in this manager are extension blocks"""
return any(block.is_extension for block in self.blocks)
@property
def is_view(self) -> bool:
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy: bool = False) -> BlockManager:
"""
Select blocks that are bool-dtype and columns from object-dtype blocks
that are all-bool.
Parameters
----------
copy : bool, default False
Whether to copy the blocks
"""
new_blocks = []
for blk in self.blocks:
if blk.dtype == bool:
new_blocks.append(blk)
elif blk.is_object:
nbs = blk._split()
for nb in nbs:
if nb.is_bool:
new_blocks.append(nb)
return self._combine(new_blocks, copy)
def get_numeric_data(self, copy: bool = False) -> BlockManager:
"""
Parameters
----------
copy : bool, default False
Whether to copy the blocks
"""
return self._combine([b for b in self.blocks if b.is_numeric], copy)
def _combine(
self: T, blocks: List[Block], copy: bool = True, index: Optional[Index] = None
) -> T:
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_blocks: List[Block] = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = inv_indexer[b.mgr_locs.indexer]
new_blocks.append(b)
axes = list(self.axes)
if index is not None:
axes[-1] = index
axes[0] = self.items.take(indexer)
return type(self).from_blocks(new_blocks, axes)
def get_slice(self, slobj: slice, axis: int = 0) -> BlockManager:
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
elif axis == 1:
slicer = (slice(None), slobj)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
else:
raise IndexError("Requested axis not found in manager")
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = type(self)(new_blocks, new_axes, do_integrity_check=False)
return bm
@property
def nblocks(self) -> int:
return len(self.blocks)
def copy(self: T, deep=True) -> T:
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : bool or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
# hit in e.g. tests.io.json.test_pandas
def copy_func(ax):
return ax.copy(deep=True) if deep == "all" else ax.view()
new_axes = [copy_func(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
res = self.apply("copy", deep=deep)
res.axes = new_axes
return res
def as_array(
self,
transpose: bool = False,
dtype: Optional[Dtype] = None,
copy: bool = False,
na_value=lib.no_default,
) -> np.ndarray:
"""
        Convert the BlockManager data into a numpy array.
Parameters
----------
transpose : bool, default False
If True, transpose the return array.
dtype : object, default None
Data type of the return array.
copy : bool, default False
If True then guarantee that a copy is returned. A value of
False does not guarantee that the underlying data is not
copied.
na_value : object, default lib.no_default
Value to be used as the missing value sentinel.
Returns
-------
arr : ndarray
"""
if len(self.blocks) == 0:
arr = np.empty(self.shape, dtype=float)
return arr.transpose() if transpose else arr
# We want to copy when na_value is provided to avoid
# mutating the original object
copy = copy or na_value is not lib.no_default
if self.is_single_block:
blk = self.blocks[0]
if blk.is_extension:
# Avoid implicit conversion of extension blocks to object
arr = blk.values.to_numpy(dtype=dtype, na_value=na_value).reshape(
blk.shape
)
else:
arr = np.asarray(blk.get_values())
if dtype:
arr = arr.astype(dtype, copy=False)
else:
arr = self._interleave(dtype=dtype, na_value=na_value)
# The underlying data was copied within _interleave
copy = False
if copy:
arr = arr.copy()
if na_value is not lib.no_default:
arr[isna(arr)] = na_value
return arr.transpose() if transpose else arr
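    # Editor's illustrative usage (DataFrame.to_numpy funnels into as_array
    # in this version; the na_value path forces a copy, as noted above):
    #
    #     >>> import numpy as np, pandas as pd
    #     >>> df = pd.DataFrame({"a": [1.0, np.nan]})
    #     >>> df.to_numpy(na_value=0.0)
    #     array([[1.],
    #            [0.]])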
def _interleave(
self, dtype: Optional[Dtype] = None, na_value=lib.no_default
) -> np.ndarray:
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
if not dtype:
dtype = _interleaved_dtype(self.blocks)
# TODO: https://github.com/pandas-dev/pandas/issues/22791
# Give EAs some input on what happens here. Sparse needs this.
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
elif is_extension_array_dtype(dtype):
dtype = "object"
elif is_dtype_equal(dtype, str):
dtype = "object"
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
if blk.is_extension:
# Avoid implicit conversion of extension blocks to object
arr = blk.values.to_numpy(dtype=dtype, na_value=na_value)
else:
arr = blk.get_values(dtype)
result[rl.indexer] = arr
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError("Some items were not contained in blocks")
return result
def to_dict(self, copy: bool = True):
"""
Return a dict of str(dtype) -> BlockManager
Parameters
----------
copy : bool, default True
Returns
-------
values : a dict of dtype -> BlockManager
"""
bd: Dict[str, List[Block]] = {}
for b in self.blocks:
bd.setdefault(str(b.dtype), []).append(b)
# TODO(EA2D): the combine will be unnecessary with 2D EAs
return {dtype: self._combine(blocks, copy=copy) for dtype, blocks in bd.items()}
def fast_xs(self, loc: int) -> ArrayLike:
"""
Return the array corresponding to `frame.iloc[loc]`.
Parameters
----------
loc : int
Returns
-------
np.ndarray or ExtensionArray
"""
if len(self.blocks) == 1:
return self.blocks[0].iget((slice(None), loc))
dtype = _interleaved_dtype(self.blocks)
n = len(self)
if is_extension_array_dtype(dtype):
# we'll eventually construct an ExtensionArray.
result = np.empty(n, dtype=object)
else:
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk.iget((i, loc))
if isinstance(dtype, ExtensionDtype):
result = dtype.construct_array_type()._from_sequence(result, dtype=dtype)
return result
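    # Editor's hedged example: fast_xs backs single-row lookups such as
    # df.iloc[loc]; interleaving mixed dtypes upcasts to object:
    #
    #     >>> import pandas as pd
    #     >>> df = pd.DataFrame({"a": [1], "b": ["x"]})
    #     >>> df._mgr.fast_xs(0)            # private API
    #     array([1, 'x'], dtype=object)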
def consolidate(self) -> BlockManager:
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = type(self)(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self) -> None:
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def iget(self, i: int) -> SingleBlockManager:
"""
Return the data as a SingleBlockManager.
"""
block = self.blocks[self.blknos[i]]
values = block.iget(self.blklocs[i])
        # shortcut for selecting a single dimension from a 2-dim BlockManager
return SingleBlockManager(
block.make_block_same_class(
values, placement=slice(0, len(values)), ndim=1
),
self.axes[1],
)
def iget_values(self, i: int) -> ArrayLike:
"""
Return the data for column i as the values (ndarray or ExtensionArray).
"""
block = self.blocks[self.blknos[i]]
values = block.iget(self.blklocs[i])
return values
def idelete(self, indexer):
"""
Delete selected locations in-place (new block and array, same BlockManager)
"""
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self.blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(
b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno]
)
self._rebuild_blknos_and_blklocs()
def iset(self, loc: Union[int, slice, np.ndarray], value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
value = extract_array(value, extract_numpy=True)
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
if self._blklocs is None and self.ndim > 1:
self._rebuild_blknos_and_blklocs()
value_is_extension_type = is_extension_array_dtype(value)
# categorical/sparse/datetimetz
if value_is_extension_type:
def value_getitem(placement):
return value
else:
if value.ndim == 2:
value = value.T
if value.ndim == self.ndim - 1:
value = safe_reshape(value, (1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError(
"Shape of new values must be compatible with manager shape"
)
if lib.is_integer(loc):
# We have 6 tests where loc is _not_ an int.
# In this case, get_blkno_placements will yield only one tuple,
# containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1)))
loc = [loc]
# Accessing public blknos ensures the public versions are initialized
blknos = self.blknos[loc]
blklocs = self.blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in libinternals.get_blkno_placements(blknos, group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set_inplace(blk_locs, value_getitem(val_locs))
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))
self._blknos = new_blknos[self._blknos]
self.blocks = tuple(
blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
)
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks: List[Block] = []
if value_is_extension_type:
# This code (ab-)uses the fact that EA blocks contain only
# one item.
# TODO(EA2D): special casing unnecessary with 2D EAs
new_blocks.extend(
make_block(
values=value,
ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1),
)
for mgr_loc in unfit_mgr_locs
)
self._blknos[unfit_mgr_locs] = np.arange(unfit_count) + len(self.blocks)
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(
values=value_getitem(unfit_val_items),
ndim=self.ndim,
placement=unfit_mgr_locs,
)
)
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc: int, item: Hashable, value, allow_duplicates: bool = False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError(f"cannot insert {item}, already exists")
if not isinstance(loc, int):
raise TypeError("loc must be int")
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
if value.ndim == 2:
value = value.T
if value.ndim == self.ndim - 1 and not is_extension_array_dtype(value.dtype):
# TODO(EA2D): special case not needed with 2D EAs
value = safe_reshape(value, (1,) + value.shape)
block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self.blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
# Accessing public blklocs ensures the public versions are initialized
if loc == self.blklocs.shape[0]:
# np.append is a lot faster, let's use it if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._known_consolidated = False
if len(self.blocks) > 100:
warnings.warn(
"DataFrame is highly fragmented. This is usually the result "
"of calling `frame.insert` many times, which has poor performance. "
"Consider using pd.concat instead. To get a de-fragmented frame, "
"use `newframe = frame.copy()`",
PerformanceWarning,
stacklevel=5,
)
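    # Editor's hedged example of the pattern the warning above recommends:
    # assemble many columns in one concat instead of repeated insert calls.
    #
    #     >>> import pandas as pd
    #     >>> parts = [pd.Series(range(3), name=f"c{i}") for i in range(200)]
    #     >>> wide = pd.concat(parts, axis=1)    # one consolidated frame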
def reindex_indexer(
self: T,
new_axis,
indexer,
axis: int,
fill_value=None,
allow_dups: bool = False,
copy: bool = True,
consolidate: bool = True,
only_slice: bool = False,
) -> T:
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
axis : int
fill_value : object, default None
allow_dups : bool, default False
copy : bool, default True
consolidate: bool, default True
Whether to consolidate inplace before reindexing.
only_slice : bool, default False
Whether to take views, not copies, along columns.
pandas-indexer with -1's only.
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
if consolidate:
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(
indexer, fill_value=fill_value, only_slice=only_slice
)
else:
new_blocks = [
blk.take_nd(
indexer,
axis=axis,
fill_value=(
fill_value if fill_value is not None else blk.fill_value
),
)
for blk in self.blocks
]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return type(self).from_blocks(new_blocks, new_axes)
def _slice_take_blocks_ax0(
self, slice_or_indexer, fill_value=lib.no_default, only_slice: bool = False
):
"""
Slice/take blocks along axis=0.
        Overloaded for SingleBlockManager
Parameters
----------
slice_or_indexer : slice, ndarray[bool], or list-like of ints
fill_value : scalar, default lib.no_default
only_slice : bool, default False
If True, we always return views on existing arrays, never copies.
This is used when called from ops.blockwise.operate_blockwise.
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_value is not lib.no_default
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill
)
if self.is_single_block:
blk = self.blocks[0]
if sl_type in ("slice", "mask"):
# GH#32959 EABlock would fail since we can't make 0-width
# TODO(EA2D): special casing unnecessary with 2D EAs
if sllen == 0:
return []
return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_value is None:
fill_value = blk.fill_value
if not allow_fill and only_slice:
# GH#33597 slice instead of take, so we get
# views instead of copies
blocks = [
blk.getitem_block([ml], new_mgr_locs=i)
for i, ml in enumerate(slobj)
]
return blocks
else:
return [
blk.take_nd(
slobj,
axis=0,
new_mgr_locs=slice(0, sllen),
fill_value=fill_value,
)
]
if sl_type in ("slice", "mask"):
blknos = self.blknos[slobj]
blklocs = self.blklocs[slobj]
else:
blknos = algos.take_nd(
self.blknos, slobj, fill_value=-1, allow_fill=allow_fill
)
blklocs = algos.take_nd(
self.blklocs, slobj, fill_value=-1, allow_fill=allow_fill
)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
blocks = []
group = not only_slice
for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group):
if blkno == -1:
# If we've got here, fill_value was not lib.no_default
blocks.append(
self._make_na_block(placement=mgr_locs, fill_value=fill_value)
)
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's
# only one item and each mgr loc is a copy of that single
# item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=False)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
# GH#32779 to avoid the performance penalty of copying,
# we may try to only slice
taker = blklocs[mgr_locs.indexer]
max_len = max(len(mgr_locs), taker.max() + 1)
if only_slice:
taker = lib.maybe_indices_to_slice(taker, max_len)
if isinstance(taker, slice):
nb = blk.getitem_block(taker, new_mgr_locs=mgr_locs)
blocks.append(nb)
elif only_slice:
# GH#33597 slice instead of take, so we get
# views instead of copies
for i, ml in zip(taker, mgr_locs):
nb = blk.getitem_block([i], new_mgr_locs=ml)
blocks.append(nb)
else:
nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs)
blocks.append(nb)
return blocks
def _make_na_block(self, placement, fill_value=None):
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement, ndim=block_values.ndim)
def take(self, indexer, axis: int = 1, verify: bool = True, convert: bool = True):
"""
Take items along any axis.
"""
indexer = (
np.arange(indexer.start, indexer.stop, indexer.step, dtype="int64")
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype="int64")
)
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception("Indices must be nonzero and less than the axis length")
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(
new_axis=new_labels,
indexer=indexer,
axis=axis,
allow_dups=True,
consolidate=False,
)
def _equal_values(self: T, other: T) -> bool:
"""
Used in .equals defined in base class. Only check the column values
assuming shape and indexes have already been checked.
"""
if self.ndim == 1:
            # For SingleBlockManager (i.e. Series)
if other.ndim != 1:
return False
left = self.blocks[0].values
right = other.blocks[0].values
return array_equals(left, right)
return blockwise_all(self, other, array_equals)
def unstack(self, unstacker, fill_value) -> BlockManager:
"""
        Return a BlockManager with all blocks unstacked.
Parameters
----------
unstacker : reshape._Unstacker
fill_value : Any
fill_value for newly introduced missing values.
Returns
-------
unstacked : BlockManager
"""
new_columns = unstacker.get_new_columns(self.items)
new_index = unstacker.new_index
new_blocks: List[Block] = []
columns_mask: List[np.ndarray] = []
for blk in self.blocks:
blk_cols = self.items[blk.mgr_locs.indexer]
new_items = unstacker.get_new_columns(blk_cols)
new_placement = new_columns.get_indexer(new_items)
blocks, mask = blk._unstack(
unstacker, fill_value, new_placement=new_placement
)
new_blocks.extend(blocks)
columns_mask.extend(mask)
new_columns = new_columns[columns_mask]
bm = BlockManager(new_blocks, [new_columns, new_index])
return bm
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
is_single_block = True
def __init__(
self,
block: Block,
axis: Index,
do_integrity_check: bool = False,
fastpath=lib.no_default,
):
assert isinstance(block, Block), type(block)
assert isinstance(axis, Index), type(axis)
if fastpath is not lib.no_default:
warnings.warn(
"The `fastpath` keyword is deprecated and will be removed "
"in a future version.",
FutureWarning,
stacklevel=2,
)
self.axes = [axis]
self.blocks = (block,)
@classmethod
def from_blocks(cls, blocks: List[Block], axes: List[Index]) -> SingleBlockManager:
"""
Constructor for BlockManager and SingleBlockManager with same signature.
"""
assert len(blocks) == 1
assert len(axes) == 1
return cls(blocks[0], axes[0], do_integrity_check=False)
@classmethod
def from_array(cls, array: ArrayLike, index: Index) -> SingleBlockManager:
"""
        Constructor for the case where we have an array that is not yet a Block.
"""
block = make_block(array, placement=slice(0, len(index)), ndim=1)
return cls(block, index)
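    # Editor's illustrative sketch (private constructor, shown for reading
    # the code only, not a supported API):
    #
    #     >>> import numpy as np
    #     >>> from pandas import Index
    #     >>> sbm = SingleBlockManager.from_array(np.arange(3), Index(range(3)))
    #     >>> sbm.dtype                     # dtype('int64') on most platforms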
def _post_setstate(self):
pass
@property
def _block(self) -> Block:
return self.blocks[0]
@property
def _blknos(self):
""" compat with BlockManager """
return None
@property
def _blklocs(self):
""" compat with BlockManager """
return None
def get_slice(self, slobj: slice, axis: int = 0) -> SingleBlockManager:
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
blk = self._block
array = blk._slice(slobj)
block = blk.make_block_same_class(array, placement=slice(0, len(array)))
return type(self)(block, self.index[slobj])
@property
def index(self) -> Index:
return self.axes[0]
@property
def dtype(self) -> DtypeObj:
return self._block.dtype
def get_dtypes(self) -> np.ndarray:
return np.array([self._block.dtype])
def external_values(self):
"""The array that Series.values returns"""
return self._block.external_values()
def internal_values(self):
"""The array that Series._values returns"""
return self._block.internal_values()
@property
def _can_hold_na(self) -> bool:
return self._block._can_hold_na
def is_consolidated(self) -> bool:
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def idelete(self, indexer):
"""
Delete single location from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
self._block.delete(indexer)
self.axes[0] = self.axes[0].delete(indexer)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
raise NotImplementedError("Use series._values[loc] instead")
# --------------------------------------------------------------------
# Constructor Helpers
def create_block_manager_from_blocks(blocks, axes: List[Index]) -> BlockManager:
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement
# is basically "all items", but if there're many, don't bother
# converting, it's an error anyway.
blocks = [
make_block(
values=blocks[0], placement=slice(0, len(axes[0])), ndim=2
)
]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
blocks = [getattr(b, "values", b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
raise construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(
arrays, names: Index, axes: List[Index]
) -> BlockManager:
assert isinstance(names, Index)
assert isinstance(axes, list)
assert all(isinstance(x, Index) for x in axes)
    # ensure we don't have any PandasArrays when we call get_block_type
# Note: just calling extract_array breaks tests that patch PandasArray._typ.
arrays = [x if not isinstance(x, ABCPandasArray) else x.to_numpy() for x in arrays]
try:
blocks = _form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
raise construction_error(len(arrays), arrays[0].shape, axes, e)
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
# Correcting the user facing error message during dataframe construction
if len(passed) <= 2:
passed = passed[::-1]
implied = tuple(len(ax) for ax in axes)
# Correcting the user facing error message during dataframe construction
if len(implied) <= 2:
implied = implied[::-1]
# We return the exception object instead of raising it so that we
# can raise it in the caller; mypy plays better with that
if passed == implied and e is not None:
return e
if block_shape[0] == 0:
return ValueError("Empty data passed with indices specified.")
return ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
# -----------------------------------------------------------------------
def _form_blocks(arrays, names: Index, axes: List[Index]) -> List[Block]:
# put "leftover" items in float bucket, where else?
# generalize?
items_dict: DefaultDict[str, List] = defaultdict(list)
extra_locs = []
names_idx = names
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
v = arrays[name_idx]
block_type = get_block_type(v)
items_dict[block_type.__name__].append((i, v))
blocks: List[Block] = []
if len(items_dict["FloatBlock"]):
float_blocks = _multi_blockify(items_dict["FloatBlock"])
blocks.extend(float_blocks)
if len(items_dict["NumericBlock"]):
complex_blocks = _multi_blockify(items_dict["NumericBlock"])
blocks.extend(complex_blocks)
if len(items_dict["TimeDeltaBlock"]):
timedelta_blocks = _multi_blockify(items_dict["TimeDeltaBlock"])
blocks.extend(timedelta_blocks)
if len(items_dict["DatetimeBlock"]):
datetime_blocks = _simple_blockify(items_dict["DatetimeBlock"], DT64NS_DTYPE)
blocks.extend(datetime_blocks)
if len(items_dict["DatetimeTZBlock"]):
dttz_blocks = [
make_block(array, klass=DatetimeTZBlock, placement=i, ndim=2)
for i, array in items_dict["DatetimeTZBlock"]
]
blocks.extend(dttz_blocks)
if len(items_dict["ObjectBlock"]) > 0:
object_blocks = _simple_blockify(items_dict["ObjectBlock"], np.object_)
blocks.extend(object_blocks)
if len(items_dict["CategoricalBlock"]) > 0:
cat_blocks = [
make_block(array, klass=CategoricalBlock, placement=i, ndim=2)
for i, array in items_dict["CategoricalBlock"]
]
blocks.extend(cat_blocks)
if len(items_dict["ExtensionBlock"]):
external_blocks = [
make_block(array, klass=ExtensionBlock, placement=i, ndim=2)
for i, array in items_dict["ExtensionBlock"]
]
blocks.extend(external_blocks)
if len(items_dict["ObjectValuesExtensionBlock"]):
external_blocks = [
make_block(array, klass=ObjectValuesExtensionBlock, placement=i, ndim=2)
for i, array in items_dict["ObjectValuesExtensionBlock"]
]
blocks.extend(external_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs, ndim=2)
blocks.append(na_block)
return blocks
def _simple_blockify(tuples, dtype) -> List[Block]:
"""
    return a list with a single block stacked from arrays of a single dtype;
    if dtype is not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# TODO: CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement, ndim=2)
return [block]
def _multi_blockify(tuples, dtype: Optional[Dtype] = None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[1].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(list(tup_block), dtype)
block = make_block(values, placement=placement, ndim=2)
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype: np.dtype):
    # tuples are (placement, array) pairs; stack the arrays into one ndarray
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x._values
else:
return np.asarray(x)
placement, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + first.shape
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks: Sequence[Block]) -> Optional[DtypeObj]:
"""
Find the common dtype for `blocks`.
Parameters
----------
blocks : List[Block]
Returns
-------
dtype : np.dtype, ExtensionDtype, or None
None is returned when `blocks` is empty.
"""
if not len(blocks):
return None
return find_common_type([b.dtype for b in blocks])
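# Editor's illustrative behavior, relying only on find_common_type semantics:
#
#     >>> import numpy as np
#     >>> from pandas.core.dtypes.cast import find_common_type
#     >>> find_common_type([np.dtype("int64"), np.dtype("float64")])
#     dtype('float64')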
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks: List[Block] = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(
list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate
)
new_blocks = extend_blocks(merged_blocks, new_blocks)
return new_blocks
def _merge_blocks(
blocks: List[Block], dtype: DtypeObj, can_consolidate: bool
) -> List[Block]:
if len(blocks) == 1:
return blocks
if can_consolidate:
if dtype is None:
if len({b.dtype for b in blocks}) != 1:
raise AssertionError("_merge_blocks are invalid!")
# TODO: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = np.vstack([b.values for b in blocks])
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return [make_block(new_values, placement=new_mgr_locs, ndim=2)]
# can't consolidate --> no merge
return blocks
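# Editor's plain-numpy sketch of the merge-and-reorder above, assuming two
# single-row blocks sitting at manager positions 2 and 0:
#
#     >>> import numpy as np
#     >>> locs, vals = np.array([2, 0]), np.array([[9.0], [7.0]])
#     >>> order = np.argsort(locs)
#     >>> vals[order].tolist(), locs[order].tolist()
#     ([[7.0], [9.0]], [0, 2])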
def _fast_count_smallints(arr: np.ndarray) -> np.ndarray:
"""Faster version of set(arr) for sequences of small numbers."""
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
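# Editor's worked example:
#
#     >>> import numpy as np
#     >>> counts = np.bincount(np.array([0, 1, 1, 3]))   # [1, 2, 0, 1]
#     >>> nz = counts.nonzero()[0]                       # [0, 1, 3]
#     >>> np.c_[nz, counts[nz]]                          # (value, count) rows
#     array([[0, 1],
#            [1, 2],
#            [3, 1]])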
def _preprocess_slice_or_indexer(slice_or_indexer, length: int, allow_fill: bool):
if isinstance(slice_or_indexer, slice):
return (
"slice",
slice_or_indexer,
libinternals.slice_len(slice_or_indexer, length),
)
elif (
isinstance(slice_or_indexer, np.ndarray) and slice_or_indexer.dtype == np.bool_
):
return "mask", slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return "fancy", indexer, len(indexer)
self._known_consolidated = False
def insert(self, loc: int, item: Hashable, value, allow_duplicates: bool = False):
if not allow_duplicates and item in self.items:
raise ValueError(f"cannot insert {item}, already exists")
if not isinstance(loc, int):
raise TypeError("loc must be int")
new_axis = self.items.insert(loc, item)
if value.ndim == 2:
value = value.T
if value.ndim == self.ndim - 1 and not is_extension_array_dtype(value.dtype):
value = safe_reshape(value, (1,) + value.shape)
block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self.blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self.blklocs.shape[0]:
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._known_consolidated = False
if len(self.blocks) > 100:
warnings.warn(
"DataFrame is highly fragmented. This is usually the result "
"of calling `frame.insert` many times, which has poor performance. "
"Consider using pd.concat instead. To get a de-fragmented frame, "
"use `newframe = frame.copy()`",
PerformanceWarning,
stacklevel=5,
)
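    # Illustrative sketch of the warning above: building columns up front and
    # concatenating once sidesteps per-insert fragmentation (and on the pandas
    # builds assumed here, concat returns consolidated blocks).
    #
    # >>> import pandas as pd
    # >>> parts = [pd.Series(range(3), name=i) for i in range(5)]
    # >>> pd.concat(parts, axis=1).shape
    # (3, 5)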
def reindex_indexer(
self: T,
new_axis,
indexer,
axis: int,
fill_value=None,
allow_dups: bool = False,
copy: bool = True,
consolidate: bool = True,
only_slice: bool = False,
) -> T:
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
if consolidate:
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(
indexer, fill_value=fill_value, only_slice=only_slice
)
else:
new_blocks = [
blk.take_nd(
indexer,
axis=axis,
fill_value=(
fill_value if fill_value is not None else blk.fill_value
),
)
for blk in self.blocks
]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return type(self).from_blocks(new_blocks, new_axes)
def _slice_take_blocks_ax0(
self, slice_or_indexer, fill_value=lib.no_default, only_slice: bool = False
):
allow_fill = fill_value is not lib.no_default
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill
)
if self.is_single_block:
blk = self.blocks[0]
if sl_type in ("slice", "mask"):
                # TODO(EA2D): special casing unnecessary with 2D EAs
if sllen == 0:
return []
return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_value is None:
fill_value = blk.fill_value
if not allow_fill and only_slice:
# GH#33597 slice instead of take, so we get
# views instead of copies
blocks = [
blk.getitem_block([ml], new_mgr_locs=i)
for i, ml in enumerate(slobj)
]
return blocks
else:
return [
blk.take_nd(
slobj,
axis=0,
new_mgr_locs=slice(0, sllen),
fill_value=fill_value,
)
]
if sl_type in ("slice", "mask"):
blknos = self.blknos[slobj]
blklocs = self.blklocs[slobj]
else:
blknos = algos.take_nd(
self.blknos, slobj, fill_value=-1, allow_fill=allow_fill
)
blklocs = algos.take_nd(
self.blklocs, slobj, fill_value=-1, allow_fill=allow_fill
)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
blocks = []
group = not only_slice
for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group):
if blkno == -1:
# If we've got here, fill_value was not lib.no_default
blocks.append(
self._make_na_block(placement=mgr_locs, fill_value=fill_value)
)
else:
blk = self.blocks[blkno]
if not blk._can_consolidate:
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=False)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
                    taker = blklocs[mgr_locs.indexer]
max_len = max(len(mgr_locs), taker.max() + 1)
if only_slice:
taker = lib.maybe_indices_to_slice(taker, max_len)
if isinstance(taker, slice):
nb = blk.getitem_block(taker, new_mgr_locs=mgr_locs)
blocks.append(nb)
elif only_slice:
for i, ml in zip(taker, mgr_locs):
nb = blk.getitem_block([i], new_mgr_locs=ml)
blocks.append(nb)
else:
nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs)
blocks.append(nb)
return blocks
def _make_na_block(self, placement, fill_value=None):
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement, ndim=block_values.ndim)
def take(self, indexer, axis: int = 1, verify: bool = True, convert: bool = True):
indexer = (
np.arange(indexer.start, indexer.stop, indexer.step, dtype="int64")
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype="int64")
)
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception("Indices must be nonzero and less than the axis length")
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(
new_axis=new_labels,
indexer=indexer,
axis=axis,
allow_dups=True,
consolidate=False,
)
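    # Illustrative sketch (same pandas assumption as above): take() reindexes
    # by position with allow_dups=True, so repeated positions are fine even
    # when the labels themselves would not reindex cleanly.
    #
    # >>> import pandas as pd
    # >>> df = pd.DataFrame({"a": [10, 20, 30]})
    # >>> df._mgr.take([2, 0, 0], axis=1).axes[1].tolist()
    # [2, 0, 0]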
def _equal_values(self: T, other: T) -> bool:
if self.ndim == 1:
if other.ndim != 1:
return False
left = self.blocks[0].values
right = other.blocks[0].values
return array_equals(left, right)
return blockwise_all(self, other, array_equals)
def unstack(self, unstacker, fill_value) -> BlockManager:
new_columns = unstacker.get_new_columns(self.items)
new_index = unstacker.new_index
new_blocks: List[Block] = []
columns_mask: List[np.ndarray] = []
for blk in self.blocks:
blk_cols = self.items[blk.mgr_locs.indexer]
new_items = unstacker.get_new_columns(blk_cols)
new_placement = new_columns.get_indexer(new_items)
blocks, mask = blk._unstack(
unstacker, fill_value, new_placement=new_placement
)
new_blocks.extend(blocks)
columns_mask.extend(mask)
new_columns = new_columns[columns_mask]
bm = BlockManager(new_blocks, [new_columns, new_index])
return bm
class SingleBlockManager(BlockManager):
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
is_single_block = True
def __init__(
self,
block: Block,
axis: Index,
do_integrity_check: bool = False,
fastpath=lib.no_default,
):
assert isinstance(block, Block), type(block)
assert isinstance(axis, Index), type(axis)
if fastpath is not lib.no_default:
warnings.warn(
"The `fastpath` keyword is deprecated and will be removed "
"in a future version.",
FutureWarning,
stacklevel=2,
)
self.axes = [axis]
self.blocks = (block,)
@classmethod
def from_blocks(cls, blocks: List[Block], axes: List[Index]) -> SingleBlockManager:
assert len(blocks) == 1
assert len(axes) == 1
return cls(blocks[0], axes[0], do_integrity_check=False)
@classmethod
def from_array(cls, array: ArrayLike, index: Index) -> SingleBlockManager:
block = make_block(array, placement=slice(0, len(index)), ndim=1)
return cls(block, index)
def _post_setstate(self):
pass
@property
def _block(self) -> Block:
return self.blocks[0]
@property
def _blknos(self):
return None
@property
def _blklocs(self):
return None
def get_slice(self, slobj: slice, axis: int = 0) -> SingleBlockManager:
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
blk = self._block
array = blk._slice(slobj)
block = blk.make_block_same_class(array, placement=slice(0, len(array)))
return type(self)(block, self.index[slobj])
@property
def index(self) -> Index:
return self.axes[0]
@property
def dtype(self) -> DtypeObj:
return self._block.dtype
def get_dtypes(self) -> np.ndarray:
return np.array([self._block.dtype])
def external_values(self):
return self._block.external_values()
def internal_values(self):
return self._block.internal_values()
@property
def _can_hold_na(self) -> bool:
return self._block._can_hold_na
def is_consolidated(self) -> bool:
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def idelete(self, indexer):
self._block.delete(indexer)
self.axes[0] = self.axes[0].delete(indexer)
def fast_xs(self, loc):
raise NotImplementedError("Use series._values[loc] instead")
def create_block_manager_from_blocks(blocks, axes: List[Index]) -> BlockManager:
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
if not len(blocks[0]):
blocks = []
else:
                # It is OK if a single block is passed as values, its placement
                # is basically "all items", but if there're many, don't bother
# converting, it's an error anyway.
blocks = [
make_block(
values=blocks[0], placement=slice(0, len(axes[0])), ndim=2
)
]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
blocks = [getattr(b, "values", b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
raise construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(
arrays, names: Index, axes: List[Index]
) -> BlockManager:
assert isinstance(names, Index)
assert isinstance(axes, list)
assert all(isinstance(x, Index) for x in axes)
arrays = [x if not isinstance(x, ABCPandasArray) else x.to_numpy() for x in arrays]
try:
blocks = _form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
raise construction_error(len(arrays), arrays[0].shape, axes, e)
def construction_error(tot_items, block_shape, axes, e=None):
passed = tuple(map(int, [tot_items] + list(block_shape)))
if len(passed) <= 2:
passed = passed[::-1]
implied = tuple(len(ax) for ax in axes)
if len(implied) <= 2:
implied = implied[::-1]
if passed == implied and e is not None:
return e
if block_shape[0] == 0:
return ValueError("Empty data passed with indices specified.")
return ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
def _form_blocks(arrays, names: Index, axes: List[Index]) -> List[Block]:
items_dict: DefaultDict[str, List] = defaultdict(list)
extra_locs = []
names_idx = names
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
v = arrays[name_idx]
block_type = get_block_type(v)
items_dict[block_type.__name__].append((i, v))
blocks: List[Block] = []
if len(items_dict["FloatBlock"]):
float_blocks = _multi_blockify(items_dict["FloatBlock"])
blocks.extend(float_blocks)
if len(items_dict["NumericBlock"]):
complex_blocks = _multi_blockify(items_dict["NumericBlock"])
blocks.extend(complex_blocks)
if len(items_dict["TimeDeltaBlock"]):
timedelta_blocks = _multi_blockify(items_dict["TimeDeltaBlock"])
blocks.extend(timedelta_blocks)
if len(items_dict["DatetimeBlock"]):
datetime_blocks = _simple_blockify(items_dict["DatetimeBlock"], DT64NS_DTYPE)
blocks.extend(datetime_blocks)
if len(items_dict["DatetimeTZBlock"]):
dttz_blocks = [
make_block(array, klass=DatetimeTZBlock, placement=i, ndim=2)
for i, array in items_dict["DatetimeTZBlock"]
]
blocks.extend(dttz_blocks)
if len(items_dict["ObjectBlock"]) > 0:
object_blocks = _simple_blockify(items_dict["ObjectBlock"], np.object_)
blocks.extend(object_blocks)
if len(items_dict["CategoricalBlock"]) > 0:
cat_blocks = [
make_block(array, klass=CategoricalBlock, placement=i, ndim=2)
for i, array in items_dict["CategoricalBlock"]
]
blocks.extend(cat_blocks)
if len(items_dict["ExtensionBlock"]):
external_blocks = [
make_block(array, klass=ExtensionBlock, placement=i, ndim=2)
for i, array in items_dict["ExtensionBlock"]
]
blocks.extend(external_blocks)
if len(items_dict["ObjectValuesExtensionBlock"]):
external_blocks = [
make_block(array, klass=ObjectValuesExtensionBlock, placement=i, ndim=2)
for i, array in items_dict["ObjectValuesExtensionBlock"]
]
blocks.extend(external_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs, ndim=2)
blocks.append(na_block)
return blocks
def _simple_blockify(tuples, dtype) -> List[Block]:
values, placement = _stack_arrays(tuples, dtype)
if dtype is not None and values.dtype != dtype:
values = values.astype(dtype)
block = make_block(values, placement=placement, ndim=2)
return [block]
def _multi_blockify(tuples, dtype: Optional[Dtype] = None):
grouper = itertools.groupby(tuples, lambda x: x[1].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(list(tup_block), dtype)
block = make_block(values, placement=placement, ndim=2)
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype: np.dtype):
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x._values
else:
return np.asarray(x)
placement, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + first.shape
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks: Sequence[Block]) -> Optional[DtypeObj]:
if not len(blocks):
return None
return find_common_type([b.dtype for b in blocks])
def _consolidate(blocks):
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks: List[Block] = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(
list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate
)
new_blocks = extend_blocks(merged_blocks, new_blocks)
return new_blocks
def _merge_blocks(
blocks: List[Block], dtype: DtypeObj, can_consolidate: bool
) -> List[Block]:
if len(blocks) == 1:
return blocks
if can_consolidate:
if dtype is None:
if len({b.dtype for b in blocks}) != 1:
raise AssertionError("_merge_blocks are invalid!")
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = np.vstack([b.values for b in blocks])
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return [make_block(new_values, placement=new_mgr_locs, ndim=2)]
return blocks
def _fast_count_smallints(arr: np.ndarray) -> np.ndarray:
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
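# Illustrative sketch: a bincount folded into (value, count) rows, which is
# how idelete/insert above walk only the block numbers that actually occur.
#
# >>> import numpy as np
# >>> _fast_count_smallints(np.array([0, 2, 2, 5]))
# array([[0, 1],
#        [2, 2],
#        [5, 1]])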
def _preprocess_slice_or_indexer(slice_or_indexer, length: int, allow_fill: bool):
if isinstance(slice_or_indexer, slice):
return (
"slice",
slice_or_indexer,
libinternals.slice_len(slice_or_indexer, length),
)
elif (
isinstance(slice_or_indexer, np.ndarray) and slice_or_indexer.dtype == np.bool_
):
return "mask", slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return "fancy", indexer, len(indexer)
| true
| true
|
1c45b3d5de333d6534be0122ea89da552988ca0c
| 601
|
py
|
Python
|
tests/changes/api/test_build_mark_seen.py
|
bowlofstew/changes
|
ebd393520e0fdb07c240a8d4e8747281b6186e28
|
[
"Apache-2.0"
] | 1
|
2015-11-08T13:00:44.000Z
|
2015-11-08T13:00:44.000Z
|
tests/changes/api/test_build_mark_seen.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | null | null | null |
tests/changes/api/test_build_mark_seen.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | null | null | null |
from changes.models import BuildSeen
from changes.testutils import APITestCase
class BuildMarkSeenTest(APITestCase):
def test_simple(self):
project = self.create_project()
build = self.create_build(project=project)
self.login_default()
path = '/api/0/builds/{0}/mark_seen/'.format(build.id.hex)
resp = self.client.post(path)
assert resp.status_code == 200
buildseen = BuildSeen.query.filter(
BuildSeen.user_id == self.default_user.id,
BuildSeen.build_id == build.id,
).first()
assert buildseen
| 26.130435
| 66
| 0.648918
|
from changes.models import BuildSeen
from changes.testutils import APITestCase
class BuildMarkSeenTest(APITestCase):
def test_simple(self):
project = self.create_project()
build = self.create_build(project=project)
self.login_default()
path = '/api/0/builds/{0}/mark_seen/'.format(build.id.hex)
resp = self.client.post(path)
assert resp.status_code == 200
buildseen = BuildSeen.query.filter(
BuildSeen.user_id == self.default_user.id,
BuildSeen.build_id == build.id,
).first()
assert buildseen
| true
| true
|
1c45b4011172fbf7f667e12379db8e0b37a73ae8
| 644
|
py
|
Python
|
WebFilm/urls.py
|
marekbaranowski98/WebFilm
|
5d78bb9518070c195feffc2181735b93be019ca0
|
[
"MIT"
] | null | null | null |
WebFilm/urls.py
|
marekbaranowski98/WebFilm
|
5d78bb9518070c195feffc2181735b93be019ca0
|
[
"MIT"
] | null | null | null |
WebFilm/urls.py
|
marekbaranowski98/WebFilm
|
5d78bb9518070c195feffc2181735b93be019ca0
|
[
"MIT"
] | null | null | null |
"""WebFilm URL Configuration
path docs/ loads url from apps docs
path / loads url from apps frontend
path api/users/ loads url from apps users
path api/photos loads url from app photos
path api/movies loads url from app movies
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('docs/', include('docs.urls')),
path('api/users/', include('users.urls')),
path('api/photos/', include('photos.urls')),
path('api/movies/', include('movies.urls')),
path('api/evaluations/', include('evaluations.urls')),
path('', include('frontend.urls')),
]
| 30.666667
| 58
| 0.692547
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('docs/', include('docs.urls')),
path('api/users/', include('users.urls')),
path('api/photos/', include('photos.urls')),
path('api/movies/', include('movies.urls')),
path('api/evaluations/', include('evaluations.urls')),
path('', include('frontend.urls')),
]
| true
| true
|
1c45b4b0875ea7d446dba15109b8e98b5d4bdaab
| 3,439
|
py
|
Python
|
libqtile/widget/windowname.py
|
Bauthe/qtile
|
569c4d9aaad1dbd912435648f5f814e084de8365
|
[
"MIT"
] | null | null | null |
libqtile/widget/windowname.py
|
Bauthe/qtile
|
569c4d9aaad1dbd912435648f5f814e084de8365
|
[
"MIT"
] | null | null | null |
libqtile/widget/windowname.py
|
Bauthe/qtile
|
569c4d9aaad1dbd912435648f5f814e084de8365
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2008, 2010 Aldo Cortesi
# Copyright (c) 2010 matt
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2012 Tim Neumann
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile import bar, hook, pangocffi
from libqtile.widget import base
class WindowName(base._TextBox):
"""Displays the name of the window that currently has focus"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('for_current_screen', False, 'instead of this bars screen use currently active screen'),
('empty_group_string', ' ', 'string to display when no windows are focused on current group'),
('max_chars', 0, 'max chars before truncating with ellipsis'),
('format', '{state}{name}', 'format of the text'),
]
def __init__(self, width=bar.STRETCH, **config):
base._TextBox.__init__(self, width=width, **config)
self.add_defaults(WindowName.defaults)
def _configure(self, qtile, bar):
base._TextBox._configure(self, qtile, bar)
hook.subscribe.client_name_updated(self.update)
hook.subscribe.focus_change(self.update)
hook.subscribe.float_change(self.update)
@hook.subscribe.current_screen_change
def on_screen_changed():
if self.for_current_screen:
self.update()
def truncate(self, text):
if self.max_chars == 0:
return text
return (text[:self.max_chars - 3].rstrip() + "...") if len(text) > self.max_chars else text
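    # Illustrative values: with max_chars=5, any name longer than 5 keeps its
    # first 2 characters (max_chars - 3, leaving room for the ellipsis),
    # e.g. truncate("terminal") -> "te...".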
def update(self, *args):
if self.for_current_screen:
w = self.qtile.current_screen.group.current_window
else:
w = self.bar.screen.group.current_window
state = ''
if w:
if w.maximized:
state = '[] '
elif w.minimized:
state = '_ '
elif w.floating:
state = 'V '
var = {}
var["state"] = state
var["name"] = w.name
var["class"] = w.window.get_wm_class()[0] if len(w.window.get_wm_class()) > 0 else ""
text = self.format.format(**var)
unescaped = self.truncate(text)
else:
unescaped = self.empty_group_string
self.text = pangocffi.markup_escape_text(unescaped)
self.bar.draw()
| 40.458824
| 102
| 0.662402
|
from libqtile import bar, hook, pangocffi
from libqtile.widget import base
class WindowName(base._TextBox):
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('for_current_screen', False, 'instead of this bars screen use currently active screen'),
('empty_group_string', ' ', 'string to display when no windows are focused on current group'),
('max_chars', 0, 'max chars before truncating with ellipsis'),
('format', '{state}{name}', 'format of the text'),
]
def __init__(self, width=bar.STRETCH, **config):
base._TextBox.__init__(self, width=width, **config)
self.add_defaults(WindowName.defaults)
def _configure(self, qtile, bar):
base._TextBox._configure(self, qtile, bar)
hook.subscribe.client_name_updated(self.update)
hook.subscribe.focus_change(self.update)
hook.subscribe.float_change(self.update)
@hook.subscribe.current_screen_change
def on_screen_changed():
if self.for_current_screen:
self.update()
def truncate(self, text):
if self.max_chars == 0:
return text
return (text[:self.max_chars - 3].rstrip() + "...") if len(text) > self.max_chars else text
def update(self, *args):
if self.for_current_screen:
w = self.qtile.current_screen.group.current_window
else:
w = self.bar.screen.group.current_window
state = ''
if w:
if w.maximized:
state = '[] '
elif w.minimized:
state = '_ '
elif w.floating:
state = 'V '
var = {}
var["state"] = state
var["name"] = w.name
var["class"] = w.window.get_wm_class()[0] if len(w.window.get_wm_class()) > 0 else ""
text = self.format.format(**var)
unescaped = self.truncate(text)
else:
unescaped = self.empty_group_string
self.text = pangocffi.markup_escape_text(unescaped)
self.bar.draw()
| true
| true
|
1c45b8317ee2fbfb8197eed5bc2187f391f7f3ad
| 3,634
|
py
|
Python
|
root/settings.py
|
henrid3v/pocket-man
|
d0e7f44674db877b3e658ee7fc8b0fddf79bfcc8
|
[
"MIT"
] | null | null | null |
root/settings.py
|
henrid3v/pocket-man
|
d0e7f44674db877b3e658ee7fc8b0fddf79bfcc8
|
[
"MIT"
] | 1
|
2020-11-28T21:27:01.000Z
|
2020-11-28T21:29:32.000Z
|
root/settings.py
|
shadowcompiler/pocket-man
|
d0e7f44674db877b3e658ee7fc8b0fddf79bfcc8
|
[
"MIT"
] | null | null | null |
"""
Django settings for root project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import environ
env = environ.Env()
environ.Env.read_env()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pocket',
'pocket.users',
'pocket.manager',
'crispy_forms',
'widget_tweaks',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'root.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'root.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static/')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedStaticFilesStorage'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_URL = 'accounts/login'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 26.720588
| 91
| 0.705559
|
import os
import environ
env = environ.Env()
environ.Env.read_env()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.environ.get('SECRET_KEY')
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pocket',
'pocket.users',
'pocket.manager',
'crispy_forms',
'widget_tweaks',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'root.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'root.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static/')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedStaticFilesStorage'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_URL = 'accounts/login'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| true
| true
|
1c45b92429dcb84d7d15f647c4e3472f81ee716b
| 4,819
|
py
|
Python
|
pychron/lasers/tasks/panes/uv.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/lasers/tasks/panes/uv.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/lasers/tasks/panes/uv.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pyface.tasks.traits_dock_pane import TraitsDockPane
from traitsui.api import (
View,
Item,
VGroup,
HGroup,
spring,
UItem,
ButtonEditor,
Group,
EnumEditor,
)
from pychron.core.ui.led_editor import LEDEditor
from pychron.core.ui.qt.reference_mark_editor import ReferenceMarkEditor
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.lasers.tasks.laser_panes import (
BaseLaserPane,
ClientPane,
StageControlPane,
AxesPane,
SupplementalPane,
)
# ============= standard library imports ========================
# ============= local library imports ==========================
class FusionsUVClientPane(ClientPane):
pass
class FusionsUVPane(BaseLaserPane):
pass
class FusionsUVStagePane(StageControlPane):
id = "pychron.fusions.uv.stage"
def _get_tabs(self):
tabs = super(FusionsUVStagePane, self)._get_tabs()
refmark_grp = VGroup(
HGroup(
UItem(
"object.reference_marks.mark",
editor=EnumEditor(name="object.reference_marks.mark_ids"),
),
icon_button_editor("add_reference_mark_button", "add"),
),
Item("object.reference_marks.mark_display", editor=ReferenceMarkEditor()),
UItem("reset_reference_marks_button"),
Item("object.reference_marks.spacing"),
Item("save_reference_marks_canvas_button"),
label="Ref. Marks",
)
tabs.content.append(refmark_grp)
return tabs
class FusionsUVAxesPane(AxesPane):
id = "pychron.fusions.uv.axes"
class FusionsUVSupplementalPane(SupplementalPane):
id = "pychron.fusions.uv.supplemental"
name = "UV"
def traits_view(self):
v = View(
Group(
VGroup(
Item("fiber_light", style="custom", show_label=False),
label="FiberLight",
),
layout="tabbed",
)
)
return v
def button_editor(name, label, **kw):
return UItem(name, editor=ButtonEditor(label_value=label))
class FusionsUVControlPane(TraitsDockPane):
id = "pychron.fusions.uv.control"
def traits_view(self):
grp = VGroup(
HGroup(
Item(
"enabled",
show_label=False,
style="custom",
editor=LEDEditor(colors=["red", "green"]),
),
button_editor("enable", "enable_label"),
spring,
),
HGroup(
Item("action_readback", width=100, style="readonly", label="Action"),
Item("status_readback", style="readonly", label="Status"),
),
HGroup(
button_editor("fire_button", "fire_label"),
Item("fire_mode", show_label=False),
enabled_when='object.enabled and object.status_readback=="Laser On"',
),
HGroup(
Item("burst_shot", label="N Burst", enabled_when='fire_mode=="Burst"'),
Item("reprate", label="Rep. Rate"),
),
HGroup(
Item("burst_readback", label="Burst Rem.", width=50, style="readonly"),
Item(
"energy_readback",
label="Energy (mJ)",
style="readonly",
format_str="%0.2f",
),
Item(
"pressure_readback",
label="Pressure (mbar)",
style="readonly",
width=100,
format_str="%0.1f",
),
spring,
enabled_when="object.enabled",
),
)
v = View(grp)
return v
# ============= EOF =============================================
| 31.292208
| 87
| 0.529155
|
from __future__ import absolute_import
from pyface.tasks.traits_dock_pane import TraitsDockPane
from traitsui.api import (
View,
Item,
VGroup,
HGroup,
spring,
UItem,
ButtonEditor,
Group,
EnumEditor,
)
from pychron.core.ui.led_editor import LEDEditor
from pychron.core.ui.qt.reference_mark_editor import ReferenceMarkEditor
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.lasers.tasks.laser_panes import (
BaseLaserPane,
ClientPane,
StageControlPane,
AxesPane,
SupplementalPane,
)
class FusionsUVClientPane(ClientPane):
pass
class FusionsUVPane(BaseLaserPane):
pass
class FusionsUVStagePane(StageControlPane):
id = "pychron.fusions.uv.stage"
def _get_tabs(self):
tabs = super(FusionsUVStagePane, self)._get_tabs()
refmark_grp = VGroup(
HGroup(
UItem(
"object.reference_marks.mark",
editor=EnumEditor(name="object.reference_marks.mark_ids"),
),
icon_button_editor("add_reference_mark_button", "add"),
),
Item("object.reference_marks.mark_display", editor=ReferenceMarkEditor()),
UItem("reset_reference_marks_button"),
Item("object.reference_marks.spacing"),
Item("save_reference_marks_canvas_button"),
label="Ref. Marks",
)
tabs.content.append(refmark_grp)
return tabs
class FusionsUVAxesPane(AxesPane):
id = "pychron.fusions.uv.axes"
class FusionsUVSupplementalPane(SupplementalPane):
id = "pychron.fusions.uv.supplemental"
name = "UV"
def traits_view(self):
v = View(
Group(
VGroup(
Item("fiber_light", style="custom", show_label=False),
label="FiberLight",
),
layout="tabbed",
)
)
return v
def button_editor(name, label, **kw):
return UItem(name, editor=ButtonEditor(label_value=label))
class FusionsUVControlPane(TraitsDockPane):
id = "pychron.fusions.uv.control"
def traits_view(self):
grp = VGroup(
HGroup(
Item(
"enabled",
show_label=False,
style="custom",
editor=LEDEditor(colors=["red", "green"]),
),
button_editor("enable", "enable_label"),
spring,
),
HGroup(
Item("action_readback", width=100, style="readonly", label="Action"),
Item("status_readback", style="readonly", label="Status"),
),
HGroup(
button_editor("fire_button", "fire_label"),
Item("fire_mode", show_label=False),
enabled_when='object.enabled and object.status_readback=="Laser On"',
),
HGroup(
Item("burst_shot", label="N Burst", enabled_when='fire_mode=="Burst"'),
Item("reprate", label="Rep. Rate"),
),
HGroup(
Item("burst_readback", label="Burst Rem.", width=50, style="readonly"),
Item(
"energy_readback",
label="Energy (mJ)",
style="readonly",
format_str="%0.2f",
),
Item(
"pressure_readback",
label="Pressure (mbar)",
style="readonly",
width=100,
format_str="%0.1f",
),
spring,
enabled_when="object.enabled",
),
)
v = View(grp)
return v
| true
| true
|
1c45b960408ef5e1ab38b4817737225fd34b5a9f
| 575
|
py
|
Python
|
test/test_ncbi.py
|
Daniel-Davies/pytaxize
|
446990c0f64c8360f1ee65fa7beaeb2410f6213d
|
[
"MIT"
] | 21
|
2015-02-23T19:41:09.000Z
|
2020-11-04T15:11:20.000Z
|
test/test_ncbi.py
|
Daniel-Davies/pytaxize
|
446990c0f64c8360f1ee65fa7beaeb2410f6213d
|
[
"MIT"
] | 56
|
2015-01-12T09:05:10.000Z
|
2020-09-24T01:48:10.000Z
|
test/test_ncbi.py
|
Daniel-Davies/pytaxize
|
446990c0f64c8360f1ee65fa7beaeb2410f6213d
|
[
"MIT"
] | 21
|
2015-01-12T08:45:02.000Z
|
2020-09-10T01:01:43.000Z
|
import os
from nose.tools import *
import unittest
import vcr
from pytaxize import ncbi
class NcbiTest(unittest.TestCase):
@vcr.use_cassette("test/vcr_cassettes/ncbi_search.yml", filter_query_parameters=['api_key'])
def test_ncbi_search(self):
"ncbi.search"
x = ncbi.search(sci_com = "Apis")
assert type(x) == dict
assert list(x.keys()) == ["Apis"]
assert type(x['Apis']) == list
assert type(x['Apis'][0]) == dict
assert x['Apis'][0]['ScientificName'] == "Apis"
assert x['Apis'][0]['TaxId'] == "7459"
| 30.263158
| 96
| 0.61913
|
import os
from nose.tools import *
import unittest
import vcr
from pytaxize import ncbi
class NcbiTest(unittest.TestCase):
@vcr.use_cassette("test/vcr_cassettes/ncbi_search.yml", filter_query_parameters=['api_key'])
def test_ncbi_search(self):
x = ncbi.search(sci_com = "Apis")
assert type(x) == dict
assert list(x.keys()) == ["Apis"]
assert type(x['Apis']) == list
assert type(x['Apis'][0]) == dict
assert x['Apis'][0]['ScientificName'] == "Apis"
assert x['Apis'][0]['TaxId'] == "7459"
| true
| true
|
1c45ba8f50be8960f823fac0995df7dfaa1215e0
| 218
|
py
|
Python
|
models/__init__.py
|
netotz/p-dispersion-problem
|
123a6110dbf64d19a221da545c0590f7efc500dc
|
[
"MIT"
] | 1
|
2021-09-23T06:31:47.000Z
|
2021-09-23T06:31:47.000Z
|
models/__init__.py
|
binary-hideout/p-dispersion-problem
|
123a6110dbf64d19a221da545c0590f7efc500dc
|
[
"MIT"
] | 1
|
2021-08-31T15:15:08.000Z
|
2021-08-31T15:15:08.000Z
|
models/__init__.py
|
netotz/p-dispersion-problem
|
123a6110dbf64d19a221da545c0590f7efc500dc
|
[
"MIT"
] | 1
|
2020-05-19T04:46:47.000Z
|
2020-05-19T04:46:47.000Z
|
'''
Package that contains the models of the project.
These models are the Point and PDPInstance classes.
'''
# package level imports
from .point import Point
from .pdp_instance import PDPInstance, Matrix, Solution
| 24.222222
| 55
| 0.784404
|
from .point import Point
from .pdp_instance import PDPInstance, Matrix, Solution
| true
| true
|
1c45bafe765f80375e19d84146bad5379603a450
| 356
|
py
|
Python
|
Interviews/HUAWEI/19/1.py
|
cnsteven/online-judge
|
60ee841a97e2bc0dc9c7b23fe5daa186898ab8b7
|
[
"MIT"
] | 1
|
2019-05-04T10:28:32.000Z
|
2019-05-04T10:28:32.000Z
|
Interviews/HUAWEI/19/1.py
|
cnsteven/online-judge
|
60ee841a97e2bc0dc9c7b23fe5daa186898ab8b7
|
[
"MIT"
] | null | null | null |
Interviews/HUAWEI/19/1.py
|
cnsteven/online-judge
|
60ee841a97e2bc0dc9c7b23fe5daa186898ab8b7
|
[
"MIT"
] | 3
|
2020-12-31T04:36:38.000Z
|
2021-07-25T07:39:31.000Z
|
import math
n = list(map(int, input().split()))
length = len(n)
dp = [math.inf] * length
for i in range(1, int(length / 2)):
step = 1
idx = i
while idx < length:
dp[idx] = min(dp[idx], step)
idx = idx + n[idx]
step += 1
if dp[length - 1] == math.inf:
print(-1)
else:
print(dp[length - 1])
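# Worked example (assumed input format, since the problem statement is not
# included): for the line "2 3 1 1 4", range(1, int(5 / 2)) tries only i = 1,
# visiting idx 1 -> 4 with step counts 1 and 2, so dp[-1] == 2 and the program
# prints 2; an unreachable last cell stays math.inf and -1 is printed instead.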
| 17.8
| 37
| 0.491573
|
import math
n = list(map(int, input().split()))
length = len(n)
dp = [math.inf] * length
for i in range(1, int(length / 2)):
step = 1
idx = i
while idx < length:
dp[idx] = min(dp[idx], step)
idx = idx + n[idx]
step += 1
if dp[length - 1] == math.inf:
print(-1)
else:
print(dp[length - 1])
| true
| true
|
1c45bb098fd540b0ca4ce20913c1c1b808e0ae7b
| 1,204
|
py
|
Python
|
tutorial/proxy.py
|
maksimKorzh/fresh-proxy-list
|
e9ed2821a8445430aa30252c01b618892093f5ed
|
[
"MIT"
] | 7
|
2019-05-24T15:08:25.000Z
|
2020-06-08T07:51:33.000Z
|
tutorial/proxy.py
|
maksimKorzh/fresh-proxy-list
|
e9ed2821a8445430aa30252c01b618892093f5ed
|
[
"MIT"
] | null | null | null |
tutorial/proxy.py
|
maksimKorzh/fresh-proxy-list
|
e9ed2821a8445430aa30252c01b618892093f5ed
|
[
"MIT"
] | 5
|
2019-11-19T23:00:57.000Z
|
2021-12-22T04:01:31.000Z
|
import requests
from bs4 import BeautifulSoup
proxyList = []
response = requests.get('https://free-proxy-list.net/')
bs = BeautifulSoup(response.text, 'lxml')
table = bs.find('table')
rows = table.find_all('tr')
count = 0
for row in rows:
ip = row.contents[0].text
port = row.contents[1].text
anonym = row.contents[4].text
secconn = row.contents[6].text
if(secconn == 'yes' and (anonym == 'anonymous' or anonym == 'elite proxy')):
line = 'http://' + ip + ':' + port
proxies = { 'http': line, 'https': line }
try:
testIP = requests.get('https://httpbin.org/ip', proxies = proxies, timeout = 3)
print(testIP.text)
resIP = testIP.json()['origin']
origin = resIP.split(',')
if origin[0] == ip:
print(' Proxy ok! Appending proxy to proxyList...')
proxyList.append(line)
count += 1
if count == 5:
break
        except Exception:
print('Bad proxy')
with open('proxies.txt', 'w') as f:
for proxy in proxyList:
f.write("%s\n" % proxy)
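# Illustrative follow-up (a sketch, not part of the script above): the file
# written here can seed later requests, assuming at least one proxy survived:
#
#   with open('proxies.txt') as f:
#       proxy = f.readline().strip()
#   requests.get('https://httpbin.org/ip',
#                proxies={'http': proxy, 'https': proxy}, timeout=3)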
| 27.363636
| 91
| 0.508306
|
import requests
from bs4 import BeautifulSoup
proxyList = []
response = requests.get('https://free-proxy-list.net/')
bs = BeautifulSoup(response.text, 'lxml')
table = bs.find('table')
rows = table.find_all('tr')
count = 0
for row in rows:
ip = row.contents[0].text
port = row.contents[1].text
anonym = row.contents[4].text
secconn = row.contents[6].text
if(secconn == 'yes' and (anonym == 'anonymous' or anonym == 'elite proxy')):
line = 'http://' + ip + ':' + port
proxies = { 'http': line, 'https': line }
try:
testIP = requests.get('https://httpbin.org/ip', proxies = proxies, timeout = 3)
print(testIP.text)
resIP = testIP.json()['origin']
origin = resIP.split(',')
if origin[0] == ip:
print(' Proxy ok! Appending proxy to proxyList...')
proxyList.append(line)
count += 1
if count == 5:
break
        except Exception:
print('Bad proxy')
with open('proxies.txt', 'w') as f:
for proxy in proxyList:
f.write("%s\n" % proxy)
| true
| true
|
1c45bb97d6036108335eeb9c5089a59bb600968e
| 8,237
|
py
|
Python
|
bluzelle/codec/crud/KeyValue_pb2.py
|
hhio618/bluezelle-py
|
c38a07458a36305457680196e8c47372008db5ab
|
[
"MIT"
] | 3
|
2021-08-19T10:09:29.000Z
|
2022-01-05T14:19:59.000Z
|
bluzelle/codec/crud/KeyValue_pb2.py
|
hhio618/bluzelle-py
|
c38a07458a36305457680196e8c47372008db5ab
|
[
"MIT"
] | null | null | null |
bluzelle/codec/crud/KeyValue_pb2.py
|
hhio618/bluzelle-py
|
c38a07458a36305457680196e8c47372008db5ab
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: crud/KeyValue.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from bluzelle.codec.crud import lease_pb2 as crud_dot_lease__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="crud/KeyValue.proto",
package="bluzelle.curium.crud",
syntax="proto3",
serialized_options=b"Z'github.com/bluzelle/curium/x/crud/types",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x13\x63rud/KeyValue.proto\x12\x14\x62luzelle.curium.crud\x1a\x10\x63rud/lease.proto"&\n\x08KeyValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c"W\n\rKeyValueLease\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x12*\n\x05lease\x18\x03 \x01(\x0b\x32\x1b.bluzelle.curium.crud.Lease"(\n\x08KeyLease\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x0f\n\x07seconds\x18\x02 \x01(\rB)Z\'github.com/bluzelle/curium/x/crud/typesb\x06proto3',
dependencies=[
crud_dot_lease__pb2.DESCRIPTOR,
],
)
_KEYVALUE = _descriptor.Descriptor(
name="KeyValue",
full_name="bluzelle.curium.crud.KeyValue",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="bluzelle.curium.crud.KeyValue.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="value",
full_name="bluzelle.curium.crud.KeyValue.value",
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=63,
serialized_end=101,
)
_KEYVALUELEASE = _descriptor.Descriptor(
name="KeyValueLease",
full_name="bluzelle.curium.crud.KeyValueLease",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="bluzelle.curium.crud.KeyValueLease.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="value",
full_name="bluzelle.curium.crud.KeyValueLease.value",
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="lease",
full_name="bluzelle.curium.crud.KeyValueLease.lease",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=103,
serialized_end=190,
)
_KEYLEASE = _descriptor.Descriptor(
name="KeyLease",
full_name="bluzelle.curium.crud.KeyLease",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="bluzelle.curium.crud.KeyLease.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="seconds",
full_name="bluzelle.curium.crud.KeyLease.seconds",
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=192,
serialized_end=232,
)
_KEYVALUELEASE.fields_by_name["lease"].message_type = crud_dot_lease__pb2._LEASE
DESCRIPTOR.message_types_by_name["KeyValue"] = _KEYVALUE
DESCRIPTOR.message_types_by_name["KeyValueLease"] = _KEYVALUELEASE
DESCRIPTOR.message_types_by_name["KeyLease"] = _KEYLEASE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
KeyValue = _reflection.GeneratedProtocolMessageType(
"KeyValue",
(_message.Message,),
{
"DESCRIPTOR": _KEYVALUE,
"__module__": "crud.KeyValue_pb2"
# @@protoc_insertion_point(class_scope:bluzelle.curium.crud.KeyValue)
},
)
_sym_db.RegisterMessage(KeyValue)
KeyValueLease = _reflection.GeneratedProtocolMessageType(
"KeyValueLease",
(_message.Message,),
{
"DESCRIPTOR": _KEYVALUELEASE,
"__module__": "crud.KeyValue_pb2"
# @@protoc_insertion_point(class_scope:bluzelle.curium.crud.KeyValueLease)
},
)
_sym_db.RegisterMessage(KeyValueLease)
KeyLease = _reflection.GeneratedProtocolMessageType(
"KeyLease",
(_message.Message,),
{
"DESCRIPTOR": _KEYLEASE,
"__module__": "crud.KeyValue_pb2"
# @@protoc_insertion_point(class_scope:bluzelle.curium.crud.KeyLease)
},
)
_sym_db.RegisterMessage(KeyLease)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
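# Illustrative sketch (not emitted by protoc; field names come from the
# descriptors above): the generated classes round-trip through the standard
# protobuf Python API.
#
# >>> kv = KeyValue(key="greeting", value=b"hello")
# >>> KeyValue.FromString(kv.SerializeToString()).value
# b'hello'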
| 30.507407
| 489
| 0.618308
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from bluzelle.codec.crud import lease_pb2 as crud_dot_lease__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="crud/KeyValue.proto",
package="bluzelle.curium.crud",
syntax="proto3",
serialized_options=b"Z'github.com/bluzelle/curium/x/crud/types",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x13\x63rud/KeyValue.proto\x12\x14\x62luzelle.curium.crud\x1a\x10\x63rud/lease.proto"&\n\x08KeyValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c"W\n\rKeyValueLease\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x12*\n\x05lease\x18\x03 \x01(\x0b\x32\x1b.bluzelle.curium.crud.Lease"(\n\x08KeyLease\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x0f\n\x07seconds\x18\x02 \x01(\rB)Z\'github.com/bluzelle/curium/x/crud/typesb\x06proto3',
dependencies=[
crud_dot_lease__pb2.DESCRIPTOR,
],
)
_KEYVALUE = _descriptor.Descriptor(
name="KeyValue",
full_name="bluzelle.curium.crud.KeyValue",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="bluzelle.curium.crud.KeyValue.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="value",
full_name="bluzelle.curium.crud.KeyValue.value",
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=63,
serialized_end=101,
)
_KEYVALUELEASE = _descriptor.Descriptor(
name="KeyValueLease",
full_name="bluzelle.curium.crud.KeyValueLease",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="bluzelle.curium.crud.KeyValueLease.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="value",
full_name="bluzelle.curium.crud.KeyValueLease.value",
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="lease",
full_name="bluzelle.curium.crud.KeyValueLease.lease",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=103,
serialized_end=190,
)
_KEYLEASE = _descriptor.Descriptor(
name="KeyLease",
full_name="bluzelle.curium.crud.KeyLease",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="bluzelle.curium.crud.KeyLease.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="seconds",
full_name="bluzelle.curium.crud.KeyLease.seconds",
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=192,
serialized_end=232,
)
_KEYVALUELEASE.fields_by_name["lease"].message_type = crud_dot_lease__pb2._LEASE
DESCRIPTOR.message_types_by_name["KeyValue"] = _KEYVALUE
DESCRIPTOR.message_types_by_name["KeyValueLease"] = _KEYVALUELEASE
DESCRIPTOR.message_types_by_name["KeyLease"] = _KEYLEASE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
KeyValue = _reflection.GeneratedProtocolMessageType(
"KeyValue",
(_message.Message,),
{
"DESCRIPTOR": _KEYVALUE,
"__module__": "crud.KeyValue_pb2"
# @@protoc_insertion_point(class_scope:bluzelle.curium.crud.KeyValue)
},
)
_sym_db.RegisterMessage(KeyValue)
KeyValueLease = _reflection.GeneratedProtocolMessageType(
"KeyValueLease",
(_message.Message,),
{
"DESCRIPTOR": _KEYVALUELEASE,
"__module__": "crud.KeyValue_pb2"
# @@protoc_insertion_point(class_scope:bluzelle.curium.crud.KeyValueLease)
},
)
_sym_db.RegisterMessage(KeyValueLease)
KeyLease = _reflection.GeneratedProtocolMessageType(
"KeyLease",
(_message.Message,),
{
"DESCRIPTOR": _KEYLEASE,
"__module__": "crud.KeyValue_pb2"
# @@protoc_insertion_point(class_scope:bluzelle.curium.crud.KeyLease)
},
)
_sym_db.RegisterMessage(KeyLease)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true
| true
|
1c45bbaba79b9d8c2de84555e39251039007bf90
| 2,727
|
py
|
Python
|
crypto/hard1/service/server.py
|
AnyKeyShik/CTF_Code
|
32ff5dce6452dbea09eff0a4db7ad603efe4027d
|
[
"Apache-2.0"
] | null | null | null |
crypto/hard1/service/server.py
|
AnyKeyShik/CTF_Code
|
32ff5dce6452dbea09eff0a4db7ad603efe4027d
|
[
"Apache-2.0"
] | null | null | null |
crypto/hard1/service/server.py
|
AnyKeyShik/CTF_Code
|
32ff5dce6452dbea09eff0a4db7ad603efe4027d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from random import randint
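# Euclid's algorithm (recursive) for the greatest common divisor.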
def gcd(a, b):
if a < b:
return gcd(b, a)
elif a % b == 0:
        return b
else:
return gcd(b, a % b)
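# Draw a random key in [10^20, modulo] that is coprime to the modulus.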
def gen_key(modulo):
key = randint(pow(10, 20), modulo)
while gcd(modulo, key) != 1:
key = randint(pow(10, 20), modulo)
return key
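# Modular exponentiation by repeated squaring (square-and-multiply).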
def power(num, exp, mod):
x = 1
y = num
while exp > 0:
if exp % 2 != 0:
x = (x * y) % mod
y = (y * y) % mod
exp = exp // 2
return x % mod
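# ElGamal-style encryption: c1 = g^k mod p, c2 = (pub^k mod p) * flag.
# Note that c2 is a plain integer product and is not reduced modulo p.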
def encrypt(flag, modulo, generator, pub):
sender_key = gen_key(modulo)
secret = power(pub, sender_key, modulo)
c1 = power(generator, sender_key, modulo)
c2 = secret * flag
return c1, c2
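# Recover the message from (c1, c2) with the private key, then decode the
# resulting integer as hex-encoded ASCII text.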
def decrypt(c1, c2, priv, modulo):
c1_x = power(c1, priv, modulo)
msg = (c2 // c1_x) % modulo
msg = hex(msg)[2:]
msg = ''.join([chr(int(''.join(ch), 16)) for ch in zip(msg[0::2], msg[1::2])])
return msg
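# Read the flag file and pack it into a single integer via its hex encoding.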
def read_flag():
try:
with open("flag", 'r') as flagfile:
flag = flagfile.read()
except IOError:
        print('Some files are missing, tell admin')
exit(-1)
hexflag = "".join("{:02x}".format(ord(ch)) for ch in flag)
numflag = int(hexflag, 16)
return numflag
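# Generate ElGamal-style parameters: a random modulus, a random generator
# and a private/public key pair.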
def prepare_elgamal():
modulo = randint(pow(10, 20), pow(10, 50))
generator = randint(2, modulo)
private = gen_key(modulo)
public = power(generator, private, modulo)
return (modulo, generator, public), private
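# Decryption oracle: print the public parameters and the encrypted flag,
# then decrypt any ciphertext pair the user submits except the original one.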
def main():
# Challenge text
    CHALL_TEXT = "Hi. This is your friendly 'Decryption Oracle'\nWe have implemented a well-known public-key cryptosystem. Guess which ;)\n\nModulo: {modulo}\nGenerator: {generator}\nPublic key: {public}\nCiphertext: {cipher}\n\nInsert your Ciphertext-Tuple for me to decrypt - comma separated (e.g. 5,6)"
SAME_MSG = "Duh! This would be too easy, right?"
INVITE = ">>> "
INCORRECT_INPUT = "Incorrect input!"
flag = read_flag()
public, private = prepare_elgamal()
cipher = encrypt(flag, *public)
print(CHALL_TEXT.format(modulo=public[0], generator=public[1], public=public[2], cipher=cipher))
while True:
print(INVITE, end='')
user_input = input()
try:
enc_msg = tuple(map(int, user_input.replace(' ', '').split(',')))
if len(enc_msg) != 2:
                raise ValueError
except Exception:
print(INCORRECT_INPUT)
continue
if enc_msg == cipher:
msg = SAME_MSG
else:
msg = decrypt(*enc_msg, private, public[0])
print(msg)
if __name__ == '__main__':
main()
| 23.110169
| 305
| 0.574624
|
from random import randint
def gcd(a, b):
if a < b:
return gcd(b, a)
elif a % b == 0:
        return b
else:
return gcd(b, a % b)
def gen_key(modulo):
key = randint(pow(10, 20), modulo)
while gcd(modulo, key) != 1:
key = randint(pow(10, 20), modulo)
return key
def power(num, exp, mod):
x = 1
y = num
while exp > 0:
if exp % 2 != 0:
x = (x * y) % mod
y = (y * y) % mod
exp = exp // 2
return x % mod
def encrypt(flag, modulo, generator, pub):
sender_key = gen_key(modulo)
secret = power(pub, sender_key, modulo)
c1 = power(generator, sender_key, modulo)
c2 = secret * flag
return c1, c2
def decrypt(c1, c2, priv, modulo):
c1_x = power(c1, priv, modulo)
msg = (c2 // c1_x) % modulo
msg = hex(msg)[2:]
msg = ''.join([chr(int(''.join(ch), 16)) for ch in zip(msg[0::2], msg[1::2])])
return msg
def read_flag():
try:
with open("flag", 'r') as flagfile:
flag = flagfile.read()
except IOError:
        print('Some files are missing, tell admin')
exit(-1)
hexflag = "".join("{:02x}".format(ord(ch)) for ch in flag)
numflag = int(hexflag, 16)
return numflag
def prepare_elgamal():
modulo = randint(pow(10, 20), pow(10, 50))
generator = randint(2, modulo)
private = gen_key(modulo)
public = power(generator, private, modulo)
return (modulo, generator, public), private
def main():
    CHALL_TEXT = "Hi. This is your friendly 'Decryption Oracle'\nWe have implemented a well-known public-key cryptosystem. Guess which ;)\n\nModulo: {modulo}\nGenerator: {generator}\nPublic key: {public}\nCiphertext: {cipher}\n\nInsert your Ciphertext-Tuple for me to decrypt - comma separated (e.g. 5,6)"
SAME_MSG = "Duh! This would be too easy, right?"
INVITE = ">>> "
INCORRECT_INPUT = "Incorrect input!"
flag = read_flag()
public, private = prepare_elgamal()
cipher = encrypt(flag, *public)
print(CHALL_TEXT.format(modulo=public[0], generator=public[1], public=public[2], cipher=cipher))
while True:
print(INVITE, end='')
user_input = input()
try:
enc_msg = tuple(map(int, user_input.replace(' ', '').split(',')))
if len(enc_msg) != 2:
                raise ValueError
except Exception:
print(INCORRECT_INPUT)
continue
if enc_msg == cipher:
msg = SAME_MSG
else:
msg = decrypt(*enc_msg, private, public[0])
print(msg)
if __name__ == '__main__':
main()
| true
| true
|
1c45bd5ae57fb300ba5e328a5611c8d8c5854181
| 1,330
|
py
|
Python
|
tests/test_data/test_sciense.py
|
el/elizabeth
|
dc82cd9d2bb230acdb2f1a49bc16b1c3d12077ff
|
[
"MIT"
] | null | null | null |
tests/test_data/test_sciense.py
|
el/elizabeth
|
dc82cd9d2bb230acdb2f1a49bc16b1c3d12077ff
|
[
"MIT"
] | null | null | null |
tests/test_data/test_sciense.py
|
el/elizabeth
|
dc82cd9d2bb230acdb2f1a49bc16b1c3d12077ff
|
[
"MIT"
] | 1
|
2019-12-27T19:34:17.000Z
|
2019-12-27T19:34:17.000Z
|
# -*- coding: utf-8 -*-
import re
from unittest import TestCase
from elizabeth import Science
import elizabeth.core.interdata as common
from tests.test_data import DummyCase
from ._patterns import STR_REGEX
class ScienceBaseTest(TestCase):
def setUp(self):
self.science = Science()
def tearDown(self):
del self.science
def test_str(self):
self.assertTrue(re.match(STR_REGEX, self.science.__str__()))
def test_math_formula(self):
result = self.science.math_formula()
self.assertIn(result, common.MATH_FORMULAS)
class ScienceTestCase(DummyCase):
def test_scientific_article(self):
result = self.generic.science.scientific_article()
self.assertIn(result, self.generic.science._data['article'])
def test_scientist(self):
result = self.generic.science.scientist()
self.assertIn(result, self.generic.science._data['scientist'])
def test_chemical_element(self):
# Because: https://travis-ci.org/lk-geimfari/elizabeth/jobs/196565835
if self.generic.locale != 'fa':
result = self.generic.science.chemical_element(name_only=True)
self.assertTrue(len(result) >= 1)
result = self.generic.science.chemical_element(name_only=False)
self.assertIsInstance(result, dict)
| 30.227273
| 77
| 0.695489
|
import re
from unittest import TestCase
from elizabeth import Science
import elizabeth.core.interdata as common
from tests.test_data import DummyCase
from ._patterns import STR_REGEX
class ScienceBaseTest(TestCase):
def setUp(self):
self.science = Science()
def tearDown(self):
del self.science
def test_str(self):
self.assertTrue(re.match(STR_REGEX, self.science.__str__()))
def test_math_formula(self):
result = self.science.math_formula()
self.assertIn(result, common.MATH_FORMULAS)
class ScienceTestCase(DummyCase):
def test_scientific_article(self):
result = self.generic.science.scientific_article()
self.assertIn(result, self.generic.science._data['article'])
def test_scientist(self):
result = self.generic.science.scientist()
self.assertIn(result, self.generic.science._data['scientist'])
def test_chemical_element(self):
if self.generic.locale != 'fa':
result = self.generic.science.chemical_element(name_only=True)
self.assertTrue(len(result) >= 1)
result = self.generic.science.chemical_element(name_only=False)
self.assertIsInstance(result, dict)
| true
| true
|