body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
7d902204e1fc491288da87c85dc6b203c2324b020ecf3be1c2fca36879f74bd3
def negL_rhoomega_viaHDPTopicUtil(rho=None, omega=None, nDoc=0, sumLogPiActiveVec=None, sumLogPiRemVec=None, alpha=0.5, gamma=1.0, **kwargs): ' Compute minimization objective another way, using utility funcs.\n\n This allows verifying that our negL_rhoomega function is correct.\n\n Returns\n -------\n negL : -1 * L(rho, omega, ...)\n Should be the same value as negL_rhoomega.\n ' K = rho.size from .HDPTopicUtil import L_alloc Ldict = L_alloc(todict=1, rho=rho, omega=omega, nDoc=nDoc, alpha=alpha, gamma=gamma) from .HDPTopicUtil import calcELBO_NonlinearTerms Ldict2 = calcELBO_NonlinearTerms(todict=1, rho=rho, alpha=alpha, gamma=gamma, nDoc=nDoc, sumLogPi=sumLogPiActiveVec, sumLogPiRemVec=sumLogPiRemVec, gammalnTheta=np.zeros(K), gammalnSumTheta=0, gammalnThetaRem=0, slackTheta=np.zeros(K), slackThetaRem=0, Hresp=np.zeros(K)) Lrhoomega = (Ldict['Lalloc_rhoomega'] + Ldict2['Lslack_alphaEbeta']) return ((- 1) * Lrhoomega)
Compute minimization objective another way, using utility funcs. This allows verifying that our negL_rhoomega function is correct. Returns ------- negL : -1 * L(rho, omega, ...) Should be the same value as negL_rhoomega.
bnpy/allocmodel/topics/OptimizerRhoOmegaBetter.py
negL_rhoomega_viaHDPTopicUtil
tinnguyen96/bnpy
184
python
def negL_rhoomega_viaHDPTopicUtil(rho=None, omega=None, nDoc=0, sumLogPiActiveVec=None, sumLogPiRemVec=None, alpha=0.5, gamma=1.0, **kwargs): ' Compute minimization objective another way, using utility funcs.\n\n This allows verifying that our negL_rhoomega function is correct.\n\n Returns\n -------\n negL : -1 * L(rho, omega, ...)\n Should be the same value as negL_rhoomega.\n ' K = rho.size from .HDPTopicUtil import L_alloc Ldict = L_alloc(todict=1, rho=rho, omega=omega, nDoc=nDoc, alpha=alpha, gamma=gamma) from .HDPTopicUtil import calcELBO_NonlinearTerms Ldict2 = calcELBO_NonlinearTerms(todict=1, rho=rho, alpha=alpha, gamma=gamma, nDoc=nDoc, sumLogPi=sumLogPiActiveVec, sumLogPiRemVec=sumLogPiRemVec, gammalnTheta=np.zeros(K), gammalnSumTheta=0, gammalnThetaRem=0, slackTheta=np.zeros(K), slackThetaRem=0, Hresp=np.zeros(K)) Lrhoomega = (Ldict['Lalloc_rhoomega'] + Ldict2['Lslack_alphaEbeta']) return ((- 1) * Lrhoomega)
def negL_rhoomega_viaHDPTopicUtil(rho=None, omega=None, nDoc=0, sumLogPiActiveVec=None, sumLogPiRemVec=None, alpha=0.5, gamma=1.0, **kwargs): ' Compute minimization objective another way, using utility funcs.\n\n This allows verifying that our negL_rhoomega function is correct.\n\n Returns\n -------\n negL : -1 * L(rho, omega, ...)\n Should be the same value as negL_rhoomega.\n ' K = rho.size from .HDPTopicUtil import L_alloc Ldict = L_alloc(todict=1, rho=rho, omega=omega, nDoc=nDoc, alpha=alpha, gamma=gamma) from .HDPTopicUtil import calcELBO_NonlinearTerms Ldict2 = calcELBO_NonlinearTerms(todict=1, rho=rho, alpha=alpha, gamma=gamma, nDoc=nDoc, sumLogPi=sumLogPiActiveVec, sumLogPiRemVec=sumLogPiRemVec, gammalnTheta=np.zeros(K), gammalnSumTheta=0, gammalnThetaRem=0, slackTheta=np.zeros(K), slackThetaRem=0, Hresp=np.zeros(K)) Lrhoomega = (Ldict['Lalloc_rhoomega'] + Ldict2['Lslack_alphaEbeta']) return ((- 1) * Lrhoomega)<|docstring|>Compute minimization objective another way, using utility funcs. This allows verifying that our negL_rhoomega function is correct. Returns ------- negL : -1 * L(rho, omega, ...) Should be the same value as negL_rhoomega.<|endoftext|>
58b24c87ccef453e25661567f2b0ca723fcd8a95d49b06444a4630f01ea4e721
def __init__(self, name, Q, x_initial, P_initial, dim_main, dim_main_err, N=0, dim_augment=0, dim_augment_err=0, maha_test_kinds=[]): '\n Generates process function and all\n observation functions for the kalman\n filter.\n ' if (N > 0): self.msckf = True else: self.msckf = False self.N = N self.dim_augment = dim_augment self.dim_augment_err = dim_augment_err self.dim_main = dim_main self.dim_main_err = dim_main_err x_initial = x_initial.reshape(((- 1), 1)) self.dim_x = x_initial.shape[0] self.dim_err = P_initial.shape[0] assert ((dim_main + (dim_augment * N)) == self.dim_x) assert ((dim_main_err + (dim_augment_err * N)) == self.dim_err) self.maha_test_kinds = maha_test_kinds self.Q = Q self.rewind_t = [] self.rewind_states = [] self.rewind_obscache = [] self.init_state(x_initial, P_initial, None) (ffi, lib) = wrap_compiled(name, EXTERNAL_PATH) (kinds, self.feature_track_kinds) = ([], []) for func in dir(lib): if (func[:2] == 'h_'): kinds.append(int(func[2:])) if (func[:3] == 'He_'): self.feature_track_kinds.append(int(func[3:])) def wrap_1lists(name): func = eval(('lib.%s' % name), {'lib': lib}) def ret(lst1, out): func(ffi.cast('double *', lst1.ctypes.data), ffi.cast('double *', out.ctypes.data)) return ret def wrap_2lists(name): func = eval(('lib.%s' % name), {'lib': lib}) def ret(lst1, lst2, out): func(ffi.cast('double *', lst1.ctypes.data), ffi.cast('double *', lst2.ctypes.data), ffi.cast('double *', out.ctypes.data)) return ret def wrap_1list_1float(name): func = eval(('lib.%s' % name), {'lib': lib}) def ret(lst1, fl, out): func(ffi.cast('double *', lst1.ctypes.data), ffi.cast('double', fl), ffi.cast('double *', out.ctypes.data)) return ret self.f = wrap_1list_1float('f_fun') self.F = wrap_1list_1float('F_fun') self.err_function = wrap_2lists('err_fun') self.inv_err_function = wrap_2lists('inv_err_fun') self.H_mod = wrap_1lists('H_mod_fun') (self.hs, self.Hs, self.Hes) = ({}, {}, {}) for kind in kinds: self.hs[kind] = wrap_2lists(('h_%d' % kind)) 
self.Hs[kind] = wrap_2lists(('H_%d' % kind)) if (self.msckf and (kind in self.feature_track_kinds)): self.Hes[kind] = wrap_2lists(('He_%d' % kind)) def _predict_blas(x, P, dt): lib.predict(ffi.cast('double *', x.ctypes.data), ffi.cast('double *', P.ctypes.data), ffi.cast('double *', self.Q.ctypes.data), ffi.cast('double', dt)) return (x, P) def fun_wrapper(f, kind): f = eval(('lib.%s' % f), {'lib': lib}) def _update_inner_blas(x, P, z, R, extra_args): f(ffi.cast('double *', x.ctypes.data), ffi.cast('double *', P.ctypes.data), ffi.cast('double *', z.ctypes.data), ffi.cast('double *', R.ctypes.data), ffi.cast('double *', extra_args.ctypes.data)) if (self.msckf and (kind in self.feature_track_kinds)): y = z[:(- len(extra_args))] else: y = z return (x, P, y) return _update_inner_blas self._updates = {} for kind in kinds: self._updates[kind] = fun_wrapper(('update_%d' % kind), kind) def _update_blas(x, P, kind, z, R, extra_args=[]): return self._updates[kind](x, P, z, R, extra_args) self._predict = _predict_blas self._update = _update_blas
Generates process function and all observation functions for the kalman filter.
examples/kalman/ekf_sym.py
__init__
spectatorearth/laika
116
python
def __init__(self, name, Q, x_initial, P_initial, dim_main, dim_main_err, N=0, dim_augment=0, dim_augment_err=0, maha_test_kinds=[]): '\n Generates process function and all\n observation functions for the kalman\n filter.\n ' if (N > 0): self.msckf = True else: self.msckf = False self.N = N self.dim_augment = dim_augment self.dim_augment_err = dim_augment_err self.dim_main = dim_main self.dim_main_err = dim_main_err x_initial = x_initial.reshape(((- 1), 1)) self.dim_x = x_initial.shape[0] self.dim_err = P_initial.shape[0] assert ((dim_main + (dim_augment * N)) == self.dim_x) assert ((dim_main_err + (dim_augment_err * N)) == self.dim_err) self.maha_test_kinds = maha_test_kinds self.Q = Q self.rewind_t = [] self.rewind_states = [] self.rewind_obscache = [] self.init_state(x_initial, P_initial, None) (ffi, lib) = wrap_compiled(name, EXTERNAL_PATH) (kinds, self.feature_track_kinds) = ([], []) for func in dir(lib): if (func[:2] == 'h_'): kinds.append(int(func[2:])) if (func[:3] == 'He_'): self.feature_track_kinds.append(int(func[3:])) def wrap_1lists(name): func = eval(('lib.%s' % name), {'lib': lib}) def ret(lst1, out): func(ffi.cast('double *', lst1.ctypes.data), ffi.cast('double *', out.ctypes.data)) return ret def wrap_2lists(name): func = eval(('lib.%s' % name), {'lib': lib}) def ret(lst1, lst2, out): func(ffi.cast('double *', lst1.ctypes.data), ffi.cast('double *', lst2.ctypes.data), ffi.cast('double *', out.ctypes.data)) return ret def wrap_1list_1float(name): func = eval(('lib.%s' % name), {'lib': lib}) def ret(lst1, fl, out): func(ffi.cast('double *', lst1.ctypes.data), ffi.cast('double', fl), ffi.cast('double *', out.ctypes.data)) return ret self.f = wrap_1list_1float('f_fun') self.F = wrap_1list_1float('F_fun') self.err_function = wrap_2lists('err_fun') self.inv_err_function = wrap_2lists('inv_err_fun') self.H_mod = wrap_1lists('H_mod_fun') (self.hs, self.Hs, self.Hes) = ({}, {}, {}) for kind in kinds: self.hs[kind] = wrap_2lists(('h_%d' % kind)) 
self.Hs[kind] = wrap_2lists(('H_%d' % kind)) if (self.msckf and (kind in self.feature_track_kinds)): self.Hes[kind] = wrap_2lists(('He_%d' % kind)) def _predict_blas(x, P, dt): lib.predict(ffi.cast('double *', x.ctypes.data), ffi.cast('double *', P.ctypes.data), ffi.cast('double *', self.Q.ctypes.data), ffi.cast('double', dt)) return (x, P) def fun_wrapper(f, kind): f = eval(('lib.%s' % f), {'lib': lib}) def _update_inner_blas(x, P, z, R, extra_args): f(ffi.cast('double *', x.ctypes.data), ffi.cast('double *', P.ctypes.data), ffi.cast('double *', z.ctypes.data), ffi.cast('double *', R.ctypes.data), ffi.cast('double *', extra_args.ctypes.data)) if (self.msckf and (kind in self.feature_track_kinds)): y = z[:(- len(extra_args))] else: y = z return (x, P, y) return _update_inner_blas self._updates = {} for kind in kinds: self._updates[kind] = fun_wrapper(('update_%d' % kind), kind) def _update_blas(x, P, kind, z, R, extra_args=[]): return self._updates[kind](x, P, z, R, extra_args) self._predict = _predict_blas self._update = _update_blas
def __init__(self, name, Q, x_initial, P_initial, dim_main, dim_main_err, N=0, dim_augment=0, dim_augment_err=0, maha_test_kinds=[]): '\n Generates process function and all\n observation functions for the kalman\n filter.\n ' if (N > 0): self.msckf = True else: self.msckf = False self.N = N self.dim_augment = dim_augment self.dim_augment_err = dim_augment_err self.dim_main = dim_main self.dim_main_err = dim_main_err x_initial = x_initial.reshape(((- 1), 1)) self.dim_x = x_initial.shape[0] self.dim_err = P_initial.shape[0] assert ((dim_main + (dim_augment * N)) == self.dim_x) assert ((dim_main_err + (dim_augment_err * N)) == self.dim_err) self.maha_test_kinds = maha_test_kinds self.Q = Q self.rewind_t = [] self.rewind_states = [] self.rewind_obscache = [] self.init_state(x_initial, P_initial, None) (ffi, lib) = wrap_compiled(name, EXTERNAL_PATH) (kinds, self.feature_track_kinds) = ([], []) for func in dir(lib): if (func[:2] == 'h_'): kinds.append(int(func[2:])) if (func[:3] == 'He_'): self.feature_track_kinds.append(int(func[3:])) def wrap_1lists(name): func = eval(('lib.%s' % name), {'lib': lib}) def ret(lst1, out): func(ffi.cast('double *', lst1.ctypes.data), ffi.cast('double *', out.ctypes.data)) return ret def wrap_2lists(name): func = eval(('lib.%s' % name), {'lib': lib}) def ret(lst1, lst2, out): func(ffi.cast('double *', lst1.ctypes.data), ffi.cast('double *', lst2.ctypes.data), ffi.cast('double *', out.ctypes.data)) return ret def wrap_1list_1float(name): func = eval(('lib.%s' % name), {'lib': lib}) def ret(lst1, fl, out): func(ffi.cast('double *', lst1.ctypes.data), ffi.cast('double', fl), ffi.cast('double *', out.ctypes.data)) return ret self.f = wrap_1list_1float('f_fun') self.F = wrap_1list_1float('F_fun') self.err_function = wrap_2lists('err_fun') self.inv_err_function = wrap_2lists('inv_err_fun') self.H_mod = wrap_1lists('H_mod_fun') (self.hs, self.Hs, self.Hes) = ({}, {}, {}) for kind in kinds: self.hs[kind] = wrap_2lists(('h_%d' % kind)) 
self.Hs[kind] = wrap_2lists(('H_%d' % kind)) if (self.msckf and (kind in self.feature_track_kinds)): self.Hes[kind] = wrap_2lists(('He_%d' % kind)) def _predict_blas(x, P, dt): lib.predict(ffi.cast('double *', x.ctypes.data), ffi.cast('double *', P.ctypes.data), ffi.cast('double *', self.Q.ctypes.data), ffi.cast('double', dt)) return (x, P) def fun_wrapper(f, kind): f = eval(('lib.%s' % f), {'lib': lib}) def _update_inner_blas(x, P, z, R, extra_args): f(ffi.cast('double *', x.ctypes.data), ffi.cast('double *', P.ctypes.data), ffi.cast('double *', z.ctypes.data), ffi.cast('double *', R.ctypes.data), ffi.cast('double *', extra_args.ctypes.data)) if (self.msckf and (kind in self.feature_track_kinds)): y = z[:(- len(extra_args))] else: y = z return (x, P, y) return _update_inner_blas self._updates = {} for kind in kinds: self._updates[kind] = fun_wrapper(('update_%d' % kind), kind) def _update_blas(x, P, kind, z, R, extra_args=[]): return self._updates[kind](x, P, z, R, extra_args) self._predict = _predict_blas self._update = _update_blas<|docstring|>Generates process function and all observation functions for the kalman filter.<|endoftext|>
4404359376efac51f8ed4e3269a3e7dc681ae569ee6b8207e1f0bc986bded956
def _predict_and_update_batch(self, t, kind, z, R, extra_args, augment=False): 'The main kalman filter function\n Predicts the state and then updates a batch of observations\n\n dim_x: dimensionality of the state space\n dim_z: dimensionality of the observation and depends on kind\n n: number of observations\n\n Args:\n t (float): Time of observation\n kind (int): Type of observation\n z (vec [n,dim_z]): Measurements\n R (mat [n,dim_z, dim_z]): Measurement Noise\n extra_args (list, [n]): Values used in H computations\n ' if (self.filter_time is None): self.filter_time = t dt = (t - self.filter_time) assert (dt >= 0) (self.x, self.P) = self._predict(self.x, self.P, dt) self.filter_time = t (xk_km1, Pk_km1) = (np.copy(self.x).flatten(), np.copy(self.P)) y = [] for i in range(len(z)): z_i = np.array(z[i], dtype=np.float64, order='F') R_i = np.array(R[i], dtype=np.float64, order='F') extra_args_i = np.array(extra_args[i], dtype=np.float64, order='F') (self.x, self.P, y_i) = self._update(self.x, self.P, kind, z_i, R_i, extra_args=extra_args_i) y.append(y_i) (xk_k, Pk_k) = (np.copy(self.x).flatten(), np.copy(self.P)) if augment: self.augment() self.checkpoint((t, kind, z, R, extra_args)) return (xk_km1, xk_k, Pk_km1, Pk_k, t, kind, y, z, extra_args)
The main kalman filter function Predicts the state and then updates a batch of observations dim_x: dimensionality of the state space dim_z: dimensionality of the observation and depends on kind n: number of observations Args: t (float): Time of observation kind (int): Type of observation z (vec [n,dim_z]): Measurements R (mat [n,dim_z, dim_z]): Measurement Noise extra_args (list, [n]): Values used in H computations
examples/kalman/ekf_sym.py
_predict_and_update_batch
spectatorearth/laika
116
python
def _predict_and_update_batch(self, t, kind, z, R, extra_args, augment=False): 'The main kalman filter function\n Predicts the state and then updates a batch of observations\n\n dim_x: dimensionality of the state space\n dim_z: dimensionality of the observation and depends on kind\n n: number of observations\n\n Args:\n t (float): Time of observation\n kind (int): Type of observation\n z (vec [n,dim_z]): Measurements\n R (mat [n,dim_z, dim_z]): Measurement Noise\n extra_args (list, [n]): Values used in H computations\n ' if (self.filter_time is None): self.filter_time = t dt = (t - self.filter_time) assert (dt >= 0) (self.x, self.P) = self._predict(self.x, self.P, dt) self.filter_time = t (xk_km1, Pk_km1) = (np.copy(self.x).flatten(), np.copy(self.P)) y = [] for i in range(len(z)): z_i = np.array(z[i], dtype=np.float64, order='F') R_i = np.array(R[i], dtype=np.float64, order='F') extra_args_i = np.array(extra_args[i], dtype=np.float64, order='F') (self.x, self.P, y_i) = self._update(self.x, self.P, kind, z_i, R_i, extra_args=extra_args_i) y.append(y_i) (xk_k, Pk_k) = (np.copy(self.x).flatten(), np.copy(self.P)) if augment: self.augment() self.checkpoint((t, kind, z, R, extra_args)) return (xk_km1, xk_k, Pk_km1, Pk_k, t, kind, y, z, extra_args)
def _predict_and_update_batch(self, t, kind, z, R, extra_args, augment=False): 'The main kalman filter function\n Predicts the state and then updates a batch of observations\n\n dim_x: dimensionality of the state space\n dim_z: dimensionality of the observation and depends on kind\n n: number of observations\n\n Args:\n t (float): Time of observation\n kind (int): Type of observation\n z (vec [n,dim_z]): Measurements\n R (mat [n,dim_z, dim_z]): Measurement Noise\n extra_args (list, [n]): Values used in H computations\n ' if (self.filter_time is None): self.filter_time = t dt = (t - self.filter_time) assert (dt >= 0) (self.x, self.P) = self._predict(self.x, self.P, dt) self.filter_time = t (xk_km1, Pk_km1) = (np.copy(self.x).flatten(), np.copy(self.P)) y = [] for i in range(len(z)): z_i = np.array(z[i], dtype=np.float64, order='F') R_i = np.array(R[i], dtype=np.float64, order='F') extra_args_i = np.array(extra_args[i], dtype=np.float64, order='F') (self.x, self.P, y_i) = self._update(self.x, self.P, kind, z_i, R_i, extra_args=extra_args_i) y.append(y_i) (xk_k, Pk_k) = (np.copy(self.x).flatten(), np.copy(self.P)) if augment: self.augment() self.checkpoint((t, kind, z, R, extra_args)) return (xk_km1, xk_k, Pk_km1, Pk_k, t, kind, y, z, extra_args)<|docstring|>The main kalman filter function Predicts the state and then updates a batch of observations dim_x: dimensionality of the state space dim_z: dimensionality of the observation and depends on kind n: number of observations Args: t (float): Time of observation kind (int): Type of observation z (vec [n,dim_z]): Measurements R (mat [n,dim_z, dim_z]): Measurement Noise extra_args (list, [n]): Values used in H computations<|endoftext|>
77c0f648a8667e7c099e8cca28fc97bb5e647b71d91f6784306d080471b3dd31
def rts_smooth(self, estimates, norm_quats=False): '\n Returns rts smoothed results of\n kalman filter estimates\n\n If the kalman state is augmented with\n old states only the main state is smoothed\n ' xk_n = estimates[(- 1)][0] Pk_n = estimates[(- 1)][2] Fk_1 = np.zeros(Pk_n.shape, dtype=np.float64) states_smoothed = [xk_n] covs_smoothed = [Pk_n] for k in range((len(estimates) - 2), (- 1), (- 1)): xk1_n = xk_n if norm_quats: xk1_n[3:7] /= np.linalg.norm(xk1_n[3:7]) Pk1_n = Pk_n (xk1_k, _, Pk1_k, _, t2, _, _, _, _) = estimates[(k + 1)] (_, xk_k, _, Pk_k, t1, _, _, _, _) = estimates[k] dt = (t2 - t1) self.F(xk_k, dt, Fk_1) d1 = self.dim_main d2 = self.dim_main_err Ck = np.linalg.solve(Pk1_k[(:d2, :d2)], Fk_1[(:d2, :d2)].dot(Pk_k[(:d2, :d2)].T)).T xk_n = xk_k delta_x = np.zeros((Pk_n.shape[0], 1), dtype=np.float64) self.inv_err_function(xk1_k, xk1_n, delta_x) delta_x[:d2] = Ck.dot(delta_x[:d2]) x_new = np.zeros((xk_n.shape[0], 1), dtype=np.float64) self.err_function(xk_k, delta_x, x_new) xk_n[:d1] = x_new[(:d1, 0)] Pk_n = Pk_k Pk_n[(:d2, :d2)] = (Pk_k[(:d2, :d2)] + Ck.dot((Pk1_n[(:d2, :d2)] - Pk1_k[(:d2, :d2)])).dot(Ck.T)) states_smoothed.append(xk_n) covs_smoothed.append(Pk_n) return (np.flipud(np.vstack(states_smoothed)), np.stack(covs_smoothed, 0)[::(- 1)])
Returns rts smoothed results of kalman filter estimates If the kalman state is augmented with old states only the main state is smoothed
examples/kalman/ekf_sym.py
rts_smooth
spectatorearth/laika
116
python
def rts_smooth(self, estimates, norm_quats=False): '\n Returns rts smoothed results of\n kalman filter estimates\n\n If the kalman state is augmented with\n old states only the main state is smoothed\n ' xk_n = estimates[(- 1)][0] Pk_n = estimates[(- 1)][2] Fk_1 = np.zeros(Pk_n.shape, dtype=np.float64) states_smoothed = [xk_n] covs_smoothed = [Pk_n] for k in range((len(estimates) - 2), (- 1), (- 1)): xk1_n = xk_n if norm_quats: xk1_n[3:7] /= np.linalg.norm(xk1_n[3:7]) Pk1_n = Pk_n (xk1_k, _, Pk1_k, _, t2, _, _, _, _) = estimates[(k + 1)] (_, xk_k, _, Pk_k, t1, _, _, _, _) = estimates[k] dt = (t2 - t1) self.F(xk_k, dt, Fk_1) d1 = self.dim_main d2 = self.dim_main_err Ck = np.linalg.solve(Pk1_k[(:d2, :d2)], Fk_1[(:d2, :d2)].dot(Pk_k[(:d2, :d2)].T)).T xk_n = xk_k delta_x = np.zeros((Pk_n.shape[0], 1), dtype=np.float64) self.inv_err_function(xk1_k, xk1_n, delta_x) delta_x[:d2] = Ck.dot(delta_x[:d2]) x_new = np.zeros((xk_n.shape[0], 1), dtype=np.float64) self.err_function(xk_k, delta_x, x_new) xk_n[:d1] = x_new[(:d1, 0)] Pk_n = Pk_k Pk_n[(:d2, :d2)] = (Pk_k[(:d2, :d2)] + Ck.dot((Pk1_n[(:d2, :d2)] - Pk1_k[(:d2, :d2)])).dot(Ck.T)) states_smoothed.append(xk_n) covs_smoothed.append(Pk_n) return (np.flipud(np.vstack(states_smoothed)), np.stack(covs_smoothed, 0)[::(- 1)])
def rts_smooth(self, estimates, norm_quats=False): '\n Returns rts smoothed results of\n kalman filter estimates\n\n If the kalman state is augmented with\n old states only the main state is smoothed\n ' xk_n = estimates[(- 1)][0] Pk_n = estimates[(- 1)][2] Fk_1 = np.zeros(Pk_n.shape, dtype=np.float64) states_smoothed = [xk_n] covs_smoothed = [Pk_n] for k in range((len(estimates) - 2), (- 1), (- 1)): xk1_n = xk_n if norm_quats: xk1_n[3:7] /= np.linalg.norm(xk1_n[3:7]) Pk1_n = Pk_n (xk1_k, _, Pk1_k, _, t2, _, _, _, _) = estimates[(k + 1)] (_, xk_k, _, Pk_k, t1, _, _, _, _) = estimates[k] dt = (t2 - t1) self.F(xk_k, dt, Fk_1) d1 = self.dim_main d2 = self.dim_main_err Ck = np.linalg.solve(Pk1_k[(:d2, :d2)], Fk_1[(:d2, :d2)].dot(Pk_k[(:d2, :d2)].T)).T xk_n = xk_k delta_x = np.zeros((Pk_n.shape[0], 1), dtype=np.float64) self.inv_err_function(xk1_k, xk1_n, delta_x) delta_x[:d2] = Ck.dot(delta_x[:d2]) x_new = np.zeros((xk_n.shape[0], 1), dtype=np.float64) self.err_function(xk_k, delta_x, x_new) xk_n[:d1] = x_new[(:d1, 0)] Pk_n = Pk_k Pk_n[(:d2, :d2)] = (Pk_k[(:d2, :d2)] + Ck.dot((Pk1_n[(:d2, :d2)] - Pk1_k[(:d2, :d2)])).dot(Ck.T)) states_smoothed.append(xk_n) covs_smoothed.append(Pk_n) return (np.flipud(np.vstack(states_smoothed)), np.stack(covs_smoothed, 0)[::(- 1)])<|docstring|>Returns rts smoothed results of kalman filter estimates If the kalman state is augmented with old states only the main state is smoothed<|endoftext|>
3b7ed0af9deec8eaf12aa117b4f3d5bdd3e40e848a0790f48ad1d204f1edeecc
def test_invalid_user_credentials(self): '\n Test get token pair view with invalid user credentials.\n ' response = self.client.post(reverse('paseto_auth:get_token_pair'), data={'username': 'testuser', 'password': '1234'}) self.assertEqual(response.status_code, 401) self.assertEqual(response.json()['detail'], 'Incorrect authentication credentials.')
Test get token pair view with invalid user credentials.
tests/test_views.py
test_invalid_user_credentials
moiseshiraldo/django-rest-paseto-auth
7
python
def test_invalid_user_credentials(self): '\n \n ' response = self.client.post(reverse('paseto_auth:get_token_pair'), data={'username': 'testuser', 'password': '1234'}) self.assertEqual(response.status_code, 401) self.assertEqual(response.json()['detail'], 'Incorrect authentication credentials.')
def test_invalid_user_credentials(self): '\n \n ' response = self.client.post(reverse('paseto_auth:get_token_pair'), data={'username': 'testuser', 'password': '1234'}) self.assertEqual(response.status_code, 401) self.assertEqual(response.json()['detail'], 'Incorrect authentication credentials.')<|docstring|>Test get token pair view with invalid user credentials.<|endoftext|>
5f7c4bc04636ebc2be6ac12e869aec52f6e186dc9670c7e255493c83962986a5
def test_get_token_pair(self): '\n Test get token pair view with valid user credentials.\n ' response = self.client.post(reverse('paseto_auth:get_token_pair'), data=self.user_credentials) self.assertEqual(response.status_code, 200) self.assertTrue(response.json().get('refresh_token')) self.assertTrue(response.json().get('access_token')) refresh_token = response.json()['refresh_token'] parsed = paseto.parse(key=bytes.fromhex(AUTH_SETTINGS['SECRET_KEY']), purpose='local', token=bytes(str(refresh_token), 'utf-8')) token_key = parsed['message']['key'] self.assertTrue(UserRefreshToken.objects.filter(key=token_key).exists())
Test get token pair view with valid user credentials.
tests/test_views.py
test_get_token_pair
moiseshiraldo/django-rest-paseto-auth
7
python
def test_get_token_pair(self): '\n \n ' response = self.client.post(reverse('paseto_auth:get_token_pair'), data=self.user_credentials) self.assertEqual(response.status_code, 200) self.assertTrue(response.json().get('refresh_token')) self.assertTrue(response.json().get('access_token')) refresh_token = response.json()['refresh_token'] parsed = paseto.parse(key=bytes.fromhex(AUTH_SETTINGS['SECRET_KEY']), purpose='local', token=bytes(str(refresh_token), 'utf-8')) token_key = parsed['message']['key'] self.assertTrue(UserRefreshToken.objects.filter(key=token_key).exists())
def test_get_token_pair(self): '\n \n ' response = self.client.post(reverse('paseto_auth:get_token_pair'), data=self.user_credentials) self.assertEqual(response.status_code, 200) self.assertTrue(response.json().get('refresh_token')) self.assertTrue(response.json().get('access_token')) refresh_token = response.json()['refresh_token'] parsed = paseto.parse(key=bytes.fromhex(AUTH_SETTINGS['SECRET_KEY']), purpose='local', token=bytes(str(refresh_token), 'utf-8')) token_key = parsed['message']['key'] self.assertTrue(UserRefreshToken.objects.filter(key=token_key).exists())<|docstring|>Test get token pair view with valid user credentials.<|endoftext|>
a22ed3df67feae56ab1d9bfa1123efd62b07c2ecbdefce76ed5de1d57a1132b2
def test_invalid_refresh_token(self): '\n Test get access token view with invalid refresh token.\n ' response = self.client.post(reverse('paseto_auth:get_access_token'), data={'refresh_token': 'qwerty'}) self.assertEqual(response.status_code, 401) self.assertEqual(response.json()['detail'], 'Invalid refresh token.')
Test get access token view with invalid refresh token.
tests/test_views.py
test_invalid_refresh_token
moiseshiraldo/django-rest-paseto-auth
7
python
def test_invalid_refresh_token(self): '\n \n ' response = self.client.post(reverse('paseto_auth:get_access_token'), data={'refresh_token': 'qwerty'}) self.assertEqual(response.status_code, 401) self.assertEqual(response.json()['detail'], 'Invalid refresh token.')
def test_invalid_refresh_token(self): '\n \n ' response = self.client.post(reverse('paseto_auth:get_access_token'), data={'refresh_token': 'qwerty'}) self.assertEqual(response.status_code, 401) self.assertEqual(response.json()['detail'], 'Invalid refresh token.')<|docstring|>Test get access token view with invalid refresh token.<|endoftext|>
60bb9b393c9981f8617a9795123025c3b5fbb3e5fb2830e84d5139b7a934b925
def test_expired_refresh_token(self): '\n Test get access token view with expired refresh token.\n ' data = {'model': 'user', 'pk': self.user.pk, 'key': 'qwerty', 'type': tokens.REFRESH, 'exp': pendulum.now().subtract(seconds=10).to_atom_string()} refresh_token = paseto.create(key=bytes.fromhex(AUTH_SETTINGS['SECRET_KEY']), purpose='local', claims=data) UserRefreshToken.objects.create(user=self.user, key=data['key']) response = self.client.post(reverse('paseto_auth:get_access_token'), data={'refresh_token': refresh_token.decode()}) self.assertEqual(response.status_code, 401) self.assertEqual(response.json()['detail'], 'Invalid refresh token.')
Test get access token view with expired refresh token.
tests/test_views.py
test_expired_refresh_token
moiseshiraldo/django-rest-paseto-auth
7
python
def test_expired_refresh_token(self): '\n \n ' data = {'model': 'user', 'pk': self.user.pk, 'key': 'qwerty', 'type': tokens.REFRESH, 'exp': pendulum.now().subtract(seconds=10).to_atom_string()} refresh_token = paseto.create(key=bytes.fromhex(AUTH_SETTINGS['SECRET_KEY']), purpose='local', claims=data) UserRefreshToken.objects.create(user=self.user, key=data['key']) response = self.client.post(reverse('paseto_auth:get_access_token'), data={'refresh_token': refresh_token.decode()}) self.assertEqual(response.status_code, 401) self.assertEqual(response.json()['detail'], 'Invalid refresh token.')
def test_expired_refresh_token(self): '\n \n ' data = {'model': 'user', 'pk': self.user.pk, 'key': 'qwerty', 'type': tokens.REFRESH, 'exp': pendulum.now().subtract(seconds=10).to_atom_string()} refresh_token = paseto.create(key=bytes.fromhex(AUTH_SETTINGS['SECRET_KEY']), purpose='local', claims=data) UserRefreshToken.objects.create(user=self.user, key=data['key']) response = self.client.post(reverse('paseto_auth:get_access_token'), data={'refresh_token': refresh_token.decode()}) self.assertEqual(response.status_code, 401) self.assertEqual(response.json()['detail'], 'Invalid refresh token.')<|docstring|>Test get access token view with expired refresh token.<|endoftext|>
165a169990744fc71cccebd6fd383b298535bb21a7f78d018f7aa23973e8c865
def test_get_access_token(self): '\n Test get access token view.\n ' response = self.client.post(reverse('paseto_auth:get_token_pair'), data=self.user_credentials) refresh_token = response.json()['refresh_token'] response = self.client.post(reverse('paseto_auth:get_access_token'), data={'refresh_token': refresh_token}) self.assertEqual(response.status_code, 200) self.assertTrue(response.json().get('access_token')) access_token = response.json()['access_token'] parsed = paseto.parse(key=bytes.fromhex(AUTH_SETTINGS['SECRET_KEY']), purpose='local', token=bytes(str(access_token), 'utf-8')) self.assertEqual(parsed['message']['type'], 'access')
Test get access token view.
tests/test_views.py
test_get_access_token
moiseshiraldo/django-rest-paseto-auth
7
python
def test_get_access_token(self): '\n \n ' response = self.client.post(reverse('paseto_auth:get_token_pair'), data=self.user_credentials) refresh_token = response.json()['refresh_token'] response = self.client.post(reverse('paseto_auth:get_access_token'), data={'refresh_token': refresh_token}) self.assertEqual(response.status_code, 200) self.assertTrue(response.json().get('access_token')) access_token = response.json()['access_token'] parsed = paseto.parse(key=bytes.fromhex(AUTH_SETTINGS['SECRET_KEY']), purpose='local', token=bytes(str(access_token), 'utf-8')) self.assertEqual(parsed['message']['type'], 'access')
def test_get_access_token(self): '\n \n ' response = self.client.post(reverse('paseto_auth:get_token_pair'), data=self.user_credentials) refresh_token = response.json()['refresh_token'] response = self.client.post(reverse('paseto_auth:get_access_token'), data={'refresh_token': refresh_token}) self.assertEqual(response.status_code, 200) self.assertTrue(response.json().get('access_token')) access_token = response.json()['access_token'] parsed = paseto.parse(key=bytes.fromhex(AUTH_SETTINGS['SECRET_KEY']), purpose='local', token=bytes(str(access_token), 'utf-8')) self.assertEqual(parsed['message']['type'], 'access')<|docstring|>Test get access token view.<|endoftext|>
7502671c11b48096bc3ca27147dabebe1094cbbfa7ed6dcf80aaa736dc586e3b
def __init__(self, emr_client, cache_filepath, cache_file_ttl, list_frequency=_DEFAULT_CACHE_LIST_FREQUENCY_SECONDS): 'Init cluster cache.\n\n :param emr_client: boto3 EMR client to use for list and describe calls\n :param cache_filepath: absolute local path to the cluster cache\n :param cache_file_ttl: expire (truncate) the cache after this many days\n :param list_frequency: list at most only every this many seconds\n ' self._emr_client = emr_client self._cache_filepath = cache_filepath self._cache_file_ttl = cache_file_ttl self._list_frequency = list_frequency
Init cluster cache. :param emr_client: boto3 EMR client to use for list and describe calls :param cache_filepath: absolute local path to the cluster cache :param cache_file_ttl: expire (truncate) the cache after this many days :param list_frequency: list at most only every this many seconds
mrjob/cache.py
__init__
Affirm/mrjob
4
python
def __init__(self, emr_client, cache_filepath, cache_file_ttl, list_frequency=_DEFAULT_CACHE_LIST_FREQUENCY_SECONDS): 'Init cluster cache.\n\n :param emr_client: boto3 EMR client to use for list and describe calls\n :param cache_filepath: absolute local path to the cluster cache\n :param cache_file_ttl: expire (truncate) the cache after this many days\n :param list_frequency: list at most only every this many seconds\n ' self._emr_client = emr_client self._cache_filepath = cache_filepath self._cache_file_ttl = cache_file_ttl self._list_frequency = list_frequency
def __init__(self, emr_client, cache_filepath, cache_file_ttl, list_frequency=_DEFAULT_CACHE_LIST_FREQUENCY_SECONDS): 'Init cluster cache.\n\n :param emr_client: boto3 EMR client to use for list and describe calls\n :param cache_filepath: absolute local path to the cluster cache\n :param cache_file_ttl: expire (truncate) the cache after this many days\n :param list_frequency: list at most only every this many seconds\n ' self._emr_client = emr_client self._cache_filepath = cache_filepath self._cache_file_ttl = cache_file_ttl self._list_frequency = list_frequency<|docstring|>Init cluster cache. :param emr_client: boto3 EMR client to use for list and describe calls :param cache_filepath: absolute local path to the cluster cache :param cache_file_ttl: expire (truncate) the cache after this many days :param list_frequency: list at most only every this many seconds<|endoftext|>
1744d02d5919279d649feb5eafa4da8e877733ecbac80a1fd78ab7d76589f6ff
def _emr_cluster_describe(self, cluster_id):
    """Describe the cluster and get the instance group/fleet info."""
    cluster_info = self._emr_client.describe_cluster(
        ClusterId=cluster_id)['Cluster']
    # Instance-fleet clusters need a different paginated list call than
    # instance-group clusters.
    if cluster_info.get('InstanceCollectionType') == 'INSTANCE_FLEET':
        instance_info = list(_boto3_paginate(
            'InstanceFleets', self._emr_client,
            'list_instance_fleets', ClusterId=cluster_id))
    else:
        instance_info = list(_boto3_paginate(
            'InstanceGroups', self._emr_client,
            'list_instance_groups', ClusterId=cluster_id))
    return {'Cluster': cluster_info, 'Instances': instance_info}
Describe the cluster and get the instance group/fleet info.
mrjob/cache.py
_emr_cluster_describe
Affirm/mrjob
4
python
def _emr_cluster_describe(self, cluster_id): cluster_info = self._emr_client.describe_cluster(ClusterId=cluster_id)['Cluster'] collect_type = cluster_info.get('InstanceCollectionType', None) if (collect_type == 'INSTANCE_FLEET'): instance_info = list(_boto3_paginate('InstanceFleets', self._emr_client, 'list_instance_fleets', ClusterId=cluster_id)) else: instance_info = list(_boto3_paginate('InstanceGroups', self._emr_client, 'list_instance_groups', ClusterId=cluster_id)) return {'Cluster': cluster_info, 'Instances': instance_info}
def _emr_cluster_describe(self, cluster_id): cluster_info = self._emr_client.describe_cluster(ClusterId=cluster_id)['Cluster'] collect_type = cluster_info.get('InstanceCollectionType', None) if (collect_type == 'INSTANCE_FLEET'): instance_info = list(_boto3_paginate('InstanceFleets', self._emr_client, 'list_instance_fleets', ClusterId=cluster_id)) else: instance_info = list(_boto3_paginate('InstanceGroups', self._emr_client, 'list_instance_groups', ClusterId=cluster_id)) return {'Cluster': cluster_info, 'Instances': instance_info}<|docstring|>Describe the cluster and get the instance group/fleet info.<|endoftext|>
035074c9fec5cbaf6af1947fd33f1778e457ef2f6cc9ef2e78d175222d7a687e
def describe_cluster(self, cluster_id):
    """Describe an EMR cluster from the given ID, caching the result.

    Returns cached cluster data when present; otherwise calls the EMR API
    and stores the answer. Since mrjob clusters should never be modified,
    cached data stays valid except for the cluster state.
    """
    # No cache configured -- always hit the EMR API directly.
    if self._cache_filepath is None:
        return self._emr_cluster_describe(cluster_id)
    with self.cache_mutex('r+') as fd:
        self._handle_cache_expiry()
        content = self._load_cache(fd)
        cached = content.get(cluster_id, None)
        if cached:
            log.debug('Cluster cache hit: found cluster {}'.format(cluster_id))
            return cached
        log.debug('Cluster cache miss: no entry for cluster {}'.format(cluster_id))
        # Fetch, record in the cache file, then return the fresh entry.
        content[cluster_id] = self._emr_cluster_describe(cluster_id)
        self._dump_cache(content, fd, False)
        return content[cluster_id]
Describes an EMR cluster from the given ID. Also describes and caches the instance group/fleet info. If cluster info for this ID exists in the cache return this data; otherwise make an EMR API call to retry this data. Since mrjob clusters should never be modified, this cached data will always be valid except cluster state.
mrjob/cache.py
describe_cluster
Affirm/mrjob
4
python
def describe_cluster(self, cluster_id): 'Describes an EMR cluster from the given ID. Also describes and\n caches the instance group/fleet info.\n\n If cluster info for this ID exists in the cache return this data;\n otherwise make an EMR API call to retry this data. Since mrjob clusters\n should never be modified, this cached data will always be valid except\n cluster state.\n ' if (self._cache_filepath is None): return self._emr_cluster_describe(cluster_id) with self.cache_mutex('r+') as fd: self._handle_cache_expiry() content = self._load_cache(fd) cluster = content.get(cluster_id, None) if cluster: log.debug('Cluster cache hit: found cluster {}'.format(cluster_id)) return cluster log.debug('Cluster cache miss: no entry for cluster {}'.format(cluster_id)) content[cluster_id] = self._emr_cluster_describe(cluster_id) self._dump_cache(content, fd, False) return content[cluster_id]
def describe_cluster(self, cluster_id): 'Describes an EMR cluster from the given ID. Also describes and\n caches the instance group/fleet info.\n\n If cluster info for this ID exists in the cache return this data;\n otherwise make an EMR API call to retry this data. Since mrjob clusters\n should never be modified, this cached data will always be valid except\n cluster state.\n ' if (self._cache_filepath is None): return self._emr_cluster_describe(cluster_id) with self.cache_mutex('r+') as fd: self._handle_cache_expiry() content = self._load_cache(fd) cluster = content.get(cluster_id, None) if cluster: log.debug('Cluster cache hit: found cluster {}'.format(cluster_id)) return cluster log.debug('Cluster cache miss: no entry for cluster {}'.format(cluster_id)) content[cluster_id] = self._emr_cluster_describe(cluster_id) self._dump_cache(content, fd, False) return content[cluster_id]<|docstring|>Describes an EMR cluster from the given ID. Also describes and caches the instance group/fleet info. If cluster info for this ID exists in the cache return this data; otherwise make an EMR API call to retry this data. Since mrjob clusters should never be modified, this cached data will always be valid except cluster state.<|endoftext|>
473653a1f5eaba2e1eca74a57e85499a33e8292e897afce01ea1dd396e47a139
def list_clusters_and_populate_cache(self, states, force_relist=False):
    """List EMR clusters in the given states and refresh the cache.

    Clusters are only re-listed when the list marker is older than the
    list frequency (or *force_relist* is set). Cluster info is only
    described when the ID is absent from the cache. Entries whose state no
    longer matches are dropped, so this may remove cache entries; states
    that changed are updated in place.
    """
    assert self._cache_filepath is not None, \
        'This code requires the cluster cache to be present'
    with self.cache_mutex('r+') as fd:
        content = self._load_cache(fd)
        # Short-circuit order preserved: the list-frequency check runs
        # before force_relist is consulted.
        if content and not (self._should_list_and_populate() or force_relist):
            return content
        refreshed = {}
        for summary in self._get_list_paginator(states):
            cid = summary['Id']
            info = content.get(cid, None)
            if info is None:
                # Unknown cluster: fetch its full description once.
                info = self._emr_cluster_describe(cid)
            else:
                # Known cluster: just sync the (only mutable) state field.
                info['Cluster']['Status']['State'] = summary['Status']['State']
            refreshed[cid] = info
        self._dump_cache(refreshed, fd, True)
        return refreshed
Lists EMR clusters with specified state and populates the cache with their info. Only lists the clusters if the list marker file is older than the list frequency. Only describes the cluster info if the ID does not exist in the cache. Additionally, we only put entries in the cache if their state matches the specified state and update the state if it had changed. Therefore this function may remove entries from the cache.
mrjob/cache.py
list_clusters_and_populate_cache
Affirm/mrjob
4
python
def list_clusters_and_populate_cache(self, states, force_relist=False): 'Lists EMR clusters with specified state and populates the cache\n with their info.\n\n Only lists the clusters if the list marker file is older than the list\n frequency. Only describes the cluster info if the ID does not exist in\n the cache.\n\n Additionally, we only put entries in the cache if their state matches\n the specified state and update the state if it had changed. Therefore\n this function may remove entries from the cache.\n ' assert (self._cache_filepath is not None), 'This code requires the cluster cache to be present' with self.cache_mutex('r+') as fd: content = self._load_cache(fd) if (content and (not (self._should_list_and_populate() or force_relist))): return content new_content = {} for cluster_summary in self._get_list_paginator(states): cluster_id = cluster_summary['Id'] cluster_info = content.get(cluster_id, None) if (cluster_info is None): cluster_info = self._emr_cluster_describe(cluster_id) else: cluster_info['Cluster']['Status']['State'] = cluster_summary['Status']['State'] new_content[cluster_id] = cluster_info self._dump_cache(new_content, fd, True) return new_content
def list_clusters_and_populate_cache(self, states, force_relist=False): 'Lists EMR clusters with specified state and populates the cache\n with their info.\n\n Only lists the clusters if the list marker file is older than the list\n frequency. Only describes the cluster info if the ID does not exist in\n the cache.\n\n Additionally, we only put entries in the cache if their state matches\n the specified state and update the state if it had changed. Therefore\n this function may remove entries from the cache.\n ' assert (self._cache_filepath is not None), 'This code requires the cluster cache to be present' with self.cache_mutex('r+') as fd: content = self._load_cache(fd) if (content and (not (self._should_list_and_populate() or force_relist))): return content new_content = {} for cluster_summary in self._get_list_paginator(states): cluster_id = cluster_summary['Id'] cluster_info = content.get(cluster_id, None) if (cluster_info is None): cluster_info = self._emr_cluster_describe(cluster_id) else: cluster_info['Cluster']['Status']['State'] = cluster_summary['Status']['State'] new_content[cluster_id] = cluster_info self._dump_cache(new_content, fd, True) return new_content<|docstring|>Lists EMR clusters with specified state and populates the cache with their info. Only lists the clusters if the list marker file is older than the list frequency. Only describes the cluster info if the ID does not exist in the cache. Additionally, we only put entries in the cache if their state matches the specified state and update the state if it had changed. Therefore this function may remove entries from the cache.<|endoftext|>
f8234fb5e35ad1c014b66d00fb77b9df7faa945c04ee1fb912e51b7bcc513277
@primitive_field(str)
def install_arguments(self):
    """
    Optional extra arguments passed to the :code:`pip install` command
    created for the plugin installation.

    :rtype: str
    """
Optional arguments passed to the :code:`pip install` command created for the plugin installation. :rtype: str
src/cloudify/aria_extension_cloudify/v1_1/misc.py
install_arguments
tliron/aria-ng
0
python
@primitive_field(str) def install_arguments(self): '\n Optional arguments passed to the :code:`pip install` command created for the plugin installation.\n \n :rtype: str \n '
@primitive_field(str) def install_arguments(self): '\n Optional arguments passed to the :code:`pip install` command created for the plugin installation.\n \n :rtype: str \n '<|docstring|>Optional arguments passed to the :code:`pip install` command created for the plugin installation. :rtype: str<|endoftext|>
55af65df8ec80a34404a4f197932975d85426c32f93d15f07460373dc3aeff76
@client.command()
async def challenge(ctx: discord.ext.commands.Context):
    """Challenge the mentioned user to a chess match.

    Appends a pending :class:`chessgame.ChessGame` to the global
    ``match_requests`` list and announces the challenge in the channel.
    """
    global match_requests
    message = ctx.message
    challenger = message.author
    # Robustness: without this guard, message.mentions[0] raises
    # IndexError when the command is invoked with no user mention.
    if not message.mentions:
        await ctx.send('You must mention a user to challenge!')
        return
    member = message.mentions[0]
    match_requests.append(chessgame.ChessGame(challenger, member))
    await ctx.send('User {0.display_name}#{0.discriminator} has been challenged!'.format(message.mentions[0]))
Challenges user to a match
heroku-bot.py
challenge
Kaweees/Discord-Chess-Bot
6
python
@client.command() async def challenge(ctx: discord.ext.commands.Context): global match_requests message = ctx.message challenger = message.author member = message.mentions[0] match_requests.append(chessgame.ChessGame(challenger, member)) (await ctx.send('User {0.display_name}#{0.discriminator} has been challenged!'.format(message.mentions[0])))
@client.command() async def challenge(ctx: discord.ext.commands.Context): global match_requests message = ctx.message challenger = message.author member = message.mentions[0] match_requests.append(chessgame.ChessGame(challenger, member)) (await ctx.send('User {0.display_name}#{0.discriminator} has been challenged!'.format(message.mentions[0])))<|docstring|>Challenges user to a match<|endoftext|>
4ae278203479a6b58c4a163d0eebf02699717a148baa0b91bc601d27767d028a
@client.command()
async def accept(ctx: discord.ext.commands.Context):
    """Accept a pending challenge addressed to the command author.

    Moves the matching request from ``match_requests`` into ``matches``,
    renders the starting board to board.png, and posts it.
    """
    global match_requests
    global matches
    message = ctx.message
    found = False
    # Iterate over a snapshot: the original looped over match_requests
    # while calling match_requests.remove() inside the loop, which makes
    # the iterator skip the element that follows each removal.
    for request in list(match_requests):
        if request.players[1].id == message.author.id:
            svg = request.board_to_svg()
            with open('board.svg', 'w') as f:
                f.write(svg)
            # Rasterize the SVG board so Discord can display it inline.
            cairosvg.svg2png(url='board.svg', write_to='board.png')
            fi = discord.File('board.png')
            await ctx.send('Challenge from <@{0.id}> has been accepted!'.format(request.players[0]))
            await ctx.send("It is <@{0.id}>'s turn!".format(request.player), file=fi)
            matches.append(request)
            match_requests.remove(request)
            found = True
    if not found:
        await ctx.send('No pending challenges!')
Accepts a user's request
heroku-bot.py
accept
Kaweees/Discord-Chess-Bot
6
python
@client.command() async def accept(ctx: discord.ext.commands.Context): global match_requests global matches message = ctx.message found = False for request in match_requests: if (request.players[1].id == message.author.id): svg = request.board_to_svg() with open('board.svg', 'w') as f: f.write(svg) cairosvg.svg2png(url='board.svg', write_to='board.png') fi = discord.File('board.png') (await ctx.send('Challenge from <@{0.id}> has been accepted!'.format(request.players[0]))) (await ctx.send("It is <@{0.id}>'s turn!".format(request.player), file=fi)) matches.append(request) match_requests.remove(request) found = True if (not found): (await ctx.send('No pending challenges!'))
@client.command() async def accept(ctx: discord.ext.commands.Context): global match_requests global matches message = ctx.message found = False for request in match_requests: if (request.players[1].id == message.author.id): svg = request.board_to_svg() with open('board.svg', 'w') as f: f.write(svg) cairosvg.svg2png(url='board.svg', write_to='board.png') fi = discord.File('board.png') (await ctx.send('Challenge from <@{0.id}> has been accepted!'.format(request.players[0]))) (await ctx.send("It is <@{0.id}>'s turn!".format(request.player), file=fi)) matches.append(request) match_requests.remove(request) found = True if (not found): (await ctx.send('No pending challenges!'))<|docstring|>Accepts a user's request<|endoftext|>
cd86a74a5fdecf0679d97095616adec0dadcbb4ebf614389a3690ed1e90ff6f4
@client.command()
async def move(ctx: discord.ext.commands.Context):
    """Make a move in the author's active match.

    Parses the move from the message text, applies it, posts the updated
    board, and removes the match from ``matches`` when it has ended
    (win, loss, or draw).
    """
    global matches
    message = ctx.message
    move = message.content.split(' ')[1]
    found = False
    # Iterate over a snapshot: matches.remove() inside the loop would
    # otherwise make the iterator skip the element after the removed one.
    for match in list(matches):
        if match.player.id == message.author.id:
            found = True
            (valid, result) = match.make_move(move)
            winner = None
            draw = False
            # result is a PGN-style outcome string once the game is over.
            if result is not None:
                if result == '1-0':
                    winner = match.player
                elif result == '0-1':
                    # The player who did NOT just move won.
                    winner = match.players[match.moves % 2]
                elif result == '1/2-1/2':
                    draw = True
            if not valid:
                await ctx.send("Invalid move, '{0}'".format(move))
            else:
                svg = match.board_to_svg()
                with open('board.svg', 'w') as f:
                    f.write(svg)
                cairosvg.svg2png(url='board.svg', write_to='board.png')
                fi = discord.File('board.png')
                m = "It is now <@{0.id}>'s turn!".format(match.player)
                if winner is not None:
                    m = '<@{0.id}> wins!'.format(winner)
                elif draw is True:
                    m = 'The match was a draw!'
                await ctx.send(m, file=fi)
                if result is not None:
                    matches.remove(match)
    if not found:
        await ctx.send('No match currently.')
Makes move
heroku-bot.py
move
Kaweees/Discord-Chess-Bot
6
python
@client.command() async def move(ctx: discord.ext.commands.Context): global matches message = ctx.message move = message.content.split(' ')[1] found = False for match in matches: if (match.player.id == message.author.id): found = True (valid, result) = match.make_move(move) winner = None draw = False if (result is not None): if (result == '1-0'): winner = match.player elif (result == '0-1'): winner = match.players[(match.moves % 2)] elif (result == '1/2-1/2'): draw = True if (not valid): (await ctx.send("Invalid move, '{0}'".format(move))) else: svg = match.board_to_svg() with open('board.svg', 'w') as f: f.write(svg) cairosvg.svg2png(url='board.svg', write_to='board.png') fi = discord.File('board.png') m = "It is now <@{0.id}>'s turn!".format(match.player) if (winner is not None): m = '<@{0.id}> wins!'.format(winner) elif (draw is True): m = 'The match was a draw!' (await ctx.send(m, file=fi)) if (result is not None): matches.remove(match) if (not found): (await ctx.send('No match currently.'))
@client.command() async def move(ctx: discord.ext.commands.Context): global matches message = ctx.message move = message.content.split(' ')[1] found = False for match in matches: if (match.player.id == message.author.id): found = True (valid, result) = match.make_move(move) winner = None draw = False if (result is not None): if (result == '1-0'): winner = match.player elif (result == '0-1'): winner = match.players[(match.moves % 2)] elif (result == '1/2-1/2'): draw = True if (not valid): (await ctx.send("Invalid move, '{0}'".format(move))) else: svg = match.board_to_svg() with open('board.svg', 'w') as f: f.write(svg) cairosvg.svg2png(url='board.svg', write_to='board.png') fi = discord.File('board.png') m = "It is now <@{0.id}>'s turn!".format(match.player) if (winner is not None): m = '<@{0.id}> wins!'.format(winner) elif (draw is True): m = 'The match was a draw!' (await ctx.send(m, file=fi)) if (result is not None): matches.remove(match) if (not found): (await ctx.send('No match currently.'))<|docstring|>Makes move<|endoftext|>
e6d0f03964379a631a41503fdf35dedab054f6040363630decf8458271461ab5
@client.command()
async def end(ctx: discord.ext.commands.Context):
    """Forfeit and end the author's current match."""
    global matches
    message = ctx.message
    found = False
    # Iterate over a snapshot: removing from matches while iterating it
    # directly would skip the element following the removed one.
    for match in list(matches):
        if match.player.id == message.author.id:
            found = True
            matches.remove(match)
            await ctx.send('Match forfeited.')
    if not found:
        await ctx.send('No match currently.')
Ends match, what a loser
heroku-bot.py
end
Kaweees/Discord-Chess-Bot
6
python
@client.command() async def end(ctx: discord.ext.commands.Context): global matches message = ctx.message found = False for match in matches: if (match.player.id == message.author.id): found = True matches.remove(match) (await ctx.send('Match forfeited.')) if (not found): (await ctx.send('No match currently.'))
@client.command() async def end(ctx: discord.ext.commands.Context): global matches message = ctx.message found = False for match in matches: if (match.player.id == message.author.id): found = True matches.remove(match) (await ctx.send('Match forfeited.')) if (not found): (await ctx.send('No match currently.'))<|docstring|>Ends match, what a loser<|endoftext|>
72091a5c5bea1735a4df27ba146b48f2aa0e0bb431056d52dd2fcd46582940b5
@client.command()
async def server(ctx):
    """Show info about the guild (server) the command was invoked in."""
    # Bug fix: the original read `server.name`, i.e. the name of this
    # command object itself, instead of the Discord guild's name; it also
    # assigned embed.title twice and left `channel` unused.
    guild = ctx.guild
    embed = discord.Embed(title="Kaweees's Player Stats", url='https://google.com')
    embed.title = guild.name
    embed.description = 'Server Info'
    embed.color = 8365648
    await ctx.send(embed=embed)
    await ctx.send(':flag_us: :flag_US: ')
Shows server info
heroku-bot.py
server
Kaweees/Discord-Chess-Bot
6
python
@client.command() async def server(ctx): channel = ctx.message.channel embed = discord.Embed(title="Kaweees's Player Stats", url='https://google.com') embed.title = server.name embed.description = 'Server Info' embed.color = 8365648 embed.title = server.name (await ctx.send(embed=embed)) (await ctx.send(':flag_us: :flag_US: '))
@client.command() async def server(ctx): channel = ctx.message.channel embed = discord.Embed(title="Kaweees's Player Stats", url='https://google.com') embed.title = server.name embed.description = 'Server Info' embed.color = 8365648 embed.title = server.name (await ctx.send(embed=embed)) (await ctx.send(':flag_us: :flag_US: '))<|docstring|>Shows server info<|endoftext|>
61b06c7cc99cc76c8fceee61a7bfe476116965730f4f22e4fa10e6c6083adcca
def fit_predict(self, X, y, X_test, **kwargs):
    """Fit a classifier on (X, y) and predict on X_test.

    Subclasses should override this method as needed.
    (Original docstring: "重写继承即可" -- "just override/inherit this".)

    :return: clf, score, test_preds
    """
    clf = LGBMClassifier()
    clf.fit(X, y)
    # Bug fix: `predict_proba(X)[(:, 1)]` is a syntax error; the intended
    # expression is `[:, 1]` -- the positive-class probability column of
    # the (n_samples, n_classes) array, as required by roc_auc_score.
    score = roc_auc_score(y, clf.predict_proba(X)[:, 1])
    test_preds = clf.predict_proba(X_test)
    return (clf, score, test_preds)
重写继承即可 :return: clf, score, test_preds
tql/ml/semi/pseudo.py
fit_predict
Jie-Yuan/1_DataMining
14
python
def fit_predict(self, X, y, X_test, **kwargs): '重写继承即可\n :return: clf, score, test_preds\n ' clf = LGBMClassifier() clf.fit(X, y) score = roc_auc_score(y, clf.predict_proba(X)[(:, 1)]) test_preds = clf.predict_proba(X_test) return (clf, score, test_preds)
def fit_predict(self, X, y, X_test, **kwargs): '重写继承即可\n :return: clf, score, test_preds\n ' clf = LGBMClassifier() clf.fit(X, y) score = roc_auc_score(y, clf.predict_proba(X)[(:, 1)]) test_preds = clf.predict_proba(X_test) return (clf, score, test_preds)<|docstring|>重写继承即可 :return: clf, score, test_preds<|endoftext|>
01af9b8b3a15865a79d46bb03d36785b081fa789840f85446b9c7e091cc16a20
def _create_cmd(self): 'Return full command to install AFNI.' comment = '#--------------------\n# Install AFNI {}\n#--------------------'.format(self.version) if self.use_binaries: chunks = [comment, self.install_binaries()] else: raise ValueError('`use_binaries=True` is the only available option at this time.') return '\n'.join(chunks)
Return full command to install AFNI.
neurodocker/interfaces/afni.py
_create_cmd
giovtorres/neurodocker
1
python
def _create_cmd(self): comment = '#--------------------\n# Install AFNI {}\n#--------------------'.format(self.version) if self.use_binaries: chunks = [comment, self.install_binaries()] else: raise ValueError('`use_binaries=True` is the only available option at this time.') return '\n'.join(chunks)
def _create_cmd(self): comment = '#--------------------\n# Install AFNI {}\n#--------------------'.format(self.version) if self.use_binaries: chunks = [comment, self.install_binaries()] else: raise ValueError('`use_binaries=True` is the only available option at this time.') return '\n'.join(chunks)<|docstring|>Return full command to install AFNI.<|endoftext|>
d1d6286aaa5b5ad42850203801dcc33abe1ab766a8b10cae8706cf4ed09a0d87
def install_binaries(self):
    'Return Dockerfile instructions to download and install AFNI\n binaries.\n '
    # URL of the pre-built AFNI binaries for the requested version.
    url = self._get_binaries_urls(self.version)
    if self.check_urls:
        check_url(url)
    # System packages the AFNI binaries depend on (per package manager).
    pkgs = self._get_binaries_dependencies()
    # The first .format() fills {install}/{clean} placeholders from the
    # package-manager command map; the second fills the {pkgs} placeholder
    # embedded inside the install template itself.
    cmd = '{install}\n&& libs_path=/usr/lib/x86_64-linux-gnu\n&& if [ -f $libs_path/libgsl.so.19 ]; then\n ln $libs_path/libgsl.so.19 $libs_path/libgsl.so.0;\n fi'.format(**manage_pkgs[self.pkg_manager]).format(pkgs=pkgs)
    if (self.pkg_manager == 'apt'):
        # libxp6 is absent from some Ubuntu/Debian repositories; fall back
        # to downloading and dpkg-installing the .deb directly.
        deb_url = 'http://mirrors.kernel.org/debian/pool/main/libx/libxp/libxp6_1.0.2-2_amd64.deb'
        cmd += '\n&& echo "Install libxp (not in all ubuntu/debian repositories)"\n&& apt-get install -yq --no-install-recommends libxp6\n|| /bin/bash -c "\n curl --retry 5 -o /tmp/libxp6.deb -sSL {}\n && dpkg -i /tmp/libxp6.deb && rm -f /tmp/libxp6.deb"'.format(deb_url)
        # Same apt-or-direct-.deb fallback strategy for libpng12.
        deb_url = 'http://mirrors.kernel.org/debian/pool/main/libp/libpng/libpng12-0_1.2.49-1%2Bdeb7u2_amd64.deb'
        cmd += '\n&& echo "Install libpng12 (not in all ubuntu/debian repositories"\n&& apt-get install -yq --no-install-recommends libpng12-0\n|| /bin/bash -c "\n curl --retry 5 -o /tmp/libpng12.deb -sSL {}\n && dpkg -i /tmp/libpng12.deb && rm -f /tmp/libpng12.deb"'.format(deb_url)
        if self.install_r:
            # NOTE(review): nesting reconstructed from flattened source --
            # this R install uses apt-get, so it is assumed to belong
            # inside the apt branch; confirm against upstream neurodocker.
            sh_url = 'https://gist.githubusercontent.com/kaczmarj/8e3792ae1af70b03788163c44f453b43/raw/0577c62e4771236adf0191c826a25249eb69a130/R_installer_debian_ubuntu.sh'
            cmd += '\n&& echo "Install R"\n&& apt-get install -yq --no-install-recommends\n\tr-base-dev r-cran-rmpi libnlopt-dev\n || /bin/bash -c "\n curl --retry 5 -o /tmp/install_R.sh -sSL {}\n && /bin/bash /tmp/install_R.sh"'.format(sh_url)
    # Clean package caches, then download and unpack the AFNI tarball
    # into /opt/afni.
    cmd += '\n&& {clean}\n&& echo "Downloading AFNI ..."\n&& mkdir -p /opt/afni\n&& curl -sSL --retry 5 {}\n| tar zx -C /opt/afni --strip-components=1'.format(url, **manage_pkgs[self.pkg_manager])
    if self.install_r:
        # Install all R packages AFNI can use, then clear temp files.
        cmd += '\n&& /opt/afni/rPkgsInstall -pkgs ALL\n&& rm -rf /tmp/*'
    cmd = indent('RUN', cmd)
    # Prepend /opt/afni to PATH via a separate ENV instruction, emitted
    # before the RUN block.
    env_cmd = 'PATH=/opt/afni:$PATH'
    env_cmd = indent('ENV', env_cmd)
    return '\n'.join((env_cmd, cmd))
Return Dockerfile instructions to download and install AFNI binaries.
neurodocker/interfaces/afni.py
install_binaries
giovtorres/neurodocker
1
python
def install_binaries(self): 'Return Dockerfile instructions to download and install AFNI\n binaries.\n ' url = self._get_binaries_urls(self.version) if self.check_urls: check_url(url) pkgs = self._get_binaries_dependencies() cmd = '{install}\n&& libs_path=/usr/lib/x86_64-linux-gnu\n&& if [ -f $libs_path/libgsl.so.19 ]; then\n ln $libs_path/libgsl.so.19 $libs_path/libgsl.so.0;\n fi'.format(**manage_pkgs[self.pkg_manager]).format(pkgs=pkgs) if (self.pkg_manager == 'apt'): deb_url = 'http://mirrors.kernel.org/debian/pool/main/libx/libxp/libxp6_1.0.2-2_amd64.deb' cmd += '\n&& echo "Install libxp (not in all ubuntu/debian repositories)"\n&& apt-get install -yq --no-install-recommends libxp6\n|| /bin/bash -c "\n curl --retry 5 -o /tmp/libxp6.deb -sSL {}\n && dpkg -i /tmp/libxp6.deb && rm -f /tmp/libxp6.deb"'.format(deb_url) deb_url = 'http://mirrors.kernel.org/debian/pool/main/libp/libpng/libpng12-0_1.2.49-1%2Bdeb7u2_amd64.deb' cmd += '\n&& echo "Install libpng12 (not in all ubuntu/debian repositories"\n&& apt-get install -yq --no-install-recommends libpng12-0\n|| /bin/bash -c "\n curl --retry 5 -o /tmp/libpng12.deb -sSL {}\n && dpkg -i /tmp/libpng12.deb && rm -f /tmp/libpng12.deb"'.format(deb_url) if self.install_r: sh_url = 'https://gist.githubusercontent.com/kaczmarj/8e3792ae1af70b03788163c44f453b43/raw/0577c62e4771236adf0191c826a25249eb69a130/R_installer_debian_ubuntu.sh' cmd += '\n&& echo "Install R"\n&& apt-get install -yq --no-install-recommends\n\tr-base-dev r-cran-rmpi libnlopt-dev\n || /bin/bash -c "\n curl --retry 5 -o /tmp/install_R.sh -sSL {}\n && /bin/bash /tmp/install_R.sh"'.format(sh_url) cmd += '\n&& {clean}\n&& echo "Downloading AFNI ..."\n&& mkdir -p /opt/afni\n&& curl -sSL --retry 5 {}\n| tar zx -C /opt/afni --strip-components=1'.format(url, **manage_pkgs[self.pkg_manager]) if self.install_r: cmd += '\n&& /opt/afni/rPkgsInstall -pkgs ALL\n&& rm -rf /tmp/*' cmd = indent('RUN', cmd) env_cmd = 'PATH=/opt/afni:$PATH' env_cmd = indent('ENV', env_cmd) 
return '\n'.join((env_cmd, cmd))
def install_binaries(self): 'Return Dockerfile instructions to download and install AFNI\n binaries.\n ' url = self._get_binaries_urls(self.version) if self.check_urls: check_url(url) pkgs = self._get_binaries_dependencies() cmd = '{install}\n&& libs_path=/usr/lib/x86_64-linux-gnu\n&& if [ -f $libs_path/libgsl.so.19 ]; then\n ln $libs_path/libgsl.so.19 $libs_path/libgsl.so.0;\n fi'.format(**manage_pkgs[self.pkg_manager]).format(pkgs=pkgs) if (self.pkg_manager == 'apt'): deb_url = 'http://mirrors.kernel.org/debian/pool/main/libx/libxp/libxp6_1.0.2-2_amd64.deb' cmd += '\n&& echo "Install libxp (not in all ubuntu/debian repositories)"\n&& apt-get install -yq --no-install-recommends libxp6\n|| /bin/bash -c "\n curl --retry 5 -o /tmp/libxp6.deb -sSL {}\n && dpkg -i /tmp/libxp6.deb && rm -f /tmp/libxp6.deb"'.format(deb_url) deb_url = 'http://mirrors.kernel.org/debian/pool/main/libp/libpng/libpng12-0_1.2.49-1%2Bdeb7u2_amd64.deb' cmd += '\n&& echo "Install libpng12 (not in all ubuntu/debian repositories"\n&& apt-get install -yq --no-install-recommends libpng12-0\n|| /bin/bash -c "\n curl --retry 5 -o /tmp/libpng12.deb -sSL {}\n && dpkg -i /tmp/libpng12.deb && rm -f /tmp/libpng12.deb"'.format(deb_url) if self.install_r: sh_url = 'https://gist.githubusercontent.com/kaczmarj/8e3792ae1af70b03788163c44f453b43/raw/0577c62e4771236adf0191c826a25249eb69a130/R_installer_debian_ubuntu.sh' cmd += '\n&& echo "Install R"\n&& apt-get install -yq --no-install-recommends\n\tr-base-dev r-cran-rmpi libnlopt-dev\n || /bin/bash -c "\n curl --retry 5 -o /tmp/install_R.sh -sSL {}\n && /bin/bash /tmp/install_R.sh"'.format(sh_url) cmd += '\n&& {clean}\n&& echo "Downloading AFNI ..."\n&& mkdir -p /opt/afni\n&& curl -sSL --retry 5 {}\n| tar zx -C /opt/afni --strip-components=1'.format(url, **manage_pkgs[self.pkg_manager]) if self.install_r: cmd += '\n&& /opt/afni/rPkgsInstall -pkgs ALL\n&& rm -rf /tmp/*' cmd = indent('RUN', cmd) env_cmd = 'PATH=/opt/afni:$PATH' env_cmd = indent('ENV', env_cmd) 
return '\n'.join((env_cmd, cmd))<|docstring|>Return Dockerfile instructions to download and install AFNI binaries.<|endoftext|>
acfe4e65faa300c2061082144d516c6011ef50676888e2f3d9179037d619e6f1
def flatten(list_of_lists):
    '[[a, b], [c, d]] -> [a, b, c, d]'
    # Lazily yield every element of every inner iterable, in order.
    for inner in list_of_lists:
        yield from inner
[[a, b], [c, d]] -> [a, b, c, d]
src/mesher/voxel.py
flatten
fhennecker/semiteleporter
0
python
def flatten(list_of_lists): for lst in list_of_lists: for elem in lst: (yield elem)
def flatten(list_of_lists): for lst in list_of_lists: for elem in lst: (yield elem)<|docstring|>[[a, b], [c, d]] -> [a, b, c, d]<|endoftext|>
5838709307436e1d1f52fd833138adfcf5f27c6d44ac225cdcd0c5035e3b8671
def combine(Xrange, Yrange, Zrange):
    '\n\tGenerate all possible combinations of x,y,z\n\t'
    # Cartesian product, yielded lazily in row-major (x, y, z) order.
    for x in Xrange:
        for y in Yrange:
            yield from ((x, y, z) for z in Zrange)
Generate all possible combinations of x,y,z
src/mesher/voxel.py
combine
fhennecker/semiteleporter
0
python
def combine(Xrange, Yrange, Zrange): '\n\t\n\t' for x in Xrange: for y in Yrange: for z in Zrange: (yield (x, y, z))
def combine(Xrange, Yrange, Zrange): '\n\t\n\t' for x in Xrange: for y in Yrange: for z in Zrange: (yield (x, y, z))<|docstring|>Generate all possible combinations of x,y,z<|endoftext|>
d146b3daa552ced32ff1d4229afd5e07bf8891432b790f2dea2369650b2e6e97
def voxelIndexForPoint(self, point):
    ' Returns the index of the voxel the point x, y, z belongs to '
    # floor() rather than int() so negative coordinates map to the
    # correct (lower) voxel index.
    size = self.voxelSize
    return (int(floor(point.x / size)),
            int(floor(point.y / size)),
            int(floor(point.z / size)))
Returns the index of the voxel the point x, y, z belongs to
src/mesher/voxel.py
voxelIndexForPoint
fhennecker/semiteleporter
0
python
def voxelIndexForPoint(self, point): ' ' fl = (lambda x: int(floor(x))) xVoxel = fl((point.x / self.voxelSize)) yVoxel = fl((point.y / self.voxelSize)) zVoxel = fl((point.z / self.voxelSize)) return (xVoxel, yVoxel, zVoxel)
def voxelIndexForPoint(self, point): ' ' fl = (lambda x: int(floor(x))) xVoxel = fl((point.x / self.voxelSize)) yVoxel = fl((point.y / self.voxelSize)) zVoxel = fl((point.z / self.voxelSize)) return (xVoxel, yVoxel, zVoxel)<|docstring|>Returns the index of the voxel the point x, y, z belongs to<|endoftext|>
515dd1e16f3995dd7c5f5884f41524330dcb66fddd26c397200d38c86705a26f
def addPoints(self, pointsList): ' Adds a list of points in this format (lists can be changed to tuples):\n\t\t\t[[x1, y1, z1], [x2, y2, z2], ...] ' for point in pointsList: self.addPoint(point)
Adds a list of points in this format (lists can be changed to tuples): [[x1, y1, z1], [x2, y2, z2], ...]
src/mesher/voxel.py
addPoints
fhennecker/semiteleporter
0
python
def addPoints(self, pointsList): ' Adds a list of points in this format (lists can be changed to tuples):\n\t\t\t[[x1, y1, z1], [x2, y2, z2], ...] ' for point in pointsList: self.addPoint(point)
def addPoints(self, pointsList): ' Adds a list of points in this format (lists can be changed to tuples):\n\t\t\t[[x1, y1, z1], [x2, y2, z2], ...] ' for point in pointsList: self.addPoint(point)<|docstring|>Adds a list of points in this format (lists can be changed to tuples): [[x1, y1, z1], [x2, y2, z2], ...]<|endoftext|>
2d0899acfea42d8d73600ab176318491d1843eff4595ee4793c941e2baf7870b
def allPoints(self): ' Returns a list of all points contained in the VoxelSpace ' res = [] for voxel in self.voxels: res += self.voxels[voxel] return res
Returns a list of all points contained in the VoxelSpace
src/mesher/voxel.py
allPoints
fhennecker/semiteleporter
0
python
def allPoints(self): ' ' res = [] for voxel in self.voxels: res += self.voxels[voxel] return res
def allPoints(self): ' ' res = [] for voxel in self.voxels: res += self.voxels[voxel] return res<|docstring|>Returns a list of all points contained in the VoxelSpace<|endoftext|>
5f32ee13027e0612f5c99d5a8d6bb525db44ad79bf956c705d0709a84ee7ac78
def pointsInCube(self, vx, vy, vz, neighbours=0): '\n\t\tReturns a list of all points within a cube centered on vx,vy,vz\n\t\textended to neighbours\n\t\t' return self.pointsInVoxels(self.voxelsInLayer(vx, vy, vz, 0, neighbours))
Returns a list of all points within a cube centered on vx,vy,vz extended to neighbours
src/mesher/voxel.py
pointsInCube
fhennecker/semiteleporter
0
python
def pointsInCube(self, vx, vy, vz, neighbours=0): '\n\t\tReturns a list of all points within a cube centered on vx,vy,vz\n\t\textended to neighbours\n\t\t' return self.pointsInVoxels(self.voxelsInLayer(vx, vy, vz, 0, neighbours))
def pointsInCube(self, vx, vy, vz, neighbours=0): '\n\t\tReturns a list of all points within a cube centered on vx,vy,vz\n\t\textended to neighbours\n\t\t' return self.pointsInVoxels(self.voxelsInLayer(vx, vy, vz, 0, neighbours))<|docstring|>Returns a list of all points within a cube centered on vx,vy,vz extended to neighbours<|endoftext|>
8e2f6b38ccf4b0a881d4a5399c7be29cbc7deb0eed07448d92dec394e0dfb19e
def voxelsInLayer(self, vx, vy, vz, inner=1, outer=2): '\n\t\tReturns a list of all voxels containing points within a hollow voxel cube.\n\t\t' (xmin, xmax, ymin, ymax) = (((vx - outer) + 1), (vx + outer), ((vy - outer) + 1), (vy + outer)) voxels = list(self.range3D(xmin, xmax, ymin, ymax, ((vz - outer) + 1), ((vz - inner) + 1))) voxels += list(self.range3D(xmin, xmax, ymin, ymax, (vz + inner), (vz + outer))) (ymin, ymax, zmin, zmax) = (((vy - outer) + 1), (vy + outer), ((vz - inner) + 1), (vz + inner)) voxels += list(self.range3D(((vx - outer) + 1), ((vx - inner) + 1), ymin, ymax, zmin, zmax)) voxels += list(self.range3D((vx + inner), (vx + outer), ymin, ymax, zmin, zmax)) (xmin, xmax, zmin, zmax) = (((vx - inner) + 1), (vx + inner), ((vz - inner) + 1), (vz + inner)) voxels += list(self.range3D(xmin, xmax, ((vy - outer) + 1), ((vy - inner) + 1), zmin, zmax)) voxels += list(self.range3D(xmin, xmax, (vy + inner), (vy + outer), zmin, zmax)) return list(set(ifilter(self.voxels.get, voxels)))
Returns a list of all voxels containing points within a hollow voxel cube.
src/mesher/voxel.py
voxelsInLayer
fhennecker/semiteleporter
0
python
def voxelsInLayer(self, vx, vy, vz, inner=1, outer=2): '\n\t\t\n\t\t' (xmin, xmax, ymin, ymax) = (((vx - outer) + 1), (vx + outer), ((vy - outer) + 1), (vy + outer)) voxels = list(self.range3D(xmin, xmax, ymin, ymax, ((vz - outer) + 1), ((vz - inner) + 1))) voxels += list(self.range3D(xmin, xmax, ymin, ymax, (vz + inner), (vz + outer))) (ymin, ymax, zmin, zmax) = (((vy - outer) + 1), (vy + outer), ((vz - inner) + 1), (vz + inner)) voxels += list(self.range3D(((vx - outer) + 1), ((vx - inner) + 1), ymin, ymax, zmin, zmax)) voxels += list(self.range3D((vx + inner), (vx + outer), ymin, ymax, zmin, zmax)) (xmin, xmax, zmin, zmax) = (((vx - inner) + 1), (vx + inner), ((vz - inner) + 1), (vz + inner)) voxels += list(self.range3D(xmin, xmax, ((vy - outer) + 1), ((vy - inner) + 1), zmin, zmax)) voxels += list(self.range3D(xmin, xmax, (vy + inner), (vy + outer), zmin, zmax)) return list(set(ifilter(self.voxels.get, voxels)))
def voxelsInLayer(self, vx, vy, vz, inner=1, outer=2): '\n\t\t\n\t\t' (xmin, xmax, ymin, ymax) = (((vx - outer) + 1), (vx + outer), ((vy - outer) + 1), (vy + outer)) voxels = list(self.range3D(xmin, xmax, ymin, ymax, ((vz - outer) + 1), ((vz - inner) + 1))) voxels += list(self.range3D(xmin, xmax, ymin, ymax, (vz + inner), (vz + outer))) (ymin, ymax, zmin, zmax) = (((vy - outer) + 1), (vy + outer), ((vz - inner) + 1), (vz + inner)) voxels += list(self.range3D(((vx - outer) + 1), ((vx - inner) + 1), ymin, ymax, zmin, zmax)) voxels += list(self.range3D((vx + inner), (vx + outer), ymin, ymax, zmin, zmax)) (xmin, xmax, zmin, zmax) = (((vx - inner) + 1), (vx + inner), ((vz - inner) + 1), (vz + inner)) voxels += list(self.range3D(xmin, xmax, ((vy - outer) + 1), ((vy - inner) + 1), zmin, zmax)) voxels += list(self.range3D(xmin, xmax, (vy + inner), (vy + outer), zmin, zmax)) return list(set(ifilter(self.voxels.get, voxels)))<|docstring|>Returns a list of all voxels containing points within a hollow voxel cube.<|endoftext|>
c6f0809e47fd3a1e3bea06257753c6664ff5533dbe887338d47127587e9ae19f
def voxelsInRegion(self, cornerA, cornerB): ' Returns all voxels within the parallelepipedic \n\t\t\tregion defined by the two corners in argument ' def rangeBuilder(a, b): if (a < b): return (a, (b + 1)) else: return (b, (a + 1)) args = ((rangeBuilder(cornerA[0], cornerB[0]) + rangeBuilder(cornerA[1], cornerB[1])) + rangeBuilder(cornerA[2], cornerB[2])) return ifilter(self.voxels.get, self.range3D(*args))
Returns all voxels within the parallelepipedic region defined by the two corners in argument
src/mesher/voxel.py
voxelsInRegion
fhennecker/semiteleporter
0
python
def voxelsInRegion(self, cornerA, cornerB): ' Returns all voxels within the parallelepipedic \n\t\t\tregion defined by the two corners in argument ' def rangeBuilder(a, b): if (a < b): return (a, (b + 1)) else: return (b, (a + 1)) args = ((rangeBuilder(cornerA[0], cornerB[0]) + rangeBuilder(cornerA[1], cornerB[1])) + rangeBuilder(cornerA[2], cornerB[2])) return ifilter(self.voxels.get, self.range3D(*args))
def voxelsInRegion(self, cornerA, cornerB): ' Returns all voxels within the parallelepipedic \n\t\t\tregion defined by the two corners in argument ' def rangeBuilder(a, b): if (a < b): return (a, (b + 1)) else: return (b, (a + 1)) args = ((rangeBuilder(cornerA[0], cornerB[0]) + rangeBuilder(cornerA[1], cornerB[1])) + rangeBuilder(cornerA[2], cornerB[2])) return ifilter(self.voxels.get, self.range3D(*args))<|docstring|>Returns all voxels within the parallelepipedic region defined by the two corners in argument<|endoftext|>
5fa08b324344375341fd69b30b6a03024ca69486bb0baeae18ae4426ec94bfc6
def closestPointsToEdge(self, a, b, distanceLimit): ' Finds the k closest points to edge a, b, with a voxel distance limit ' aVoxel = self.voxelIndexForPoint(a) bVoxel = self.voxelIndexForPoint(b) distance = (lambda p: (norm3D((a - p)) + norm3D((b - p)))) eligible = (lambda p: (p not in (a, b))) points = self.pointsInVoxels(self.voxelsInRegion(aVoxel, bVoxel)) (yield sorted(ifilter(eligible, points), key=distance)) for layer in xrange(1, distanceLimit): points = self.pointsInVoxels(self.voxelsAroundRegion(aVoxel, bVoxel, layer)) (yield sorted(points, key=distance))
Finds the k closest points to edge a, b, with a voxel distance limit
src/mesher/voxel.py
closestPointsToEdge
fhennecker/semiteleporter
0
python
def closestPointsToEdge(self, a, b, distanceLimit): ' ' aVoxel = self.voxelIndexForPoint(a) bVoxel = self.voxelIndexForPoint(b) distance = (lambda p: (norm3D((a - p)) + norm3D((b - p)))) eligible = (lambda p: (p not in (a, b))) points = self.pointsInVoxels(self.voxelsInRegion(aVoxel, bVoxel)) (yield sorted(ifilter(eligible, points), key=distance)) for layer in xrange(1, distanceLimit): points = self.pointsInVoxels(self.voxelsAroundRegion(aVoxel, bVoxel, layer)) (yield sorted(points, key=distance))
def closestPointsToEdge(self, a, b, distanceLimit): ' ' aVoxel = self.voxelIndexForPoint(a) bVoxel = self.voxelIndexForPoint(b) distance = (lambda p: (norm3D((a - p)) + norm3D((b - p)))) eligible = (lambda p: (p not in (a, b))) points = self.pointsInVoxels(self.voxelsInRegion(aVoxel, bVoxel)) (yield sorted(ifilter(eligible, points), key=distance)) for layer in xrange(1, distanceLimit): points = self.pointsInVoxels(self.voxelsAroundRegion(aVoxel, bVoxel, layer)) (yield sorted(points, key=distance))<|docstring|>Finds the k closest points to edge a, b, with a voxel distance limit<|endoftext|>
50e2a1d026cd946db398d073301bf7fe1e77e37694bc2d29d2c2514f57005630
def closestPointTo(self, point, distanceLimit=10, requiresDifferent=False): " Finds and returns the closest point to (x, y z) \n\t\t\twe'll only look in voxels within distanceLimit (distance in voxels)" (cx, cy, cz) = self.voxelIndexForPoint(point) for i in xrange(distanceLimit): points = self.pointsInVoxels(self.voxelsInLayer(cx, cy, cz, i, (i + 1))) if points: resList = sorted(points, key=point.distance) if (len(resList) == 0): continue if ((not requiresDifferent) or (requiresDifferent and (resList[0] != point))): return resList[0] elif (len(resList) > 1): return resList[1] else: continue return None
Finds and returns the closest point to (x, y z) we'll only look in voxels within distanceLimit (distance in voxels)
src/mesher/voxel.py
closestPointTo
fhennecker/semiteleporter
0
python
def closestPointTo(self, point, distanceLimit=10, requiresDifferent=False): " Finds and returns the closest point to (x, y z) \n\t\t\twe'll only look in voxels within distanceLimit (distance in voxels)" (cx, cy, cz) = self.voxelIndexForPoint(point) for i in xrange(distanceLimit): points = self.pointsInVoxels(self.voxelsInLayer(cx, cy, cz, i, (i + 1))) if points: resList = sorted(points, key=point.distance) if (len(resList) == 0): continue if ((not requiresDifferent) or (requiresDifferent and (resList[0] != point))): return resList[0] elif (len(resList) > 1): return resList[1] else: continue return None
def closestPointTo(self, point, distanceLimit=10, requiresDifferent=False): " Finds and returns the closest point to (x, y z) \n\t\t\twe'll only look in voxels within distanceLimit (distance in voxels)" (cx, cy, cz) = self.voxelIndexForPoint(point) for i in xrange(distanceLimit): points = self.pointsInVoxels(self.voxelsInLayer(cx, cy, cz, i, (i + 1))) if points: resList = sorted(points, key=point.distance) if (len(resList) == 0): continue if ((not requiresDifferent) or (requiresDifferent and (resList[0] != point))): return resList[0] elif (len(resList) > 1): return resList[1] else: continue return None<|docstring|>Finds and returns the closest point to (x, y z) we'll only look in voxels within distanceLimit (distance in voxels)<|endoftext|>
5b6d80eba48a6b9289c56418f2f0af704d5e92d6b045926600e94272efc4b12f
def _resource_from_cache_prefix(resource, cache): '\n Combine the resource name with the cache prefix (if any)\n ' if getattr(cache, 'key_prefix', None): name = '{} {}'.format(resource, cache.key_prefix) else: name = resource return name.lower()
Combine the resource name with the cache prefix (if any)
ddtrace/contrib/flask_cache/utils.py
_resource_from_cache_prefix
mastizada/dd-trace-py
308
python
def _resource_from_cache_prefix(resource, cache): '\n \n ' if getattr(cache, 'key_prefix', None): name = '{} {}'.format(resource, cache.key_prefix) else: name = resource return name.lower()
def _resource_from_cache_prefix(resource, cache): '\n \n ' if getattr(cache, 'key_prefix', None): name = '{} {}'.format(resource, cache.key_prefix) else: name = resource return name.lower()<|docstring|>Combine the resource name with the cache prefix (if any)<|endoftext|>
20af02246489561131c946fa6ce8f8544326e52773fb3b59bf6a64124649501a
def _extract_client(cache, write=False): '\n Get the client from the cache instance according to the current operation\n ' client = getattr(cache, '_client', None) if (client is None): client = getattr(cache, ('_write_client' if write else '_read_clients'), None) return client
Get the client from the cache instance according to the current operation
ddtrace/contrib/flask_cache/utils.py
_extract_client
mastizada/dd-trace-py
308
python
def _extract_client(cache, write=False): '\n \n ' client = getattr(cache, '_client', None) if (client is None): client = getattr(cache, ('_write_client' if write else '_read_clients'), None) return client
def _extract_client(cache, write=False): '\n \n ' client = getattr(cache, '_client', None) if (client is None): client = getattr(cache, ('_write_client' if write else '_read_clients'), None) return client<|docstring|>Get the client from the cache instance according to the current operation<|endoftext|>
6ebc36610aa38347ec60c6067185153a14f0e0d84d2c313afbe8aee9e8c37fd5
def _extract_conn_tags(client): '\n For the given client extracts connection tags\n ' tags = {} if hasattr(client, 'servers'): if (isinstance(client.servers, list) and (len(client.servers) > 0)): contact_point = client.servers[0].address tags[net.TARGET_HOST] = contact_point[0] tags[net.TARGET_PORT] = contact_point[1] elif hasattr(client, 'connection_pool'): redis_tags = extract_redis_tags(client.connection_pool.connection_kwargs) tags.update(**redis_tags) elif hasattr(client, 'addresses'): addrs = parse_addresses(client.addresses) if addrs: (_, host, port, _) = addrs[0] tags[net.TARGET_PORT] = port tags[net.TARGET_HOST] = host return tags
For the given client extracts connection tags
ddtrace/contrib/flask_cache/utils.py
_extract_conn_tags
mastizada/dd-trace-py
308
python
def _extract_conn_tags(client): '\n \n ' tags = {} if hasattr(client, 'servers'): if (isinstance(client.servers, list) and (len(client.servers) > 0)): contact_point = client.servers[0].address tags[net.TARGET_HOST] = contact_point[0] tags[net.TARGET_PORT] = contact_point[1] elif hasattr(client, 'connection_pool'): redis_tags = extract_redis_tags(client.connection_pool.connection_kwargs) tags.update(**redis_tags) elif hasattr(client, 'addresses'): addrs = parse_addresses(client.addresses) if addrs: (_, host, port, _) = addrs[0] tags[net.TARGET_PORT] = port tags[net.TARGET_HOST] = host return tags
def _extract_conn_tags(client): '\n \n ' tags = {} if hasattr(client, 'servers'): if (isinstance(client.servers, list) and (len(client.servers) > 0)): contact_point = client.servers[0].address tags[net.TARGET_HOST] = contact_point[0] tags[net.TARGET_PORT] = contact_point[1] elif hasattr(client, 'connection_pool'): redis_tags = extract_redis_tags(client.connection_pool.connection_kwargs) tags.update(**redis_tags) elif hasattr(client, 'addresses'): addrs = parse_addresses(client.addresses) if addrs: (_, host, port, _) = addrs[0] tags[net.TARGET_PORT] = port tags[net.TARGET_HOST] = host return tags<|docstring|>For the given client extracts connection tags<|endoftext|>
1dcd48a5441e5202d60b7a866ca703fef1b4e9bc610d3cc8418a62c2a6b0f594
def __init__(self, memory_size, alpha): ' Prioritized experience replay buffer initialization.\n\n Parameters\n ----------\n memory_size : int\n sample size to be stored\n batch_size : int\n batch size to be selected by `select` method\n alpha: float\n exponent determine how much prioritization.\n Prob_i \\sim priority_i**alpha/sum(priority**alpha)\n ' self.tree = SumTree(memory_size) self.memory_size = memory_size self.alpha = alpha self.bonus_priority = 999 self.epsilon_priority = 1e-06 if (self.alpha == 0): self.bonus_priority = 0
Prioritized experience replay buffer initialization. Parameters ---------- memory_size : int sample size to be stored batch_size : int batch size to be selected by `select` method alpha: float exponent determine how much prioritization. Prob_i \sim priority_i**alpha/sum(priority**alpha)
teachDRL/teachers/algos/LPReplayBuffer.py
__init__
flowersteam/meta-acl
2
python
def __init__(self, memory_size, alpha): ' Prioritized experience replay buffer initialization.\n\n Parameters\n ----------\n memory_size : int\n sample size to be stored\n batch_size : int\n batch size to be selected by `select` method\n alpha: float\n exponent determine how much prioritization.\n Prob_i \\sim priority_i**alpha/sum(priority**alpha)\n ' self.tree = SumTree(memory_size) self.memory_size = memory_size self.alpha = alpha self.bonus_priority = 999 self.epsilon_priority = 1e-06 if (self.alpha == 0): self.bonus_priority = 0
def __init__(self, memory_size, alpha): ' Prioritized experience replay buffer initialization.\n\n Parameters\n ----------\n memory_size : int\n sample size to be stored\n batch_size : int\n batch size to be selected by `select` method\n alpha: float\n exponent determine how much prioritization.\n Prob_i \\sim priority_i**alpha/sum(priority**alpha)\n ' self.tree = SumTree(memory_size) self.memory_size = memory_size self.alpha = alpha self.bonus_priority = 999 self.epsilon_priority = 1e-06 if (self.alpha == 0): self.bonus_priority = 0<|docstring|>Prioritized experience replay buffer initialization. Parameters ---------- memory_size : int sample size to be stored batch_size : int batch size to be selected by `select` method alpha: float exponent determine how much prioritization. Prob_i \sim priority_i**alpha/sum(priority**alpha)<|endoftext|>
03fd6d610faf66df4d95cef9552ea1edbd5de58b3046018f66e030839d036b87
def add(self, data, priority): " Add new sample.\n\n Parameters\n ----------\n data : object\n new sample\n priority : float\n sample's priority\n " if (((priority / 100) >= self.bonus_priority) and (self.alpha != 0.0)): print('WARNING, YOUR BONUS PRIORITY IS TOO LOW') exit(0) self.tree.add(data, (((priority ** self.alpha) + self.bonus_priority) + self.epsilon_priority))
Add new sample. Parameters ---------- data : object new sample priority : float sample's priority
teachDRL/teachers/algos/LPReplayBuffer.py
add
flowersteam/meta-acl
2
python
def add(self, data, priority): " Add new sample.\n\n Parameters\n ----------\n data : object\n new sample\n priority : float\n sample's priority\n " if (((priority / 100) >= self.bonus_priority) and (self.alpha != 0.0)): print('WARNING, YOUR BONUS PRIORITY IS TOO LOW') exit(0) self.tree.add(data, (((priority ** self.alpha) + self.bonus_priority) + self.epsilon_priority))
def add(self, data, priority): " Add new sample.\n\n Parameters\n ----------\n data : object\n new sample\n priority : float\n sample's priority\n " if (((priority / 100) >= self.bonus_priority) and (self.alpha != 0.0)): print('WARNING, YOUR BONUS PRIORITY IS TOO LOW') exit(0) self.tree.add(data, (((priority ** self.alpha) + self.bonus_priority) + self.epsilon_priority))<|docstring|>Add new sample. Parameters ---------- data : object new sample priority : float sample's priority<|endoftext|>
d06178b5dacf142e0d69e0ae82a45c1210c6a9d26bd90ec39b533bd67ddeec49
def select(self, batch_size): ' The method return samples randomly.\n\n Parameters\n ----------\n beta : float\n\n Returns\n -------\n out :\n list of samples\n weights:\n list of weight\n indices:\n list of sample indices\n The indices indicate sample positions in a sum tree.\n ' out = [] indices = [] priorities = [] avoid_resampling = False for _ in range(batch_size): r = random.random() (data, priority, index) = self.tree.find(r) priorities.append(priority) indices.append(index) out.append(data) if avoid_resampling: self.priority_update([index], [self.epsilon_priority]) for i in range(len(priorities)): if (priorities[i] >= self.bonus_priority): priorities[i] -= self.bonus_priority self.priority_update([indices[i]], [priorities[i]]) return (out, indices)
The method return samples randomly. Parameters ---------- beta : float Returns ------- out : list of samples weights: list of weight indices: list of sample indices The indices indicate sample positions in a sum tree.
teachDRL/teachers/algos/LPReplayBuffer.py
select
flowersteam/meta-acl
2
python
def select(self, batch_size): ' The method return samples randomly.\n\n Parameters\n ----------\n beta : float\n\n Returns\n -------\n out :\n list of samples\n weights:\n list of weight\n indices:\n list of sample indices\n The indices indicate sample positions in a sum tree.\n ' out = [] indices = [] priorities = [] avoid_resampling = False for _ in range(batch_size): r = random.random() (data, priority, index) = self.tree.find(r) priorities.append(priority) indices.append(index) out.append(data) if avoid_resampling: self.priority_update([index], [self.epsilon_priority]) for i in range(len(priorities)): if (priorities[i] >= self.bonus_priority): priorities[i] -= self.bonus_priority self.priority_update([indices[i]], [priorities[i]]) return (out, indices)
def select(self, batch_size): ' The method return samples randomly.\n\n Parameters\n ----------\n beta : float\n\n Returns\n -------\n out :\n list of samples\n weights:\n list of weight\n indices:\n list of sample indices\n The indices indicate sample positions in a sum tree.\n ' out = [] indices = [] priorities = [] avoid_resampling = False for _ in range(batch_size): r = random.random() (data, priority, index) = self.tree.find(r) priorities.append(priority) indices.append(index) out.append(data) if avoid_resampling: self.priority_update([index], [self.epsilon_priority]) for i in range(len(priorities)): if (priorities[i] >= self.bonus_priority): priorities[i] -= self.bonus_priority self.priority_update([indices[i]], [priorities[i]]) return (out, indices)<|docstring|>The method return samples randomly. Parameters ---------- beta : float Returns ------- out : list of samples weights: list of weight indices: list of sample indices The indices indicate sample positions in a sum tree.<|endoftext|>
5160890ef1ef07a8a88f1551ea3540bfd943accd26d29b56faace5f24aacc022
def priority_update(self, indices, priorities): " The methods update samples's priority.\n\n Parameters\n ----------\n indices :\n list of sample indices\n " for (i, p) in zip(indices, priorities): self.tree.val_update(i, (p ** self.alpha))
The methods update samples's priority. Parameters ---------- indices : list of sample indices
teachDRL/teachers/algos/LPReplayBuffer.py
priority_update
flowersteam/meta-acl
2
python
def priority_update(self, indices, priorities): " The methods update samples's priority.\n\n Parameters\n ----------\n indices :\n list of sample indices\n " for (i, p) in zip(indices, priorities): self.tree.val_update(i, (p ** self.alpha))
def priority_update(self, indices, priorities): " The methods update samples's priority.\n\n Parameters\n ----------\n indices :\n list of sample indices\n " for (i, p) in zip(indices, priorities): self.tree.val_update(i, (p ** self.alpha))<|docstring|>The methods update samples's priority. Parameters ---------- indices : list of sample indices<|endoftext|>
07d7fd20eedc087c8e48a85d4b9cbf32b18b781677cc71547c2637634e6f3c01
def reset_alpha(self, alpha): ' Reset a exponent alpha.\n Parameters\n ----------\n alpha : float\n ' (self.alpha, old_alpha) = (alpha, self.alpha) priorities = [(self.tree.get_val(i) ** (- old_alpha)) for i in range(self.tree.filled_size())] self.priority_update(range(self.tree.filled_size()), priorities)
Reset a exponent alpha. Parameters ---------- alpha : float
teachDRL/teachers/algos/LPReplayBuffer.py
reset_alpha
flowersteam/meta-acl
2
python
def reset_alpha(self, alpha): ' Reset a exponent alpha.\n Parameters\n ----------\n alpha : float\n ' (self.alpha, old_alpha) = (alpha, self.alpha) priorities = [(self.tree.get_val(i) ** (- old_alpha)) for i in range(self.tree.filled_size())] self.priority_update(range(self.tree.filled_size()), priorities)
def reset_alpha(self, alpha): ' Reset a exponent alpha.\n Parameters\n ----------\n alpha : float\n ' (self.alpha, old_alpha) = (alpha, self.alpha) priorities = [(self.tree.get_val(i) ** (- old_alpha)) for i in range(self.tree.filled_size())] self.priority_update(range(self.tree.filled_size()), priorities)<|docstring|>Reset a exponent alpha. Parameters ---------- alpha : float<|endoftext|>
da9fcb3b0c8030780ba01b8d7b7ca5e3dba2930f51c6171fc291d646ac5e3332
def create_model(name: str, version: Optional[int]=None, metrics: Optional[dict]=None, description: Optional[str]=None, input_example: Optional[Union[(pandas.DataFrame, pandas.Series, numpy.ndarray, list)]]=None, model_schema: Optional[ModelSchema]=None): 'Create an SkLearn model metadata object.\n\n !!! note "Lazy"\n This method is lazy and does not persist any metadata or uploads model artifacts in the\n model registry on its own. To save the model object and the model artifacts, call the `save()` method with a\n local file path to the directory containing the model artifacts.\n\n # Arguments\n name: Name of the model to create.\n version: Optionally version of the model to create, defaults to `None` and\n will create the model with incremented version from the last\n version in the model registry.\n description: Optionally a string describing the model, defaults to empty string\n `""`.\n input_example: Optionally an input example that represents inputs for the model, defaults to `None`.\n model_schema: Optionally a model schema for the model inputs and/or outputs.\n\n # Returns\n `Model`. The model metadata object.\n ' model = Model(id=None, name=name, version=version, description=description, metrics=metrics, input_example=input_example, model_schema=model_schema) model._shared_registry_project_name = _mr.shared_registry_project_name model._model_registry_id = _mr.model_registry_id return model
Create an SkLearn model metadata object. !!! note "Lazy" This method is lazy and does not persist any metadata or uploads model artifacts in the model registry on its own. To save the model object and the model artifacts, call the `save()` method with a local file path to the directory containing the model artifacts. # Arguments name: Name of the model to create. version: Optionally version of the model to create, defaults to `None` and will create the model with incremented version from the last version in the model registry. description: Optionally a string describing the model, defaults to empty string `""`. input_example: Optionally an input example that represents inputs for the model, defaults to `None`. model_schema: Optionally a model schema for the model inputs and/or outputs. # Returns `Model`. The model metadata object.
python/hsml/sklearn/signature.py
create_model
javierdlrm/machine-learning-api
8
python
def create_model(name: str, version: Optional[int]=None, metrics: Optional[dict]=None, description: Optional[str]=None, input_example: Optional[Union[(pandas.DataFrame, pandas.Series, numpy.ndarray, list)]]=None, model_schema: Optional[ModelSchema]=None): 'Create an SkLearn model metadata object.\n\n !!! note "Lazy"\n This method is lazy and does not persist any metadata or uploads model artifacts in the\n model registry on its own. To save the model object and the model artifacts, call the `save()` method with a\n local file path to the directory containing the model artifacts.\n\n # Arguments\n name: Name of the model to create.\n version: Optionally version of the model to create, defaults to `None` and\n will create the model with incremented version from the last\n version in the model registry.\n description: Optionally a string describing the model, defaults to empty string\n ``.\n input_example: Optionally an input example that represents inputs for the model, defaults to `None`.\n model_schema: Optionally a model schema for the model inputs and/or outputs.\n\n # Returns\n `Model`. The model metadata object.\n ' model = Model(id=None, name=name, version=version, description=description, metrics=metrics, input_example=input_example, model_schema=model_schema) model._shared_registry_project_name = _mr.shared_registry_project_name model._model_registry_id = _mr.model_registry_id return model
def create_model(name: str, version: Optional[int]=None, metrics: Optional[dict]=None, description: Optional[str]=None, input_example: Optional[Union[(pandas.DataFrame, pandas.Series, numpy.ndarray, list)]]=None, model_schema: Optional[ModelSchema]=None): 'Create an SkLearn model metadata object.\n\n !!! note "Lazy"\n This method is lazy and does not persist any metadata or uploads model artifacts in the\n model registry on its own. To save the model object and the model artifacts, call the `save()` method with a\n local file path to the directory containing the model artifacts.\n\n # Arguments\n name: Name of the model to create.\n version: Optionally version of the model to create, defaults to `None` and\n will create the model with incremented version from the last\n version in the model registry.\n description: Optionally a string describing the model, defaults to empty string\n ``.\n input_example: Optionally an input example that represents inputs for the model, defaults to `None`.\n model_schema: Optionally a model schema for the model inputs and/or outputs.\n\n # Returns\n `Model`. The model metadata object.\n ' model = Model(id=None, name=name, version=version, description=description, metrics=metrics, input_example=input_example, model_schema=model_schema) model._shared_registry_project_name = _mr.shared_registry_project_name model._model_registry_id = _mr.model_registry_id return model<|docstring|>Create an SkLearn model metadata object. !!! note "Lazy" This method is lazy and does not persist any metadata or uploads model artifacts in the model registry on its own. To save the model object and the model artifacts, call the `save()` method with a local file path to the directory containing the model artifacts. # Arguments name: Name of the model to create. version: Optionally version of the model to create, defaults to `None` and will create the model with incremented version from the last version in the model registry. 
description: Optionally a string describing the model, defaults to empty string `""`. input_example: Optionally an input example that represents inputs for the model, defaults to `None`. model_schema: Optionally a model schema for the model inputs and/or outputs. # Returns `Model`. The model metadata object.<|endoftext|>
2811c5c87d3f5c36782f1ffb572b8821ac9f4a5b0769c8540fca8bab4e02e826
def data_labels(data_path, label): '\n Filters files with the appropriate label from the data csv file\n :param data_path:\n :param label: data label default values is gender: possible values includes [gender, age, country]\n :return:\n ' label_data = pd.read_csv(os.path.join(data_path, 'data.csv')) label_data = label_data[label_data[label].notna()] label_data = label_data[(label_data[label] != 'other')] return label_data
Filters files with the appropriate label from the data csv file :param data_path: :param label: data label default values is gender: possible values includes [gender, age, country] :return:
audio_model/preprocessing/mp3_parser.py
data_labels
dachosen1/Common-Voice
11
python
def data_labels(data_path, label): '\n Filters files with the appropriate label from the data csv file\n :param data_path:\n :param label: data label default values is gender: possible values includes [gender, age, country]\n :return:\n ' label_data = pd.read_csv(os.path.join(data_path, 'data.csv')) label_data = label_data[label_data[label].notna()] label_data = label_data[(label_data[label] != 'other')] return label_data
def data_labels(data_path, label): '\n Filters files with the appropriate label from the data csv file\n :param data_path:\n :param label: data label default values is gender: possible values includes [gender, age, country]\n :return:\n ' label_data = pd.read_csv(os.path.join(data_path, 'data.csv')) label_data = label_data[label_data[label].notna()] label_data = label_data[(label_data[label] != 'other')] return label_data<|docstring|>Filters files with the appropriate label from the data csv file :param data_path: :param label: data label default values is gender: possible values includes [gender, age, country] :return:<|endoftext|>
1281fd6756e9e42d275e0533104e2212539653cd28ab53414ac0b563fd4114fa
@command_component(environment='../conda.yaml') def basic_module(port1: str, param1: int): ' module run logic goes here ' return port1
module run logic goes here
sdk/ml/azure-ai-ml/tests/test_configs/dsl_component/dsl_component_with_env/inner_folder/basic_component.py
basic_module
jalauzon-msft/azure-sdk-for-python
1
python
@command_component(environment='../conda.yaml') def basic_module(port1: str, param1: int): ' ' return port1
@command_component(environment='../conda.yaml') def basic_module(port1: str, param1: int): ' ' return port1<|docstring|>module run logic goes here<|endoftext|>
8002010d963d7b9fb7539777c5c80baaaec7d2e24e51ac812af37be62a5d3709
def test_longArrays(): '\n Test config saving and loading of long arrays.\n ' tmp = tempfile.mktemp('.cfg') arr = np.arange(20) configfile.writeConfigFile({'arr': arr}, tmp) config = configfile.readConfigFile(tmp) assert all((config['arr'] == arr)) os.remove(tmp)
Test config saving and loading of long arrays.
pyqtgraph/tests/test_configparser.py
test_longArrays
3mrrrx/slim2voice
150
python
def test_longArrays(): '\n \n ' tmp = tempfile.mktemp('.cfg') arr = np.arange(20) configfile.writeConfigFile({'arr': arr}, tmp) config = configfile.readConfigFile(tmp) assert all((config['arr'] == arr)) os.remove(tmp)
def test_longArrays(): '\n \n ' tmp = tempfile.mktemp('.cfg') arr = np.arange(20) configfile.writeConfigFile({'arr': arr}, tmp) config = configfile.readConfigFile(tmp) assert all((config['arr'] == arr)) os.remove(tmp)<|docstring|>Test config saving and loading of long arrays.<|endoftext|>
23a2886326c86750efd2cf0cb45e9ed5ec4ff978f537f3c41be4a8845d24825f
def test_multipleParameters(): '\n Test config saving and loading of multiple parameters.\n ' tmp = tempfile.mktemp('.cfg') par1 = [1, 2, 3] par2 = 'Test' par3 = {'a': 3, 'b': 'c'} configfile.writeConfigFile({'par1': par1, 'par2': par2, 'par3': par3}, tmp) config = configfile.readConfigFile(tmp) assert (config['par1'] == par1) assert (config['par2'] == par2) assert (config['par3'] == par3) os.remove(tmp)
Test config saving and loading of multiple parameters.
pyqtgraph/tests/test_configparser.py
test_multipleParameters
3mrrrx/slim2voice
150
python
def test_multipleParameters(): '\n \n ' tmp = tempfile.mktemp('.cfg') par1 = [1, 2, 3] par2 = 'Test' par3 = {'a': 3, 'b': 'c'} configfile.writeConfigFile({'par1': par1, 'par2': par2, 'par3': par3}, tmp) config = configfile.readConfigFile(tmp) assert (config['par1'] == par1) assert (config['par2'] == par2) assert (config['par3'] == par3) os.remove(tmp)
def test_multipleParameters(): '\n \n ' tmp = tempfile.mktemp('.cfg') par1 = [1, 2, 3] par2 = 'Test' par3 = {'a': 3, 'b': 'c'} configfile.writeConfigFile({'par1': par1, 'par2': par2, 'par3': par3}, tmp) config = configfile.readConfigFile(tmp) assert (config['par1'] == par1) assert (config['par2'] == par2) assert (config['par3'] == par3) os.remove(tmp)<|docstring|>Test config saving and loading of multiple parameters.<|endoftext|>
bbe7adf5acbf5b8af5048a035c4b155074392d9a57f290e22501cf2cf436fa26
@classmethod def get_elements(cls): '\n Store singleton instance on IO to speed up retrieval after first call.\n ' if (not hasattr(cls, '_cache')): cls._cache = cls() return cls._cache
Store singleton instance on IO to speed up retrieval after first call.
climata/snotel/__init__.py
get_elements
gauchm/climata
67
python
@classmethod def get_elements(cls): '\n \n ' if (not hasattr(cls, '_cache')): cls._cache = cls() return cls._cache
@classmethod def get_elements(cls): '\n \n ' if (not hasattr(cls, '_cache')): cls._cache = cls() return cls._cache<|docstring|>Store singleton instance on IO to speed up retrieval after first call.<|endoftext|>
487209f3d9fe5affdad487629ae4244e4915d972ddd0bd9d451f4b044c904b8f
@staticmethod def inject_into(target_object: Any, target_function_name: str) -> Callable: 'inject_into(target_object, target_function_name)\n\n .. warning:: This function is DEPRECATED. Use :func:`~inject_safely_into` instead.\n\n ' return CommonInjectionUtils.inject_safely_into(None, target_object, target_function_name)
inject_into(target_object, target_function_name) .. warning:: This function is DEPRECATED. Use :func:`~inject_safely_into` instead.
src/sims4communitylib/utils/common_injection_utils.py
inject_into
velocist/TS4CheatsInfo
0
python
@staticmethod def inject_into(target_object: Any, target_function_name: str) -> Callable: 'inject_into(target_object, target_function_name)\n\n .. warning:: This function is DEPRECATED. Use :func:`~inject_safely_into` instead.\n\n ' return CommonInjectionUtils.inject_safely_into(None, target_object, target_function_name)
@staticmethod def inject_into(target_object: Any, target_function_name: str) -> Callable: 'inject_into(target_object, target_function_name)\n\n .. warning:: This function is DEPRECATED. Use :func:`~inject_safely_into` instead.\n\n ' return CommonInjectionUtils.inject_safely_into(None, target_object, target_function_name)<|docstring|>inject_into(target_object, target_function_name) .. warning:: This function is DEPRECATED. Use :func:`~inject_safely_into` instead.<|endoftext|>
3c580eeca31f5b5de7987c751b43fffa1fc8c3d208727ed20c6e718fcf690cb3
@staticmethod def inject_safely_into(mod_identity: CommonModIdentity, target_object: Any, target_function_name: str, handle_exceptions: bool=True) -> Callable: "inject_safely_into(mod_identity, target_object, target_function_name, handle_exceptions=True)\n\n A decorator used to inject code into a function.\n It will run the original function should any problems occur.\n If handle_exceptions is True, it will catch and log exceptions.\n\n :Example of cls usage:\n\n .. highlight:: python\n .. code-block:: python\n\n # cls usage\n @CommonInjectionUtils.inject_safely_into(SimSpawner, SimSpawner.spawn_sim._name__)\n def do_custom_spawn_sim(original, cls, *args, **kwargs):\n return original(*args, **kwargs)\n\n :Example of self usage:\n\n .. highlight:: python\n .. code-block:: python\n\n # Self usage\n @CommonInjectionUtils.inject_safely_into(SimInfo, SimInfo.load_sim_info.__name__)\n def do_custom_load_sim_info(original, self, *args, **kwargs):\n return original(self, *args, **kwargs)\n\n .. note::\n\n Injection WILL work on\n\n - Functions decorated with 'property'\n - Functions decorated with 'classmethod'\n - Functions decorated with 'staticmethod'\n - Functions with 'cls' or 'self' as the first argument.\n\n .. note::\n\n Injection WILL NOT work on\n\n - Global functions, i.e. Functions not contained within a class.\n\n :param mod_identity: The identity of the Mod that is injecting custom code.\n :type mod_identity: CommonModIdentity\n :param target_object: The class that contains the target function.\n :type target_object: Any\n :param target_function_name: The name of the function being injected to.\n :type target_function_name: str\n :param handle_exceptions: If set to True, any exceptions thrown by the wrapped function will be handled. If set to False, any exceptions thrown by the wrapped function will not be caught. 
Default is True.\n :type handle_exceptions: bool, optional\n :return: A wrapped function.\n :rtype: Callable\n " if handle_exceptions: def _function_wrapper(original_function, new_function: Callable[(..., Any)]) -> Any: try: @wraps(original_function) def _wrapped_function(*args, **kwargs) -> Any: try: if (type(original_function) is property): return new_function(original_function.fget, *args, **kwargs) return new_function(original_function, *args, **kwargs) except Exception as ex: try: from sims4communitylib.exceptions.common_exceptions_handler import CommonExceptionHandler CommonExceptionHandler.log_exception(mod_identity, "Error occurred while injecting into function '{}' of class '{}'".format(new_function.__name__, target_object.__name__), exception=ex) except Exception: pass return original_function(*args, **kwargs) if inspect.ismethod(original_function): return classmethod(_wrapped_function) if (type(original_function) is property): return property(_wrapped_function) return _wrapped_function except: def _func(*_, **__) -> Any: pass return _func else: def _function_wrapper(original_function, new_function: Callable[(..., Any)]) -> Any: @wraps(original_function) def _wrapped_function(*args, **kwargs) -> Any: if (type(original_function) is property): return new_function(original_function.fget, *args, **kwargs) return new_function(original_function, *args, **kwargs) if inspect.ismethod(original_function): return classmethod(_wrapped_function) elif (type(original_function) is property): return property(_wrapped_function) return _wrapped_function def _injected(wrap_function) -> Any: original_function = getattr(target_object, str(target_function_name)) setattr(target_object, str(target_function_name), _function_wrapper(original_function, wrap_function)) return wrap_function return _injected
inject_safely_into(mod_identity, target_object, target_function_name, handle_exceptions=True) A decorator used to inject code into a function. It will run the original function should any problems occur. If handle_exceptions is True, it will catch and log exceptions. :Example of cls usage: .. highlight:: python .. code-block:: python # cls usage @CommonInjectionUtils.inject_safely_into(SimSpawner, SimSpawner.spawn_sim._name__) def do_custom_spawn_sim(original, cls, *args, **kwargs): return original(*args, **kwargs) :Example of self usage: .. highlight:: python .. code-block:: python # Self usage @CommonInjectionUtils.inject_safely_into(SimInfo, SimInfo.load_sim_info.__name__) def do_custom_load_sim_info(original, self, *args, **kwargs): return original(self, *args, **kwargs) .. note:: Injection WILL work on - Functions decorated with 'property' - Functions decorated with 'classmethod' - Functions decorated with 'staticmethod' - Functions with 'cls' or 'self' as the first argument. .. note:: Injection WILL NOT work on - Global functions, i.e. Functions not contained within a class. :param mod_identity: The identity of the Mod that is injecting custom code. :type mod_identity: CommonModIdentity :param target_object: The class that contains the target function. :type target_object: Any :param target_function_name: The name of the function being injected to. :type target_function_name: str :param handle_exceptions: If set to True, any exceptions thrown by the wrapped function will be handled. If set to False, any exceptions thrown by the wrapped function will not be caught. Default is True. :type handle_exceptions: bool, optional :return: A wrapped function. :rtype: Callable
src/sims4communitylib/utils/common_injection_utils.py
inject_safely_into
velocist/TS4CheatsInfo
0
python
@staticmethod def inject_safely_into(mod_identity: CommonModIdentity, target_object: Any, target_function_name: str, handle_exceptions: bool=True) -> Callable: "inject_safely_into(mod_identity, target_object, target_function_name, handle_exceptions=True)\n\n A decorator used to inject code into a function.\n It will run the original function should any problems occur.\n If handle_exceptions is True, it will catch and log exceptions.\n\n :Example of cls usage:\n\n .. highlight:: python\n .. code-block:: python\n\n # cls usage\n @CommonInjectionUtils.inject_safely_into(SimSpawner, SimSpawner.spawn_sim._name__)\n def do_custom_spawn_sim(original, cls, *args, **kwargs):\n return original(*args, **kwargs)\n\n :Example of self usage:\n\n .. highlight:: python\n .. code-block:: python\n\n # Self usage\n @CommonInjectionUtils.inject_safely_into(SimInfo, SimInfo.load_sim_info.__name__)\n def do_custom_load_sim_info(original, self, *args, **kwargs):\n return original(self, *args, **kwargs)\n\n .. note::\n\n Injection WILL work on\n\n - Functions decorated with 'property'\n - Functions decorated with 'classmethod'\n - Functions decorated with 'staticmethod'\n - Functions with 'cls' or 'self' as the first argument.\n\n .. note::\n\n Injection WILL NOT work on\n\n - Global functions, i.e. Functions not contained within a class.\n\n :param mod_identity: The identity of the Mod that is injecting custom code.\n :type mod_identity: CommonModIdentity\n :param target_object: The class that contains the target function.\n :type target_object: Any\n :param target_function_name: The name of the function being injected to.\n :type target_function_name: str\n :param handle_exceptions: If set to True, any exceptions thrown by the wrapped function will be handled. If set to False, any exceptions thrown by the wrapped function will not be caught. 
Default is True.\n :type handle_exceptions: bool, optional\n :return: A wrapped function.\n :rtype: Callable\n " if handle_exceptions: def _function_wrapper(original_function, new_function: Callable[(..., Any)]) -> Any: try: @wraps(original_function) def _wrapped_function(*args, **kwargs) -> Any: try: if (type(original_function) is property): return new_function(original_function.fget, *args, **kwargs) return new_function(original_function, *args, **kwargs) except Exception as ex: try: from sims4communitylib.exceptions.common_exceptions_handler import CommonExceptionHandler CommonExceptionHandler.log_exception(mod_identity, "Error occurred while injecting into function '{}' of class '{}'".format(new_function.__name__, target_object.__name__), exception=ex) except Exception: pass return original_function(*args, **kwargs) if inspect.ismethod(original_function): return classmethod(_wrapped_function) if (type(original_function) is property): return property(_wrapped_function) return _wrapped_function except: def _func(*_, **__) -> Any: pass return _func else: def _function_wrapper(original_function, new_function: Callable[(..., Any)]) -> Any: @wraps(original_function) def _wrapped_function(*args, **kwargs) -> Any: if (type(original_function) is property): return new_function(original_function.fget, *args, **kwargs) return new_function(original_function, *args, **kwargs) if inspect.ismethod(original_function): return classmethod(_wrapped_function) elif (type(original_function) is property): return property(_wrapped_function) return _wrapped_function def _injected(wrap_function) -> Any: original_function = getattr(target_object, str(target_function_name)) setattr(target_object, str(target_function_name), _function_wrapper(original_function, wrap_function)) return wrap_function return _injected
@staticmethod def inject_safely_into(mod_identity: CommonModIdentity, target_object: Any, target_function_name: str, handle_exceptions: bool=True) -> Callable: "inject_safely_into(mod_identity, target_object, target_function_name, handle_exceptions=True)\n\n A decorator used to inject code into a function.\n It will run the original function should any problems occur.\n If handle_exceptions is True, it will catch and log exceptions.\n\n :Example of cls usage:\n\n .. highlight:: python\n .. code-block:: python\n\n # cls usage\n @CommonInjectionUtils.inject_safely_into(SimSpawner, SimSpawner.spawn_sim._name__)\n def do_custom_spawn_sim(original, cls, *args, **kwargs):\n return original(*args, **kwargs)\n\n :Example of self usage:\n\n .. highlight:: python\n .. code-block:: python\n\n # Self usage\n @CommonInjectionUtils.inject_safely_into(SimInfo, SimInfo.load_sim_info.__name__)\n def do_custom_load_sim_info(original, self, *args, **kwargs):\n return original(self, *args, **kwargs)\n\n .. note::\n\n Injection WILL work on\n\n - Functions decorated with 'property'\n - Functions decorated with 'classmethod'\n - Functions decorated with 'staticmethod'\n - Functions with 'cls' or 'self' as the first argument.\n\n .. note::\n\n Injection WILL NOT work on\n\n - Global functions, i.e. Functions not contained within a class.\n\n :param mod_identity: The identity of the Mod that is injecting custom code.\n :type mod_identity: CommonModIdentity\n :param target_object: The class that contains the target function.\n :type target_object: Any\n :param target_function_name: The name of the function being injected to.\n :type target_function_name: str\n :param handle_exceptions: If set to True, any exceptions thrown by the wrapped function will be handled. If set to False, any exceptions thrown by the wrapped function will not be caught. 
Default is True.\n :type handle_exceptions: bool, optional\n :return: A wrapped function.\n :rtype: Callable\n " if handle_exceptions: def _function_wrapper(original_function, new_function: Callable[(..., Any)]) -> Any: try: @wraps(original_function) def _wrapped_function(*args, **kwargs) -> Any: try: if (type(original_function) is property): return new_function(original_function.fget, *args, **kwargs) return new_function(original_function, *args, **kwargs) except Exception as ex: try: from sims4communitylib.exceptions.common_exceptions_handler import CommonExceptionHandler CommonExceptionHandler.log_exception(mod_identity, "Error occurred while injecting into function '{}' of class '{}'".format(new_function.__name__, target_object.__name__), exception=ex) except Exception: pass return original_function(*args, **kwargs) if inspect.ismethod(original_function): return classmethod(_wrapped_function) if (type(original_function) is property): return property(_wrapped_function) return _wrapped_function except: def _func(*_, **__) -> Any: pass return _func else: def _function_wrapper(original_function, new_function: Callable[(..., Any)]) -> Any: @wraps(original_function) def _wrapped_function(*args, **kwargs) -> Any: if (type(original_function) is property): return new_function(original_function.fget, *args, **kwargs) return new_function(original_function, *args, **kwargs) if inspect.ismethod(original_function): return classmethod(_wrapped_function) elif (type(original_function) is property): return property(_wrapped_function) return _wrapped_function def _injected(wrap_function) -> Any: original_function = getattr(target_object, str(target_function_name)) setattr(target_object, str(target_function_name), _function_wrapper(original_function, wrap_function)) return wrap_function return _injected<|docstring|>inject_safely_into(mod_identity, target_object, target_function_name, handle_exceptions=True) A decorator used to inject code into a function. 
It will run the original function should any problems occur. If handle_exceptions is True, it will catch and log exceptions. :Example of cls usage: .. highlight:: python .. code-block:: python # cls usage @CommonInjectionUtils.inject_safely_into(SimSpawner, SimSpawner.spawn_sim._name__) def do_custom_spawn_sim(original, cls, *args, **kwargs): return original(*args, **kwargs) :Example of self usage: .. highlight:: python .. code-block:: python # Self usage @CommonInjectionUtils.inject_safely_into(SimInfo, SimInfo.load_sim_info.__name__) def do_custom_load_sim_info(original, self, *args, **kwargs): return original(self, *args, **kwargs) .. note:: Injection WILL work on - Functions decorated with 'property' - Functions decorated with 'classmethod' - Functions decorated with 'staticmethod' - Functions with 'cls' or 'self' as the first argument. .. note:: Injection WILL NOT work on - Global functions, i.e. Functions not contained within a class. :param mod_identity: The identity of the Mod that is injecting custom code. :type mod_identity: CommonModIdentity :param target_object: The class that contains the target function. :type target_object: Any :param target_function_name: The name of the function being injected to. :type target_function_name: str :param handle_exceptions: If set to True, any exceptions thrown by the wrapped function will be handled. If set to False, any exceptions thrown by the wrapped function will not be caught. Default is True. :type handle_exceptions: bool, optional :return: A wrapped function. :rtype: Callable<|endoftext|>
4cb499aeeafcfcbd3fa20dab9ccf21d47351a687eefdbe068782d4efbb0309bc
def get_modified_date(parsed, raw): 'Return best possible guess to post modification timestamp.' if parsed: return feedparser_ts(parsed) if (not raw): return None (ts, val) = (None, raw.replace('_', ' ')) if (not ts): from subprocess import Popen, PIPE with open(os.devnull, 'w') as devnull: proc = Popen(['date', '+%s', '-d', val], stdout=PIPE, stderr=devnull) val = proc.stdout.read() if (not proc.wait()): ts = datetime.fromtimestamp(int(val.strip()), tz=timezone.utc) if ts: return ts raise ValueError('Unrecognized raw value format: {0!r}'.format(val))
Return best possible guess to post modification timestamp.
feedjack/fjupdate.py
get_modified_date
allo-/feedjack
2
python
def get_modified_date(parsed, raw): if parsed: return feedparser_ts(parsed) if (not raw): return None (ts, val) = (None, raw.replace('_', ' ')) if (not ts): from subprocess import Popen, PIPE with open(os.devnull, 'w') as devnull: proc = Popen(['date', '+%s', '-d', val], stdout=PIPE, stderr=devnull) val = proc.stdout.read() if (not proc.wait()): ts = datetime.fromtimestamp(int(val.strip()), tz=timezone.utc) if ts: return ts raise ValueError('Unrecognized raw value format: {0!r}'.format(val))
def get_modified_date(parsed, raw): if parsed: return feedparser_ts(parsed) if (not raw): return None (ts, val) = (None, raw.replace('_', ' ')) if (not ts): from subprocess import Popen, PIPE with open(os.devnull, 'w') as devnull: proc = Popen(['date', '+%s', '-d', val], stdout=PIPE, stderr=devnull) val = proc.stdout.read() if (not proc.wait()): ts = datetime.fromtimestamp(int(val.strip()), tz=timezone.utc) if ts: return ts raise ValueError('Unrecognized raw value format: {0!r}'.format(val))<|docstring|>Return best possible guess to post modification timestamp.<|endoftext|>
df94e7b574c7fc35fde9e99af4b24ec6ba276339d85b2624827a4da34bb81485
def process_entry(self, entry): 'Construct a Post from a feedparser entry and save/update it in db' from feedjack.models import Post, Tag post = Post(feed=self.feed) post.link = entry.get('link', self.feed.link) post.title = entry.get('title', post.link) post.guid = self._get_guid(entry) if ('author_detail' in entry): post.author = entry.author_detail.get('name', '') post.author_email = entry.author_detail.get('email', '') if (not post.author): post.author = entry.get('author', entry.get('creator', '')) if (not post.author_email): post.author_email = 'example@example.com' try: post.content = entry.content[0].value except: post.content = entry.get('summary', entry.get('description', '')) ts_parsed = ts_raw = None for k in self.post_timestamp_keys: try: post.date_modified = get_modified_date(entry.get('{0}_parsed'.format(k)), entry.get(k)) except ValueError as err: log.warn('Failed to process post timestamp: {0} (feed_id: {1}, post_guid: {2})'.format(err, self.feed.id, post.guid)) if post.date_modified: break post.comments = entry.get('comments', '') enclosures = entry.get('enclosures', list()) if ('media_content' in entry): for mc in entry.media_content: if ('url' in mc): e = dict(href=mc['url'], medium=mc.get('medium', 'image')) else: e = entry.media_content e['type'] = 'application/x-media-content' enclosures.append(e) assert enclosures, enclosures post.enclosures = enclosures fcat = list() if entry.has_key('tags'): for tcat in entry.tags: qcat = (tcat.label if (tcat.label is not None) else tcat.term) if (not qcat): continue qcat = qcat.strip() if ((',' in qcat) or ('/' in qcat)): qcat = qcat.replace(',', '/').split('/') else: qcat = [qcat] for zcat in qcat: tagname = ' '.join(zcat.lower().split()).strip()[:255] if (not tagname): continue if (not Tag.objects.filter(name=tagname)): cobj = Tag(name=tagname) cobj.save() fcat.append(Tag.objects.get(name=tagname)) post_base_fields = 'title link guid author author_email'.split() log.debug('[{0}] 
Entry\n{1}'.format(self.feed.id, '\n'.join(([' {0}: {1}'.format(key, getattr(post, key)) for key in post_base_fields] + ['tags: {0}'.format(' '.join(it.imap(op.attrgetter('name'), fcat)))])))) if (post.guid in self.postdict): post_old = self.postdict[post.guid] changed = ((post_old.content != post.content) or (post.date_modified and (post_old.date_modified != post.date_modified))) if ((not self.feed.immutable) and changed): retval = ENTRY_UPDATED log.extra('[{0}] Updating existing post: {1}'.format(self.feed.id, post.link)) for field in (post_base_fields + ['content', 'comments']): setattr(post_old, field, getattr(post, field)) post_old.date_modified = (post.date_modified or post_old.date_modified) post_old.tags.clear() for tcat in fcat: post_old.tags.add(tcat) post_old.save() else: retval = ENTRY_SAME log.extra(('[{0}] Post has not changed: {1}' if (not changed) else '[{0}] Post changed, but feed is marked as immutable: {1}').format(self.feed.id, post.link)) else: retval = ENTRY_NEW log.extra('[{0}] Saving new post: {1} (timestamp: {2})'.format(self.feed.id, post.guid, post.date_modified)) if ((not post.date_modified) and self.fpf): try: post.date_modified = get_modified_date((self.fpf.feed.get('modified_parsed') or self.fpf.get('modified_parsed')), (self.fpf.feed.get('modified') or self.fpf.get('modified'))) except ValueError as err: log.warn('Failed to process feed/http timestamp: {0} (feed_id: {1}, post_guid: {2}), falling back to "now"'.format(err, self.feed.id, post.guid)) if (not post.date_modified): post.date_modified = timezone.now() log.debug('[{0}] Using current time for post ({1}) timestamp'.format(self.feed.id, post.guid)) else: log.debug('[{0}] Using timestamp from feed/http for post ({1}): {2}'.format(self.feed.id, post.guid, post.date_modified)) if self.options.hidden: post.hidden = True try: post.save() except IntegrityError: log.error('IntegrityError while saving (supposedly) new post with guid: {0.guid}, link: {0.link}, title: 
{0.title}'.format(post)) raise for tcat in fcat: post.tags.add(tcat) self.postdict[post.guid] = post return retval
Construct a Post from a feedparser entry and save/update it in db
feedjack/fjupdate.py
process_entry
allo-/feedjack
2
python
def process_entry(self, entry): from feedjack.models import Post, Tag post = Post(feed=self.feed) post.link = entry.get('link', self.feed.link) post.title = entry.get('title', post.link) post.guid = self._get_guid(entry) if ('author_detail' in entry): post.author = entry.author_detail.get('name', ) post.author_email = entry.author_detail.get('email', ) if (not post.author): post.author = entry.get('author', entry.get('creator', )) if (not post.author_email): post.author_email = 'example@example.com' try: post.content = entry.content[0].value except: post.content = entry.get('summary', entry.get('description', )) ts_parsed = ts_raw = None for k in self.post_timestamp_keys: try: post.date_modified = get_modified_date(entry.get('{0}_parsed'.format(k)), entry.get(k)) except ValueError as err: log.warn('Failed to process post timestamp: {0} (feed_id: {1}, post_guid: {2})'.format(err, self.feed.id, post.guid)) if post.date_modified: break post.comments = entry.get('comments', ) enclosures = entry.get('enclosures', list()) if ('media_content' in entry): for mc in entry.media_content: if ('url' in mc): e = dict(href=mc['url'], medium=mc.get('medium', 'image')) else: e = entry.media_content e['type'] = 'application/x-media-content' enclosures.append(e) assert enclosures, enclosures post.enclosures = enclosures fcat = list() if entry.has_key('tags'): for tcat in entry.tags: qcat = (tcat.label if (tcat.label is not None) else tcat.term) if (not qcat): continue qcat = qcat.strip() if ((',' in qcat) or ('/' in qcat)): qcat = qcat.replace(',', '/').split('/') else: qcat = [qcat] for zcat in qcat: tagname = ' '.join(zcat.lower().split()).strip()[:255] if (not tagname): continue if (not Tag.objects.filter(name=tagname)): cobj = Tag(name=tagname) cobj.save() fcat.append(Tag.objects.get(name=tagname)) post_base_fields = 'title link guid author author_email'.split() log.debug('[{0}] Entry\n{1}'.format(self.feed.id, '\n'.join(([' {0}: {1}'.format(key, getattr(post, key)) for key in 
post_base_fields] + ['tags: {0}'.format(' '.join(it.imap(op.attrgetter('name'), fcat)))])))) if (post.guid in self.postdict): post_old = self.postdict[post.guid] changed = ((post_old.content != post.content) or (post.date_modified and (post_old.date_modified != post.date_modified))) if ((not self.feed.immutable) and changed): retval = ENTRY_UPDATED log.extra('[{0}] Updating existing post: {1}'.format(self.feed.id, post.link)) for field in (post_base_fields + ['content', 'comments']): setattr(post_old, field, getattr(post, field)) post_old.date_modified = (post.date_modified or post_old.date_modified) post_old.tags.clear() for tcat in fcat: post_old.tags.add(tcat) post_old.save() else: retval = ENTRY_SAME log.extra(('[{0}] Post has not changed: {1}' if (not changed) else '[{0}] Post changed, but feed is marked as immutable: {1}').format(self.feed.id, post.link)) else: retval = ENTRY_NEW log.extra('[{0}] Saving new post: {1} (timestamp: {2})'.format(self.feed.id, post.guid, post.date_modified)) if ((not post.date_modified) and self.fpf): try: post.date_modified = get_modified_date((self.fpf.feed.get('modified_parsed') or self.fpf.get('modified_parsed')), (self.fpf.feed.get('modified') or self.fpf.get('modified'))) except ValueError as err: log.warn('Failed to process feed/http timestamp: {0} (feed_id: {1}, post_guid: {2}), falling back to "now"'.format(err, self.feed.id, post.guid)) if (not post.date_modified): post.date_modified = timezone.now() log.debug('[{0}] Using current time for post ({1}) timestamp'.format(self.feed.id, post.guid)) else: log.debug('[{0}] Using timestamp from feed/http for post ({1}): {2}'.format(self.feed.id, post.guid, post.date_modified)) if self.options.hidden: post.hidden = True try: post.save() except IntegrityError: log.error('IntegrityError while saving (supposedly) new post with guid: {0.guid}, link: {0.link}, title: {0.title}'.format(post)) raise for tcat in fcat: post.tags.add(tcat) self.postdict[post.guid] = post return retval
def process_entry(self, entry): from feedjack.models import Post, Tag post = Post(feed=self.feed) post.link = entry.get('link', self.feed.link) post.title = entry.get('title', post.link) post.guid = self._get_guid(entry) if ('author_detail' in entry): post.author = entry.author_detail.get('name', ) post.author_email = entry.author_detail.get('email', ) if (not post.author): post.author = entry.get('author', entry.get('creator', )) if (not post.author_email): post.author_email = 'example@example.com' try: post.content = entry.content[0].value except: post.content = entry.get('summary', entry.get('description', )) ts_parsed = ts_raw = None for k in self.post_timestamp_keys: try: post.date_modified = get_modified_date(entry.get('{0}_parsed'.format(k)), entry.get(k)) except ValueError as err: log.warn('Failed to process post timestamp: {0} (feed_id: {1}, post_guid: {2})'.format(err, self.feed.id, post.guid)) if post.date_modified: break post.comments = entry.get('comments', ) enclosures = entry.get('enclosures', list()) if ('media_content' in entry): for mc in entry.media_content: if ('url' in mc): e = dict(href=mc['url'], medium=mc.get('medium', 'image')) else: e = entry.media_content e['type'] = 'application/x-media-content' enclosures.append(e) assert enclosures, enclosures post.enclosures = enclosures fcat = list() if entry.has_key('tags'): for tcat in entry.tags: qcat = (tcat.label if (tcat.label is not None) else tcat.term) if (not qcat): continue qcat = qcat.strip() if ((',' in qcat) or ('/' in qcat)): qcat = qcat.replace(',', '/').split('/') else: qcat = [qcat] for zcat in qcat: tagname = ' '.join(zcat.lower().split()).strip()[:255] if (not tagname): continue if (not Tag.objects.filter(name=tagname)): cobj = Tag(name=tagname) cobj.save() fcat.append(Tag.objects.get(name=tagname)) post_base_fields = 'title link guid author author_email'.split() log.debug('[{0}] Entry\n{1}'.format(self.feed.id, '\n'.join(([' {0}: {1}'.format(key, getattr(post, key)) for key in 
post_base_fields] + ['tags: {0}'.format(' '.join(it.imap(op.attrgetter('name'), fcat)))])))) if (post.guid in self.postdict): post_old = self.postdict[post.guid] changed = ((post_old.content != post.content) or (post.date_modified and (post_old.date_modified != post.date_modified))) if ((not self.feed.immutable) and changed): retval = ENTRY_UPDATED log.extra('[{0}] Updating existing post: {1}'.format(self.feed.id, post.link)) for field in (post_base_fields + ['content', 'comments']): setattr(post_old, field, getattr(post, field)) post_old.date_modified = (post.date_modified or post_old.date_modified) post_old.tags.clear() for tcat in fcat: post_old.tags.add(tcat) post_old.save() else: retval = ENTRY_SAME log.extra(('[{0}] Post has not changed: {1}' if (not changed) else '[{0}] Post changed, but feed is marked as immutable: {1}').format(self.feed.id, post.link)) else: retval = ENTRY_NEW log.extra('[{0}] Saving new post: {1} (timestamp: {2})'.format(self.feed.id, post.guid, post.date_modified)) if ((not post.date_modified) and self.fpf): try: post.date_modified = get_modified_date((self.fpf.feed.get('modified_parsed') or self.fpf.get('modified_parsed')), (self.fpf.feed.get('modified') or self.fpf.get('modified'))) except ValueError as err: log.warn('Failed to process feed/http timestamp: {0} (feed_id: {1}, post_guid: {2}), falling back to "now"'.format(err, self.feed.id, post.guid)) if (not post.date_modified): post.date_modified = timezone.now() log.debug('[{0}] Using current time for post ({1}) timestamp'.format(self.feed.id, post.guid)) else: log.debug('[{0}] Using timestamp from feed/http for post ({1}): {2}'.format(self.feed.id, post.guid, post.date_modified)) if self.options.hidden: post.hidden = True try: post.save() except IntegrityError: log.error('IntegrityError while saving (supposedly) new post with guid: {0.guid}, link: {0.link}, title: {0.title}'.format(post)) raise for tcat in fcat: post.tags.add(tcat) self.postdict[post.guid] = post return 
retval<|docstring|>Construct a Post from a feedparser entry and save/update it in db<|endoftext|>
460ee6eef6393ed6cce787943889149ae0e7e1909f52871592e2713afb89a132
def _process(self): 'Downloads and parses a feed.' ret_values = {ENTRY_NEW: 0, ENTRY_UPDATED: 0, ENTRY_SAME: 0, ENTRY_ERR: 0} report_errors = ((not self.options.report_after) or (not self.feed.last_checked) or ((self.feed.last_checked + self.options.report_after) < timezone.now())) feedparser_kws = dict() if ((sys.hexversion >= 34015488) and (not self.feed.verify_tls_certs)): import urllib2, ssl ctx = ssl.create_default_context() (ctx.check_hostname, ctx.verify_mode) = (False, ssl.CERT_NONE) feedparser_kws['handlers'] = [urllib2.HTTPSHandler(context=ctx)] try: self.fpf = feedparser.parse(self.feed.feed_url, agent=USER_AGENT, etag=(self.feed.etag if (not self.options.force) else ''), **feedparser_kws) except KeyboardInterrupt: raise except: if report_errors: log.error('Feed cannot be parsed: {0} (#{1})'.format(self.feed.feed_url, self.feed.id)) return (FEED_ERRPARSE, ret_values) if hasattr(self.fpf, 'status'): log.extra('[{0}] HTTP status {1}: {2}'.format(self.feed.id, self.fpf.status, self.feed.feed_url)) if (self.fpf.status == 304): log.extra('[{0}] Feed has not changed since last check: {1}'.format(self.feed.id, self.feed.feed_url)) self.feed.last_checked = timezone.now() self.feed.save() return (FEED_SAME, ret_values) if (self.fpf.status >= 400): if report_errors: log.warn('[{0}] HTTP error {1}: {2}'.format(self.feed.id, self.fpf.status, self.feed.feed_url)) return (FEED_ERRFETCH, ret_values) if self.fpf.bozo: bozo = getattr(self.fpf, 'bozo_exception', 'unknown error') if (not self.feed.skip_errors): if report_errors: log.warn('[{0}] Failed to fetch feed: {1} ({2})'.format(self.feed.id, self.feed.feed_url, bozo)) return (FEED_ERRFETCH, ret_values) elif report_errors: log.info('[{0}] Skipped feed error: {1} ({2})'.format(self.feed.id, self.feed.feed_url, bozo)) self.feed.title = self.fpf.feed.get('title', '')[:200] self.feed.tagline = self.fpf.feed.get('tagline', '') self.feed.link = self.fpf.feed.get('link', '') self.feed.last_checked = timezone.now() 
log.debug('[{0}] Feed info for: {1}\n{2}'.format(self.feed.id, self.feed.feed_url, '\n'.join((' {0}: {1}'.format(key, getattr(self.feed, key)) for key in ['title', 'tagline', 'link', 'last_checked'])))) guids = filter(None, it.imap(self._get_guid, self.fpf.entries)) if guids: from feedjack.models import Post self.postdict = dict(((post.guid, post) for post in Post.objects.filter(feed=self.feed.id, guid__in=guids))) if self.options.max_diff: if ((not self.postdict) and (Post.objects.filter(feed=self.feed.id).count() == 0)): diff = 0 else: diff = (op.truediv((len(guids) - len(self.postdict)), len(guids)) * 100) if (diff > self.options.max_diff): log.warn('[{0}] Feed validation failed: {1} (diff: {2}% > {3}%)'.format(self.feed.id, self.feed.feed_url, round(diff, 1), self.options.max_diff)) return (FEED_INVALID, ret_values) else: self.postdict = dict() self.feed.save() for entry in self.fpf.entries: try: with transaction.atomic(): ret_entry = self.process_entry(entry) except: print_exc(self.feed.id) ret_entry = ENTRY_ERR ret_values[ret_entry] += 1 if (not ret_values[ENTRY_ERR]): self.feed.etag = (self.fpf.get('etag') or '') try: self.feed.last_modified = feedparser_ts(self.fpf.modified_parsed) except AttributeError: pass self.feed.save() return ((FEED_OK if (ret_values[ENTRY_NEW] or ret_values[ENTRY_UPDATED]) else FEED_SAME), ret_values)
Downloads and parses a feed.
feedjack/fjupdate.py
_process
allo-/feedjack
2
python
def _process(self): ret_values = {ENTRY_NEW: 0, ENTRY_UPDATED: 0, ENTRY_SAME: 0, ENTRY_ERR: 0} report_errors = ((not self.options.report_after) or (not self.feed.last_checked) or ((self.feed.last_checked + self.options.report_after) < timezone.now())) feedparser_kws = dict() if ((sys.hexversion >= 34015488) and (not self.feed.verify_tls_certs)): import urllib2, ssl ctx = ssl.create_default_context() (ctx.check_hostname, ctx.verify_mode) = (False, ssl.CERT_NONE) feedparser_kws['handlers'] = [urllib2.HTTPSHandler(context=ctx)] try: self.fpf = feedparser.parse(self.feed.feed_url, agent=USER_AGENT, etag=(self.feed.etag if (not self.options.force) else ), **feedparser_kws) except KeyboardInterrupt: raise except: if report_errors: log.error('Feed cannot be parsed: {0} (#{1})'.format(self.feed.feed_url, self.feed.id)) return (FEED_ERRPARSE, ret_values) if hasattr(self.fpf, 'status'): log.extra('[{0}] HTTP status {1}: {2}'.format(self.feed.id, self.fpf.status, self.feed.feed_url)) if (self.fpf.status == 304): log.extra('[{0}] Feed has not changed since last check: {1}'.format(self.feed.id, self.feed.feed_url)) self.feed.last_checked = timezone.now() self.feed.save() return (FEED_SAME, ret_values) if (self.fpf.status >= 400): if report_errors: log.warn('[{0}] HTTP error {1}: {2}'.format(self.feed.id, self.fpf.status, self.feed.feed_url)) return (FEED_ERRFETCH, ret_values) if self.fpf.bozo: bozo = getattr(self.fpf, 'bozo_exception', 'unknown error') if (not self.feed.skip_errors): if report_errors: log.warn('[{0}] Failed to fetch feed: {1} ({2})'.format(self.feed.id, self.feed.feed_url, bozo)) return (FEED_ERRFETCH, ret_values) elif report_errors: log.info('[{0}] Skipped feed error: {1} ({2})'.format(self.feed.id, self.feed.feed_url, bozo)) self.feed.title = self.fpf.feed.get('title', )[:200] self.feed.tagline = self.fpf.feed.get('tagline', ) self.feed.link = self.fpf.feed.get('link', ) self.feed.last_checked = timezone.now() log.debug('[{0}] Feed info for: 
{1}\n{2}'.format(self.feed.id, self.feed.feed_url, '\n'.join((' {0}: {1}'.format(key, getattr(self.feed, key)) for key in ['title', 'tagline', 'link', 'last_checked'])))) guids = filter(None, it.imap(self._get_guid, self.fpf.entries)) if guids: from feedjack.models import Post self.postdict = dict(((post.guid, post) for post in Post.objects.filter(feed=self.feed.id, guid__in=guids))) if self.options.max_diff: if ((not self.postdict) and (Post.objects.filter(feed=self.feed.id).count() == 0)): diff = 0 else: diff = (op.truediv((len(guids) - len(self.postdict)), len(guids)) * 100) if (diff > self.options.max_diff): log.warn('[{0}] Feed validation failed: {1} (diff: {2}% > {3}%)'.format(self.feed.id, self.feed.feed_url, round(diff, 1), self.options.max_diff)) return (FEED_INVALID, ret_values) else: self.postdict = dict() self.feed.save() for entry in self.fpf.entries: try: with transaction.atomic(): ret_entry = self.process_entry(entry) except: print_exc(self.feed.id) ret_entry = ENTRY_ERR ret_values[ret_entry] += 1 if (not ret_values[ENTRY_ERR]): self.feed.etag = (self.fpf.get('etag') or ) try: self.feed.last_modified = feedparser_ts(self.fpf.modified_parsed) except AttributeError: pass self.feed.save() return ((FEED_OK if (ret_values[ENTRY_NEW] or ret_values[ENTRY_UPDATED]) else FEED_SAME), ret_values)
def _process(self): ret_values = {ENTRY_NEW: 0, ENTRY_UPDATED: 0, ENTRY_SAME: 0, ENTRY_ERR: 0} report_errors = ((not self.options.report_after) or (not self.feed.last_checked) or ((self.feed.last_checked + self.options.report_after) < timezone.now())) feedparser_kws = dict() if ((sys.hexversion >= 34015488) and (not self.feed.verify_tls_certs)): import urllib2, ssl ctx = ssl.create_default_context() (ctx.check_hostname, ctx.verify_mode) = (False, ssl.CERT_NONE) feedparser_kws['handlers'] = [urllib2.HTTPSHandler(context=ctx)] try: self.fpf = feedparser.parse(self.feed.feed_url, agent=USER_AGENT, etag=(self.feed.etag if (not self.options.force) else ), **feedparser_kws) except KeyboardInterrupt: raise except: if report_errors: log.error('Feed cannot be parsed: {0} (#{1})'.format(self.feed.feed_url, self.feed.id)) return (FEED_ERRPARSE, ret_values) if hasattr(self.fpf, 'status'): log.extra('[{0}] HTTP status {1}: {2}'.format(self.feed.id, self.fpf.status, self.feed.feed_url)) if (self.fpf.status == 304): log.extra('[{0}] Feed has not changed since last check: {1}'.format(self.feed.id, self.feed.feed_url)) self.feed.last_checked = timezone.now() self.feed.save() return (FEED_SAME, ret_values) if (self.fpf.status >= 400): if report_errors: log.warn('[{0}] HTTP error {1}: {2}'.format(self.feed.id, self.fpf.status, self.feed.feed_url)) return (FEED_ERRFETCH, ret_values) if self.fpf.bozo: bozo = getattr(self.fpf, 'bozo_exception', 'unknown error') if (not self.feed.skip_errors): if report_errors: log.warn('[{0}] Failed to fetch feed: {1} ({2})'.format(self.feed.id, self.feed.feed_url, bozo)) return (FEED_ERRFETCH, ret_values) elif report_errors: log.info('[{0}] Skipped feed error: {1} ({2})'.format(self.feed.id, self.feed.feed_url, bozo)) self.feed.title = self.fpf.feed.get('title', )[:200] self.feed.tagline = self.fpf.feed.get('tagline', ) self.feed.link = self.fpf.feed.get('link', ) self.feed.last_checked = timezone.now() log.debug('[{0}] Feed info for: 
{1}\n{2}'.format(self.feed.id, self.feed.feed_url, '\n'.join((' {0}: {1}'.format(key, getattr(self.feed, key)) for key in ['title', 'tagline', 'link', 'last_checked'])))) guids = filter(None, it.imap(self._get_guid, self.fpf.entries)) if guids: from feedjack.models import Post self.postdict = dict(((post.guid, post) for post in Post.objects.filter(feed=self.feed.id, guid__in=guids))) if self.options.max_diff: if ((not self.postdict) and (Post.objects.filter(feed=self.feed.id).count() == 0)): diff = 0 else: diff = (op.truediv((len(guids) - len(self.postdict)), len(guids)) * 100) if (diff > self.options.max_diff): log.warn('[{0}] Feed validation failed: {1} (diff: {2}% > {3}%)'.format(self.feed.id, self.feed.feed_url, round(diff, 1), self.options.max_diff)) return (FEED_INVALID, ret_values) else: self.postdict = dict() self.feed.save() for entry in self.fpf.entries: try: with transaction.atomic(): ret_entry = self.process_entry(entry) except: print_exc(self.feed.id) ret_entry = ENTRY_ERR ret_values[ret_entry] += 1 if (not ret_values[ENTRY_ERR]): self.feed.etag = (self.fpf.get('etag') or ) try: self.feed.last_modified = feedparser_ts(self.fpf.modified_parsed) except AttributeError: pass self.feed.save() return ((FEED_OK if (ret_values[ENTRY_NEW] or ret_values[ENTRY_UPDATED]) else FEED_SAME), ret_values)<|docstring|>Downloads and parses a feed.<|endoftext|>
e1c16077645decc8cb1c4fc4be1287799d54ad0e7dd679a9df588710bb4c8dfe
def compute_last_header(prev_header, hashes): 'Compute the last filter header from a starting header and a sequence of filter hashes.' header = ser_uint256(prev_header) for filter_hash in hashes: header = hash256((ser_uint256(filter_hash) + header)) return uint256_from_str(header)
Compute the last filter header from a starting header and a sequence of filter hashes.
test/functional/p2p_blockfilters.py
compute_last_header
daodaoshi/bitcoin-abc
1,266
python
def compute_last_header(prev_header, hashes): header = ser_uint256(prev_header) for filter_hash in hashes: header = hash256((ser_uint256(filter_hash) + header)) return uint256_from_str(header)
def compute_last_header(prev_header, hashes): header = ser_uint256(prev_header) for filter_hash in hashes: header = hash256((ser_uint256(filter_hash) + header)) return uint256_from_str(header)<|docstring|>Compute the last filter header from a starting header and a sequence of filter hashes.<|endoftext|>
805153c5f7dd7fb8b78f7e505c67208315dfde881df3a9f87dadb1f393276208
def on_cfilter(self, message): 'Store cfilters received in a list.' self.cfilters.append(message)
Store cfilters received in a list.
test/functional/p2p_blockfilters.py
on_cfilter
daodaoshi/bitcoin-abc
1,266
python
def on_cfilter(self, message): self.cfilters.append(message)
def on_cfilter(self, message): self.cfilters.append(message)<|docstring|>Store cfilters received in a list.<|endoftext|>
4cb599286d24bde0eff6571039c4447a60a85890120515d329ad7854c13c5332
def lens(lists): 'Returns the sizes of lists in a list.' return list(map(len, lists))
Returns the sizes of lists in a list.
open_spiel/python/algorithms/double_oracle.py
lens
tvanekeris/open_spiel
3,167
python
def lens(lists): return list(map(len, lists))
def lens(lists): return list(map(len, lists))<|docstring|>Returns the sizes of lists in a list.<|endoftext|>
ee32531125761fa840d009d24d07d69e73f5dd7ebbc22965d2a8f62100f9846e
def solve_subgame(subgame_payoffs): "Solves the subgame using OpenSpiel's LP solver." (p0_sol, p1_sol, _, _) = lp_solver.solve_zero_sum_matrix_game(pyspiel.create_matrix_game(*subgame_payoffs)) (p0_sol, p1_sol) = (np.asarray(p0_sol), np.asarray(p1_sol)) return [(p0_sol / p0_sol.sum()), (p1_sol / p1_sol.sum())]
Solves the subgame using OpenSpiel's LP solver.
open_spiel/python/algorithms/double_oracle.py
solve_subgame
tvanekeris/open_spiel
3,167
python
def solve_subgame(subgame_payoffs): (p0_sol, p1_sol, _, _) = lp_solver.solve_zero_sum_matrix_game(pyspiel.create_matrix_game(*subgame_payoffs)) (p0_sol, p1_sol) = (np.asarray(p0_sol), np.asarray(p1_sol)) return [(p0_sol / p0_sol.sum()), (p1_sol / p1_sol.sum())]
def solve_subgame(subgame_payoffs): (p0_sol, p1_sol, _, _) = lp_solver.solve_zero_sum_matrix_game(pyspiel.create_matrix_game(*subgame_payoffs)) (p0_sol, p1_sol) = (np.asarray(p0_sol), np.asarray(p1_sol)) return [(p0_sol / p0_sol.sum()), (p1_sol / p1_sol.sum())]<|docstring|>Solves the subgame using OpenSpiel's LP solver.<|endoftext|>
6fe1ba739463ae9a38ea016e3db1ec52ec98dd97e09d0bdd1595a7a82185181f
def __init__(self, game, enforce_symmetry=False): "Initializes the Double Oracle solver.\n\n Args:\n game: pyspiel.MatrixGame (zero-sum).\n enforce_symmetry: If True, enforces symmetry in the strategies appended by\n each player, by using the first player's best response for the second\n player as well; also asserts the game is symmetric and that players are\n seeded with identical initial_strategies, default: False.\n " assert isinstance(game, pyspiel.MatrixGame) assert (game.get_type().utility == pyspiel.GameType.Utility.ZERO_SUM) self.payoffs = utils.game_payoffs_array(game) self.subgame_strategies = [[], []] self.enforce_symmetry = enforce_symmetry if self.enforce_symmetry: assert utils.is_symmetric_matrix_game(self.payoffs), 'enforce_symmetry is True, but payoffs are asymmetric!'
Initializes the Double Oracle solver. Args: game: pyspiel.MatrixGame (zero-sum). enforce_symmetry: If True, enforces symmetry in the strategies appended by each player, by using the first player's best response for the second player as well; also asserts the game is symmetric and that players are seeded with identical initial_strategies, default: False.
open_spiel/python/algorithms/double_oracle.py
__init__
tvanekeris/open_spiel
3,167
python
def __init__(self, game, enforce_symmetry=False): "Initializes the Double Oracle solver.\n\n Args:\n game: pyspiel.MatrixGame (zero-sum).\n enforce_symmetry: If True, enforces symmetry in the strategies appended by\n each player, by using the first player's best response for the second\n player as well; also asserts the game is symmetric and that players are\n seeded with identical initial_strategies, default: False.\n " assert isinstance(game, pyspiel.MatrixGame) assert (game.get_type().utility == pyspiel.GameType.Utility.ZERO_SUM) self.payoffs = utils.game_payoffs_array(game) self.subgame_strategies = [[], []] self.enforce_symmetry = enforce_symmetry if self.enforce_symmetry: assert utils.is_symmetric_matrix_game(self.payoffs), 'enforce_symmetry is True, but payoffs are asymmetric!'
def __init__(self, game, enforce_symmetry=False): "Initializes the Double Oracle solver.\n\n Args:\n game: pyspiel.MatrixGame (zero-sum).\n enforce_symmetry: If True, enforces symmetry in the strategies appended by\n each player, by using the first player's best response for the second\n player as well; also asserts the game is symmetric and that players are\n seeded with identical initial_strategies, default: False.\n " assert isinstance(game, pyspiel.MatrixGame) assert (game.get_type().utility == pyspiel.GameType.Utility.ZERO_SUM) self.payoffs = utils.game_payoffs_array(game) self.subgame_strategies = [[], []] self.enforce_symmetry = enforce_symmetry if self.enforce_symmetry: assert utils.is_symmetric_matrix_game(self.payoffs), 'enforce_symmetry is True, but payoffs are asymmetric!'<|docstring|>Initializes the Double Oracle solver. Args: game: pyspiel.MatrixGame (zero-sum). enforce_symmetry: If True, enforces symmetry in the strategies appended by each player, by using the first player's best response for the second player as well; also asserts the game is symmetric and that players are seeded with identical initial_strategies, default: False.<|endoftext|>
bdd24b5329c9e6be03819d98026f6e660165c57802236b6ec6f249321add69e0
def oracle(self, subgame_solution): 'Computes the best responses.\n\n Args:\n subgame_solution: List of subgame solution policies.\n\n Returns:\n best_response: For both players from the original set of pure strategies.\n best_response_utility: Corresponding utility for both players.\n ' assert (lens(subgame_solution) == lens(self.subgame_strategies)), '{} != {}'.format(lens(subgame_solution), lens(self.subgame_strategies)) best_response = [None, None] best_response_utility = [None, None] n_best_responders = (1 if self.enforce_symmetry else 2) for player in range(n_best_responders): opponent = (1 - player) payoffs = np.take(self.payoffs[player], self.subgame_strategies[opponent], axis=opponent) payoffs = np.transpose(payoffs, [player, opponent]) avg_payoffs = (payoffs @ subgame_solution[opponent]).squeeze() best_response[player] = np.argmax(avg_payoffs) best_response_utility[player] = avg_payoffs[best_response[player]] if self.enforce_symmetry: best_response[1] = best_response[0] best_response_utility[1] = best_response_utility[0] return (best_response, best_response_utility)
Computes the best responses. Args: subgame_solution: List of subgame solution policies. Returns: best_response: For both players from the original set of pure strategies. best_response_utility: Corresponding utility for both players.
open_spiel/python/algorithms/double_oracle.py
oracle
tvanekeris/open_spiel
3,167
python
def oracle(self, subgame_solution): 'Computes the best responses.\n\n Args:\n subgame_solution: List of subgame solution policies.\n\n Returns:\n best_response: For both players from the original set of pure strategies.\n best_response_utility: Corresponding utility for both players.\n ' assert (lens(subgame_solution) == lens(self.subgame_strategies)), '{} != {}'.format(lens(subgame_solution), lens(self.subgame_strategies)) best_response = [None, None] best_response_utility = [None, None] n_best_responders = (1 if self.enforce_symmetry else 2) for player in range(n_best_responders): opponent = (1 - player) payoffs = np.take(self.payoffs[player], self.subgame_strategies[opponent], axis=opponent) payoffs = np.transpose(payoffs, [player, opponent]) avg_payoffs = (payoffs @ subgame_solution[opponent]).squeeze() best_response[player] = np.argmax(avg_payoffs) best_response_utility[player] = avg_payoffs[best_response[player]] if self.enforce_symmetry: best_response[1] = best_response[0] best_response_utility[1] = best_response_utility[0] return (best_response, best_response_utility)
def oracle(self, subgame_solution): 'Computes the best responses.\n\n Args:\n subgame_solution: List of subgame solution policies.\n\n Returns:\n best_response: For both players from the original set of pure strategies.\n best_response_utility: Corresponding utility for both players.\n ' assert (lens(subgame_solution) == lens(self.subgame_strategies)), '{} != {}'.format(lens(subgame_solution), lens(self.subgame_strategies)) best_response = [None, None] best_response_utility = [None, None] n_best_responders = (1 if self.enforce_symmetry else 2) for player in range(n_best_responders): opponent = (1 - player) payoffs = np.take(self.payoffs[player], self.subgame_strategies[opponent], axis=opponent) payoffs = np.transpose(payoffs, [player, opponent]) avg_payoffs = (payoffs @ subgame_solution[opponent]).squeeze() best_response[player] = np.argmax(avg_payoffs) best_response_utility[player] = avg_payoffs[best_response[player]] if self.enforce_symmetry: best_response[1] = best_response[0] best_response_utility[1] = best_response_utility[0] return (best_response, best_response_utility)<|docstring|>Computes the best responses. Args: subgame_solution: List of subgame solution policies. Returns: best_response: For both players from the original set of pure strategies. best_response_utility: Corresponding utility for both players.<|endoftext|>
e8ca4f7b16f0126882ad39133c84b2b02c3df708ccfb9bc2d76d78304418cbde
def step(self): 'Performs one iteration.' subgame_payoffs = self.subgame_payoffs() subgame_solution = solve_subgame(subgame_payoffs) (best_response, best_response_utility) = self.oracle(subgame_solution) self.subgame_strategies = [sorted(set((strategies + [br]))) for (strategies, br) in zip(self.subgame_strategies, best_response)] return (best_response, best_response_utility)
Performs one iteration.
open_spiel/python/algorithms/double_oracle.py
step
tvanekeris/open_spiel
3,167
python
def step(self): subgame_payoffs = self.subgame_payoffs() subgame_solution = solve_subgame(subgame_payoffs) (best_response, best_response_utility) = self.oracle(subgame_solution) self.subgame_strategies = [sorted(set((strategies + [br]))) for (strategies, br) in zip(self.subgame_strategies, best_response)] return (best_response, best_response_utility)
def step(self): subgame_payoffs = self.subgame_payoffs() subgame_solution = solve_subgame(subgame_payoffs) (best_response, best_response_utility) = self.oracle(subgame_solution) self.subgame_strategies = [sorted(set((strategies + [br]))) for (strategies, br) in zip(self.subgame_strategies, best_response)] return (best_response, best_response_utility)<|docstring|>Performs one iteration.<|endoftext|>
08f52bd9afd886e6b7ae2539a18a1d9ed34f5a48eafed4b5b686bbf71438419a
def solve_yield(self, initial_strategies, max_steps, tolerance, verbose, yield_subgame=False): 'Solves game using Double Oracle, yielding intermediate results.\n\n Args:\n initial_strategies: List of pure strategies for both players, optional.\n max_steps: Maximum number of iterations, default: 20.\n tolerance: Stop if the estimated value of the game is below the tolerance.\n verbose: If False, no warning is shown, default: True.\n yield_subgame: If True, yields the subgame on each iteration. Otherwise,\n yields the final results only, default: False.\n\n Yields:\n solution: Policies for both players.\n iteration: The number of iterations performed.\n value: Estimated value of the game.\n ' if (self.enforce_symmetry and initial_strategies): assert np.array_equal(initial_strategies[0], initial_strategies[1]), f'''Players must use same initial_strategies as symmetry is enforced. initial_strategies[0]: {initial_strategies[0]}, initial_strategies[1]: {initial_strategies[1]}''' self.subgame_strategies = (initial_strategies if initial_strategies else [[0], [0]]) iteration = 0 while (iteration < max_steps): if yield_subgame: (yield (None, iteration, None, self.subgame_payoffs())) iteration += 1 last_subgame_size = lens(self.subgame_strategies) (_, best_response_utility) = self.step() value = sum(best_response_utility) if (abs(value) < tolerance): if verbose: print('Last iteration={}; value below tolerance {} < {}.'.format(iteration, value, tolerance)) break if (lens(self.subgame_strategies) == last_subgame_size): if verbose: print('Last iteration={}; no strategies added, increase tolerance={} or check subgame solver.'.format(iteration, tolerance)) break subgame_solution = solve_subgame(self.subgame_payoffs()) solution = [np.zeros(k) for k in self.payoffs.shape[1:]] for p in range(2): solution[p][self.subgame_strategies[p]] = subgame_solution[p].squeeze() (yield (solution, iteration, value, self.subgame_payoffs()))
Solves game using Double Oracle, yielding intermediate results. Args: initial_strategies: List of pure strategies for both players, optional. max_steps: Maximum number of iterations, default: 20. tolerance: Stop if the estimated value of the game is below the tolerance. verbose: If False, no warning is shown, default: True. yield_subgame: If True, yields the subgame on each iteration. Otherwise, yields the final results only, default: False. Yields: solution: Policies for both players. iteration: The number of iterations performed. value: Estimated value of the game.
open_spiel/python/algorithms/double_oracle.py
solve_yield
tvanekeris/open_spiel
3,167
python
def solve_yield(self, initial_strategies, max_steps, tolerance, verbose, yield_subgame=False): 'Solves game using Double Oracle, yielding intermediate results.\n\n Args:\n initial_strategies: List of pure strategies for both players, optional.\n max_steps: Maximum number of iterations, default: 20.\n tolerance: Stop if the estimated value of the game is below the tolerance.\n verbose: If False, no warning is shown, default: True.\n yield_subgame: If True, yields the subgame on each iteration. Otherwise,\n yields the final results only, default: False.\n\n Yields:\n solution: Policies for both players.\n iteration: The number of iterations performed.\n value: Estimated value of the game.\n ' if (self.enforce_symmetry and initial_strategies): assert np.array_equal(initial_strategies[0], initial_strategies[1]), f'Players must use same initial_strategies as symmetry is enforced. initial_strategies[0]: {initial_strategies[0]}, initial_strategies[1]: {initial_strategies[1]}' self.subgame_strategies = (initial_strategies if initial_strategies else [[0], [0]]) iteration = 0 while (iteration < max_steps): if yield_subgame: (yield (None, iteration, None, self.subgame_payoffs())) iteration += 1 last_subgame_size = lens(self.subgame_strategies) (_, best_response_utility) = self.step() value = sum(best_response_utility) if (abs(value) < tolerance): if verbose: print('Last iteration={}; value below tolerance {} < {}.'.format(iteration, value, tolerance)) break if (lens(self.subgame_strategies) == last_subgame_size): if verbose: print('Last iteration={}; no strategies added, increase tolerance={} or check subgame solver.'.format(iteration, tolerance)) break subgame_solution = solve_subgame(self.subgame_payoffs()) solution = [np.zeros(k) for k in self.payoffs.shape[1:]] for p in range(2): solution[p][self.subgame_strategies[p]] = subgame_solution[p].squeeze() (yield (solution, iteration, value, self.subgame_payoffs()))
def solve_yield(self, initial_strategies, max_steps, tolerance, verbose, yield_subgame=False): 'Solves game using Double Oracle, yielding intermediate results.\n\n Args:\n initial_strategies: List of pure strategies for both players, optional.\n max_steps: Maximum number of iterations, default: 20.\n tolerance: Stop if the estimated value of the game is below the tolerance.\n verbose: If False, no warning is shown, default: True.\n yield_subgame: If True, yields the subgame on each iteration. Otherwise,\n yields the final results only, default: False.\n\n Yields:\n solution: Policies for both players.\n iteration: The number of iterations performed.\n value: Estimated value of the game.\n ' if (self.enforce_symmetry and initial_strategies): assert np.array_equal(initial_strategies[0], initial_strategies[1]), f'Players must use same initial_strategies as symmetry is enforced. initial_strategies[0]: {initial_strategies[0]}, initial_strategies[1]: {initial_strategies[1]}' self.subgame_strategies = (initial_strategies if initial_strategies else [[0], [0]]) iteration = 0 while (iteration < max_steps): if yield_subgame: (yield (None, iteration, None, self.subgame_payoffs())) iteration += 1 last_subgame_size = lens(self.subgame_strategies) (_, best_response_utility) = self.step() value = sum(best_response_utility) if (abs(value) < tolerance): if verbose: print('Last iteration={}; value below tolerance {} < {}.'.format(iteration, value, tolerance)) break if (lens(self.subgame_strategies) == last_subgame_size): if verbose: print('Last iteration={}; no strategies added, increase tolerance={} or check subgame solver.'.format(iteration, tolerance)) break subgame_solution = solve_subgame(self.subgame_payoffs()) solution = [np.zeros(k) for k in self.payoffs.shape[1:]] for p in range(2): solution[p][self.subgame_strategies[p]] = subgame_solution[p].squeeze() (yield (solution, iteration, value, self.subgame_payoffs()))<|docstring|>Solves game using Double Oracle, yielding 
intermediate results. Args: initial_strategies: List of pure strategies for both players, optional. max_steps: Maximum number of iterations, default: 20. tolerance: Stop if the estimated value of the game is below the tolerance. verbose: If False, no warning is shown, default: True. yield_subgame: If True, yields the subgame on each iteration. Otherwise, yields the final results only, default: False. Yields: solution: Policies for both players. iteration: The number of iterations performed. value: Estimated value of the game.<|endoftext|>
d078c2a44ff011a661425317bb9de9fbbaa5d8b205dc6e6af7fdf1cd6168ada7
def solve(self, initial_strategies=None, max_steps=20, tolerance=5e-05, verbose=True): 'Solves the game using Double Oracle, returning the final solution.' (solution, iteration, value) = (None, None, None) generator = self.solve_yield(initial_strategies, max_steps, tolerance, verbose, yield_subgame=False) for (solution, iteration, value, _) in generator: pass return (solution, iteration, value)
Solves the game using Double Oracle, returning the final solution.
open_spiel/python/algorithms/double_oracle.py
solve
tvanekeris/open_spiel
3,167
python
def solve(self, initial_strategies=None, max_steps=20, tolerance=5e-05, verbose=True): (solution, iteration, value) = (None, None, None) generator = self.solve_yield(initial_strategies, max_steps, tolerance, verbose, yield_subgame=False) for (solution, iteration, value, _) in generator: pass return (solution, iteration, value)
def solve(self, initial_strategies=None, max_steps=20, tolerance=5e-05, verbose=True): (solution, iteration, value) = (None, None, None) generator = self.solve_yield(initial_strategies, max_steps, tolerance, verbose, yield_subgame=False) for (solution, iteration, value, _) in generator: pass return (solution, iteration, value)<|docstring|>Solves the game using Double Oracle, returning the final solution.<|endoftext|>
02863c6a29bc2ce03d843e39dfc95c7dd321fa8cd6c41b468dca5bb947ecaae6
def _message_to_pb(message: Message): '\n Turns the given message to a protocol buffer\n ' if message.is_normal_message: return conversations_pb2.Message(message_id=message.id, author_user_id=message.author_id, time=Timestamp_from_datetime(message.time), text=conversations_pb2.MessageContentText(text=message.text)) else: return conversations_pb2.Message(message_id=message.id, author_user_id=message.author_id, time=Timestamp_from_datetime(message.time), chat_created=(conversations_pb2.MessageContentChatCreated() if (message.message_type == MessageType.chat_created) else None), chat_edited=(conversations_pb2.MessageContentChatEdited() if (message.message_type == MessageType.chat_edited) else None), user_invited=(conversations_pb2.MessageContentUserInvited(target_user_id=message.target_id) if (message.message_type == MessageType.user_invited) else None), user_left=(conversations_pb2.MessageContentUserLeft() if (message.message_type == MessageType.user_left) else None), user_made_admin=(conversations_pb2.MessageContentUserMadeAdmin(target_user_id=message.target_id) if (message.message_type == MessageType.user_made_admin) else None), user_removed_admin=(conversations_pb2.MessageContentUserRemovedAdmin(target_user_id=message.target_id) if (message.message_type == MessageType.user_removed_admin) else None), group_chat_user_removed=(conversations_pb2.MessageContentUserRemoved(target_user_id=message.target_id) if (message.message_type == MessageType.user_removed) else None))
Turns the given message to a protocol buffer
app/backend/src/couchers/servicers/conversations.py
_message_to_pb
dieterpankratz/couchers
226
python
def _message_to_pb(message: Message): '\n \n ' if message.is_normal_message: return conversations_pb2.Message(message_id=message.id, author_user_id=message.author_id, time=Timestamp_from_datetime(message.time), text=conversations_pb2.MessageContentText(text=message.text)) else: return conversations_pb2.Message(message_id=message.id, author_user_id=message.author_id, time=Timestamp_from_datetime(message.time), chat_created=(conversations_pb2.MessageContentChatCreated() if (message.message_type == MessageType.chat_created) else None), chat_edited=(conversations_pb2.MessageContentChatEdited() if (message.message_type == MessageType.chat_edited) else None), user_invited=(conversations_pb2.MessageContentUserInvited(target_user_id=message.target_id) if (message.message_type == MessageType.user_invited) else None), user_left=(conversations_pb2.MessageContentUserLeft() if (message.message_type == MessageType.user_left) else None), user_made_admin=(conversations_pb2.MessageContentUserMadeAdmin(target_user_id=message.target_id) if (message.message_type == MessageType.user_made_admin) else None), user_removed_admin=(conversations_pb2.MessageContentUserRemovedAdmin(target_user_id=message.target_id) if (message.message_type == MessageType.user_removed_admin) else None), group_chat_user_removed=(conversations_pb2.MessageContentUserRemoved(target_user_id=message.target_id) if (message.message_type == MessageType.user_removed) else None))
def _message_to_pb(message: Message): '\n \n ' if message.is_normal_message: return conversations_pb2.Message(message_id=message.id, author_user_id=message.author_id, time=Timestamp_from_datetime(message.time), text=conversations_pb2.MessageContentText(text=message.text)) else: return conversations_pb2.Message(message_id=message.id, author_user_id=message.author_id, time=Timestamp_from_datetime(message.time), chat_created=(conversations_pb2.MessageContentChatCreated() if (message.message_type == MessageType.chat_created) else None), chat_edited=(conversations_pb2.MessageContentChatEdited() if (message.message_type == MessageType.chat_edited) else None), user_invited=(conversations_pb2.MessageContentUserInvited(target_user_id=message.target_id) if (message.message_type == MessageType.user_invited) else None), user_left=(conversations_pb2.MessageContentUserLeft() if (message.message_type == MessageType.user_left) else None), user_made_admin=(conversations_pb2.MessageContentUserMadeAdmin(target_user_id=message.target_id) if (message.message_type == MessageType.user_made_admin) else None), user_removed_admin=(conversations_pb2.MessageContentUserRemovedAdmin(target_user_id=message.target_id) if (message.message_type == MessageType.user_removed_admin) else None), group_chat_user_removed=(conversations_pb2.MessageContentUserRemoved(target_user_id=message.target_id) if (message.message_type == MessageType.user_removed) else None))<|docstring|>Turns the given message to a protocol buffer<|endoftext|>
5d240f3a5e28c87fbd3a84c69a6cc6acdc307214e4d9d5c1984d62929f553dd0
def _get_visible_members_for_subscription(subscription): "\n If a user leaves a group chat, they shouldn't be able to see who's added\n after they left\n " if (not subscription.left): return [sub.user_id for sub in subscription.group_chat.subscriptions.where((GroupChatSubscription.left == None))] else: return [sub.user_id for sub in subscription.group_chat.subscriptions.where((GroupChatSubscription.joined <= subscription.left)).where(or_((GroupChatSubscription.left >= subscription.left), (GroupChatSubscription.left == None)))]
If a user leaves a group chat, they shouldn't be able to see who's added after they left
app/backend/src/couchers/servicers/conversations.py
_get_visible_members_for_subscription
dieterpankratz/couchers
226
python
def _get_visible_members_for_subscription(subscription): "\n If a user leaves a group chat, they shouldn't be able to see who's added\n after they left\n " if (not subscription.left): return [sub.user_id for sub in subscription.group_chat.subscriptions.where((GroupChatSubscription.left == None))] else: return [sub.user_id for sub in subscription.group_chat.subscriptions.where((GroupChatSubscription.joined <= subscription.left)).where(or_((GroupChatSubscription.left >= subscription.left), (GroupChatSubscription.left == None)))]
def _get_visible_members_for_subscription(subscription): "\n If a user leaves a group chat, they shouldn't be able to see who's added\n after they left\n " if (not subscription.left): return [sub.user_id for sub in subscription.group_chat.subscriptions.where((GroupChatSubscription.left == None))] else: return [sub.user_id for sub in subscription.group_chat.subscriptions.where((GroupChatSubscription.joined <= subscription.left)).where(or_((GroupChatSubscription.left >= subscription.left), (GroupChatSubscription.left == None)))]<|docstring|>If a user leaves a group chat, they shouldn't be able to see who's added after they left<|endoftext|>
49ab5cec10d5af5f1268f32f86535b5eb7dec45afa37d91931872f0248c7ca0d
def _get_visible_admins_for_subscription(subscription): "\n If a user leaves a group chat, they shouldn't be able to see who's added\n after they left\n " if (not subscription.left): return [sub.user_id for sub in subscription.group_chat.subscriptions.where((GroupChatSubscription.left == None)).where((GroupChatSubscription.role == GroupChatRole.admin))] else: return [sub.user_id for sub in subscription.group_chat.subscriptions.where((GroupChatSubscription.role == GroupChatRole.admin)).where((GroupChatSubscription.joined <= subscription.left)).where(or_((GroupChatSubscription.left >= subscription.left), (GroupChatSubscription.left == None)))]
If a user leaves a group chat, they shouldn't be able to see who's added after they left
app/backend/src/couchers/servicers/conversations.py
_get_visible_admins_for_subscription
dieterpankratz/couchers
226
python
def _get_visible_admins_for_subscription(subscription): "\n If a user leaves a group chat, they shouldn't be able to see who's added\n after they left\n " if (not subscription.left): return [sub.user_id for sub in subscription.group_chat.subscriptions.where((GroupChatSubscription.left == None)).where((GroupChatSubscription.role == GroupChatRole.admin))] else: return [sub.user_id for sub in subscription.group_chat.subscriptions.where((GroupChatSubscription.role == GroupChatRole.admin)).where((GroupChatSubscription.joined <= subscription.left)).where(or_((GroupChatSubscription.left >= subscription.left), (GroupChatSubscription.left == None)))]
def _get_visible_admins_for_subscription(subscription): "\n If a user leaves a group chat, they shouldn't be able to see who's added\n after they left\n " if (not subscription.left): return [sub.user_id for sub in subscription.group_chat.subscriptions.where((GroupChatSubscription.left == None)).where((GroupChatSubscription.role == GroupChatRole.admin))] else: return [sub.user_id for sub in subscription.group_chat.subscriptions.where((GroupChatSubscription.role == GroupChatRole.admin)).where((GroupChatSubscription.joined <= subscription.left)).where(or_((GroupChatSubscription.left >= subscription.left), (GroupChatSubscription.left == None)))]<|docstring|>If a user leaves a group chat, they shouldn't be able to see who's added after they left<|endoftext|>
4065bd4fdc746feab3e7ec5564c2b0fa6353de66c8795affb5dce47265fad766
def _add_message_to_subscription(session, subscription, **kwargs): '\n Creates a new message for a subscription, from the user whose subscription that is. Updates last seen message id\n\n Specify the keyword args for Message\n ' message = Message(conversation=subscription.group_chat.conversation, author_id=subscription.user_id, **kwargs) session.add(message) session.flush() subscription.last_seen_message_id = message.id return message
Creates a new message for a subscription, from the user whose subscription that is. Updates last seen message id Specify the keyword args for Message
app/backend/src/couchers/servicers/conversations.py
_add_message_to_subscription
dieterpankratz/couchers
226
python
def _add_message_to_subscription(session, subscription, **kwargs): '\n Creates a new message for a subscription, from the user whose subscription that is. Updates last seen message id\n\n Specify the keyword args for Message\n ' message = Message(conversation=subscription.group_chat.conversation, author_id=subscription.user_id, **kwargs) session.add(message) session.flush() subscription.last_seen_message_id = message.id return message
def _add_message_to_subscription(session, subscription, **kwargs): '\n Creates a new message for a subscription, from the user whose subscription that is. Updates last seen message id\n\n Specify the keyword args for Message\n ' message = Message(conversation=subscription.group_chat.conversation, author_id=subscription.user_id, **kwargs) session.add(message) session.flush() subscription.last_seen_message_id = message.id return message<|docstring|>Creates a new message for a subscription, from the user whose subscription that is. Updates last seen message id Specify the keyword args for Message<|endoftext|>
185ee779b742815c7015aaa8dc41a8f7b92316349a27cdc5fd41e8e86531a5aa
def RemoveGroupChatUser(self, request, context): "\n 1. Get admin info and check it's correct\n 2. Get user data, check it's correct and remove user\n " with session_scope() as session: your_subscription = session.execute(select(GroupChatSubscription).where((GroupChatSubscription.group_chat_id == request.group_chat_id)).where((GroupChatSubscription.user_id == context.user_id)).where((GroupChatSubscription.left == None))).scalar_one_or_none() if (not your_subscription): context.abort(grpc.StatusCode.NOT_FOUND, errors.CHAT_NOT_FOUND) if (your_subscription.role != GroupChatRole.admin): context.abort(grpc.StatusCode.PERMISSION_DENIED, errors.ONLY_ADMIN_CAN_REMOVE_USER) if (request.user_id == context.user_id): context.abort(grpc.StatusCode.FAILED_PRECONDITION, errors.CANT_REMOVE_SELF) their_subscription = session.execute(select(GroupChatSubscription).where((GroupChatSubscription.group_chat_id == request.group_chat_id)).where((GroupChatSubscription.user_id == request.user_id)).where((GroupChatSubscription.left == None))).scalar_one_or_none() if (not their_subscription): context.abort(grpc.StatusCode.FAILED_PRECONDITION, errors.USER_NOT_IN_CHAT) _add_message_to_subscription(session, your_subscription, message_type=MessageType.user_removed, target_id=request.user_id) their_subscription.left = func.now() return empty_pb2.Empty()
1. Get admin info and check it's correct 2. Get user data, check it's correct and remove user
app/backend/src/couchers/servicers/conversations.py
RemoveGroupChatUser
dieterpankratz/couchers
226
python
def RemoveGroupChatUser(self, request, context): "\n 1. Get admin info and check it's correct\n 2. Get user data, check it's correct and remove user\n " with session_scope() as session: your_subscription = session.execute(select(GroupChatSubscription).where((GroupChatSubscription.group_chat_id == request.group_chat_id)).where((GroupChatSubscription.user_id == context.user_id)).where((GroupChatSubscription.left == None))).scalar_one_or_none() if (not your_subscription): context.abort(grpc.StatusCode.NOT_FOUND, errors.CHAT_NOT_FOUND) if (your_subscription.role != GroupChatRole.admin): context.abort(grpc.StatusCode.PERMISSION_DENIED, errors.ONLY_ADMIN_CAN_REMOVE_USER) if (request.user_id == context.user_id): context.abort(grpc.StatusCode.FAILED_PRECONDITION, errors.CANT_REMOVE_SELF) their_subscription = session.execute(select(GroupChatSubscription).where((GroupChatSubscription.group_chat_id == request.group_chat_id)).where((GroupChatSubscription.user_id == request.user_id)).where((GroupChatSubscription.left == None))).scalar_one_or_none() if (not their_subscription): context.abort(grpc.StatusCode.FAILED_PRECONDITION, errors.USER_NOT_IN_CHAT) _add_message_to_subscription(session, your_subscription, message_type=MessageType.user_removed, target_id=request.user_id) their_subscription.left = func.now() return empty_pb2.Empty()
def RemoveGroupChatUser(self, request, context): "\n 1. Get admin info and check it's correct\n 2. Get user data, check it's correct and remove user\n " with session_scope() as session: your_subscription = session.execute(select(GroupChatSubscription).where((GroupChatSubscription.group_chat_id == request.group_chat_id)).where((GroupChatSubscription.user_id == context.user_id)).where((GroupChatSubscription.left == None))).scalar_one_or_none() if (not your_subscription): context.abort(grpc.StatusCode.NOT_FOUND, errors.CHAT_NOT_FOUND) if (your_subscription.role != GroupChatRole.admin): context.abort(grpc.StatusCode.PERMISSION_DENIED, errors.ONLY_ADMIN_CAN_REMOVE_USER) if (request.user_id == context.user_id): context.abort(grpc.StatusCode.FAILED_PRECONDITION, errors.CANT_REMOVE_SELF) their_subscription = session.execute(select(GroupChatSubscription).where((GroupChatSubscription.group_chat_id == request.group_chat_id)).where((GroupChatSubscription.user_id == request.user_id)).where((GroupChatSubscription.left == None))).scalar_one_or_none() if (not their_subscription): context.abort(grpc.StatusCode.FAILED_PRECONDITION, errors.USER_NOT_IN_CHAT) _add_message_to_subscription(session, your_subscription, message_type=MessageType.user_removed, target_id=request.user_id) their_subscription.left = func.now() return empty_pb2.Empty()<|docstring|>1. Get admin info and check it's correct 2. Get user data, check it's correct and remove user<|endoftext|>
63e3a6b8a3412f173137ac192fd0fa0008e5269a7f42783d47d2021a130b9d6e
def disk(d=60, h=10, maxh=5.0, relaxed=True, name='normal_modes_nanodisk', A=1.3e-11, H_ext_relax=[100000.0, 1000.0, 0], H_ext_ringdown=[100000.0, 0, 0], demag_solver='FK', demag_solver_type=None, force_relaxation=False): '\n Permalloy nanodisk with diameter d=60 nm, height h=10 nm and mesh\n discretisation maxh=5.0. An external field of strength 100 kA/m is\n applied along the x-axis, with a small (1 kA/m) y-component for\n the relaxation which is then removed for the ringdown phase.\n\n ' MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) if ((d == 60) and (h == 10) and (maxh == 5.0)): mesh = df.Mesh(os.path.join(MODULE_DIR, 'disk__d_60__h_10__maxh_5.xml.gz')) else: mesh = Nanodisk(d=d, h=h).create_mesh(maxh=maxh) Ms = 800000.0 m_init = [1, 0, 0] alpha_relax = 1.0 sim = finmag.normal_mode_simulation(mesh, Ms, m_init, alpha=alpha_relax, unit_length=1e-09, A=A, H_ext=H_ext_relax, demag_solver=demag_solver, demag_solver_type=demag_solver_type, name=name) if relaxed: if ((not force_relaxation) and ((d == 60) and (h == 10) and (maxh == 5.0))): sim.restart(os.path.join(MODULE_DIR, 'disk_relaxed.npz')) else: sim.relax() alpha_ringdown = 0.01 t_end = 1e-09 save_ndt_every = 1e-11 save_m_every = 1e-11 m_snapshots_filename = 'snapshots/m_ringdown.npy' def ringdown(t_end=1e-09, alpha=alpha_ringdown, H_ext=H_ext_ringdown, reset_time=True, clear_schedule=True, save_ndt_every=save_ndt_every, save_vtk_every=None, save_m_every=save_m_every, vtk_snapshots_filename=None, m_snapshots_filename=m_snapshots_filename, overwrite=False): sim.run_ringdown(t_end=t_end, alpha=alpha, H_ext=H_ext, reset_time=reset_time, clear_schedule=clear_schedule, save_ndt_every=save_ndt_every, save_vtk_every=None, save_m_every=save_m_every, vtk_snapshots_filename=None, m_snapshots_filename=m_snapshots_filename, overwrite=overwrite) sim.ringdown = ringdown return sim
Permalloy nanodisk with diameter d=60 nm, height h=10 nm and mesh discretisation maxh=5.0. An external field of strength 100 kA/m is applied along the x-axis, with a small (1 kA/m) y-component for the relaxation which is then removed for the ringdown phase.
src/finmag/example/normal_modes/disk.py
disk
davidcortesortuno/finmag
10
python
def disk(d=60, h=10, maxh=5.0, relaxed=True, name='normal_modes_nanodisk', A=1.3e-11, H_ext_relax=[100000.0, 1000.0, 0], H_ext_ringdown=[100000.0, 0, 0], demag_solver='FK', demag_solver_type=None, force_relaxation=False): '\n Permalloy nanodisk with diameter d=60 nm, height h=10 nm and mesh\n discretisation maxh=5.0. An external field of strength 100 kA/m is\n applied along the x-axis, with a small (1 kA/m) y-component for\n the relaxation which is then removed for the ringdown phase.\n\n ' MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) if ((d == 60) and (h == 10) and (maxh == 5.0)): mesh = df.Mesh(os.path.join(MODULE_DIR, 'disk__d_60__h_10__maxh_5.xml.gz')) else: mesh = Nanodisk(d=d, h=h).create_mesh(maxh=maxh) Ms = 800000.0 m_init = [1, 0, 0] alpha_relax = 1.0 sim = finmag.normal_mode_simulation(mesh, Ms, m_init, alpha=alpha_relax, unit_length=1e-09, A=A, H_ext=H_ext_relax, demag_solver=demag_solver, demag_solver_type=demag_solver_type, name=name) if relaxed: if ((not force_relaxation) and ((d == 60) and (h == 10) and (maxh == 5.0))): sim.restart(os.path.join(MODULE_DIR, 'disk_relaxed.npz')) else: sim.relax() alpha_ringdown = 0.01 t_end = 1e-09 save_ndt_every = 1e-11 save_m_every = 1e-11 m_snapshots_filename = 'snapshots/m_ringdown.npy' def ringdown(t_end=1e-09, alpha=alpha_ringdown, H_ext=H_ext_ringdown, reset_time=True, clear_schedule=True, save_ndt_every=save_ndt_every, save_vtk_every=None, save_m_every=save_m_every, vtk_snapshots_filename=None, m_snapshots_filename=m_snapshots_filename, overwrite=False): sim.run_ringdown(t_end=t_end, alpha=alpha, H_ext=H_ext, reset_time=reset_time, clear_schedule=clear_schedule, save_ndt_every=save_ndt_every, save_vtk_every=None, save_m_every=save_m_every, vtk_snapshots_filename=None, m_snapshots_filename=m_snapshots_filename, overwrite=overwrite) sim.ringdown = ringdown return sim
def disk(d=60, h=10, maxh=5.0, relaxed=True, name='normal_modes_nanodisk', A=1.3e-11, H_ext_relax=[100000.0, 1000.0, 0], H_ext_ringdown=[100000.0, 0, 0], demag_solver='FK', demag_solver_type=None, force_relaxation=False): '\n Permalloy nanodisk with diameter d=60 nm, height h=10 nm and mesh\n discretisation maxh=5.0. An external field of strength 100 kA/m is\n applied along the x-axis, with a small (1 kA/m) y-component for\n the relaxation which is then removed for the ringdown phase.\n\n ' MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) if ((d == 60) and (h == 10) and (maxh == 5.0)): mesh = df.Mesh(os.path.join(MODULE_DIR, 'disk__d_60__h_10__maxh_5.xml.gz')) else: mesh = Nanodisk(d=d, h=h).create_mesh(maxh=maxh) Ms = 800000.0 m_init = [1, 0, 0] alpha_relax = 1.0 sim = finmag.normal_mode_simulation(mesh, Ms, m_init, alpha=alpha_relax, unit_length=1e-09, A=A, H_ext=H_ext_relax, demag_solver=demag_solver, demag_solver_type=demag_solver_type, name=name) if relaxed: if ((not force_relaxation) and ((d == 60) and (h == 10) and (maxh == 5.0))): sim.restart(os.path.join(MODULE_DIR, 'disk_relaxed.npz')) else: sim.relax() alpha_ringdown = 0.01 t_end = 1e-09 save_ndt_every = 1e-11 save_m_every = 1e-11 m_snapshots_filename = 'snapshots/m_ringdown.npy' def ringdown(t_end=1e-09, alpha=alpha_ringdown, H_ext=H_ext_ringdown, reset_time=True, clear_schedule=True, save_ndt_every=save_ndt_every, save_vtk_every=None, save_m_every=save_m_every, vtk_snapshots_filename=None, m_snapshots_filename=m_snapshots_filename, overwrite=False): sim.run_ringdown(t_end=t_end, alpha=alpha, H_ext=H_ext, reset_time=reset_time, clear_schedule=clear_schedule, save_ndt_every=save_ndt_every, save_vtk_every=None, save_m_every=save_m_every, vtk_snapshots_filename=None, m_snapshots_filename=m_snapshots_filename, overwrite=overwrite) sim.ringdown = ringdown return sim<|docstring|>Permalloy nanodisk with diameter d=60 nm, height h=10 nm and mesh discretisation maxh=5.0. 
An external field of strength 100 kA/m is applied along the x-axis, with a small (1 kA/m) y-component for the relaxation which is then removed for the ringdown phase.<|endoftext|>
39350e102b9e52a8a46c76aaaea5600ac4a219f0ba85458ac20335cca8253f15
def ignore_aiohttp_ssl_error(loop): 'Ignore aiohttp #3535 / cpython #13548 issue with SSL data after close\n\n There is an issue in Python 3.7 up to 3.7.3 that over-reports a\n ssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data\n after close notify (_ssl.c:2609)) after we are already done with the\n connection. See GitHub issues aio-libs/aiohttp#3535 and\n python/cpython#13548.\n\n Given a loop, this sets up an exception handler that ignores this specific\n exception, but passes everything else on to the previous exception handler\n this one replaces.\n\n Checks for fixed Python versions, disabling itself when running on 3.7.4+\n or 3.8.\n\n ' if (sys.version_info >= (3, 7, 4)): return orig_handler = loop.get_exception_handler() def ignore_ssl_error(loop, context): if (context.get('message') in {'SSL error in data received', 'Fatal error on transport'}): exception = context.get('exception') protocol = context.get('protocol') if (isinstance(exception, ssl.SSLError) and (exception.reason == 'KRB5_S_INIT') and isinstance(protocol, SSL_PROTOCOLS)): if loop.get_debug(): asyncio.log.logger.debug('Ignoring asyncio SSL KRB5_S_INIT error') return if (orig_handler is not None): orig_handler(loop, context) else: loop.default_exception_handler(context) loop.set_exception_handler(ignore_ssl_error)
Ignore aiohttp #3535 / cpython #13548 issue with SSL data after close There is an issue in Python 3.7 up to 3.7.3 that over-reports a ssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data after close notify (_ssl.c:2609)) after we are already done with the connection. See GitHub issues aio-libs/aiohttp#3535 and python/cpython#13548. Given a loop, this sets up an exception handler that ignores this specific exception, but passes everything else on to the previous exception handler this one replaces. Checks for fixed Python versions, disabling itself when running on 3.7.4+ or 3.8.
script/calc-reps.py
ignore_aiohttp_ssl_error
Joohansson/nanoticker
6
python
def ignore_aiohttp_ssl_error(loop): 'Ignore aiohttp #3535 / cpython #13548 issue with SSL data after close\n\n There is an issue in Python 3.7 up to 3.7.3 that over-reports a\n ssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data\n after close notify (_ssl.c:2609)) after we are already done with the\n connection. See GitHub issues aio-libs/aiohttp#3535 and\n python/cpython#13548.\n\n Given a loop, this sets up an exception handler that ignores this specific\n exception, but passes everything else on to the previous exception handler\n this one replaces.\n\n Checks for fixed Python versions, disabling itself when running on 3.7.4+\n or 3.8.\n\n ' if (sys.version_info >= (3, 7, 4)): return orig_handler = loop.get_exception_handler() def ignore_ssl_error(loop, context): if (context.get('message') in {'SSL error in data received', 'Fatal error on transport'}): exception = context.get('exception') protocol = context.get('protocol') if (isinstance(exception, ssl.SSLError) and (exception.reason == 'KRB5_S_INIT') and isinstance(protocol, SSL_PROTOCOLS)): if loop.get_debug(): asyncio.log.logger.debug('Ignoring asyncio SSL KRB5_S_INIT error') return if (orig_handler is not None): orig_handler(loop, context) else: loop.default_exception_handler(context) loop.set_exception_handler(ignore_ssl_error)
def ignore_aiohttp_ssl_error(loop): 'Ignore aiohttp #3535 / cpython #13548 issue with SSL data after close\n\n There is an issue in Python 3.7 up to 3.7.3 that over-reports a\n ssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data\n after close notify (_ssl.c:2609)) after we are already done with the\n connection. See GitHub issues aio-libs/aiohttp#3535 and\n python/cpython#13548.\n\n Given a loop, this sets up an exception handler that ignores this specific\n exception, but passes everything else on to the previous exception handler\n this one replaces.\n\n Checks for fixed Python versions, disabling itself when running on 3.7.4+\n or 3.8.\n\n ' if (sys.version_info >= (3, 7, 4)): return orig_handler = loop.get_exception_handler() def ignore_ssl_error(loop, context): if (context.get('message') in {'SSL error in data received', 'Fatal error on transport'}): exception = context.get('exception') protocol = context.get('protocol') if (isinstance(exception, ssl.SSLError) and (exception.reason == 'KRB5_S_INIT') and isinstance(protocol, SSL_PROTOCOLS)): if loop.get_debug(): asyncio.log.logger.debug('Ignoring asyncio SSL KRB5_S_INIT error') return if (orig_handler is not None): orig_handler(loop, context) else: loop.default_exception_handler(context) loop.set_exception_handler(ignore_ssl_error)<|docstring|>Ignore aiohttp #3535 / cpython #13548 issue with SSL data after close There is an issue in Python 3.7 up to 3.7.3 that over-reports a ssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data after close notify (_ssl.c:2609)) after we are already done with the connection. See GitHub issues aio-libs/aiohttp#3535 and python/cpython#13548. Given a loop, this sets up an exception handler that ignores this specific exception, but passes everything else on to the previous exception handler this one replaces. Checks for fixed Python versions, disabling itself when running on 3.7.4+ or 3.8.<|endoftext|>
43c51cb7eb7945b97fc4c9ed1c9173170046bf16d38d661c08223d287c3115ae
def chunks(l, n): 'Yield successive n-sized chunks from l' for i in range(0, len(l), n): (yield l[i:(i + n)])
Yield successive n-sized chunks from l
script/calc-reps.py
chunks
Joohansson/nanoticker
6
python
def chunks(l, n): for i in range(0, len(l), n): (yield l[i:(i + n)])
def chunks(l, n): for i in range(0, len(l), n): (yield l[i:(i + n)])<|docstring|>Yield successive n-sized chunks from l<|endoftext|>
4644a3f72ee917b62b3c2820efa8dd2abe730e6e704f47f222ce41d9b8541aad
def forward(self, x: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]: '\n :param x: (batch size, channels, height, width)\n :return: (batch size, class), (batch size, class), heatmap (batch size, 1, height, width)\n ' origin_feature = self.feature_extractor(x)[(- 1)] (attention_output, attention_map) = self.attention_branch(origin_feature) perception_feature = ((origin_feature * attention_map) + origin_feature) perception_output = self.perception_branch(perception_feature) return (perception_output, attention_output, attention_map)
:param x: (batch size, channels, height, width) :return: (batch size, class), (batch size, class), heatmap (batch size, 1, height, width)
deepext_with_lightning/models/classification/abn/modules.py
forward
pei223/deepext_with_lightning
1
python
def forward(self, x: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]: '\n :param x: (batch size, channels, height, width)\n :return: (batch size, class), (batch size, class), heatmap (batch size, 1, height, width)\n ' origin_feature = self.feature_extractor(x)[(- 1)] (attention_output, attention_map) = self.attention_branch(origin_feature) perception_feature = ((origin_feature * attention_map) + origin_feature) perception_output = self.perception_branch(perception_feature) return (perception_output, attention_output, attention_map)
def forward(self, x: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]: '\n :param x: (batch size, channels, height, width)\n :return: (batch size, class), (batch size, class), heatmap (batch size, 1, height, width)\n ' origin_feature = self.feature_extractor(x)[(- 1)] (attention_output, attention_map) = self.attention_branch(origin_feature) perception_feature = ((origin_feature * attention_map) + origin_feature) perception_output = self.perception_branch(perception_feature) return (perception_output, attention_output, attention_map)<|docstring|>:param x: (batch size, channels, height, width) :return: (batch size, class), (batch size, class), heatmap (batch size, 1, height, width)<|endoftext|>
6cb4c00b86cbb35b74766b1f1d8589d0c3cfe42b59ed191bd48c2114c364a7df
def load_tests(*_): 'Load doctests as unittest test suite.\n\n For the parameters, see :mod:`unittest`. The parameters are unused here.\n ' suite = TestSuite() suite.addTests(DocTestSuite('COT.helpers.helper')) return suite
Load doctests as unittest test suite. For the parameters, see :mod:`unittest`. The parameters are unused here.
COT/helpers/tests/test_doctests.py
load_tests
morneaup/cot
81
python
def load_tests(*_): 'Load doctests as unittest test suite.\n\n For the parameters, see :mod:`unittest`. The parameters are unused here.\n ' suite = TestSuite() suite.addTests(DocTestSuite('COT.helpers.helper')) return suite
def load_tests(*_): 'Load doctests as unittest test suite.\n\n For the parameters, see :mod:`unittest`. The parameters are unused here.\n ' suite = TestSuite() suite.addTests(DocTestSuite('COT.helpers.helper')) return suite<|docstring|>Load doctests as unittest test suite. For the parameters, see :mod:`unittest`. The parameters are unused here.<|endoftext|>
ce8f6434b0cd2a780df2e69b928172a0e9d3db0cd70d664a3a666fcca1a1be45
def get_LOC_entity(tag_seq, char_seq): '\n 这里需要对输出序列进行判断,对连续序列进行拼接\n :param tag_seq:\n :param char_seq:\n :return:\n ' length = len(char_seq) LOC = [] location = [] for (i, (char, tag)) in enumerate(zip(char_seq, tag_seq)): if (tag == 'B-LOC'): if ('loc' in locals().keys()): LOC.append(loc) del loc loc = char if ((i + 1) == length): LOC.append(loc) if (tag == 'I-LOC'): loc += char if ((i + 1) == length): LOC.append(loc) if (tag not in ['I-LOC', 'B-LOC']): if ('loc' in locals().keys()): LOC.append(loc) del loc continue return LOC
这里需要对输出序列进行判断,对连续序列进行拼接 :param tag_seq: :param char_seq: :return:
Chatbot_Model/Info_Extraction/Entity_Extraction/utils.py
get_LOC_entity
daniellibin/Chatbot_CN
8
python
def get_LOC_entity(tag_seq, char_seq): '\n 这里需要对输出序列进行判断,对连续序列进行拼接\n :param tag_seq:\n :param char_seq:\n :return:\n ' length = len(char_seq) LOC = [] location = [] for (i, (char, tag)) in enumerate(zip(char_seq, tag_seq)): if (tag == 'B-LOC'): if ('loc' in locals().keys()): LOC.append(loc) del loc loc = char if ((i + 1) == length): LOC.append(loc) if (tag == 'I-LOC'): loc += char if ((i + 1) == length): LOC.append(loc) if (tag not in ['I-LOC', 'B-LOC']): if ('loc' in locals().keys()): LOC.append(loc) del loc continue return LOC
def get_LOC_entity(tag_seq, char_seq): '\n 这里需要对输出序列进行判断,对连续序列进行拼接\n :param tag_seq:\n :param char_seq:\n :return:\n ' length = len(char_seq) LOC = [] location = [] for (i, (char, tag)) in enumerate(zip(char_seq, tag_seq)): if (tag == 'B-LOC'): if ('loc' in locals().keys()): LOC.append(loc) del loc loc = char if ((i + 1) == length): LOC.append(loc) if (tag == 'I-LOC'): loc += char if ((i + 1) == length): LOC.append(loc) if (tag not in ['I-LOC', 'B-LOC']): if ('loc' in locals().keys()): LOC.append(loc) del loc continue return LOC<|docstring|>这里需要对输出序列进行判断,对连续序列进行拼接 :param tag_seq: :param char_seq: :return:<|endoftext|>
4a8b86d2da5f01fce855d62a59a0167ff3be086f4c65ad521ed12b81a0fc3990
def get_TIM_entity(tag_seq, char_seq): '\n 获取时间实体\n :param tag_seq:\n :param char_seq:\n :return:\n ' length = len(char_seq) TIM = [] try: for (i, (char, tag)) in enumerate(zip(char_seq, tag_seq)): if (tag == 'B-TIM'): if ('tim' in locals().keys()): TIM.append(org) del org org = char if ((i + 1) == length): TIM.append(org) if (tag == 'I-TIM'): org += char if ((i + 1) == length): TIM.append(org) if (tag not in ['I-TIM', 'B-TIM']): if ('tim' in locals().keys()): TIM.append(org) del org continue TIM = list(set(TIM)) except Exception as e: print('error is ', e) return TIM
获取时间实体 :param tag_seq: :param char_seq: :return:
Chatbot_Model/Info_Extraction/Entity_Extraction/utils.py
get_TIM_entity
daniellibin/Chatbot_CN
8
python
def get_TIM_entity(tag_seq, char_seq): '\n 获取时间实体\n :param tag_seq:\n :param char_seq:\n :return:\n ' length = len(char_seq) TIM = [] try: for (i, (char, tag)) in enumerate(zip(char_seq, tag_seq)): if (tag == 'B-TIM'): if ('tim' in locals().keys()): TIM.append(org) del org org = char if ((i + 1) == length): TIM.append(org) if (tag == 'I-TIM'): org += char if ((i + 1) == length): TIM.append(org) if (tag not in ['I-TIM', 'B-TIM']): if ('tim' in locals().keys()): TIM.append(org) del org continue TIM = list(set(TIM)) except Exception as e: print('error is ', e) return TIM
def get_TIM_entity(tag_seq, char_seq): '\n 获取时间实体\n :param tag_seq:\n :param char_seq:\n :return:\n ' length = len(char_seq) TIM = [] try: for (i, (char, tag)) in enumerate(zip(char_seq, tag_seq)): if (tag == 'B-TIM'): if ('tim' in locals().keys()): TIM.append(org) del org org = char if ((i + 1) == length): TIM.append(org) if (tag == 'I-TIM'): org += char if ((i + 1) == length): TIM.append(org) if (tag not in ['I-TIM', 'B-TIM']): if ('tim' in locals().keys()): TIM.append(org) del org continue TIM = list(set(TIM)) except Exception as e: print('error is ', e) return TIM<|docstring|>获取时间实体 :param tag_seq: :param char_seq: :return:<|endoftext|>
a9165b87a3270f321218e392cef7b86f18c91dd902979cab0cb543ae0ebe12f4
def get_MON_entity(text): '\n 获取金额实体\n :param text:\n :return:\n ' M = [] MON = [] tr = proprecess_money.wash_data(text) sent = proprecess_money.split_sentence(tr) for sentence in sent: money = proprecess_money.get_properties_and_values(sentence) M.append(money) for i in range(len(M)): if M[i]: MON.append(M[i]) dup = (lambda x, y: (x if (y in x) else (x + [y]))) MON = reduce(dup, ([[]] + MON)) return MON
获取金额实体 :param text: :return:
Chatbot_Model/Info_Extraction/Entity_Extraction/utils.py
get_MON_entity
daniellibin/Chatbot_CN
8
python
def get_MON_entity(text): '\n 获取金额实体\n :param text:\n :return:\n ' M = [] MON = [] tr = proprecess_money.wash_data(text) sent = proprecess_money.split_sentence(tr) for sentence in sent: money = proprecess_money.get_properties_and_values(sentence) M.append(money) for i in range(len(M)): if M[i]: MON.append(M[i]) dup = (lambda x, y: (x if (y in x) else (x + [y]))) MON = reduce(dup, ([[]] + MON)) return MON
def get_MON_entity(text): '\n 获取金额实体\n :param text:\n :return:\n ' M = [] MON = [] tr = proprecess_money.wash_data(text) sent = proprecess_money.split_sentence(tr) for sentence in sent: money = proprecess_money.get_properties_and_values(sentence) M.append(money) for i in range(len(M)): if M[i]: MON.append(M[i]) dup = (lambda x, y: (x if (y in x) else (x + [y]))) MON = reduce(dup, ([[]] + MON)) return MON<|docstring|>获取金额实体 :param text: :return:<|endoftext|>
b607dc40081536ecf5a5c4ec0fa3070f12e7bba801c1f76a22f24b78403547da
def __init__(self, cfg): ' extractor regional features from the shared conv layers ' super(FeatExtractor, self).__init__() self.cfg = cfg (self.out_height, self.out_width) = self.cfg.MODEL.FM_ROI_POOL_SIZE model_info = model_factory[self.cfg.MODEL.BACKBONE] self.feat_name_list = self.cfg.MODEL.FM_FEAT_NAME_LIST self.nc_list = model_info.infer_channels(self.feat_name_list) self.stride_list = model_info.infer_strides(self.feat_name_list) self.out_channel = int(sum(self.nc_list)) self.use_bn = self.cfg.MODEL.FM_ROI_FEAT_BN if self.use_bn: self.bn_list = torch.nn.ModuleList([torch.nn.BatchNorm2d(nc) for nc in self.nc_list])
extractor regional features from the shared conv layers
siam_tracker/spm_tracker/modules/spm_tracker_fm.py
__init__
Bhaskers-Blu-Org2/SPM-Tracker
32
python
def __init__(self, cfg): ' ' super(FeatExtractor, self).__init__() self.cfg = cfg (self.out_height, self.out_width) = self.cfg.MODEL.FM_ROI_POOL_SIZE model_info = model_factory[self.cfg.MODEL.BACKBONE] self.feat_name_list = self.cfg.MODEL.FM_FEAT_NAME_LIST self.nc_list = model_info.infer_channels(self.feat_name_list) self.stride_list = model_info.infer_strides(self.feat_name_list) self.out_channel = int(sum(self.nc_list)) self.use_bn = self.cfg.MODEL.FM_ROI_FEAT_BN if self.use_bn: self.bn_list = torch.nn.ModuleList([torch.nn.BatchNorm2d(nc) for nc in self.nc_list])
def __init__(self, cfg): ' ' super(FeatExtractor, self).__init__() self.cfg = cfg (self.out_height, self.out_width) = self.cfg.MODEL.FM_ROI_POOL_SIZE model_info = model_factory[self.cfg.MODEL.BACKBONE] self.feat_name_list = self.cfg.MODEL.FM_FEAT_NAME_LIST self.nc_list = model_info.infer_channels(self.feat_name_list) self.stride_list = model_info.infer_strides(self.feat_name_list) self.out_channel = int(sum(self.nc_list)) self.use_bn = self.cfg.MODEL.FM_ROI_FEAT_BN if self.use_bn: self.bn_list = torch.nn.ModuleList([torch.nn.BatchNorm2d(nc) for nc in self.nc_list])<|docstring|>extractor regional features from the shared conv layers<|endoftext|>
1d027c3b846ccd95d840e34b71c472972bf6930fdd32f31827fbf21ff81707b5
def get_random_user_agent(): '\n Get a random user agent string.\n\n :rtype: str\n :return: Random user agent string.\n ' return random.choice(user_agents_list)
Get a random user agent string. :rtype: str :return: Random user agent string.
googlesearch/sometest.py
get_random_user_agent
Octoberr/swm0920
2
python
def get_random_user_agent(): '\n Get a random user agent string.\n\n :rtype: str\n :return: Random user agent string.\n ' return random.choice(user_agents_list)
def get_random_user_agent(): '\n Get a random user agent string.\n\n :rtype: str\n :return: Random user agent string.\n ' return random.choice(user_agents_list)<|docstring|>Get a random user agent string. :rtype: str :return: Random user agent string.<|endoftext|>
efd4c2ed2fb78f80f50e7791e028cf71948c3483dd38576a61fd1c91921b8bdf
@staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' TranslationTask.add_args(parser) parser.add_argument('--noise', default='random_delete', choices=['random_delete', 'random_mask', 'no_noise', 'full_mask'])
Add task-specific arguments to the parser.
fairseq/tasks/translation_lev.py
add_args
wangqi1996/cmlm
0
python
@staticmethod def add_args(parser): TranslationTask.add_args(parser) parser.add_argument('--noise', default='random_delete', choices=['random_delete', 'random_mask', 'no_noise', 'full_mask'])
@staticmethod def add_args(parser): TranslationTask.add_args(parser) parser.add_argument('--noise', default='random_delete', choices=['random_delete', 'random_mask', 'no_noise', 'full_mask'])<|docstring|>Add task-specific arguments to the parser.<|endoftext|>
15b61e04609fecf0dbf09f342efe6d11e3795ec6fc9eb8d58c979b22dba449a3
def load_dataset(self, split, epoch=1, combine=False, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' paths = utils.split_paths(self.args.data) assert (len(paths) > 0) data_path = paths[((epoch - 1) % len(paths))] (src, tgt) = (self.args.source_lang, self.args.target_lang) self.datasets[split] = load_langpair_dataset(data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, prepend_bos=True)
Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test)
fairseq/tasks/translation_lev.py
load_dataset
wangqi1996/cmlm
0
python
def load_dataset(self, split, epoch=1, combine=False, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' paths = utils.split_paths(self.args.data) assert (len(paths) > 0) data_path = paths[((epoch - 1) % len(paths))] (src, tgt) = (self.args.source_lang, self.args.target_lang) self.datasets[split] = load_langpair_dataset(data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, prepend_bos=True)
def load_dataset(self, split, epoch=1, combine=False, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' paths = utils.split_paths(self.args.data) assert (len(paths) > 0) data_path = paths[((epoch - 1) % len(paths))] (src, tgt) = (self.args.source_lang, self.args.target_lang) self.datasets[split] = load_langpair_dataset(data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, prepend_bos=True)<|docstring|>Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test)<|endoftext|>
31d1873574d35758070d4fcad1c7dbf1733f05cc6e4e0df93e3093ad9f004c99
def fit(time_event, censoring, labx, verbose=3): "\n\n Parameters\n ----------\n time_event: [FLOAT]: Numpy array with survival-time in years/months/days (not a datetime!)\n\n censoring: [INT]: numpy array with censoring (1=event, 0=ongoing)\n At the time you want to make inferences about durations, it is possible, likely true, that not all the death events have occured yet. \n In case of patients, you would like to put 1=death as an event.\n \n labx: [INT] or [STRING]: numpy array with class labels. Each class label is seperately plotted\n\n verbose: [INT] Print messages to screen.\n 0: NONE\n 1: ERROR\n 2: WARNING\n 3: INFO (default)\n 4: DEBUG\n 5: TRACE\n\n Returns\n -------\n Dictionary containing logrank pvalue and keys required to make plots.\n\n\n Example\n ----------\n import kaplanmeier as km\n df= km.example_data()\n out=km.fit(df['time'], df['Died'], df['group'])\n km.plot(out)\n\n km.plot(out, cmap='Set1', cii_lines=True, cii_alpha=0.05)\n km.plot(out, cmap=[(1, 0, 0),(0, 0, 1)])\n km.plot(out, cmap='Set1', methodtype='custom')\n\n out['logrank_P']\n out['logrank_Z']\n\n " if ('pandas' in str(type(time_event))): if (verbose <= 2): print('[KM] Input data <time_event> must be of type numpy array or list. Converting now..') time_event = time_event.astype(float).values if ('pandas' in str(type(censoring))): if (verbose <= 2): print('[KM] Input data <censoring> must be of type numpy array or list. 
Converting now..') censoring = censoring.astype(int).values out_lr = {} p_value = np.nan test_statistic = np.nan uilabx = np.unique(labx) if (len(uilabx) == 2): alpha = 0.05 class1 = (labx == uilabx[0]) class2 = (labx == uilabx[1]) out_lr = logrank_test(time_event[class1], time_event[class2], censoring[class1], censoring[class2], alpha=(1 - alpha)) p_value = out_lr.p_value test_statistic = out_lr.test_statistic out = dict() out['logrank_P'] = p_value out['logrank_Z'] = test_statistic out['logrank'] = out_lr out['labx'] = labx out['uilabx'] = uilabx out['time_event'] = time_event out['censoring'] = censoring return out
Parameters ---------- time_event: [FLOAT]: Numpy array with survival-time in years/months/days (not a datetime!) censoring: [INT]: numpy array with censoring (1=event, 0=ongoing) At the time you want to make inferences about durations, it is possible, likely true, that not all the death events have occured yet. In case of patients, you would like to put 1=death as an event. labx: [INT] or [STRING]: numpy array with class labels. Each class label is seperately plotted verbose: [INT] Print messages to screen. 0: NONE 1: ERROR 2: WARNING 3: INFO (default) 4: DEBUG 5: TRACE Returns ------- Dictionary containing logrank pvalue and keys required to make plots. Example ---------- import kaplanmeier as km df= km.example_data() out=km.fit(df['time'], df['Died'], df['group']) km.plot(out) km.plot(out, cmap='Set1', cii_lines=True, cii_alpha=0.05) km.plot(out, cmap=[(1, 0, 0),(0, 0, 1)]) km.plot(out, cmap='Set1', methodtype='custom') out['logrank_P'] out['logrank_Z']
kaplanmeier/kaplanmeier.py
fit
bskienhvqy/kaplanmeier
0
python
def fit(time_event, censoring, labx, verbose=3): "\n\n Parameters\n ----------\n time_event: [FLOAT]: Numpy array with survival-time in years/months/days (not a datetime!)\n\n censoring: [INT]: numpy array with censoring (1=event, 0=ongoing)\n At the time you want to make inferences about durations, it is possible, likely true, that not all the death events have occured yet. \n In case of patients, you would like to put 1=death as an event.\n \n labx: [INT] or [STRING]: numpy array with class labels. Each class label is seperately plotted\n\n verbose: [INT] Print messages to screen.\n 0: NONE\n 1: ERROR\n 2: WARNING\n 3: INFO (default)\n 4: DEBUG\n 5: TRACE\n\n Returns\n -------\n Dictionary containing logrank pvalue and keys required to make plots.\n\n\n Example\n ----------\n import kaplanmeier as km\n df= km.example_data()\n out=km.fit(df['time'], df['Died'], df['group'])\n km.plot(out)\n\n km.plot(out, cmap='Set1', cii_lines=True, cii_alpha=0.05)\n km.plot(out, cmap=[(1, 0, 0),(0, 0, 1)])\n km.plot(out, cmap='Set1', methodtype='custom')\n\n out['logrank_P']\n out['logrank_Z']\n\n " if ('pandas' in str(type(time_event))): if (verbose <= 2): print('[KM] Input data <time_event> must be of type numpy array or list. Converting now..') time_event = time_event.astype(float).values if ('pandas' in str(type(censoring))): if (verbose <= 2): print('[KM] Input data <censoring> must be of type numpy array or list. 
Converting now..') censoring = censoring.astype(int).values out_lr = {} p_value = np.nan test_statistic = np.nan uilabx = np.unique(labx) if (len(uilabx) == 2): alpha = 0.05 class1 = (labx == uilabx[0]) class2 = (labx == uilabx[1]) out_lr = logrank_test(time_event[class1], time_event[class2], censoring[class1], censoring[class2], alpha=(1 - alpha)) p_value = out_lr.p_value test_statistic = out_lr.test_statistic out = dict() out['logrank_P'] = p_value out['logrank_Z'] = test_statistic out['logrank'] = out_lr out['labx'] = labx out['uilabx'] = uilabx out['time_event'] = time_event out['censoring'] = censoring return out
def fit(time_event, censoring, labx, verbose=3): "\n\n Parameters\n ----------\n time_event: [FLOAT]: Numpy array with survival-time in years/months/days (not a datetime!)\n\n censoring: [INT]: numpy array with censoring (1=event, 0=ongoing)\n At the time you want to make inferences about durations, it is possible, likely true, that not all the death events have occured yet. \n In case of patients, you would like to put 1=death as an event.\n \n labx: [INT] or [STRING]: numpy array with class labels. Each class label is seperately plotted\n\n verbose: [INT] Print messages to screen.\n 0: NONE\n 1: ERROR\n 2: WARNING\n 3: INFO (default)\n 4: DEBUG\n 5: TRACE\n\n Returns\n -------\n Dictionary containing logrank pvalue and keys required to make plots.\n\n\n Example\n ----------\n import kaplanmeier as km\n df= km.example_data()\n out=km.fit(df['time'], df['Died'], df['group'])\n km.plot(out)\n\n km.plot(out, cmap='Set1', cii_lines=True, cii_alpha=0.05)\n km.plot(out, cmap=[(1, 0, 0),(0, 0, 1)])\n km.plot(out, cmap='Set1', methodtype='custom')\n\n out['logrank_P']\n out['logrank_Z']\n\n " if ('pandas' in str(type(time_event))): if (verbose <= 2): print('[KM] Input data <time_event> must be of type numpy array or list. Converting now..') time_event = time_event.astype(float).values if ('pandas' in str(type(censoring))): if (verbose <= 2): print('[KM] Input data <censoring> must be of type numpy array or list. 
Converting now..') censoring = censoring.astype(int).values out_lr = {} p_value = np.nan test_statistic = np.nan uilabx = np.unique(labx) if (len(uilabx) == 2): alpha = 0.05 class1 = (labx == uilabx[0]) class2 = (labx == uilabx[1]) out_lr = logrank_test(time_event[class1], time_event[class2], censoring[class1], censoring[class2], alpha=(1 - alpha)) p_value = out_lr.p_value test_statistic = out_lr.test_statistic out = dict() out['logrank_P'] = p_value out['logrank_Z'] = test_statistic out['logrank'] = out_lr out['labx'] = labx out['uilabx'] = uilabx out['time_event'] = time_event out['censoring'] = censoring return out<|docstring|>Parameters ---------- time_event: [FLOAT]: Numpy array with survival-time in years/months/days (not a datetime!) censoring: [INT]: numpy array with censoring (1=event, 0=ongoing) At the time you want to make inferences about durations, it is possible, likely true, that not all the death events have occured yet. In case of patients, you would like to put 1=death as an event. labx: [INT] or [STRING]: numpy array with class labels. Each class label is seperately plotted verbose: [INT] Print messages to screen. 0: NONE 1: ERROR 2: WARNING 3: INFO (default) 4: DEBUG 5: TRACE Returns ------- Dictionary containing logrank pvalue and keys required to make plots. Example ---------- import kaplanmeier as km df= km.example_data() out=km.fit(df['time'], df['Died'], df['group']) km.plot(out) km.plot(out, cmap='Set1', cii_lines=True, cii_alpha=0.05) km.plot(out, cmap=[(1, 0, 0),(0, 0, 1)]) km.plot(out, cmap='Set1', methodtype='custom') out['logrank_P'] out['logrank_Z']<|endoftext|>
5a0599f34e9331a9d63bfee193ccf5de478b2d46cd8f37f637308505772e68e0
def plot(out, fontsize=12, savepath='', width=10, height=6, cmap='Set1', cii_alpha=0.05, cii_lines='dense', methodtype='lifeline', title='Survival function', full_ylim=False, y_percentage=False): "\n \n\n Parameters\n ----------\n out : [dict] Dictionary derived from the fit function.\n\n fontsize : [INT], Font size for the graph\n default is 12.\n \n savepath: [STRING], Path to store the figure\n\n width: [INT], Width of the figure\n 10 (default)\n \n height: [INT], Width of the figure\n 6 (default)\n\n cmap: [STRING], Specify your own colors for each class-label or use a colormap: https://matplotlib.org/examples/color/colormaps_reference.html\n [(1, 0, 0),(0, 0, 1),(..)]\n 'Set1' (default) \n 'Set2' Discrete colors\n 'Pastel1' Discrete colors\n 'Paired' Discrete colors\n 'rainbow'\n 'bwr' Blue-white-red\n 'binary' or 'binary_r'\n 'seismic' Blue-white-red \n 'Blues' white-to-blue\n 'Reds' white-to-red\n\n cii_alpha: [FLOAT], Confidence interval (works only when methodtype='lifelines')\n 0.05 (default)\n \n cii_lines: [STRING], Confidence lines (works only when methodtype='lifelines')\n 'lifelines' (default)\n 'custom'\n\n methodtype: [STRING], Implementation type\n 'dense' (dense/filled lines)\n 'line' \n None (no lines)\n\n Returns\n -------\n None.\n\n " KMcoord = {} Param = {} Param['width'] = width Param['height'] = height Param['fontsize'] = fontsize Param['savepath'] = savepath labx = out['labx'] data = np.vstack((out['time_event'], out['censoring'])).T [class_colors, classlabel] = make_class_color_names(data, out['labx'], out['uilabx'], cmap=cmap) if (methodtype == 'lifeline'): kmf_all = [] fig = plt.figure(figsize=(Param['width'], Param['height'])) ax = fig.add_subplot(111) if full_ylim: ax.set_ylim([0.0, 1.05]) if y_percentage: ax.yaxis.set_major_formatter(PercentFormatter(1.0)) if (out['logrank'] != []): plt.title(('%s, Logrank Test P-Value = %.5f' % (title, out['logrank_P']))) if (cii_lines == 'dense'): cii_lines = False if (cii_lines == 'line'): 
cii_lines = True if ((cii_lines == '') or (cii_lines == None) or (cii_alpha == None)): cii_lines = False cii_alpha = 0 for i in range(0, len(out['uilabx'])): kmf = KaplanMeierFitter() idx = np.where((labx == out['uilabx'][i]))[0] kmf.fit(out['time_event'][idx], event_observed=out['censoring'][idx], label=classlabel[i], ci_labels=None, alpha=(1 - cii_alpha)) kmf.plot(ax=ax, ci_force_lines=cii_lines, color=class_colors[i], show_censors=True) kmf_all.append(kmf.fit(out['time_event'][idx], event_observed=out['censoring'][idx], label=classlabel[i], ci_labels=None, alpha=(1 - cii_alpha))) if (len(kmf_all) == 1): add_at_risk_counts(kmf_all[0], ax=ax) elif (len(kmf_all) == 2): add_at_risk_counts(kmf_all[0], kmf_all[1], ax=ax) elif (len(kmf_all) == 3): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], ax=ax) elif (len(kmf_all) == 4): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], ax=ax) elif (len(kmf_all) == 5): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], ax=ax) elif (len(kmf_all) == 6): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], ax=ax) elif (len(kmf_all) == 7): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], kmf_all[6], ax=ax) elif (len(kmf_all) == 8): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], kmf_all[6], kmf_all[7], ax=ax) elif (len(kmf_all) == 9): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], kmf_all[6], kmf_all[7], kmf_all[8], ax=ax) elif (len(kmf_all) == 10): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], kmf_all[6], kmf_all[7], kmf_all[8], kmf_all[9], ax=ax) else: print('[KM] Maximum of 10 classes is reached.') ax.tick_params(axis='x', length=15, width=1, direction='out', labelsize=Param['fontsize']) ax.tick_params(axis='y', length=15, width=1, direction='out', 
labelsize=Param['fontsize']) ax.spines['bottom'].set_position(['outward', Param['fontsize']]) ax.spines['left'].set_position(['outward', Param['fontsize']]) if (Param['savepath'] != ''): savefig(fig, Param['savepath']) if (methodtype == 'custom'): for i in range(0, len(out['uilabx'])): idx = np.where((labx == out['uilabx'][i]))[0] tmpdata = data[(idx, :)].tolist() KMcoord[i] = compute_coord(tmpdata) plotkm(KMcoord, classlabel, cmap=class_colors, width=Param['width'], height=Param['height'], fontsize=Param['fontsize'])
Parameters ---------- out : [dict] Dictionary derived from the fit function. fontsize : [INT], Font size for the graph default is 12. savepath: [STRING], Path to store the figure width: [INT], Width of the figure 10 (default) height: [INT], Width of the figure 6 (default) cmap: [STRING], Specify your own colors for each class-label or use a colormap: https://matplotlib.org/examples/color/colormaps_reference.html [(1, 0, 0),(0, 0, 1),(..)] 'Set1' (default) 'Set2' Discrete colors 'Pastel1' Discrete colors 'Paired' Discrete colors 'rainbow' 'bwr' Blue-white-red 'binary' or 'binary_r' 'seismic' Blue-white-red 'Blues' white-to-blue 'Reds' white-to-red cii_alpha: [FLOAT], Confidence interval (works only when methodtype='lifelines') 0.05 (default) cii_lines: [STRING], Confidence lines (works only when methodtype='lifelines') 'lifelines' (default) 'custom' methodtype: [STRING], Implementation type 'dense' (dense/filled lines) 'line' None (no lines) Returns ------- None.
kaplanmeier/kaplanmeier.py
plot
bskienhvqy/kaplanmeier
0
python
def plot(out, fontsize=12, savepath=, width=10, height=6, cmap='Set1', cii_alpha=0.05, cii_lines='dense', methodtype='lifeline', title='Survival function', full_ylim=False, y_percentage=False): "\n \n\n Parameters\n ----------\n out : [dict] Dictionary derived from the fit function.\n\n fontsize : [INT], Font size for the graph\n default is 12.\n \n savepath: [STRING], Path to store the figure\n\n width: [INT], Width of the figure\n 10 (default)\n \n height: [INT], Width of the figure\n 6 (default)\n\n cmap: [STRING], Specify your own colors for each class-label or use a colormap: https://matplotlib.org/examples/color/colormaps_reference.html\n [(1, 0, 0),(0, 0, 1),(..)]\n 'Set1' (default) \n 'Set2' Discrete colors\n 'Pastel1' Discrete colors\n 'Paired' Discrete colors\n 'rainbow'\n 'bwr' Blue-white-red\n 'binary' or 'binary_r'\n 'seismic' Blue-white-red \n 'Blues' white-to-blue\n 'Reds' white-to-red\n\n cii_alpha: [FLOAT], Confidence interval (works only when methodtype='lifelines')\n 0.05 (default)\n \n cii_lines: [STRING], Confidence lines (works only when methodtype='lifelines')\n 'lifelines' (default)\n 'custom'\n\n methodtype: [STRING], Implementation type\n 'dense' (dense/filled lines)\n 'line' \n None (no lines)\n\n Returns\n -------\n None.\n\n " KMcoord = {} Param = {} Param['width'] = width Param['height'] = height Param['fontsize'] = fontsize Param['savepath'] = savepath labx = out['labx'] data = np.vstack((out['time_event'], out['censoring'])).T [class_colors, classlabel] = make_class_color_names(data, out['labx'], out['uilabx'], cmap=cmap) if (methodtype == 'lifeline'): kmf_all = [] fig = plt.figure(figsize=(Param['width'], Param['height'])) ax = fig.add_subplot(111) if full_ylim: ax.set_ylim([0.0, 1.05]) if y_percentage: ax.yaxis.set_major_formatter(PercentFormatter(1.0)) if (out['logrank'] != []): plt.title(('%s, Logrank Test P-Value = %.5f' % (title, out['logrank_P']))) if (cii_lines == 'dense'): cii_lines = False if (cii_lines == 'line'): 
cii_lines = True if ((cii_lines == ) or (cii_lines == None) or (cii_alpha == None)): cii_lines = False cii_alpha = 0 for i in range(0, len(out['uilabx'])): kmf = KaplanMeierFitter() idx = np.where((labx == out['uilabx'][i]))[0] kmf.fit(out['time_event'][idx], event_observed=out['censoring'][idx], label=classlabel[i], ci_labels=None, alpha=(1 - cii_alpha)) kmf.plot(ax=ax, ci_force_lines=cii_lines, color=class_colors[i], show_censors=True) kmf_all.append(kmf.fit(out['time_event'][idx], event_observed=out['censoring'][idx], label=classlabel[i], ci_labels=None, alpha=(1 - cii_alpha))) if (len(kmf_all) == 1): add_at_risk_counts(kmf_all[0], ax=ax) elif (len(kmf_all) == 2): add_at_risk_counts(kmf_all[0], kmf_all[1], ax=ax) elif (len(kmf_all) == 3): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], ax=ax) elif (len(kmf_all) == 4): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], ax=ax) elif (len(kmf_all) == 5): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], ax=ax) elif (len(kmf_all) == 6): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], ax=ax) elif (len(kmf_all) == 7): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], kmf_all[6], ax=ax) elif (len(kmf_all) == 8): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], kmf_all[6], kmf_all[7], ax=ax) elif (len(kmf_all) == 9): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], kmf_all[6], kmf_all[7], kmf_all[8], ax=ax) elif (len(kmf_all) == 10): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], kmf_all[6], kmf_all[7], kmf_all[8], kmf_all[9], ax=ax) else: print('[KM] Maximum of 10 classes is reached.') ax.tick_params(axis='x', length=15, width=1, direction='out', labelsize=Param['fontsize']) ax.tick_params(axis='y', length=15, width=1, direction='out', 
labelsize=Param['fontsize']) ax.spines['bottom'].set_position(['outward', Param['fontsize']]) ax.spines['left'].set_position(['outward', Param['fontsize']]) if (Param['savepath'] != ): savefig(fig, Param['savepath']) if (methodtype == 'custom'): for i in range(0, len(out['uilabx'])): idx = np.where((labx == out['uilabx'][i]))[0] tmpdata = data[(idx, :)].tolist() KMcoord[i] = compute_coord(tmpdata) plotkm(KMcoord, classlabel, cmap=class_colors, width=Param['width'], height=Param['height'], fontsize=Param['fontsize'])
def plot(out, fontsize=12, savepath=, width=10, height=6, cmap='Set1', cii_alpha=0.05, cii_lines='dense', methodtype='lifeline', title='Survival function', full_ylim=False, y_percentage=False): "\n \n\n Parameters\n ----------\n out : [dict] Dictionary derived from the fit function.\n\n fontsize : [INT], Font size for the graph\n default is 12.\n \n savepath: [STRING], Path to store the figure\n\n width: [INT], Width of the figure\n 10 (default)\n \n height: [INT], Width of the figure\n 6 (default)\n\n cmap: [STRING], Specify your own colors for each class-label or use a colormap: https://matplotlib.org/examples/color/colormaps_reference.html\n [(1, 0, 0),(0, 0, 1),(..)]\n 'Set1' (default) \n 'Set2' Discrete colors\n 'Pastel1' Discrete colors\n 'Paired' Discrete colors\n 'rainbow'\n 'bwr' Blue-white-red\n 'binary' or 'binary_r'\n 'seismic' Blue-white-red \n 'Blues' white-to-blue\n 'Reds' white-to-red\n\n cii_alpha: [FLOAT], Confidence interval (works only when methodtype='lifelines')\n 0.05 (default)\n \n cii_lines: [STRING], Confidence lines (works only when methodtype='lifelines')\n 'lifelines' (default)\n 'custom'\n\n methodtype: [STRING], Implementation type\n 'dense' (dense/filled lines)\n 'line' \n None (no lines)\n\n Returns\n -------\n None.\n\n " KMcoord = {} Param = {} Param['width'] = width Param['height'] = height Param['fontsize'] = fontsize Param['savepath'] = savepath labx = out['labx'] data = np.vstack((out['time_event'], out['censoring'])).T [class_colors, classlabel] = make_class_color_names(data, out['labx'], out['uilabx'], cmap=cmap) if (methodtype == 'lifeline'): kmf_all = [] fig = plt.figure(figsize=(Param['width'], Param['height'])) ax = fig.add_subplot(111) if full_ylim: ax.set_ylim([0.0, 1.05]) if y_percentage: ax.yaxis.set_major_formatter(PercentFormatter(1.0)) if (out['logrank'] != []): plt.title(('%s, Logrank Test P-Value = %.5f' % (title, out['logrank_P']))) if (cii_lines == 'dense'): cii_lines = False if (cii_lines == 'line'): 
cii_lines = True if ((cii_lines == ) or (cii_lines == None) or (cii_alpha == None)): cii_lines = False cii_alpha = 0 for i in range(0, len(out['uilabx'])): kmf = KaplanMeierFitter() idx = np.where((labx == out['uilabx'][i]))[0] kmf.fit(out['time_event'][idx], event_observed=out['censoring'][idx], label=classlabel[i], ci_labels=None, alpha=(1 - cii_alpha)) kmf.plot(ax=ax, ci_force_lines=cii_lines, color=class_colors[i], show_censors=True) kmf_all.append(kmf.fit(out['time_event'][idx], event_observed=out['censoring'][idx], label=classlabel[i], ci_labels=None, alpha=(1 - cii_alpha))) if (len(kmf_all) == 1): add_at_risk_counts(kmf_all[0], ax=ax) elif (len(kmf_all) == 2): add_at_risk_counts(kmf_all[0], kmf_all[1], ax=ax) elif (len(kmf_all) == 3): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], ax=ax) elif (len(kmf_all) == 4): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], ax=ax) elif (len(kmf_all) == 5): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], ax=ax) elif (len(kmf_all) == 6): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], ax=ax) elif (len(kmf_all) == 7): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], kmf_all[6], ax=ax) elif (len(kmf_all) == 8): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], kmf_all[6], kmf_all[7], ax=ax) elif (len(kmf_all) == 9): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], kmf_all[6], kmf_all[7], kmf_all[8], ax=ax) elif (len(kmf_all) == 10): add_at_risk_counts(kmf_all[0], kmf_all[1], kmf_all[2], kmf_all[3], kmf_all[4], kmf_all[5], kmf_all[6], kmf_all[7], kmf_all[8], kmf_all[9], ax=ax) else: print('[KM] Maximum of 10 classes is reached.') ax.tick_params(axis='x', length=15, width=1, direction='out', labelsize=Param['fontsize']) ax.tick_params(axis='y', length=15, width=1, direction='out', 
labelsize=Param['fontsize']) ax.spines['bottom'].set_position(['outward', Param['fontsize']]) ax.spines['left'].set_position(['outward', Param['fontsize']]) if (Param['savepath'] != ): savefig(fig, Param['savepath']) if (methodtype == 'custom'): for i in range(0, len(out['uilabx'])): idx = np.where((labx == out['uilabx'][i]))[0] tmpdata = data[(idx, :)].tolist() KMcoord[i] = compute_coord(tmpdata) plotkm(KMcoord, classlabel, cmap=class_colors, width=Param['width'], height=Param['height'], fontsize=Param['fontsize'])<|docstring|>Parameters ---------- out : [dict] Dictionary derived from the fit function. fontsize : [INT], Font size for the graph default is 12. savepath: [STRING], Path to store the figure width: [INT], Width of the figure 10 (default) height: [INT], Width of the figure 6 (default) cmap: [STRING], Specify your own colors for each class-label or use a colormap: https://matplotlib.org/examples/color/colormaps_reference.html [(1, 0, 0),(0, 0, 1),(..)] 'Set1' (default) 'Set2' Discrete colors 'Pastel1' Discrete colors 'Paired' Discrete colors 'rainbow' 'bwr' Blue-white-red 'binary' or 'binary_r' 'seismic' Blue-white-red 'Blues' white-to-blue 'Reds' white-to-red cii_alpha: [FLOAT], Confidence interval (works only when methodtype='lifelines') 0.05 (default) cii_lines: [STRING], Confidence lines (works only when methodtype='lifelines') 'lifelines' (default) 'custom' methodtype: [STRING], Implementation type 'dense' (dense/filled lines) 'line' None (no lines) Returns ------- None.<|endoftext|>
260e662e114f7d74ffff36688b586708f19de9e35002c8c7ce523f03b889ccf9
def test_pac_callstack_limit(self): '\n Try to load a PAC file that hits the Duktape call stack limit.\n ' pac_js = 'function FindProxyForURL(url, host) {function b() {a();} function a() {b();}; a(); return "DIRECT";}' with pytest.raises(MalformedPacError) as e: PACFile(pac_js) assert ('callstack limit' in str(e.value))
Try to load a PAC file that hits the Duktape call stack limit.
tests/test_parser.py
test_pac_callstack_limit
zanachka/pypac
47
python
def test_pac_callstack_limit(self): '\n \n ' pac_js = 'function FindProxyForURL(url, host) {function b() {a();} function a() {b();}; a(); return "DIRECT";}' with pytest.raises(MalformedPacError) as e: PACFile(pac_js) assert ('callstack limit' in str(e.value))
def test_pac_callstack_limit(self): '\n \n ' pac_js = 'function FindProxyForURL(url, host) {function b() {a();} function a() {b();}; a(); return "DIRECT";}' with pytest.raises(MalformedPacError) as e: PACFile(pac_js) assert ('callstack limit' in str(e.value))<|docstring|>Try to load a PAC file that hits the Duktape call stack limit.<|endoftext|>
ca55fb85c1750dd5d46dcd860543218be6698f681004f7c112c682235e27b692
def test_dnsResolve_propagation(self): '\n dnsResolve must return an empty string now we use dukpy, otherwise\n None value causes dukpy error as it propagates\n ' parser = PACFile((dummy_js % 'isInNet(dnsResolve(host), "10.1.1.0", "255.255.255.0")')) assert (parser.find_proxy_for_url('$%$', '$%$') == 'PROXY 0.0.0.0:80')
dnsResolve must return an empty string now we use dukpy, otherwise None value causes dukpy error as it propagates
tests/test_parser.py
test_dnsResolve_propagation
zanachka/pypac
47
python
def test_dnsResolve_propagation(self): '\n dnsResolve must return an empty string now we use dukpy, otherwise\n None value causes dukpy error as it propagates\n ' parser = PACFile((dummy_js % 'isInNet(dnsResolve(host), "10.1.1.0", "255.255.255.0")')) assert (parser.find_proxy_for_url('$%$', '$%$') == 'PROXY 0.0.0.0:80')
def test_dnsResolve_propagation(self): '\n dnsResolve must return an empty string now we use dukpy, otherwise\n None value causes dukpy error as it propagates\n ' parser = PACFile((dummy_js % 'isInNet(dnsResolve(host), "10.1.1.0", "255.255.255.0")')) assert (parser.find_proxy_for_url('$%$', '$%$') == 'PROXY 0.0.0.0:80')<|docstring|>dnsResolve must return an empty string now we use dukpy, otherwise None value causes dukpy error as it propagates<|endoftext|>
d955afa5b1d345dc41899c99260d66d5fd1d963c2ec3a7efc727500a74c8146d
def is_initial_tag(self, current_tag_version: str, is_yes: bool=False) -> bool: 'Check if reading the whole git tree up to HEAD is needed.' is_initial = False if (not git.tag_exist(current_tag_version)): if is_yes: is_initial = True else: out.info(f'Tag {current_tag_version} could not be found. ') out.info("Possible causes:\n- version in configuration is not the current version\n- tag_format is missing, check them using 'git tag --list'\n") is_initial = questionary.confirm('Is this the first tag created?').ask() return is_initial
Check if reading the whole git tree up to HEAD is needed.
commitizen/commands/bump.py
is_initial_tag
dariemp/commitizen
0
python
def is_initial_tag(self, current_tag_version: str, is_yes: bool=False) -> bool: is_initial = False if (not git.tag_exist(current_tag_version)): if is_yes: is_initial = True else: out.info(f'Tag {current_tag_version} could not be found. ') out.info("Possible causes:\n- version in configuration is not the current version\n- tag_format is missing, check them using 'git tag --list'\n") is_initial = questionary.confirm('Is this the first tag created?').ask() return is_initial
def is_initial_tag(self, current_tag_version: str, is_yes: bool=False) -> bool: is_initial = False if (not git.tag_exist(current_tag_version)): if is_yes: is_initial = True else: out.info(f'Tag {current_tag_version} could not be found. ') out.info("Possible causes:\n- version in configuration is not the current version\n- tag_format is missing, check them using 'git tag --list'\n") is_initial = questionary.confirm('Is this the first tag created?').ask() return is_initial<|docstring|>Check if reading the whole git tree up to HEAD is needed.<|endoftext|>
88ed0bec0dc4221d2748c3a2803b3753e948505d0ef1b9c939d4011f897c8b60
def __call__(self): 'Steps executed to bump.' try: current_version_instance: Version = Version(self.parameters['version']) except TypeError: out.error('[NO_VERSION_SPECIFIED]') out.error('Check if current version is specified in config file, like:') out.error('version = 0.4.3') raise SystemExit(NO_VERSION_SPECIFIED) current_version: str = self.config['version'] tag_format: str = self.parameters['tag_format'] bump_commit_message: str = self.parameters['bump_message'] current_tag_version: str = bump.create_tag(current_version, tag_format=tag_format) files: list = self.parameters['files'] dry_run: bool = self.parameters['dry_run'] is_yes: bool = self.arguments['yes'] prerelease: str = self.arguments['prerelease'] increment: Optional[str] = self.arguments['increment'] is_files_only: Optional[bool] = self.arguments['files_only'] is_initial = self.is_initial_tag(current_tag_version, is_yes) commits = git.get_commits(current_tag_version, from_beginning=is_initial) if ((not commits) and (not current_version_instance.is_prerelease)): out.error('[NO_COMMITS_FOUND]') out.error('No new commits found.') raise SystemExit(NO_COMMITS_FOUND) if (increment is None): increment = self.find_increment(commits) if (prerelease and current_version_instance.is_prerelease): increment = None new_version = bump.generate_version(current_version, increment, prerelease=prerelease) new_tag_version = bump.create_tag(new_version, tag_format=tag_format) message = bump.create_commit_message(current_version, new_version, bump_commit_message) out.write(message) out.write(f'tag to create: {new_tag_version}') out.write(f'increment detected: {increment}') if dry_run: raise SystemExit() bump.update_version_in_files(current_version, new_version.public, files) if is_files_only: raise SystemExit() config.set_key('version', new_version.public) c = git.commit(message, args='-a') if c.err: out.error('git.commit errror: "{}"'.format(c.err.strip())) raise SystemExit(COMMIT_FAILED) c = git.tag(new_tag_version) if 
c.err: out.error(c.err) raise SystemExit(TAG_FAILED) out.success('Done!')
Steps executed to bump.
commitizen/commands/bump.py
__call__
dariemp/commitizen
0
python
def __call__(self): try: current_version_instance: Version = Version(self.parameters['version']) except TypeError: out.error('[NO_VERSION_SPECIFIED]') out.error('Check if current version is specified in config file, like:') out.error('version = 0.4.3') raise SystemExit(NO_VERSION_SPECIFIED) current_version: str = self.config['version'] tag_format: str = self.parameters['tag_format'] bump_commit_message: str = self.parameters['bump_message'] current_tag_version: str = bump.create_tag(current_version, tag_format=tag_format) files: list = self.parameters['files'] dry_run: bool = self.parameters['dry_run'] is_yes: bool = self.arguments['yes'] prerelease: str = self.arguments['prerelease'] increment: Optional[str] = self.arguments['increment'] is_files_only: Optional[bool] = self.arguments['files_only'] is_initial = self.is_initial_tag(current_tag_version, is_yes) commits = git.get_commits(current_tag_version, from_beginning=is_initial) if ((not commits) and (not current_version_instance.is_prerelease)): out.error('[NO_COMMITS_FOUND]') out.error('No new commits found.') raise SystemExit(NO_COMMITS_FOUND) if (increment is None): increment = self.find_increment(commits) if (prerelease and current_version_instance.is_prerelease): increment = None new_version = bump.generate_version(current_version, increment, prerelease=prerelease) new_tag_version = bump.create_tag(new_version, tag_format=tag_format) message = bump.create_commit_message(current_version, new_version, bump_commit_message) out.write(message) out.write(f'tag to create: {new_tag_version}') out.write(f'increment detected: {increment}') if dry_run: raise SystemExit() bump.update_version_in_files(current_version, new_version.public, files) if is_files_only: raise SystemExit() config.set_key('version', new_version.public) c = git.commit(message, args='-a') if c.err: out.error('git.commit errror: "{}"'.format(c.err.strip())) raise SystemExit(COMMIT_FAILED) c = git.tag(new_tag_version) if c.err: out.error(c.err) 
raise SystemExit(TAG_FAILED) out.success('Done!')
def __call__(self): try: current_version_instance: Version = Version(self.parameters['version']) except TypeError: out.error('[NO_VERSION_SPECIFIED]') out.error('Check if current version is specified in config file, like:') out.error('version = 0.4.3') raise SystemExit(NO_VERSION_SPECIFIED) current_version: str = self.config['version'] tag_format: str = self.parameters['tag_format'] bump_commit_message: str = self.parameters['bump_message'] current_tag_version: str = bump.create_tag(current_version, tag_format=tag_format) files: list = self.parameters['files'] dry_run: bool = self.parameters['dry_run'] is_yes: bool = self.arguments['yes'] prerelease: str = self.arguments['prerelease'] increment: Optional[str] = self.arguments['increment'] is_files_only: Optional[bool] = self.arguments['files_only'] is_initial = self.is_initial_tag(current_tag_version, is_yes) commits = git.get_commits(current_tag_version, from_beginning=is_initial) if ((not commits) and (not current_version_instance.is_prerelease)): out.error('[NO_COMMITS_FOUND]') out.error('No new commits found.') raise SystemExit(NO_COMMITS_FOUND) if (increment is None): increment = self.find_increment(commits) if (prerelease and current_version_instance.is_prerelease): increment = None new_version = bump.generate_version(current_version, increment, prerelease=prerelease) new_tag_version = bump.create_tag(new_version, tag_format=tag_format) message = bump.create_commit_message(current_version, new_version, bump_commit_message) out.write(message) out.write(f'tag to create: {new_tag_version}') out.write(f'increment detected: {increment}') if dry_run: raise SystemExit() bump.update_version_in_files(current_version, new_version.public, files) if is_files_only: raise SystemExit() config.set_key('version', new_version.public) c = git.commit(message, args='-a') if c.err: out.error('git.commit errror: "{}"'.format(c.err.strip())) raise SystemExit(COMMIT_FAILED) c = git.tag(new_tag_version) if c.err: out.error(c.err) 
raise SystemExit(TAG_FAILED) out.success('Done!')<|docstring|>Steps executed to bump.<|endoftext|>
8ad00375b7250ab282e7c1cbb308ad6f7a057c745d09fa48f0d5a5d21435cefb
def __init__(self, manager: PathToolManagerT): '\n Args:\n manager: Description\n ' super(AbstractPathTool, self).__init__(None) self.manager: PathToolManagerT = manager self._window: DocT = manager.window self._active: bool = False self._last_location = None self._rect: QRectF = _RECT self._pen: QPen = _PEN self.hide()
Args: manager: Description
cadnano/views/pathview/tools/abstractpathtool.py
__init__
sherwoodyao/cadnano2.5
69
python
def __init__(self, manager: PathToolManagerT): '\n Args:\n manager: Description\n ' super(AbstractPathTool, self).__init__(None) self.manager: PathToolManagerT = manager self._window: DocT = manager.window self._active: bool = False self._last_location = None self._rect: QRectF = _RECT self._pen: QPen = _PEN self.hide()
def __init__(self, manager: PathToolManagerT): '\n Args:\n manager: Description\n ' super(AbstractPathTool, self).__init__(None) self.manager: PathToolManagerT = manager self._window: DocT = manager.window self._active: bool = False self._last_location = None self._rect: QRectF = _RECT self._pen: QPen = _PEN self.hide()<|docstring|>Args: manager: Description<|endoftext|>
6e26a2914bd668f42d65a9cd52c40b0663971c4c17686ed2565680dc04ea2783
def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget=None): '\n Args:\n painter: Description\n option: Description\n widget: Default is ``None``\n ' painter.setPen(self._pen) painter.setBrush(_BRUSH) painter.drawRect(_TOOL_RECT)
Args: painter: Description option: Description widget: Default is ``None``
cadnano/views/pathview/tools/abstractpathtool.py
paint
sherwoodyao/cadnano2.5
69
python
def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget=None): '\n Args:\n painter: Description\n option: Description\n widget: Default is ``None``\n ' painter.setPen(self._pen) painter.setBrush(_BRUSH) painter.drawRect(_TOOL_RECT)
def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget=None): '\n Args:\n painter: Description\n option: Description\n widget: Default is ``None``\n ' painter.setPen(self._pen) painter.setBrush(_BRUSH) painter.drawRect(_TOOL_RECT)<|docstring|>Args: painter: Description option: Description widget: Default is ``None``<|endoftext|>
e29e0af3fe2356de67858d711cb5b2c32a685e75f21aadbbd2bf8d9dc0c571c1
def boundingRect(self) -> QRectF: 'Returns:\n bounding rectangle\n ' return self._rect
Returns: bounding rectangle
cadnano/views/pathview/tools/abstractpathtool.py
boundingRect
sherwoodyao/cadnano2.5
69
python
def boundingRect(self) -> QRectF: 'Returns:\n bounding rectangle\n ' return self._rect
def boundingRect(self) -> QRectF: 'Returns:\n bounding rectangle\n ' return self._rect<|docstring|>Returns: bounding rectangle<|endoftext|>
4ae0ec84a87f2b9938f880b2053a3e5e38e6ced8489972f93d38087489d8ba9f
def updateLocation(self, virtual_helix_item: PathVirtualHelixItemT, scene_pos: QPointF, *args): "Takes care of caching the location so that a tool switch\n outside the context of an event will know where to\n position the new tool and snaps self's pos to the upper\n left hand corner of the base the user is mousing over.\n\n Args:\n virtual_helix_item: PathVirtualHelixItem\n scene_pos: point in the scene\n *args: DESCRIPTION\n " if virtual_helix_item: if (self.parentObject() != virtual_helix_item): self.setParentItem(virtual_helix_item) self._last_location = (virtual_helix_item, scene_pos) pos_item = virtual_helix_item.mapFromScene(scene_pos) pos = self.helixPos(pos_item) if (pos is not None): if (pos != self.pos()): self.setPos(pos) self.update(self.boundingRect()) else: self._last_location = None if self.isVisible(): self.hide()
Takes care of caching the location so that a tool switch outside the context of an event will know where to position the new tool and snaps self's pos to the upper left hand corner of the base the user is mousing over. Args: virtual_helix_item: PathVirtualHelixItem scene_pos: point in the scene *args: DESCRIPTION
cadnano/views/pathview/tools/abstractpathtool.py
updateLocation
sherwoodyao/cadnano2.5
69
python
def updateLocation(self, virtual_helix_item: PathVirtualHelixItemT, scene_pos: QPointF, *args): "Takes care of caching the location so that a tool switch\n outside the context of an event will know where to\n position the new tool and snaps self's pos to the upper\n left hand corner of the base the user is mousing over.\n\n Args:\n virtual_helix_item: PathVirtualHelixItem\n scene_pos: point in the scene\n *args: DESCRIPTION\n " if virtual_helix_item: if (self.parentObject() != virtual_helix_item): self.setParentItem(virtual_helix_item) self._last_location = (virtual_helix_item, scene_pos) pos_item = virtual_helix_item.mapFromScene(scene_pos) pos = self.helixPos(pos_item) if (pos is not None): if (pos != self.pos()): self.setPos(pos) self.update(self.boundingRect()) else: self._last_location = None if self.isVisible(): self.hide()
def updateLocation(self, virtual_helix_item: PathVirtualHelixItemT, scene_pos: QPointF, *args): "Takes care of caching the location so that a tool switch\n outside the context of an event will know where to\n position the new tool and snaps self's pos to the upper\n left hand corner of the base the user is mousing over.\n\n Args:\n virtual_helix_item: PathVirtualHelixItem\n scene_pos: point in the scene\n *args: DESCRIPTION\n " if virtual_helix_item: if (self.parentObject() != virtual_helix_item): self.setParentItem(virtual_helix_item) self._last_location = (virtual_helix_item, scene_pos) pos_item = virtual_helix_item.mapFromScene(scene_pos) pos = self.helixPos(pos_item) if (pos is not None): if (pos != self.pos()): self.setPos(pos) self.update(self.boundingRect()) else: self._last_location = None if self.isVisible(): self.hide()<|docstring|>Takes care of caching the location so that a tool switch outside the context of an event will know where to position the new tool and snaps self's pos to the upper left hand corner of the base the user is mousing over. Args: virtual_helix_item: PathVirtualHelixItem scene_pos: point in the scene *args: DESCRIPTION<|endoftext|>
c5b30acb4f54f6123601402dac0913f24f9c2e754699d92468a97dfca3770d79
def lastLocation(self) -> Optional[Tuple[(PathVirtualHelixItemT, QPointF)]]: "A tool's last_location consists of a :class`PathVirtualHelixItem` and\n a scene pos (:class:`QPoint`) representing the last known location of\n the mouse.\n\n It can be used to provide visual continuity when switching tools.\n When the new tool is selected, this method will be invoked by\n calling `updateLocation(*old_tool.lastLocation())`.\n\n Returns:\n location: ``(virtual_helix_item, QPointF)`` representing the last\n known location of the mouse for purposes of positioning\n the graphic of a new tool on switching tools (the tool\n will have called on it)\n\n " return self._last_location
A tool's last_location consists of a :class`PathVirtualHelixItem` and a scene pos (:class:`QPoint`) representing the last known location of the mouse. It can be used to provide visual continuity when switching tools. When the new tool is selected, this method will be invoked by calling `updateLocation(*old_tool.lastLocation())`. Returns: location: ``(virtual_helix_item, QPointF)`` representing the last known location of the mouse for purposes of positioning the graphic of a new tool on switching tools (the tool will have called on it)
cadnano/views/pathview/tools/abstractpathtool.py
lastLocation
sherwoodyao/cadnano2.5
69
python
def lastLocation(self) -> Optional[Tuple[(PathVirtualHelixItemT, QPointF)]]: "A tool's last_location consists of a :class`PathVirtualHelixItem` and\n a scene pos (:class:`QPoint`) representing the last known location of\n the mouse.\n\n It can be used to provide visual continuity when switching tools.\n When the new tool is selected, this method will be invoked by\n calling `updateLocation(*old_tool.lastLocation())`.\n\n Returns:\n location: ``(virtual_helix_item, QPointF)`` representing the last\n known location of the mouse for purposes of positioning\n the graphic of a new tool on switching tools (the tool\n will have called on it)\n\n " return self._last_location
def lastLocation(self) -> Optional[Tuple[(PathVirtualHelixItemT, QPointF)]]: "A tool's last_location consists of a :class`PathVirtualHelixItem` and\n a scene pos (:class:`QPoint`) representing the last known location of\n the mouse.\n\n It can be used to provide visual continuity when switching tools.\n When the new tool is selected, this method will be invoked by\n calling `updateLocation(*old_tool.lastLocation())`.\n\n Returns:\n location: ``(virtual_helix_item, QPointF)`` representing the last\n known location of the mouse for purposes of positioning\n the graphic of a new tool on switching tools (the tool\n will have called on it)\n\n " return self._last_location<|docstring|>A tool's last_location consists of a :class`PathVirtualHelixItem` and a scene pos (:class:`QPoint`) representing the last known location of the mouse. It can be used to provide visual continuity when switching tools. When the new tool is selected, this method will be invoked by calling `updateLocation(*old_tool.lastLocation())`. Returns: location: ``(virtual_helix_item, QPointF)`` representing the last known location of the mouse for purposes of positioning the graphic of a new tool on switching tools (the tool will have called on it)<|endoftext|>
c60965a2b6ee05524ec409d9e4fc5242404b1d3ec68d56f9795182cd17c21e5d
def setActive(self, will_be_active: bool, old_tool=None): '\n Called by PathToolManager.setActiveTool when the tool becomes\n active. Used, for example, to show/hide tool-specific ui elements.\n\n Args:\n will_be_active (TYPE): Description\n old_tool (None, optional): Description\n ' if (self._active and (not will_be_active)): self.deactivate() self._active = will_be_active
Called by PathToolManager.setActiveTool when the tool becomes active. Used, for example, to show/hide tool-specific ui elements. Args: will_be_active (TYPE): Description old_tool (None, optional): Description
cadnano/views/pathview/tools/abstractpathtool.py
setActive
sherwoodyao/cadnano2.5
69
python
def setActive(self, will_be_active: bool, old_tool=None): '\n Called by PathToolManager.setActiveTool when the tool becomes\n active. Used, for example, to show/hide tool-specific ui elements.\n\n Args:\n will_be_active (TYPE): Description\n old_tool (None, optional): Description\n ' if (self._active and (not will_be_active)): self.deactivate() self._active = will_be_active
def setActive(self, will_be_active: bool, old_tool=None): '\n Called by PathToolManager.setActiveTool when the tool becomes\n active. Used, for example, to show/hide tool-specific ui elements.\n\n Args:\n will_be_active (TYPE): Description\n old_tool (None, optional): Description\n ' if (self._active and (not will_be_active)): self.deactivate() self._active = will_be_active<|docstring|>Called by PathToolManager.setActiveTool when the tool becomes active. Used, for example, to show/hide tool-specific ui elements. Args: will_be_active (TYPE): Description old_tool (None, optional): Description<|endoftext|>
555ca1dac1912efb9b690674e1fe16b5fa26604cd10d71689773c0dd7f797463
def deactivate(self): 'Summary\n\n Returns:\n TYPE: Description\n ' self.hide()
Summary Returns: TYPE: Description
cadnano/views/pathview/tools/abstractpathtool.py
deactivate
sherwoodyao/cadnano2.5
69
python
def deactivate(self): 'Summary\n\n Returns:\n TYPE: Description\n ' self.hide()
def deactivate(self): 'Summary\n\n Returns:\n TYPE: Description\n ' self.hide()<|docstring|>Summary Returns: TYPE: Description<|endoftext|>
7a67927d3e07c8b8ef05fc03f3cd1638c9420fa2e2f40aba02d03ca7bc0f151f
def isActive(self) -> bool: 'Returns isActive\n ' return self._active
Returns isActive
cadnano/views/pathview/tools/abstractpathtool.py
isActive
sherwoodyao/cadnano2.5
69
python
def isActive(self) -> bool: '\n ' return self._active
def isActive(self) -> bool: '\n ' return self._active<|docstring|>Returns isActive<|endoftext|>
a4734ef807fdc6e19c0a1bec70df497dd3add9a40eb0df711ac7f1f809fe0690
def widgetClicked(self): 'Called every time a widget representing self gets clicked,\n not just when changing tools.\n '
Called every time a widget representing self gets clicked, not just when changing tools.
cadnano/views/pathview/tools/abstractpathtool.py
widgetClicked
sherwoodyao/cadnano2.5
69
python
def widgetClicked(self): 'Called every time a widget representing self gets clicked,\n not just when changing tools.\n '
def widgetClicked(self): 'Called every time a widget representing self gets clicked,\n not just when changing tools.\n '<|docstring|>Called every time a widget representing self gets clicked, not just when changing tools.<|endoftext|>
8d171907726aa8b79eb1ea9d506b9fb1c8016b03b766afbf03d818bfd12677bd
def baseAtPoint(self, virtual_helix_item: PathVirtualHelixItemT, pt: QPointF) -> Tuple[(bool, int, int)]: 'Returns the ``(is_fwd, base_idx, strand_idx)`` corresponding\n to pt in virtual_helix_item.\n\n Args:\n virtual_helix_item: :class:`PathVirtualHelixItem`\n pt: Point on helix\n ' (x, strand_idx) = self.helixIndex(pt) is_fwd = (False if util.clamp(strand_idx, 0, 1) else True) return (is_fwd, x, strand_idx)
Returns the ``(is_fwd, base_idx, strand_idx)`` corresponding to pt in virtual_helix_item. Args: virtual_helix_item: :class:`PathVirtualHelixItem` pt: Point on helix
cadnano/views/pathview/tools/abstractpathtool.py
baseAtPoint
sherwoodyao/cadnano2.5
69
python
def baseAtPoint(self, virtual_helix_item: PathVirtualHelixItemT, pt: QPointF) -> Tuple[(bool, int, int)]: 'Returns the ``(is_fwd, base_idx, strand_idx)`` corresponding\n to pt in virtual_helix_item.\n\n Args:\n virtual_helix_item: :class:`PathVirtualHelixItem`\n pt: Point on helix\n ' (x, strand_idx) = self.helixIndex(pt) is_fwd = (False if util.clamp(strand_idx, 0, 1) else True) return (is_fwd, x, strand_idx)
def baseAtPoint(self, virtual_helix_item: PathVirtualHelixItemT, pt: QPointF) -> Tuple[(bool, int, int)]: 'Returns the ``(is_fwd, base_idx, strand_idx)`` corresponding\n to pt in virtual_helix_item.\n\n Args:\n virtual_helix_item: :class:`PathVirtualHelixItem`\n pt: Point on helix\n ' (x, strand_idx) = self.helixIndex(pt) is_fwd = (False if util.clamp(strand_idx, 0, 1) else True) return (is_fwd, x, strand_idx)<|docstring|>Returns the ``(is_fwd, base_idx, strand_idx)`` corresponding to pt in virtual_helix_item. Args: virtual_helix_item: :class:`PathVirtualHelixItem` pt: Point on helix<|endoftext|>
baa44910ad315e64c2a5007eed2fe3481ba027b4958debd31300e4c665b64706
def helixIndex(self, point: QPointF) -> Vec2T: 'Returns the (row, col) of the base which point lies within.\n\n Returns:\n point (tuple) in virtual_helix_item coordinates\n\n Args:\n point (TYPE): Description\n ' x = int((int(point.x()) / _BW)) y = int((int(point.y()) / _BW)) return (x, y)
Returns the (row, col) of the base which point lies within. Returns: point (tuple) in virtual_helix_item coordinates Args: point (TYPE): Description
cadnano/views/pathview/tools/abstractpathtool.py
helixIndex
sherwoodyao/cadnano2.5
69
python
def helixIndex(self, point: QPointF) -> Vec2T: 'Returns the (row, col) of the base which point lies within.\n\n Returns:\n point (tuple) in virtual_helix_item coordinates\n\n Args:\n point (TYPE): Description\n ' x = int((int(point.x()) / _BW)) y = int((int(point.y()) / _BW)) return (x, y)
def helixIndex(self, point: QPointF) -> Vec2T: 'Returns the (row, col) of the base which point lies within.\n\n Returns:\n point (tuple) in virtual_helix_item coordinates\n\n Args:\n point (TYPE): Description\n ' x = int((int(point.x()) / _BW)) y = int((int(point.y()) / _BW)) return (x, y)<|docstring|>Returns the (row, col) of the base which point lies within. Returns: point (tuple) in virtual_helix_item coordinates Args: point (TYPE): Description<|endoftext|>
6fcd29ace9ebdf25e380808a35d8b1d18f4b16b8e1b218726a93b729bcc82ecb
def helixPos(self, point): '\n Snaps a point to the upper left corner of the base\n it is within.\n point is in virtual_helix_item coordinates\n\n Args:\n point (TYPE): Description\n ' col = int((int(point.x()) / _BW)) row = int((int(point.y()) / _BW)) if ((col < 0) or (row < 0) or (row > 1)): return None return QPointF((col * _BW), (row * _BW))
Snaps a point to the upper left corner of the base it is within. point is in virtual_helix_item coordinates Args: point (TYPE): Description
cadnano/views/pathview/tools/abstractpathtool.py
helixPos
sherwoodyao/cadnano2.5
69
python
def helixPos(self, point): '\n Snaps a point to the upper left corner of the base\n it is within.\n point is in virtual_helix_item coordinates\n\n Args:\n point (TYPE): Description\n ' col = int((int(point.x()) / _BW)) row = int((int(point.y()) / _BW)) if ((col < 0) or (row < 0) or (row > 1)): return None return QPointF((col * _BW), (row * _BW))
def helixPos(self, point): '\n Snaps a point to the upper left corner of the base\n it is within.\n point is in virtual_helix_item coordinates\n\n Args:\n point (TYPE): Description\n ' col = int((int(point.x()) / _BW)) row = int((int(point.y()) / _BW)) if ((col < 0) or (row < 0) or (row > 1)): return None return QPointF((col * _BW), (row * _BW))<|docstring|>Snaps a point to the upper left corner of the base it is within. point is in virtual_helix_item coordinates Args: point (TYPE): Description<|endoftext|>
3636fe242f124e93e13e2396dc2c8eb721bb6562bac9dd824bd8906fdd499a69
def hoverLeaveEvent(self, event): '\n flag is for the case where an item in the path also needs to\n implement the hover method\n\n Args:\n event (TYPE): Description\n ' self.hide()
flag is for the case where an item in the path also needs to implement the hover method Args: event (TYPE): Description
cadnano/views/pathview/tools/abstractpathtool.py
hoverLeaveEvent
sherwoodyao/cadnano2.5
69
python
def hoverLeaveEvent(self, event): '\n flag is for the case where an item in the path also needs to\n implement the hover method\n\n Args:\n event (TYPE): Description\n ' self.hide()
def hoverLeaveEvent(self, event): '\n flag is for the case where an item in the path also needs to\n implement the hover method\n\n Args:\n event (TYPE): Description\n ' self.hide()<|docstring|>flag is for the case where an item in the path also needs to implement the hover method Args: event (TYPE): Description<|endoftext|>
ab4384e1c948c5140bf89424599b6126a4a8d9558b658efe18f5be53aaf26687
def __init__(self, pool_size): '\n Initialize the ImagePool class\n\n Parameters\n ----------\n pool_size : int\n The size of image buffer, if pool_size=0, no buffer will be created\n ' self.pool_size = pool_size if (self.pool_size > 0): self.num_imgs = 0 self.images = []
Initialize the ImagePool class Parameters ---------- pool_size : int The size of image buffer, if pool_size=0, no buffer will be created
pytorch_3T27T/utils/image_pool.py
__init__
HechengJin0/pytorch_3T27T
0
python
def __init__(self, pool_size): '\n Initialize the ImagePool class\n\n Parameters\n ----------\n pool_size : int\n The size of image buffer, if pool_size=0, no buffer will be created\n ' self.pool_size = pool_size if (self.pool_size > 0): self.num_imgs = 0 self.images = []
def __init__(self, pool_size): '\n Initialize the ImagePool class\n\n Parameters\n ----------\n pool_size : int\n The size of image buffer, if pool_size=0, no buffer will be created\n ' self.pool_size = pool_size if (self.pool_size > 0): self.num_imgs = 0 self.images = []<|docstring|>Initialize the ImagePool class Parameters ---------- pool_size : int The size of image buffer, if pool_size=0, no buffer will be created<|endoftext|>
d5c86d3ce4c8fe5830f154ff5eeedc5b620ff4dd36bd6a4a27e6b13863ff269c
def query(self, images): '\n Return an image from the pool.\n\n Parameters\n ----------\n images : torch.Tensor\n The latest generated images from the generator\n\n Returns\n -------\n return_images : torch.Tensor\n Images from the buffer. If buffer is not full, will return the\n given input image. Else, wth 50% chance, a previously stored image\n in the buffer will be returned and the given input image stored in\n the buffer.\n ' if (self.pool_size == 0): return images return_images = [] for image in images: image = torch.unsqueeze(image.data, 0) if (self.num_imgs < self.pool_size): self.num_imgs = (self.num_imgs + 1) self.images.append(image) return_images.append(image) else: p = random.uniform(0, 1) if (p > 0.5): random_id = random.randint(0, (self.pool_size - 1)) tmp = self.images[random_id].clone() self.images[random_id] = image return_images.append(tmp) else: return_images.append(image) return_images = torch.cat(return_images, 0) return return_images
Return an image from the pool. Parameters ---------- images : torch.Tensor The latest generated images from the generator Returns ------- return_images : torch.Tensor Images from the buffer. If buffer is not full, will return the given input image. Else, wth 50% chance, a previously stored image in the buffer will be returned and the given input image stored in the buffer.
pytorch_3T27T/utils/image_pool.py
query
HechengJin0/pytorch_3T27T
0
python
def query(self, images): '\n Return an image from the pool.\n\n Parameters\n ----------\n images : torch.Tensor\n The latest generated images from the generator\n\n Returns\n -------\n return_images : torch.Tensor\n Images from the buffer. If buffer is not full, will return the\n given input image. Else, wth 50% chance, a previously stored image\n in the buffer will be returned and the given input image stored in\n the buffer.\n ' if (self.pool_size == 0): return images return_images = [] for image in images: image = torch.unsqueeze(image.data, 0) if (self.num_imgs < self.pool_size): self.num_imgs = (self.num_imgs + 1) self.images.append(image) return_images.append(image) else: p = random.uniform(0, 1) if (p > 0.5): random_id = random.randint(0, (self.pool_size - 1)) tmp = self.images[random_id].clone() self.images[random_id] = image return_images.append(tmp) else: return_images.append(image) return_images = torch.cat(return_images, 0) return return_images
def query(self, images): '\n Return an image from the pool.\n\n Parameters\n ----------\n images : torch.Tensor\n The latest generated images from the generator\n\n Returns\n -------\n return_images : torch.Tensor\n Images from the buffer. If buffer is not full, will return the\n given input image. Else, wth 50% chance, a previously stored image\n in the buffer will be returned and the given input image stored in\n the buffer.\n ' if (self.pool_size == 0): return images return_images = [] for image in images: image = torch.unsqueeze(image.data, 0) if (self.num_imgs < self.pool_size): self.num_imgs = (self.num_imgs + 1) self.images.append(image) return_images.append(image) else: p = random.uniform(0, 1) if (p > 0.5): random_id = random.randint(0, (self.pool_size - 1)) tmp = self.images[random_id].clone() self.images[random_id] = image return_images.append(tmp) else: return_images.append(image) return_images = torch.cat(return_images, 0) return return_images<|docstring|>Return an image from the pool. Parameters ---------- images : torch.Tensor The latest generated images from the generator Returns ------- return_images : torch.Tensor Images from the buffer. If buffer is not full, will return the given input image. Else, wth 50% chance, a previously stored image in the buffer will be returned and the given input image stored in the buffer.<|endoftext|>
e38984ae0da95ba862854a7194507b33d708c6ac612346c7762b32c93efe0575
@property def password(self): '\n password属性函数\n 不允许直接读取原始值\n ' return '密码不是可读形式!'
password属性函数 不允许直接读取原始值
app/orm/User.py
password
sevenZz/CouponStatistic
0
python
@property def password(self): '\n password属性函数\n 不允许直接读取原始值\n ' return '密码不是可读形式!'
@property def password(self): '\n password属性函数\n 不允许直接读取原始值\n ' return '密码不是可读形式!'<|docstring|>password属性函数 不允许直接读取原始值<|endoftext|>
f1e8b6f7a9eecc71288ec22439801f8e0385975ca561c3835168739b8f8f222f
@password.setter def password(self, password): '\n 设置密码hash值\n ' self.password_hash = werkzeug.security.generate_password_hash(password)
设置密码hash值
app/orm/User.py
password
sevenZz/CouponStatistic
0
python
@password.setter def password(self, password): '\n \n ' self.password_hash = werkzeug.security.generate_password_hash(password)
@password.setter def password(self, password): '\n \n ' self.password_hash = werkzeug.security.generate_password_hash(password)<|docstring|>设置密码hash值<|endoftext|>