hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71be0c7310b1e5a59e2faa1aa48d7c69c065244 | 41,236 | py | Python | pyscf/fci/selected_ci.py | crisely09/pyscf | cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6 | [
"Apache-2.0"
] | 1 | 2020-04-07T21:12:08.000Z | 2020-04-07T21:12:08.000Z | pyscf/fci/selected_ci.py | crisely09/pyscf | cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6 | [
"Apache-2.0"
] | 2 | 2019-09-16T17:58:31.000Z | 2019-09-22T17:26:01.000Z | pyscf/fci/selected_ci.py | crisely09/pyscf | cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Selected CI
Simple usage::
>>> from pyscf import gto, scf, ao2mo, fci
>>> mol = gto.M(atom='C 0 0 0; C 0 0 1')
>>> mf = scf.RHF(mol).run()
>>> h1 = mf.mo_coeff.T.dot(mf.get_hcore()).dot(mf.mo_coeff)
>>> h2 = ao2mo.kernel(mol, mf.mo_coeff)
>>> e = fci.selected_ci.kernel(h1, h2, mf.mo_coeff.shape[1], mol.nelectron)[0]
'''
import ctypes
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.fci import cistring
from pyscf.fci import direct_spin1
from pyscf.fci import rdm
from pyscf import __config__
libfci = lib.load_library('libfci')
def contract_2e(eri, civec_strs, norb, nelec, link_index=None):
    '''Compute H|civec> within the selected determinant space.

    `eri` is expected to already absorb the 1e Hamiltonian (callers pass the
    output of direct_spin1.absorb_h1e; the h_ps/nelec folding below relies on
    that).  Returns an _SCIvector tagged with the same (strsa, strsb) string
    sets as the input vector.
    '''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    if link_index is None:
        link_index = _all_linkstr_index(ci_strs, norb, nelec)
    cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    na, nlinka = cd_indexa.shape[:2]
    nb, nlinkb = cd_indexb.shape[:2]
    eri = ao2mo.restore(1, eri, norb)
    # Antisymmetrized same-spin integrals compressed onto the strictly
    # lower-triangular orbital-pair index (p>q); the factor 2 restores the
    # contribution of the omitted upper triangle.
    eri1 = eri.transpose(0,2,1,3) - eri.transpose(0,2,3,1)
    idx,idy = numpy.tril_indices(norb, -1)
    idx = idx * norb + idy
    eri1 = lib.take_2d(eri1.reshape(norb**2,-1), idx, idx) * 2
    fcivec = ci_coeff.reshape(na,nb)
    # (bb|bb): reuse the aaaa kernel on the transposed CI matrix
    if nelec[1] > 1:
        mb, mlinkb = dd_indexb.shape[:2]
        fcivecT = lib.transpose(fcivec)
        ci1T = numpy.zeros((nb,na))
        libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
                                   fcivecT.ctypes.data_as(ctypes.c_void_p),
                                   ci1T.ctypes.data_as(ctypes.c_void_p),
                                   ctypes.c_int(norb),
                                   ctypes.c_int(nb), ctypes.c_int(na),
                                   ctypes.c_int(mb), ctypes.c_int(mlinkb),
                                   dd_indexb.ctypes.data_as(ctypes.c_void_p))
        ci1 = lib.transpose(ci1T, out=fcivecT)
    else:
        ci1 = numpy.zeros_like(fcivec)
    # (aa|aa)
    if nelec[0] > 1:
        ma, mlinka = dd_indexa.shape[:2]
        libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
                                   fcivec.ctypes.data_as(ctypes.c_void_p),
                                   ci1.ctypes.data_as(ctypes.c_void_p),
                                   ctypes.c_int(norb),
                                   ctypes.c_int(na), ctypes.c_int(nb),
                                   ctypes.c_int(ma), ctypes.c_int(mlinka),
                                   dd_indexa.ctypes.data_as(ctypes.c_void_p))
    # Fold the effective 1e part h_ps into the integrals so the single bbaa
    # kernel below covers both the 1e terms and the opposite-spin 2e terms.
    h_ps = numpy.einsum('pqqs->ps', eri)
    eri1 = eri * 2
    for k in range(norb):
        eri1[:,:,k,k] += h_ps/nelec[0]
        eri1[k,k,:,:] += h_ps/nelec[1]
    eri1 = ao2mo.restore(4, eri1, norb)
    # (bb|aa)
    libfci.SCIcontract_2e_bbaa(eri1.ctypes.data_as(ctypes.c_void_p),
                               fcivec.ctypes.data_as(ctypes.c_void_p),
                               ci1.ctypes.data_as(ctypes.c_void_p),
                               ctypes.c_int(norb),
                               ctypes.c_int(na), ctypes.c_int(nb),
                               ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
                               cd_indexa.ctypes.data_as(ctypes.c_void_p),
                               cd_indexb.ctypes.data_as(ctypes.c_void_p))
    return _as_SCIvector(ci1.reshape(ci_coeff.shape), ci_strs)
def select_strs(myci, eri, eri_pq_max, civec_max, strs, norb, nelec):
    '''Propose new determinant strings by exciting the given strings, keeping
    those whose estimated importance (based on the integral magnitudes
    eri_pq_max and the per-string CI weights civec_max) exceeds
    myci.select_cutoff.  Returns the sorted new strings, excluding any that
    are already present in `strs`.
    '''
    strs = numpy.asarray(strs, dtype=numpy.int64)
    nstrs = len(strs)
    nvir = norb - nelec
    # Upper-bound sized output buffer; the C routine returns how many
    # entries it actually filled.
    strs_add = numpy.empty((nstrs*(nelec*nvir)**2//4), dtype=numpy.int64)
    libfci.SCIselect_strs.restype = ctypes.c_int
    nadd = libfci.SCIselect_strs(strs_add.ctypes.data_as(ctypes.c_void_p),
                                 strs.ctypes.data_as(ctypes.c_void_p),
                                 eri.ctypes.data_as(ctypes.c_void_p),
                                 eri_pq_max.ctypes.data_as(ctypes.c_void_p),
                                 civec_max.ctypes.data_as(ctypes.c_void_p),
                                 ctypes.c_double(myci.select_cutoff),
                                 ctypes.c_int(norb), ctypes.c_int(nelec),
                                 ctypes.c_int(nstrs))
    # Deduplicate and drop strings already in the current space.
    strs_add = sorted(set(strs_add[:nadd]) - set(strs))
    return numpy.asarray(strs_add, dtype=numpy.int64)
def enlarge_space(myci, civec_strs, eri, norb, nelec):
    '''Expand the selected-CI space: keep strings with significant CI weight,
    add new strings proposed by select_strs, and map the old CI coefficients
    into the enlarged (zero-padded) vector(s).

    Accepts either a single _SCIvector or a list/tuple of them (multi-root);
    the return value has the matching structure.
    '''
    if isinstance(civec_strs, (tuple, list)):
        nelec, (strsa, strsb) = _unpack(civec_strs[0], nelec)[1:]
        ci_coeff = lib.asarray(civec_strs)
    else:
        ci_coeff, nelec, (strsa, strsb) = _unpack(civec_strs, nelec)
    na = len(strsa)
    nb = len(strsb)
    ci0 = ci_coeff.reshape(-1,na,nb)
    # Largest weight of each alpha (resp. beta) string over all roots.
    civec_a_max = lib.norm(ci0, axis=2).max(axis=0)
    civec_b_max = lib.norm(ci0, axis=1).max(axis=0)
    # Retain only the strings whose weight exceeds the coefficient cutoff.
    ci_aidx = numpy.where(civec_a_max > myci.ci_coeff_cutoff)[0]
    ci_bidx = numpy.where(civec_b_max > myci.ci_coeff_cutoff)[0]
    civec_a_max = civec_a_max[ci_aidx]
    civec_b_max = civec_b_max[ci_bidx]
    strsa = strsa[ci_aidx]
    strsb = strsb[ci_bidx]
    eri = ao2mo.restore(1, eri, norb)
    eri_pq_max = abs(eri.reshape(norb**2,-1)).max(axis=1).reshape(norb,norb)
    strsa_add = select_strs(myci, eri, eri_pq_max, civec_a_max, strsa, norb, nelec[0])
    strsb_add = select_strs(myci, eri, eri_pq_max, civec_b_max, strsb, norb, nelec[1])
    strsa = numpy.append(strsa, strsa_add)
    strsb = numpy.append(strsb, strsb_add)
    aidx = numpy.argsort(strsa)
    bidx = numpy.argsort(strsb)
    ci_strs = (strsa[aidx], strsb[bidx])
    # Positions of the retained old strings within the sorted, enlarged sets
    # (appended strings carry original index >= len(ci_aidx)).
    aidx = numpy.where(aidx < len(ci_aidx))[0]
    bidx = numpy.where(bidx < len(ci_bidx))[0]
    ma = len(strsa)
    mb = len(strsb)
    cs = []
    for i in range(ci0.shape[0]):
        ci1 = numpy.zeros((ma,mb))
        tmp = lib.take_2d(ci0[i], ci_aidx, ci_bidx)
        lib.takebak_2d(ci1, tmp, aidx, bidx)
        cs.append(_as_SCIvector(ci1, ci_strs))
    if not isinstance(civec_strs, (tuple, list)) and civec_strs.ndim < 3:
        cs = cs[0]
    return cs
def cre_des_linkstr(strs, norb, nelec, tril=False):
    '''Given intermediates, the link table to generate input strs.

    Builds the single creation-annihilation link table for the given set of
    determinant strings via the C routine SCIcre_des_linkstr.  With
    tril=True the table is restricted to lower-triangular orbital pairs.
    '''
    strs = numpy.asarray(strs, dtype=numpy.int64)
    nvir = norb - nelec
    nstrs = len(strs)
    # nelec entries for the diagonal (p==q on occupied orbitals) plus
    # nelec*nvir entries for occupied->virtual moves; 4 ints per entry.
    link_index = numpy.zeros((nstrs,nelec+nelec*nvir,4), dtype=numpy.int32)
    libfci.SCIcre_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(norb), ctypes.c_int(nstrs),
                              ctypes.c_int(nelec),
                              strs.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(tril))
    return link_index
def cre_des_linkstr_tril(strs, norb, nelec):
    '''Lower-triangular variant of the creation-annihilation link table;
    equivalent to cre_des_linkstr(strs, norb, nelec, tril=True).
    '''
    return cre_des_linkstr(strs, norb, nelec, tril=True)
def des_des_linkstr(strs, norb, nelec, tril=False):
    '''Given intermediates, the link table to generate input strs.

    Builds the double-annihilation link table connecting the unique
    (nelec-2)-electron intermediate strings back to the input strings.
    Returns None when fewer than two electrons are present (no double
    annihilation is possible).
    '''
    if nelec < 2:
        return None
    strs = numpy.asarray(strs, dtype=numpy.int64)
    nvir = norb - nelec
    nstrs = len(strs)
    # First annihilation: collect the unique (nelec-1)-electron strings.
    inter1 = numpy.empty((nstrs*nelec), dtype=numpy.int64)
    libfci.SCIdes_uniq_strs.restype = ctypes.c_int
    ninter = libfci.SCIdes_uniq_strs(inter1.ctypes.data_as(ctypes.c_void_p),
                                     strs.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(norb), ctypes.c_int(nelec),
                                     ctypes.c_int(nstrs))
    inter1 = numpy.asarray(sorted(set(inter1[:ninter])), dtype=numpy.int64)
    ninter = len(inter1)
    # Second annihilation: collect the unique (nelec-2)-electron strings.
    inter = numpy.empty((ninter*nelec), dtype=numpy.int64)
    ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
                                     inter1.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(norb), ctypes.c_int(nelec-1),
                                     ctypes.c_int(ninter))
    inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
    ninter = len(inter)
    # Each intermediate connects through up to (nvir+2)**2 orbital pairs.
    nvir += 2
    link_index = numpy.zeros((ninter,nvir*nvir,4), dtype=numpy.int32)
    libfci.SCIdes_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(norb), ctypes.c_int(nelec),
                              ctypes.c_int(nstrs), ctypes.c_int(ninter),
                              strs.ctypes.data_as(ctypes.c_void_p),
                              inter.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(tril))
    return link_index
def des_des_linkstr_tril(strs, norb, nelec):
    '''Lower-triangular variant of the double-annihilation link table;
    equivalent to des_des_linkstr(strs, norb, nelec, tril=True).
    '''
    return des_des_linkstr(strs, norb, nelec, tril=True)
def gen_des_linkstr(strs, norb, nelec):
    '''Given intermediates, the link table to generate input strs.

    Builds the single-annihilation link table mapping the unique
    (nelec-1)-electron intermediate strings back to the input strings.
    Returns None when there is no electron to annihilate.
    '''
    if nelec < 1:
        return None
    strs = numpy.asarray(strs, dtype=numpy.int64)
    nvir = norb - nelec
    nstrs = len(strs)
    inter = numpy.empty((nstrs*nelec), dtype=numpy.int64)
    libfci.SCIdes_uniq_strs.restype = ctypes.c_int
    ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
                                     strs.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(norb), ctypes.c_int(nelec),
                                     ctypes.c_int(nstrs))
    # Sort and deduplicate the intermediates reported by the C routine.
    inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
    ninter = len(inter)
    nvir += 1
    link_index = numpy.zeros((ninter,nvir,4), dtype=numpy.int32)
    libfci.SCIdes_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
                          ctypes.c_int(norb), ctypes.c_int(nelec),
                          ctypes.c_int(nstrs), ctypes.c_int(ninter),
                          strs.ctypes.data_as(ctypes.c_void_p),
                          inter.ctypes.data_as(ctypes.c_void_p))
    return link_index
def gen_cre_linkstr(strs, norb, nelec):
    '''Given intermediates, the link table to generate input strs.

    Builds the single-creation link table mapping the unique
    (nelec+1)-electron intermediate strings back to the input strings.
    Returns None when every orbital is already occupied (nelec == norb).
    '''
    if nelec == norb:
        return None
    strs = numpy.asarray(strs, dtype=numpy.int64)
    nvir = norb - nelec
    nstrs = len(strs)
    inter = numpy.empty((nstrs*nvir), dtype=numpy.int64)
    libfci.SCIcre_uniq_strs.restype = ctypes.c_int
    ninter = libfci.SCIcre_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
                                     strs.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(norb), ctypes.c_int(nelec),
                                     ctypes.c_int(nstrs))
    # Sort and deduplicate the intermediates reported by the C routine.
    inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
    ninter = len(inter)
    link_index = numpy.zeros((ninter,nelec+1,4), dtype=numpy.int32)
    libfci.SCIcre_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
                          ctypes.c_int(norb), ctypes.c_int(nelec),
                          ctypes.c_int(nstrs), ctypes.c_int(ninter),
                          strs.ctypes.data_as(ctypes.c_void_p),
                          inter.ctypes.data_as(ctypes.c_void_p))
    return link_index
def make_hdiag(h1e, eri, ci_strs, norb, nelec):
    '''Diagonal Hamiltonian elements <D|H|D> for every determinant pair in
    the selected space, returned as a flat array of length na*nb.
    '''
    ci_coeff, nelec, ci_strs = _unpack(None, nelec, ci_strs)
    na = len(ci_strs[0])
    nb = len(ci_strs[1])
    hdiag = numpy.empty(na*nb)
    h1e = numpy.asarray(h1e, order='C')
    eri = ao2mo.restore(1, eri, norb)
    # Coulomb and exchange diagonals: J_ij = (ii|jj), K_ij = (ij|ji).
    jdiag = numpy.asarray(numpy.einsum('iijj->ij',eri), order='C')
    kdiag = numpy.asarray(numpy.einsum('ijji->ij',eri), order='C')
    c_h1e = h1e.ctypes.data_as(ctypes.c_void_p)
    c_jdiag = jdiag.ctypes.data_as(ctypes.c_void_p)
    c_kdiag = kdiag.ctypes.data_as(ctypes.c_void_p)
    occslsta = cistring._strs2occslst(ci_strs[0], norb)
    occslstb = cistring._strs2occslst(ci_strs[1], norb)
    # Reuse the UHF diagonal kernel with identical alpha/beta integrals.
    libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),
                             c_h1e, c_h1e, c_jdiag, c_jdiag, c_jdiag, c_kdiag, c_kdiag,
                             ctypes.c_int(norb),
                             ctypes.c_int(na), ctypes.c_int(nb),
                             ctypes.c_int(nelec[0]), ctypes.c_int(nelec[1]),
                             occslsta.ctypes.data_as(ctypes.c_void_p),
                             occslstb.ctypes.data_as(ctypes.c_void_p))
    return hdiag
def kernel_fixed_space(myci, h1e, eri, norb, nelec, ci_strs, ci0=None,
                       tol=None, lindep=None, max_cycle=None, max_space=None,
                       nroots=None, davidson_only=None,
                       max_memory=None, verbose=None, ecore=0, **kwargs):
    '''Diagonalize the Hamiltonian within a fixed, user-provided set of
    determinant strings (the selected space is not enlarged).

    Returns (energy + ecore, civec); for nroots > 1 the second item is a
    list of _SCIvector objects.
    '''
    log = logger.new_logger(myci, verbose)
    if tol is None: tol = myci.conv_tol
    if lindep is None: lindep = myci.lindep
    if max_cycle is None: max_cycle = myci.max_cycle
    if max_space is None: max_space = myci.max_space
    if max_memory is None: max_memory = myci.max_memory
    if nroots is None: nroots = myci.nroots
    if myci.verbose >= logger.WARN:
        myci.check_sanity()
    nelec = direct_spin1._unpack_nelec(nelec, myci.spin)
    ci0, nelec, ci_strs = _unpack(ci0, nelec, ci_strs)
    na = len(ci_strs[0])
    nb = len(ci_strs[1])
    # Absorb the 1e Hamiltonian into the 2e integrals for contract_2e.
    h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
    h2e = ao2mo.restore(1, h2e, norb)
    link_index = _all_linkstr_index(ci_strs, norb, nelec)
    hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
    if isinstance(ci0, _SCIvector):
        # A single vector or a stack of root vectors; normalize to a list.
        if ci0.size == na*nb:
            ci0 = [ci0.ravel()]
        else:
            ci0 = [x.ravel() for x in ci0]
    else:
        ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)
    def hop(c):
        hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)
        return hc.reshape(-1)
    precond = lambda x, e, *args: x/(hdiag-e+1e-4)
    #e, c = lib.davidson(hop, ci0, precond, tol=myci.conv_tol)
    e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
                    max_cycle=max_cycle, max_space=max_space, nroots=nroots,
                    max_memory=max_memory, verbose=log, **kwargs)
    if nroots > 1:
        return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]
    else:
        return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)
def kernel_float_space(myci, h1e, eri, norb, nelec, ci0=None,
                       tol=None, lindep=None, max_cycle=None, max_space=None,
                       nroots=None, davidson_only=None,
                       max_memory=None, verbose=None, ecore=0, **kwargs):
    '''Selected-CI driver in which the determinant space is iteratively
    enlarged (via enlarge_space) and re-diagonalized until either the
    energy or the size of the space stops changing.

    Returns (energy + ecore, civec); for nroots > 1 the second item is a
    list of _SCIvector objects.
    '''
    log = logger.new_logger(myci, verbose)
    if tol is None: tol = myci.conv_tol
    if lindep is None: lindep = myci.lindep
    if max_cycle is None: max_cycle = myci.max_cycle
    if max_space is None: max_space = myci.max_space
    if max_memory is None: max_memory = myci.max_memory
    if nroots is None: nroots = myci.nroots
    if myci.verbose >= logger.WARN:
        myci.check_sanity()
    nelec = direct_spin1._unpack_nelec(nelec, myci.spin)
    h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
    h2e = ao2mo.restore(1, h2e, norb)
    # TODO: initial guess from CISD
    if isinstance(ci0, _SCIvector):
        if ci0.size == len(ci0._strs[0])*len(ci0._strs[1]):
            ci0 = [ci0.ravel()]
        else:
            ci0 = [x.ravel() for x in ci0]
    else:
        # Start from the HF determinant (lowest nelec orbitals occupied)
        # and let enlarge_space build the initial selected space.
        ci_strs = (numpy.asarray([int('1'*nelec[0], 2)]),
                   numpy.asarray([int('1'*nelec[1], 2)]))
        ci0 = _as_SCIvector(numpy.ones((1,1)), ci_strs)
        ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
        if ci0.size < nroots:
            log.warn('''
Selected-CI space generated from HF ground state (by double exciting) is not enough for excited states.
HOMO->LUMO excitations are included in the initial guess.
NOTE: This may introduce excited states of different symmetry.\n''')
            # Seed with the HOMO->LUMO excited strings as well.
            corea = '1' * (nelec[0]-1)
            coreb = '1' * (nelec[1]-1)
            ci_strs = (numpy.asarray([int('1'+corea, 2), int('10'+corea, 2)]),
                       numpy.asarray([int('1'+coreb, 2), int('10'+coreb, 2)]))
            ci0 = _as_SCIvector(numpy.ones((2,2)), ci_strs)
            ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
            if ci0.size < nroots:
                raise RuntimeError('Not enough selected-CI space for %d states' % nroots)
    ci_strs = ci0._strs
    hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
    ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)
    def hop(c):
        # hop closes over ci_strs/link_index, which are rebound each cycle.
        hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)
        return hc.ravel()
    precond = lambda x, e, *args: x/(hdiag-e+myci.level_shift)
    namax = cistring.num_strings(norb, nelec[0])
    nbmax = cistring.num_strings(norb, nelec[1])
    e_last = 0
    float_tol = myci.start_tol
    tol_decay_rate = myci.tol_decay_rate
    conv = False
    # Macro-iterations: at most norb rounds of select-and-diagonalize.
    for icycle in range(norb):
        ci_strs = ci0[0]._strs
        # Tighten the Davidson tolerance each round, never below tol*1e2.
        float_tol = max(float_tol*tol_decay_rate, tol*1e2)
        log.debug('cycle %d ci.shape %s float_tol %g',
                  icycle, (len(ci_strs[0]), len(ci_strs[1])), float_tol)
        ci0 = [c.ravel() for c in ci0]
        link_index = _all_linkstr_index(ci_strs, norb, nelec)
        hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
        #e, ci0 = lib.davidson(hop, ci0.reshape(-1), precond, tol=float_tol)
        e, ci0 = myci.eig(hop, ci0, precond, tol=float_tol, lindep=lindep,
                          max_cycle=max_cycle, max_space=max_space, nroots=nroots,
                          max_memory=max_memory, verbose=log, **kwargs)
        if nroots > 1:
            ci0 = [_as_SCIvector(c, ci_strs) for c in ci0]
            de, e_last = min(e)-e_last, min(e)
            log.info('cycle %d E = %s dE = %.8g', icycle, e+ecore, de)
        else:
            ci0 = [_as_SCIvector(ci0, ci_strs)]
            de, e_last = e-e_last, e
            log.info('cycle %d E = %.15g dE = %.8g', icycle, e+ecore, de)
        # Converged when the full FCI space is reached or dE is small.
        if ci0[0].shape == (namax,nbmax) or abs(de) < tol*1e3:
            conv = True
            break
        last_ci0_size = float(len(ci_strs[0])), float(len(ci_strs[1]))
        ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
        na = len(ci0[0]._strs[0])
        nb = len(ci0[0]._strs[1])
        # Also converged when the space size changed by less than 1%.
        if ((.99 < na/last_ci0_size[0] < 1.01) and
            (.99 < nb/last_ci0_size[1] < 1.01)):
            conv = True
            break
    ci_strs = ci0[0]._strs
    log.debug('Extra CI in selected space %s', (len(ci_strs[0]), len(ci_strs[1])))
    ci0 = [c.ravel() for c in ci0]
    link_index = _all_linkstr_index(ci_strs, norb, nelec)
    hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
    # Final diagonalization at the requested (tight) tolerance.
    e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
                    max_cycle=max_cycle, max_space=max_space, nroots=nroots,
                    max_memory=max_memory, verbose=log, **kwargs)
    na = len(ci_strs[0])
    nb = len(ci_strs[1])
    if nroots > 1:
        for i, ei in enumerate(e+ecore):
            log.info('Selected CI state %d E = %.15g', i, ei)
        return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]
    else:
        log.info('Selected CI E = %.15g', e+ecore)
        return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
           lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
           davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
           select_cutoff=1e-3, ci_coeff_cutoff=1e-3, ecore=0, **kwargs):
    '''Function-style entry point: build a SelectedCI solver through
    direct_spin1._kfactory and run it.  Note: orbsym and wfnsym are accepted
    for interface compatibility but are not forwarded to the factory.
    '''
    return direct_spin1._kfactory(SelectedCI, h1e, eri, norb, nelec, ci0,
                                  level_shift, tol, lindep, max_cycle,
                                  max_space, nroots, davidson_only,
                                  pspace_size, select_cutoff=select_cutoff,
                                  ci_coeff_cutoff=ci_coeff_cutoff, ecore=ecore,
                                  **kwargs)
def make_rdm1s(civec_strs, norb, nelec, link_index=None):
    # Raw docstring: '\d' in '\dagger' is an invalid escape in a non-raw string.
    r'''Spin separated 1-particle density matrices.
    The return values include two density matrices: (alpha,alpha), (beta,beta)

    dm1[p,q] = <q^\dagger p>

    The convention is based on McWeeny's book, Eq (5.4.20).
    The contraction between 1-particle Hamiltonian and rdm1 is
    E = einsum('pq,qp', h1, rdm1)
    '''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    if link_index is None:
        cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
        cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
    else:
        cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    rdm1a = rdm.make_rdm1_spin1('FCImake_rdm1a', ci_coeff, ci_coeff,
                                norb, nelec, (cd_indexa,cd_indexb))
    rdm1b = rdm.make_rdm1_spin1('FCImake_rdm1b', ci_coeff, ci_coeff,
                                norb, nelec, (cd_indexa,cd_indexb))
    return rdm1a, rdm1b
def make_rdm1(civec_strs, norb, nelec, link_index=None):
    r'''Spin-traced 1-particle density matrix.

    dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>

    The convention follows McWeeny's book, Eq (5.4.20); the contraction
    with the 1-particle Hamiltonian is E = einsum('pq,qp', h1, rdm1).
    '''
    dm1_alpha, dm1_beta = make_rdm1s(civec_strs, norb, nelec, link_index)
    return dm1_alpha + dm1_beta
# dm[p,q,r,s] = <|p^+ q r^+ s|>
def make_rdm2s(civec_strs, norb, nelec, link_index=None, **kwargs):
    r'''Spin separated 2-particle density matrices.
    The return values include three density matrices:
    (alpha,alpha,alpha,alpha), (alpha,alpha,beta,beta), (beta,beta,beta,beta)

    2pdm[p,q,r,s] = :math:`\langle p^\dagger r^\dagger s q\rangle`
    '''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    if link_index is None:
        cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
        dd_indexa = des_des_linkstr(ci_strs[0], norb, nelec[0])
        cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
        dd_indexb = des_des_linkstr(ci_strs[1], norb, nelec[1])
    else:
        cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    na, nlinka = cd_indexa.shape[:2]
    nb, nlinkb = cd_indexb.shape[:2]
    fcivec = ci_coeff.reshape(na,nb)
    # (bb|aa) and (aa|bb)
    dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', fcivec, fcivec,
                                 norb, nelec, (cd_indexa,cd_indexb), 0)[1]
    # (aa|aa): nonzero only with at least two alpha electrons.
    dm2aa = numpy.zeros([norb]*4)
    if nelec[0] > 1:
        ma, mlinka = dd_indexa.shape[:2]
        libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,
                            dm2aa.ctypes.data_as(ctypes.c_void_p),
                            fcivec.ctypes.data_as(ctypes.c_void_p),
                            fcivec.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb),
                            ctypes.c_int(na), ctypes.c_int(nb),
                            ctypes.c_int(ma), ctypes.c_int(mlinka),
                            dd_indexa.ctypes.data_as(ctypes.c_void_p))
    # (bb|bb): same kernel applied to the transposed CI matrix.
    dm2bb = numpy.zeros([norb]*4)
    if nelec[1] > 1:
        mb, mlinkb = dd_indexb.shape[:2]
        fcivecT = lib.transpose(fcivec)
        libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,
                            dm2bb.ctypes.data_as(ctypes.c_void_p),
                            fcivecT.ctypes.data_as(ctypes.c_void_p),
                            fcivecT.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb),
                            ctypes.c_int(nb), ctypes.c_int(na),
                            ctypes.c_int(mb), ctypes.c_int(mlinkb),
                            dd_indexb.ctypes.data_as(ctypes.c_void_p))
    return dm2aa, dm2ab, dm2bb
def make_rdm2(civec_strs, norb, nelec, link_index=None, **kwargs):
    r'''Spin-traced two-particle density matrix.

    2pdm[p,q,r,s] = :math:`\langle p_\alpha^\dagger r_\alpha^\dagger s_\alpha q_\alpha\rangle +
                           \langle p_\beta^\dagger r_\alpha^\dagger s_\alpha q_\beta\rangle +
                           \langle p_\alpha^\dagger r_\beta^\dagger s_\beta q_\alpha\rangle +
                           \langle p_\beta^\dagger r_\beta^\dagger s_\beta q_\beta\rangle`.
    '''
    dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)
    # dm2ab enters twice: once as (aa|bb) and once transposed as (bb|aa).
    return dm2aa + dm2bb + dm2ab + dm2ab.transpose(2,3,0,1)
def trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index=None):
    r'''Spin separated transition 1-particle density matrices.
    See also function :func:`make_rdm1s`

    1pdm[p,q] = :math:`\langle q^\dagger p \rangle`
    '''
    cibra, nelec, ci_strs = _unpack(cibra_strs, nelec)
    ciket, nelec1, ci_strs1 = _unpack(ciket_strs, nelec)
    # Bra and ket must be defined on identical determinant string sets.
    assert(all(ci_strs[0] == ci_strs1[0]) and
           all(ci_strs[1] == ci_strs1[1]))
    if link_index is None:
        cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
        cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
    else:
        cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    rdm1a = rdm.make_rdm1_spin1('FCItrans_rdm1a', cibra, ciket,
                                norb, nelec, (cd_indexa,cd_indexb))
    rdm1b = rdm.make_rdm1_spin1('FCItrans_rdm1b', cibra, ciket,
                                norb, nelec, (cd_indexa,cd_indexb))
    return rdm1a, rdm1b
def trans_rdm1(cibra_strs, ciket_strs, norb, nelec, link_index=None):
    r'''Spin traced transition 1-particle density matrices.
    See also function :func:`make_rdm1`

    1pdm[p,q] = :math:`\langle q_\alpha^\dagger p_\alpha \rangle
                     + \langle q_\beta^\dagger p_\beta \rangle`
    '''
    dm1_alpha, dm1_beta = trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index)
    return dm1_alpha + dm1_beta
def spin_square(civec_strs, norb, nelec):
    '''Spin square for RHF-FCI CI wfn only (obtained from spin-degenerated
    Hamiltonian)'''
    # <S^2> = <Psi| S^2 |Psi>; solve 2S+1 from <S^2> = S(S+1).
    s2_civec = contract_ss(civec_strs, norb, nelec)
    ss = numpy.einsum('ij,ij->', civec_strs.reshape(s2_civec.shape), s2_civec)
    s = numpy.sqrt(ss + .25) - .5
    return ss, 2 * s + 1
def contract_ss(civec_strs, norb, nelec):
    r''' S^2 |\Psi\rangle

    Applies the spin-square operator to a selected-CI vector, using
    0.5*(S+S- + S-S+) built from per-orbital creation/annihilation maps
    plus the Sz^2 = ((Na-Nb)/2)^2 diagonal term.
    '''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    strsa, strsb = ci_strs
    neleca, nelecb = nelec
    ci_coeff = ci_coeff.reshape(len(strsa),len(strsb))
    def gen_map(fstr_index, strs, nelec, des=True):
        # Condense a link table into a (nstr, norb, 2) map of
        # (address, sign) per orbital; sign == 0 marks a forbidden move.
        a_index = fstr_index(strs, norb, nelec)
        amap = numpy.zeros((a_index.shape[0],norb,2), dtype=numpy.int32)
        if des:
            for k, tab in enumerate(a_index):
                sign = tab[:,3]
                tab = tab[sign!=0]
                amap[k,tab[:,1]] = tab[:,2:]
        else:
            for k, tab in enumerate(a_index):
                sign = tab[:,3]
                tab = tab[sign!=0]
                amap[k,tab[:,0]] = tab[:,2:]
        return amap
    # Annihilation/creation maps per spin; None when the operation is
    # impossible (no electron to remove / no empty orbital to fill).
    if neleca > 0:
        ades = gen_map(gen_des_linkstr, strsa, neleca)
    else:
        ades = None
    if nelecb > 0:
        bdes = gen_map(gen_des_linkstr, strsb, nelecb)
    else:
        bdes = None
    if neleca < norb:
        acre = gen_map(gen_cre_linkstr, strsa, neleca, False)
    else:
        acre = None
    if nelecb < norb:
        bcre = gen_map(gen_cre_linkstr, strsb, nelecb, False)
    else:
        bcre = None
    def trans(ci1, aindex, bindex):
        # Accumulate sum_i (a-op_i (x) b-op_i)|ci> into ci1 via an
        # intermediate t1 living on the operated string spaces.
        if aindex is None or bindex is None:
            return None
        ma = len(aindex)
        mb = len(bindex)
        t1 = numpy.zeros((ma,mb))
        for i in range(norb):
            signa = aindex[:,i,1]
            signb = bindex[:,i,1]
            maska = numpy.where(signa!=0)[0]
            maskb = numpy.where(signb!=0)[0]
            addra = aindex[maska,i,0]
            addrb = bindex[maskb,i,0]
            citmp = lib.take_2d(ci_coeff, addra, addrb)
            citmp *= signa[maska].reshape(-1,1)
            citmp *= signb[maskb]
            #: t1[addra.reshape(-1,1),addrb] += citmp
            lib.takebak_2d(t1, citmp, maska, maskb)
        for i in range(norb):
            signa = aindex[:,i,1]
            signb = bindex[:,i,1]
            maska = numpy.where(signa!=0)[0]
            maskb = numpy.where(signb!=0)[0]
            addra = aindex[maska,i,0]
            addrb = bindex[maskb,i,0]
            citmp = lib.take_2d(t1, maska, maskb)
            citmp *= signa[maska].reshape(-1,1)
            citmp *= signb[maskb]
            #: ci1[maska.reshape(-1,1), maskb] += citmp
            lib.takebak_2d(ci1, citmp, addra, addrb)
    ci1 = numpy.zeros_like(ci_coeff)
    trans(ci1, ades, bcre)  # S+*S-
    trans(ci1, acre, bdes)  # S-*S+
    ci1 *= .5
    # Sz^2 contribution: ((Na-Nb)/2)^2 on the diagonal.
    ci1 += (neleca-nelecb)**2*.25*ci_coeff
    return _as_SCIvector(ci1, ci_strs)
def to_fci(civec_strs, norb, nelec):
    '''Expand a selected-CI vector to the full FCI coefficient matrix,
    placing each selected coefficient at its FCI address, zero elsewhere.
    '''
    ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
    addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]
    addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]
    na = cistring.num_strings(norb, nelec[0])
    nb = cistring.num_strings(norb, nelec[1])
    ci0 = numpy.zeros((na,nb))
    lib.takebak_2d(ci0, ci_coeff, addrsa, addrsb)
    return ci0
def from_fci(fcivec, ci_strs, norb, nelec):
    '''Project a full FCI vector onto the selected determinant space
    defined by ci_strs; returns an _SCIvector of the picked coefficients.
    '''
    fcivec, nelec, ci_strs = _unpack(fcivec, nelec, ci_strs)
    addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]
    addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]
    na = cistring.num_strings(norb, nelec[0])
    nb = cistring.num_strings(norb, nelec[1])
    fcivec = fcivec.reshape(na,nb)
    civec = lib.take_2d(fcivec, addrsa, addrsb)
    return _as_SCIvector(civec, ci_strs)
class SelectedCI(direct_spin1.FCISolver):
    '''Selected-CI solver built on top of direct_spin1.FCISolver.

    Thresholds (defaults come from __config__):
        ci_coeff_cutoff : strings whose CI weight falls below this value are
            dropped when the space is re-selected (see enlarge_space).
        select_cutoff : importance threshold for adding new strings
            (see select_strs).
        conv_tol, start_tol, tol_decay_rate : control the Davidson tolerance
            schedule used by kernel_float_space.
    '''
    ci_coeff_cutoff = getattr(__config__, 'fci_selected_ci_SCI_ci_coeff_cutoff', .5e-3)
    select_cutoff = getattr(__config__, 'fci_selected_ci_SCI_select_cutoff', .5e-3)
    conv_tol = getattr(__config__, 'fci_selected_ci_SCI_conv_tol', 1e-9)
    start_tol = getattr(__config__, 'fci_selected_ci_SCI_start_tol', 3e-4)
    tol_decay_rate = getattr(__config__, 'fci_selected_ci_SCI_tol_decay_rate', 0.3)

    def __init__(self, mol=None):
        direct_spin1.FCISolver.__init__(self, mol)
##################################################
# don't modify the following attributes, they are not input options
        #self.converged = False
        #self.ci = None
        # Determinant strings of the last CI vector seen by this solver.
        self._strs = None
        keys = set(('ci_coeff_cutoff', 'select_cutoff', 'conv_tol',
                    'start_tol', 'tol_decay_rate'))
        self._keys = self._keys.union(keys)

    def dump_flags(self, verbose=None):
        direct_spin1.FCISolver.dump_flags(self, verbose)
        logger.info(self, 'ci_coeff_cutoff %g', self.ci_coeff_cutoff)
        logger.info(self, 'select_cutoff %g', self.select_cutoff)

    def contract_2e(self, eri, civec_strs, norb, nelec, link_index=None, **kwargs):
        # The argument civec_strs is a CI vector in function FCISolver.contract_2e.
        # Save and patch self._strs to make this contract_2e function compatible to
        # FCISolver.contract_2e.
        if getattr(civec_strs, '_strs', None) is not None:
            self._strs = civec_strs._strs
        else:
            assert(civec_strs.size == len(self._strs[0])*len(self._strs[1]))
            civec_strs = _as_SCIvector(civec_strs, self._strs)
        return contract_2e(eri, civec_strs, norb, nelec, link_index)

    def get_init_guess(self, ci_strs, norb, nelec, nroots, hdiag):
        '''Initial guess is the single Slater determinant
        '''
        na = len(ci_strs[0])
        nb = len(ci_strs[1])
        ci0 = direct_spin1._get_init_guess(na, nb, nroots, hdiag)
        return [_as_SCIvector(x, ci_strs) for x in ci0]

    def make_hdiag(self, h1e, eri, ci_strs, norb, nelec):
        return make_hdiag(h1e, eri, ci_strs, norb, nelec)

    # Module-level functions reused as methods.
    enlarge_space = enlarge_space
    kernel = kernel_float_space
    kernel_fixed_space = kernel_fixed_space

#    def approx_kernel(self, h1e, eri, norb, nelec, ci0=None, link_index=None,
#                      tol=None, lindep=None, max_cycle=None,
#                      max_memory=None, verbose=None, **kwargs):
#        ci_strs = getattr(ci0, '_strs', self._strs)
#        return self.kernel_fixed_space(h1e, eri, norb, nelec, ci_strs,
#                                       ci0, link_index, tol, lindep, 6,
#                                       max_memory, verbose, **kwargs)

    @lib.with_doc(spin_square.__doc__)
    def spin_square(self, civec_strs, norb, nelec):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        return spin_square(_as_SCIvector_if_not(civec_strs, self._strs), norb, nelec)

    def large_ci(self, civec_strs, norb, nelec, tol=.1, return_strs=True):
        # List the (coefficient, alpha-string, beta-string) triples of all
        # determinants whose |coefficient| exceeds tol; strings are returned
        # either as binary literals or as occupation lists.
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        ci, _, (strsa, strsb) = _unpack(civec_strs, nelec, self._strs)
        addra, addrb = numpy.where(abs(ci) > tol)
        if return_strs:
            strsa = [bin(x) for x in strsa[addra]]
            strsb = [bin(x) for x in strsb[addrb]]
            return list(zip(ci[addra,addrb], strsa, strsb))
        else:
            occslsta = cistring._strs2occslst(strsa[addra], norb)
            occslstb = cistring._strs2occslst(strsb[addrb], norb)
            return list(zip(ci[addra,addrb], occslsta, occslstb))

    def contract_ss(self, fcivec, norb, nelec):
        return contract_ss(fcivec, norb, nelec)

    @lib.with_doc(make_rdm1s.__doc__)
    def make_rdm1s(self, civec_strs, norb, nelec, link_index=None):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        return make_rdm1s(civec_strs, norb, nelec, link_index)

    @lib.with_doc(make_rdm1.__doc__)
    def make_rdm1(self, civec_strs, norb, nelec, link_index=None):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        rdm1a, rdm1b = self.make_rdm1s(civec_strs, norb, nelec, link_index)
        return rdm1a + rdm1b

    @lib.with_doc(make_rdm2s.__doc__)
    def make_rdm2s(self, civec_strs, norb, nelec, link_index=None, **kwargs):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        return make_rdm2s(civec_strs, norb, nelec, link_index)

    @lib.with_doc(make_rdm2.__doc__)
    def make_rdm2(self, civec_strs, norb, nelec, link_index=None, **kwargs):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        return make_rdm2(civec_strs, norb, nelec, link_index)

    def make_rdm12s(self, civec_strs, norb, nelec, link_index=None, **kwargs):
        # Return ((dm1a, dm1b), (dm2aa, dm2ab, dm2bb)).
        neleca, nelecb = nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)
        if neleca > 1 and nelecb > 1:
            # 1-RDMs by tracing the 2-RDMs (valid only with >1 electron of
            # each spin, since it divides by nelec-1).
            dm1a = numpy.einsum('iikl->kl', dm2aa) / (neleca-1)
            dm1b = numpy.einsum('iikl->kl', dm2bb) / (nelecb-1)
        else:
            dm1a, dm1b = make_rdm1s(civec_strs, norb, nelec, link_index)
        return (dm1a, dm1b), (dm2aa, dm2ab, dm2bb)

    def make_rdm12(self, civec_strs, norb, nelec, link_index=None, **kwargs):
        # Return (dm1, dm2), both spin-traced.
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        nelec_tot = sum(nelec)
        civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
        dm2 = make_rdm2(civec_strs, norb, nelec, link_index)
        if nelec_tot > 1:
            # Spin-traced 1-RDM by tracing the 2-RDM.
            dm1 = numpy.einsum('iikl->kl', dm2) / (nelec_tot-1)
        else:
            dm1 = make_rdm1(civec_strs, norb, nelec, link_index)
        return dm1, dm2

    @lib.with_doc(trans_rdm1s.__doc__)
    def trans_rdm1s(self, cibra, ciket, norb, nelec, link_index=None):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        cibra = _as_SCIvector_if_not(cibra, self._strs)
        ciket = _as_SCIvector_if_not(ciket, self._strs)
        return trans_rdm1s(cibra, ciket, norb, nelec, link_index)

    @lib.with_doc(trans_rdm1.__doc__)
    def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
        nelec = direct_spin1._unpack_nelec(nelec, self.spin)
        cibra = _as_SCIvector_if_not(cibra, self._strs)
        ciket = _as_SCIvector_if_not(ciket, self._strs)
        return trans_rdm1(cibra, ciket, norb, nelec, link_index)

    def gen_linkstr(self, norb, nelec, tril=True, spin=None, ci_strs=None):
        # Build the 4-tuple of link tables (cd_a, dd_a, cd_b, dd_b) for the
        # given strings, defaulting to the cached self._strs.
        if spin is None:
            spin = self.spin
        if ci_strs is None:
            ci_strs = self._strs
        neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)
        if tril:
            cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, neleca)
            dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, neleca)
            cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelecb)
            dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelecb)
        else:
            cd_indexa = cre_des_linkstr(ci_strs[0], norb, neleca)
            dd_indexa = des_des_linkstr(ci_strs[0], norb, neleca)
            cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelecb)
            dd_indexb = des_des_linkstr(ci_strs[1], norb, nelecb)
        return cd_indexa, dd_indexa, cd_indexb, dd_indexb
SCI = SelectedCI  # short backward-compatible alias
def _unpack(civec_strs, nelec, ci_strs=None, spin=None):
    """Normalize CI inputs to ``(civec_strs, (neleca, nelecb), ci_strs)``.

    If ``civec_strs`` is a tagged vector its own determinant strings take
    precedence over the ``ci_strs`` argument; string lists are converted to
    numpy arrays.
    """
    neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)
    ci_strs = getattr(civec_strs, '_strs', ci_strs)
    if ci_strs is not None:
        ci_strs = (numpy.asarray(ci_strs[0]), numpy.asarray(ci_strs[1]))
    return civec_strs, (neleca, nelecb), ci_strs
def _all_linkstr_index(ci_strs, norb, nelec):
    """Lower-triangle cre_des/des_des link tables for both spin channels.

    Returns ``(cd_indexa, dd_indexa, cd_indexb, dd_indexb)``.
    """
    tables = []
    for strs, ne in zip(ci_strs, nelec):
        tables.append(cre_des_linkstr_tril(strs, norb, ne))
        tables.append(des_des_linkstr_tril(strs, norb, ne))
    return tuple(tables)
# numpy.ndarray does not allow to attach attribtues. Overwrite the
# numpy.ndarray class to tag the ._strs attribute
class _SCIvector(numpy.ndarray):
    """ndarray subclass tagged with the determinant strings (``._strs``)
    that define the selected-CI space the coefficients belong to."""
    def __array_finalize__(self, obj):
        # Propagate the tag through views/slices; plain arrays yield None.
        self._strs = getattr(obj, '_strs', None)

    # Whenever the contents of the array was modified (through ufunc), the tag
    # should be expired. Overwrite the output of ufunc to restore ndarray type.
    def __array_wrap__(self, out, context=None):
        return numpy.ndarray.__array_wrap__(self, out, context).view(numpy.ndarray)
def _as_SCIvector(civec, ci_strs):
    # Reinterpret civec as a tagged _SCIvector (zero-copy view) and attach
    # the determinant strings to it.
    civec = civec.view(_SCIvector)
    civec._strs = ci_strs
    return civec
def _as_SCIvector_if_not(civec, ci_strs):
    """Attach ``ci_strs`` to ``civec`` unless it already carries its own tag."""
    if getattr(civec, '_strs', None) is not None:
        return civec
    return _as_SCIvector(civec, ci_strs)
if __name__ == '__main__':
    # Smoke test: run selected CI on a cluster of 8 hydrogen atoms and check
    # energies and density matrices against exact FCI (direct_spin1).
    from functools import reduce
    from pyscf import gto
    from pyscf import scf
    from pyscf import ao2mo
    from pyscf.fci import spin_op
    from pyscf.fci import addons
    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None
    mol.atom = [
        ['H', ( 1.,-1. , 0. )],
        ['H', ( 0.,-1. ,-1. )],
        ['H', ( 1.,-0.5 ,-1. )],
        ['H', ( 0.,-0. ,-1. )],
        ['H', ( 1.,-0.5 , 0. )],
        ['H', ( 0., 1. , 1. )],
        ['H', ( 1., 2. , 3. )],
        ['H', ( 1., 2. , 4. )],
    ]
    mol.basis = 'sto-3g'
    mol.build()
    m = scf.RHF(mol)
    m.kernel()
    norb = m.mo_coeff.shape[1]
    nelec = mol.nelectron
    # One-electron integrals in the MO basis and the full (norb^4) ERI tensor.
    h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
    eri = ao2mo.kernel(m._eri, m.mo_coeff, compact=False)
    eri = eri.reshape(norb,norb,norb,norb)
    # Selected-CI energy vs. the exact FCI reference.
    e1, c1 = kernel(h1e, eri, norb, nelec)
    e2, c2 = direct_spin1.kernel(h1e, eri, norb, nelec)
    print(e1, e1 - -11.894559902235565, 'diff to FCI', e1-e2)
    print(c1.shape, c2.shape)
    # 1- and 2-RDM deviations from the FCI reference (should be small).
    dm1_1 = make_rdm1(c1, norb, nelec)
    dm1_2 = direct_spin1.make_rdm1(c2, norb, nelec)
    print(abs(dm1_1 - dm1_2).sum())
    dm2_1 = make_rdm2(c1, norb, nelec)
    dm2_2 = direct_spin1.make_rdm12(c2, norb, nelec)[1]
    print(abs(dm2_1 - dm2_2).sum())
    # Re-solve within the fixed determinant space selected above.
    myci = SelectedCI()
    e, c = kernel_fixed_space(myci, h1e, eri, norb, nelec, c1._strs)
    print(e - -11.894559902235565)
    print(myci.large_ci(c1, norb, nelec))
    print(myci.spin_square(c1, norb, nelec)[0] -
          spin_op.spin_square0(to_fci(c1, norb, nelec), norb, nelec)[0])
    # Spin-constrained selected CI via addons.fix_spin_.
    myci = SelectedCI()
    myci = addons.fix_spin_(myci)
    e1, c1 = myci.kernel(h1e, eri, norb, nelec)
    print(e1, e1 - -11.89467612053687)
    print(myci.spin_square(c1, norb, nelec))
| 42.599174 | 105 | 0.607964 |
import ctypes
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.fci import cistring
from pyscf.fci import direct_spin1
from pyscf.fci import rdm
from pyscf import __config__
libfci = lib.load_library('libfci')
def contract_2e(eri, civec_strs, norb, nelec, link_index=None):
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
if link_index is None:
link_index = _all_linkstr_index(ci_strs, norb, nelec)
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
na, nlinka = cd_indexa.shape[:2]
nb, nlinkb = cd_indexb.shape[:2]
eri = ao2mo.restore(1, eri, norb)
eri1 = eri.transpose(0,2,1,3) - eri.transpose(0,2,3,1)
idx,idy = numpy.tril_indices(norb, -1)
idx = idx * norb + idy
eri1 = lib.take_2d(eri1.reshape(norb**2,-1), idx, idx) * 2
fcivec = ci_coeff.reshape(na,nb)
if nelec[1] > 1:
mb, mlinkb = dd_indexb.shape[:2]
fcivecT = lib.transpose(fcivec)
ci1T = numpy.zeros((nb,na))
libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
fcivecT.ctypes.data_as(ctypes.c_void_p),
ci1T.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(nb), ctypes.c_int(na),
ctypes.c_int(mb), ctypes.c_int(mlinkb),
dd_indexb.ctypes.data_as(ctypes.c_void_p))
ci1 = lib.transpose(ci1T, out=fcivecT)
else:
ci1 = numpy.zeros_like(fcivec)
if nelec[0] > 1:
ma, mlinka = dd_indexa.shape[:2]
libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(ma), ctypes.c_int(mlinka),
dd_indexa.ctypes.data_as(ctypes.c_void_p))
h_ps = numpy.einsum('pqqs->ps', eri)
eri1 = eri * 2
for k in range(norb):
eri1[:,:,k,k] += h_ps/nelec[0]
eri1[k,k,:,:] += h_ps/nelec[1]
eri1 = ao2mo.restore(4, eri1, norb)
libfci.SCIcontract_2e_bbaa(eri1.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
cd_indexa.ctypes.data_as(ctypes.c_void_p),
cd_indexb.ctypes.data_as(ctypes.c_void_p))
return _as_SCIvector(ci1.reshape(ci_coeff.shape), ci_strs)
def select_strs(myci, eri, eri_pq_max, civec_max, strs, norb, nelec):
strs = numpy.asarray(strs, dtype=numpy.int64)
nstrs = len(strs)
nvir = norb - nelec
strs_add = numpy.empty((nstrs*(nelec*nvir)**2//4), dtype=numpy.int64)
libfci.SCIselect_strs.restype = ctypes.c_int
nadd = libfci.SCIselect_strs(strs_add.ctypes.data_as(ctypes.c_void_p),
strs.ctypes.data_as(ctypes.c_void_p),
eri.ctypes.data_as(ctypes.c_void_p),
eri_pq_max.ctypes.data_as(ctypes.c_void_p),
civec_max.ctypes.data_as(ctypes.c_void_p),
ctypes.c_double(myci.select_cutoff),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs))
strs_add = sorted(set(strs_add[:nadd]) - set(strs))
return numpy.asarray(strs_add, dtype=numpy.int64)
def enlarge_space(myci, civec_strs, eri, norb, nelec):
if isinstance(civec_strs, (tuple, list)):
nelec, (strsa, strsb) = _unpack(civec_strs[0], nelec)[1:]
ci_coeff = lib.asarray(civec_strs)
else:
ci_coeff, nelec, (strsa, strsb) = _unpack(civec_strs, nelec)
na = len(strsa)
nb = len(strsb)
ci0 = ci_coeff.reshape(-1,na,nb)
civec_a_max = lib.norm(ci0, axis=2).max(axis=0)
civec_b_max = lib.norm(ci0, axis=1).max(axis=0)
ci_aidx = numpy.where(civec_a_max > myci.ci_coeff_cutoff)[0]
ci_bidx = numpy.where(civec_b_max > myci.ci_coeff_cutoff)[0]
civec_a_max = civec_a_max[ci_aidx]
civec_b_max = civec_b_max[ci_bidx]
strsa = strsa[ci_aidx]
strsb = strsb[ci_bidx]
eri = ao2mo.restore(1, eri, norb)
eri_pq_max = abs(eri.reshape(norb**2,-1)).max(axis=1).reshape(norb,norb)
strsa_add = select_strs(myci, eri, eri_pq_max, civec_a_max, strsa, norb, nelec[0])
strsb_add = select_strs(myci, eri, eri_pq_max, civec_b_max, strsb, norb, nelec[1])
strsa = numpy.append(strsa, strsa_add)
strsb = numpy.append(strsb, strsb_add)
aidx = numpy.argsort(strsa)
bidx = numpy.argsort(strsb)
ci_strs = (strsa[aidx], strsb[bidx])
aidx = numpy.where(aidx < len(ci_aidx))[0]
bidx = numpy.where(bidx < len(ci_bidx))[0]
ma = len(strsa)
mb = len(strsb)
cs = []
for i in range(ci0.shape[0]):
ci1 = numpy.zeros((ma,mb))
tmp = lib.take_2d(ci0[i], ci_aidx, ci_bidx)
lib.takebak_2d(ci1, tmp, aidx, bidx)
cs.append(_as_SCIvector(ci1, ci_strs))
if not isinstance(civec_strs, (tuple, list)) and civec_strs.ndim < 3:
cs = cs[0]
return cs
def cre_des_linkstr(strs, norb, nelec, tril=False):
strs = numpy.asarray(strs, dtype=numpy.int64)
nvir = norb - nelec
nstrs = len(strs)
link_index = numpy.zeros((nstrs,nelec+nelec*nvir,4), dtype=numpy.int32)
libfci.SCIcre_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nstrs),
ctypes.c_int(nelec),
strs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(tril))
return link_index
def cre_des_linkstr_tril(strs, norb, nelec):
return cre_des_linkstr(strs, norb, nelec, True)
def des_des_linkstr(strs, norb, nelec, tril=False):
if nelec < 2:
return None
strs = numpy.asarray(strs, dtype=numpy.int64)
nvir = norb - nelec
nstrs = len(strs)
inter1 = numpy.empty((nstrs*nelec), dtype=numpy.int64)
libfci.SCIdes_uniq_strs.restype = ctypes.c_int
ninter = libfci.SCIdes_uniq_strs(inter1.ctypes.data_as(ctypes.c_void_p),
strs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs))
inter1 = numpy.asarray(sorted(set(inter1[:ninter])), dtype=numpy.int64)
ninter = len(inter1)
inter = numpy.empty((ninter*nelec), dtype=numpy.int64)
ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
inter1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec-1),
ctypes.c_int(ninter))
inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
ninter = len(inter)
nvir += 2
link_index = numpy.zeros((ninter,nvir*nvir,4), dtype=numpy.int32)
libfci.SCIdes_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs), ctypes.c_int(ninter),
strs.ctypes.data_as(ctypes.c_void_p),
inter.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(tril))
return link_index
def des_des_linkstr_tril(strs, norb, nelec):
return des_des_linkstr(strs, norb, nelec, True)
def gen_des_linkstr(strs, norb, nelec):
if nelec < 1:
return None
strs = numpy.asarray(strs, dtype=numpy.int64)
nvir = norb - nelec
nstrs = len(strs)
inter = numpy.empty((nstrs*nelec), dtype=numpy.int64)
libfci.SCIdes_uniq_strs.restype = ctypes.c_int
ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
strs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs))
inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
ninter = len(inter)
nvir += 1
link_index = numpy.zeros((ninter,nvir,4), dtype=numpy.int32)
libfci.SCIdes_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs), ctypes.c_int(ninter),
strs.ctypes.data_as(ctypes.c_void_p),
inter.ctypes.data_as(ctypes.c_void_p))
return link_index
def gen_cre_linkstr(strs, norb, nelec):
if nelec == norb:
return None
strs = numpy.asarray(strs, dtype=numpy.int64)
nvir = norb - nelec
nstrs = len(strs)
inter = numpy.empty((nstrs*nvir), dtype=numpy.int64)
libfci.SCIcre_uniq_strs.restype = ctypes.c_int
ninter = libfci.SCIcre_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
strs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs))
inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
ninter = len(inter)
link_index = numpy.zeros((ninter,nelec+1,4), dtype=numpy.int32)
libfci.SCIcre_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs), ctypes.c_int(ninter),
strs.ctypes.data_as(ctypes.c_void_p),
inter.ctypes.data_as(ctypes.c_void_p))
return link_index
def make_hdiag(h1e, eri, ci_strs, norb, nelec):
ci_coeff, nelec, ci_strs = _unpack(None, nelec, ci_strs)
na = len(ci_strs[0])
nb = len(ci_strs[1])
hdiag = numpy.empty(na*nb)
h1e = numpy.asarray(h1e, order='C')
eri = ao2mo.restore(1, eri, norb)
jdiag = numpy.asarray(numpy.einsum('iijj->ij',eri), order='C')
kdiag = numpy.asarray(numpy.einsum('ijji->ij',eri), order='C')
c_h1e = h1e.ctypes.data_as(ctypes.c_void_p)
c_jdiag = jdiag.ctypes.data_as(ctypes.c_void_p)
c_kdiag = kdiag.ctypes.data_as(ctypes.c_void_p)
occslsta = cistring._strs2occslst(ci_strs[0], norb)
occslstb = cistring._strs2occslst(ci_strs[1], norb)
libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),
c_h1e, c_h1e, c_jdiag, c_jdiag, c_jdiag, c_kdiag, c_kdiag,
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nelec[0]), ctypes.c_int(nelec[1]),
occslsta.ctypes.data_as(ctypes.c_void_p),
occslstb.ctypes.data_as(ctypes.c_void_p))
return hdiag
def kernel_fixed_space(myci, h1e, eri, norb, nelec, ci_strs, ci0=None,
tol=None, lindep=None, max_cycle=None, max_space=None,
nroots=None, davidson_only=None,
max_memory=None, verbose=None, ecore=0, **kwargs):
log = logger.new_logger(myci, verbose)
if tol is None: tol = myci.conv_tol
if lindep is None: lindep = myci.lindep
if max_cycle is None: max_cycle = myci.max_cycle
if max_space is None: max_space = myci.max_space
if max_memory is None: max_memory = myci.max_memory
if nroots is None: nroots = myci.nroots
if myci.verbose >= logger.WARN:
myci.check_sanity()
nelec = direct_spin1._unpack_nelec(nelec, myci.spin)
ci0, nelec, ci_strs = _unpack(ci0, nelec, ci_strs)
na = len(ci_strs[0])
nb = len(ci_strs[1])
h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
h2e = ao2mo.restore(1, h2e, norb)
link_index = _all_linkstr_index(ci_strs, norb, nelec)
hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
if isinstance(ci0, _SCIvector):
if ci0.size == na*nb:
ci0 = [ci0.ravel()]
else:
ci0 = [x.ravel() for x in ci0]
else:
ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)
def hop(c):
hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)
return hc.reshape(-1)
precond = lambda x, e, *args: x/(hdiag-e+1e-4)
e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
max_cycle=max_cycle, max_space=max_space, nroots=nroots,
max_memory=max_memory, verbose=log, **kwargs)
if nroots > 1:
return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]
else:
return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)
def kernel_float_space(myci, h1e, eri, norb, nelec, ci0=None,
tol=None, lindep=None, max_cycle=None, max_space=None,
nroots=None, davidson_only=None,
max_memory=None, verbose=None, ecore=0, **kwargs):
log = logger.new_logger(myci, verbose)
if tol is None: tol = myci.conv_tol
if lindep is None: lindep = myci.lindep
if max_cycle is None: max_cycle = myci.max_cycle
if max_space is None: max_space = myci.max_space
if max_memory is None: max_memory = myci.max_memory
if nroots is None: nroots = myci.nroots
if myci.verbose >= logger.WARN:
myci.check_sanity()
nelec = direct_spin1._unpack_nelec(nelec, myci.spin)
h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
h2e = ao2mo.restore(1, h2e, norb)
if isinstance(ci0, _SCIvector):
if ci0.size == len(ci0._strs[0])*len(ci0._strs[1]):
ci0 = [ci0.ravel()]
else:
ci0 = [x.ravel() for x in ci0]
else:
ci_strs = (numpy.asarray([int('1'*nelec[0], 2)]),
numpy.asarray([int('1'*nelec[1], 2)]))
ci0 = _as_SCIvector(numpy.ones((1,1)), ci_strs)
ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
if ci0.size < nroots:
log.warn('''
Selected-CI space generated from HF ground state (by double exciting) is not enough for excited states.
HOMO->LUMO excitations are included in the initial guess.
NOTE: This may introduce excited states of different symmetry.\n''')
corea = '1' * (nelec[0]-1)
coreb = '1' * (nelec[1]-1)
ci_strs = (numpy.asarray([int('1'+corea, 2), int('10'+corea, 2)]),
numpy.asarray([int('1'+coreb, 2), int('10'+coreb, 2)]))
ci0 = _as_SCIvector(numpy.ones((2,2)), ci_strs)
ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
if ci0.size < nroots:
raise RuntimeError('Not enough selected-CI space for %d states' % nroots)
ci_strs = ci0._strs
hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)
def hop(c):
hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)
return hc.ravel()
precond = lambda x, e, *args: x/(hdiag-e+myci.level_shift)
namax = cistring.num_strings(norb, nelec[0])
nbmax = cistring.num_strings(norb, nelec[1])
e_last = 0
float_tol = myci.start_tol
tol_decay_rate = myci.tol_decay_rate
conv = False
for icycle in range(norb):
ci_strs = ci0[0]._strs
float_tol = max(float_tol*tol_decay_rate, tol*1e2)
log.debug('cycle %d ci.shape %s float_tol %g',
icycle, (len(ci_strs[0]), len(ci_strs[1])), float_tol)
ci0 = [c.ravel() for c in ci0]
link_index = _all_linkstr_index(ci_strs, norb, nelec)
hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
e, ci0 = myci.eig(hop, ci0, precond, tol=float_tol, lindep=lindep,
max_cycle=max_cycle, max_space=max_space, nroots=nroots,
max_memory=max_memory, verbose=log, **kwargs)
if nroots > 1:
ci0 = [_as_SCIvector(c, ci_strs) for c in ci0]
de, e_last = min(e)-e_last, min(e)
log.info('cycle %d E = %s dE = %.8g', icycle, e+ecore, de)
else:
ci0 = [_as_SCIvector(ci0, ci_strs)]
de, e_last = e-e_last, e
log.info('cycle %d E = %.15g dE = %.8g', icycle, e+ecore, de)
if ci0[0].shape == (namax,nbmax) or abs(de) < tol*1e3:
conv = True
break
last_ci0_size = float(len(ci_strs[0])), float(len(ci_strs[1]))
ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
na = len(ci0[0]._strs[0])
nb = len(ci0[0]._strs[1])
if ((.99 < na/last_ci0_size[0] < 1.01) and
(.99 < nb/last_ci0_size[1] < 1.01)):
conv = True
break
ci_strs = ci0[0]._strs
log.debug('Extra CI in selected space %s', (len(ci_strs[0]), len(ci_strs[1])))
ci0 = [c.ravel() for c in ci0]
link_index = _all_linkstr_index(ci_strs, norb, nelec)
hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
max_cycle=max_cycle, max_space=max_space, nroots=nroots,
max_memory=max_memory, verbose=log, **kwargs)
na = len(ci_strs[0])
nb = len(ci_strs[1])
if nroots > 1:
for i, ei in enumerate(e+ecore):
log.info('Selected CI state %d E = %.15g', i, ei)
return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]
else:
log.info('Selected CI E = %.15g', e+ecore)
return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
select_cutoff=1e-3, ci_coeff_cutoff=1e-3, ecore=0, **kwargs):
return direct_spin1._kfactory(SelectedCI, h1e, eri, norb, nelec, ci0,
level_shift, tol, lindep, max_cycle,
max_space, nroots, davidson_only,
pspace_size, select_cutoff=select_cutoff,
ci_coeff_cutoff=ci_coeff_cutoff, ecore=ecore,
**kwargs)
def make_rdm1s(civec_strs, norb, nelec, link_index=None):
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
if link_index is None:
cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
else:
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
rdm1a = rdm.make_rdm1_spin1('FCImake_rdm1a', ci_coeff, ci_coeff,
norb, nelec, (cd_indexa,cd_indexb))
rdm1b = rdm.make_rdm1_spin1('FCImake_rdm1b', ci_coeff, ci_coeff,
norb, nelec, (cd_indexa,cd_indexb))
return rdm1a, rdm1b
def make_rdm1(civec_strs, norb, nelec, link_index=None):
rdm1a, rdm1b = make_rdm1s(civec_strs, norb, nelec, link_index)
return rdm1a + rdm1b
def make_rdm2s(civec_strs, norb, nelec, link_index=None, **kwargs):
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
if link_index is None:
cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
dd_indexa = des_des_linkstr(ci_strs[0], norb, nelec[0])
cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
dd_indexb = des_des_linkstr(ci_strs[1], norb, nelec[1])
else:
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
na, nlinka = cd_indexa.shape[:2]
nb, nlinkb = cd_indexb.shape[:2]
fcivec = ci_coeff.reshape(na,nb)
dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', fcivec, fcivec,
norb, nelec, (cd_indexa,cd_indexb), 0)[1]
dm2aa = numpy.zeros([norb]*4)
if nelec[0] > 1:
ma, mlinka = dd_indexa.shape[:2]
libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,
dm2aa.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(ma), ctypes.c_int(mlinka),
dd_indexa.ctypes.data_as(ctypes.c_void_p))
dm2bb = numpy.zeros([norb]*4)
if nelec[1] > 1:
mb, mlinkb = dd_indexb.shape[:2]
fcivecT = lib.transpose(fcivec)
libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,
dm2bb.ctypes.data_as(ctypes.c_void_p),
fcivecT.ctypes.data_as(ctypes.c_void_p),
fcivecT.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(nb), ctypes.c_int(na),
ctypes.c_int(mb), ctypes.c_int(mlinkb),
dd_indexb.ctypes.data_as(ctypes.c_void_p))
return dm2aa, dm2ab, dm2bb
def make_rdm2(civec_strs, norb, nelec, link_index=None, **kwargs):
dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)
dm2aa += dm2bb
dm2aa += dm2ab
dm2aa += dm2ab.transpose(2,3,0,1)
return dm2aa
def trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index=None):
cibra, nelec, ci_strs = _unpack(cibra_strs, nelec)
ciket, nelec1, ci_strs1 = _unpack(ciket_strs, nelec)
assert(all(ci_strs[0] == ci_strs1[0]) and
all(ci_strs[1] == ci_strs1[1]))
if link_index is None:
cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
else:
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
rdm1a = rdm.make_rdm1_spin1('FCItrans_rdm1a', cibra, ciket,
norb, nelec, (cd_indexa,cd_indexb))
rdm1b = rdm.make_rdm1_spin1('FCItrans_rdm1b', cibra, ciket,
norb, nelec, (cd_indexa,cd_indexb))
return rdm1a, rdm1b
def trans_rdm1(cibra_strs, ciket_strs, norb, nelec, link_index=None):
rdm1a, rdm1b = trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index)
return rdm1a + rdm1b
def spin_square(civec_strs, norb, nelec):
ci1 = contract_ss(civec_strs, norb, nelec)
ss = numpy.einsum('ij,ij->', civec_strs.reshape(ci1.shape), ci1)
s = numpy.sqrt(ss+.25) - .5
multip = s*2+1
return ss, multip
def contract_ss(civec_strs, norb, nelec):
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
strsa, strsb = ci_strs
neleca, nelecb = nelec
ci_coeff = ci_coeff.reshape(len(strsa),len(strsb))
def gen_map(fstr_index, strs, nelec, des=True):
a_index = fstr_index(strs, norb, nelec)
amap = numpy.zeros((a_index.shape[0],norb,2), dtype=numpy.int32)
if des:
for k, tab in enumerate(a_index):
sign = tab[:,3]
tab = tab[sign!=0]
amap[k,tab[:,1]] = tab[:,2:]
else:
for k, tab in enumerate(a_index):
sign = tab[:,3]
tab = tab[sign!=0]
amap[k,tab[:,0]] = tab[:,2:]
return amap
if neleca > 0:
ades = gen_map(gen_des_linkstr, strsa, neleca)
else:
ades = None
if nelecb > 0:
bdes = gen_map(gen_des_linkstr, strsb, nelecb)
else:
bdes = None
if neleca < norb:
acre = gen_map(gen_cre_linkstr, strsa, neleca, False)
else:
acre = None
if nelecb < norb:
bcre = gen_map(gen_cre_linkstr, strsb, nelecb, False)
else:
bcre = None
def trans(ci1, aindex, bindex):
if aindex is None or bindex is None:
return None
ma = len(aindex)
mb = len(bindex)
t1 = numpy.zeros((ma,mb))
for i in range(norb):
signa = aindex[:,i,1]
signb = bindex[:,i,1]
maska = numpy.where(signa!=0)[0]
maskb = numpy.where(signb!=0)[0]
addra = aindex[maska,i,0]
addrb = bindex[maskb,i,0]
citmp = lib.take_2d(ci_coeff, addra, addrb)
citmp *= signa[maska].reshape(-1,1)
citmp *= signb[maskb]
lib.takebak_2d(t1, citmp, maska, maskb)
for i in range(norb):
signa = aindex[:,i,1]
signb = bindex[:,i,1]
maska = numpy.where(signa!=0)[0]
maskb = numpy.where(signb!=0)[0]
addra = aindex[maska,i,0]
addrb = bindex[maskb,i,0]
citmp = lib.take_2d(t1, maska, maskb)
citmp *= signa[maska].reshape(-1,1)
citmp *= signb[maskb]
lib.takebak_2d(ci1, citmp, addra, addrb)
ci1 = numpy.zeros_like(ci_coeff)
trans(ci1, ades, bcre)
trans(ci1, acre, bdes)
ci1 *= .5
ci1 += (neleca-nelecb)**2*.25*ci_coeff
return _as_SCIvector(ci1, ci_strs)
def to_fci(civec_strs, norb, nelec):
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]
addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]
na = cistring.num_strings(norb, nelec[0])
nb = cistring.num_strings(norb, nelec[1])
ci0 = numpy.zeros((na,nb))
lib.takebak_2d(ci0, ci_coeff, addrsa, addrsb)
return ci0
def from_fci(fcivec, ci_strs, norb, nelec):
fcivec, nelec, ci_strs = _unpack(fcivec, nelec, ci_strs)
addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]
addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]
na = cistring.num_strings(norb, nelec[0])
nb = cistring.num_strings(norb, nelec[1])
fcivec = fcivec.reshape(na,nb)
civec = lib.take_2d(fcivec, addrsa, addrsb)
return _as_SCIvector(civec, ci_strs)
class SelectedCI(direct_spin1.FCISolver):
ci_coeff_cutoff = getattr(__config__, 'fci_selected_ci_SCI_ci_coeff_cutoff', .5e-3)
select_cutoff = getattr(__config__, 'fci_selected_ci_SCI_select_cutoff', .5e-3)
conv_tol = getattr(__config__, 'fci_selected_ci_SCI_conv_tol', 1e-9)
start_tol = getattr(__config__, 'fci_selected_ci_SCI_start_tol', 3e-4)
tol_decay_rate = getattr(__config__, 'fci_selected_ci_SCI_tol_decay_rate', 0.3)
def __init__(self, mol=None):
direct_spin1.FCISolver.__init__(self, mol)
et_init_guess(na, nb, nroots, hdiag)
return [_as_SCIvector(x, ci_strs) for x in ci0]
def make_hdiag(self, h1e, eri, ci_strs, norb, nelec):
return make_hdiag(h1e, eri, ci_strs, norb, nelec)
enlarge_space = enlarge_space
kernel = kernel_float_space
kernel_fixed_space = kernel_fixed_space
# def approx_kernel(self, h1e, eri, norb, nelec, ci0=None, link_index=None,
# tol=None, lindep=None, max_cycle=None,
# max_memory=None, verbose=None, **kwargs):
# ci_strs = getattr(ci0, '_strs', self._strs)
# return self.kernel_fixed_space(h1e, eri, norb, nelec, ci_strs,
# ci0, link_index, tol, lindep, 6,
# max_memory, verbose, **kwargs)
@lib.with_doc(spin_square.__doc__)
def spin_square(self, civec_strs, norb, nelec):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
return spin_square(_as_SCIvector_if_not(civec_strs, self._strs), norb, nelec)
def large_ci(self, civec_strs, norb, nelec, tol=.1, return_strs=True):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
ci, _, (strsa, strsb) = _unpack(civec_strs, nelec, self._strs)
addra, addrb = numpy.where(abs(ci) > tol)
if return_strs:
strsa = [bin(x) for x in strsa[addra]]
strsb = [bin(x) for x in strsb[addrb]]
return list(zip(ci[addra,addrb], strsa, strsb))
else:
occslsta = cistring._strs2occslst(strsa[addra], norb)
occslstb = cistring._strs2occslst(strsb[addrb], norb)
return list(zip(ci[addra,addrb], occslsta, occslstb))
def contract_ss(self, fcivec, norb, nelec):
return contract_ss(fcivec, norb, nelec)
@lib.with_doc(make_rdm1s.__doc__)
def make_rdm1s(self, civec_strs, norb, nelec, link_index=None):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
return make_rdm1s(civec_strs, norb, nelec, link_index)
@lib.with_doc(make_rdm1.__doc__)
def make_rdm1(self, civec_strs, norb, nelec, link_index=None):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
rdm1a, rdm1b = self.make_rdm1s(civec_strs, norb, nelec, link_index)
return rdm1a + rdm1b
@lib.with_doc(make_rdm2s.__doc__)
def make_rdm2s(self, civec_strs, norb, nelec, link_index=None, **kwargs):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
return make_rdm2s(civec_strs, norb, nelec, link_index)
@lib.with_doc(make_rdm2.__doc__)
def make_rdm2(self, civec_strs, norb, nelec, link_index=None, **kwargs):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
return make_rdm2(civec_strs, norb, nelec, link_index)
def make_rdm12s(self, civec_strs, norb, nelec, link_index=None, **kwargs):
neleca, nelecb = nelec = direct_spin1._unpack_nelec(nelec, self.spin)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)
if neleca > 1 and nelecb > 1:
dm1a = numpy.einsum('iikl->kl', dm2aa) / (neleca-1)
dm1b = numpy.einsum('iikl->kl', dm2bb) / (nelecb-1)
else:
dm1a, dm1b = make_rdm1s(civec_strs, norb, nelec, link_index)
return (dm1a, dm1b), (dm2aa, dm2ab, dm2bb)
def make_rdm12(self, civec_strs, norb, nelec, link_index=None, **kwargs):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
nelec_tot = sum(nelec)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
dm2 = make_rdm2(civec_strs, norb, nelec, link_index)
if nelec_tot > 1:
dm1 = numpy.einsum('iikl->kl', dm2) / (nelec_tot-1)
else:
dm1 = make_rdm1(civec_strs, norb, nelec, link_index)
return dm1, dm2
@lib.with_doc(trans_rdm1s.__doc__)
def trans_rdm1s(self, cibra, ciket, norb, nelec, link_index=None):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
cibra = _as_SCIvector_if_not(cibra, self._strs)
ciket = _as_SCIvector_if_not(ciket, self._strs)
return trans_rdm1s(cibra, ciket, norb, nelec, link_index)
@lib.with_doc(trans_rdm1.__doc__)
def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
cibra = _as_SCIvector_if_not(cibra, self._strs)
ciket = _as_SCIvector_if_not(ciket, self._strs)
return trans_rdm1(cibra, ciket, norb, nelec, link_index)
def gen_linkstr(self, norb, nelec, tril=True, spin=None, ci_strs=None):
if spin is None:
spin = self.spin
if ci_strs is None:
ci_strs = self._strs
neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)
if tril:
cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, neleca)
dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, neleca)
cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelecb)
dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelecb)
else:
cd_indexa = cre_des_linkstr(ci_strs[0], norb, neleca)
dd_indexa = des_des_linkstr(ci_strs[0], norb, neleca)
cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelecb)
dd_indexb = des_des_linkstr(ci_strs[1], norb, nelecb)
return cd_indexa, dd_indexa, cd_indexb, dd_indexb
SCI = SelectedCI
def _unpack(civec_strs, nelec, ci_strs=None, spin=None):
neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)
ci_strs = getattr(civec_strs, '_strs', ci_strs)
if ci_strs is not None:
strsa, strsb = ci_strs
strsa = numpy.asarray(strsa)
strsb = numpy.asarray(strsb)
ci_strs = (strsa, strsb)
return civec_strs, (neleca, nelecb), ci_strs
def _all_linkstr_index(ci_strs, norb, nelec):
cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, nelec[0])
dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, nelec[0])
cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelec[1])
dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelec[1])
return cd_indexa, dd_indexa, cd_indexb, dd_indexb
# numpy.ndarray does not allow to attach attribtues. Overwrite the
# numpy.ndarray class to tag the ._strs attribute
class _SCIvector(numpy.ndarray):
def __array_finalize__(self, obj):
self._strs = getattr(obj, '_strs', None)
# Whenever the contents of the array was modified (through ufunc), the tag
# should be expired. Overwrite the output of ufunc to restore ndarray type.
def __array_wrap__(self, out, context=None):
return numpy.ndarray.__array_wrap__(self, out, context).view(numpy.ndarray)
def _as_SCIvector(civec, ci_strs):
civec = civec.view(_SCIvector)
civec._strs = ci_strs
return civec
def _as_SCIvector_if_not(civec, ci_strs):
if getattr(civec, '_strs', None) is None:
civec = _as_SCIvector(civec, ci_strs)
return civec
if __name__ == '__main__':
    # Self-test: compare selected-CI against conventional FCI (direct_spin1)
    # on a small H8 / STO-3G system, then exercise the public SelectedCI API.
    from functools import reduce
    from pyscf import gto
    from pyscf import scf
    from pyscf import ao2mo
    from pyscf.fci import spin_op
    from pyscf.fci import addons
    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None
    mol.atom = [
        ['H', ( 1.,-1. , 0. )],
        ['H', ( 0.,-1. ,-1. )],
        ['H', ( 1.,-0.5 ,-1. )],
        ['H', ( 0.,-0. ,-1. )],
        ['H', ( 1.,-0.5 , 0. )],
        ['H', ( 0., 1. , 1. )],
        ['H', ( 1., 2. , 3. )],
        ['H', ( 1., 2. , 4. )],
    ]
    mol.basis = 'sto-3g'
    mol.build()
    m = scf.RHF(mol)
    m.kernel()
    norb = m.mo_coeff.shape[1]
    nelec = mol.nelectron
    # One- and two-electron MO integrals for the FCI solvers.
    h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
    eri = ao2mo.kernel(m._eri, m.mo_coeff, compact=False)
    eri = eri.reshape(norb,norb,norb,norb)
    # Selected-CI energy vs. the exact FCI reference value.
    e1, c1 = kernel(h1e, eri, norb, nelec)
    e2, c2 = direct_spin1.kernel(h1e, eri, norb, nelec)
    print(e1, e1 - -11.894559902235565, 'diff to FCI', e1-e2)
    print(c1.shape, c2.shape)
    # Density matrices should agree between the two solvers.
    dm1_1 = make_rdm1(c1, norb, nelec)
    dm1_2 = direct_spin1.make_rdm1(c2, norb, nelec)
    print(abs(dm1_1 - dm1_2).sum())
    dm2_1 = make_rdm2(c1, norb, nelec)
    dm2_2 = direct_spin1.make_rdm12(c2, norb, nelec)[1]
    print(abs(dm2_1 - dm2_2).sum())
    # Re-solve within the determinant space selected above (fixed space).
    myci = SelectedCI()
    e, c = kernel_fixed_space(myci, h1e, eri, norb, nelec, c1._strs)
    print(e - -11.894559902235565)
    print(myci.large_ci(c1, norb, nelec))
    print(myci.spin_square(c1, norb, nelec)[0] -
          spin_op.spin_square0(to_fci(c1, norb, nelec), norb, nelec)[0])
    # Spin-constrained variant via the fix_spin_ decorator.
    myci = SelectedCI()
    myci = addons.fix_spin_(myci)
    e1, c1 = myci.kernel(h1e, eri, norb, nelec)
    print(e1, e1 - -11.89467612053687)
    print(myci.spin_square(c1, norb, nelec))
| true | true |
f71be110a2784a6f4b942989944478aaf8facaaa | 18,030 | py | Python | ultra/tests/test_evaluate.py | MCZhi/SMARTS | 3ef5650b04ac6fb7145cf4e23d5534d73e0929fc | [
"MIT"
] | 2 | 2021-12-13T12:41:54.000Z | 2021-12-16T03:10:24.000Z | ultra/tests/test_evaluate.py | MCZhi/SMARTS | 3ef5650b04ac6fb7145cf4e23d5534d73e0929fc | [
"MIT"
] | null | null | null | ultra/tests/test_evaluate.py | MCZhi/SMARTS | 3ef5650b04ac6fb7145cf4e23d5534d73e0929fc | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import glob
import os
import pickle
import re
import shutil
import time
import unittest
import dill
import gym
import ray
from ultra.baselines.agent_spec import BaselineAgentSpec
from ultra.baselines.sac.sac.policy import SACPolicy
from ultra.evaluate import collect_evaluations, evaluate, evaluation_check
from ultra.utils.episode import episodes
seed = 2  # global RNG seed passed to the gym environment in run_experiment()
AGENT_ID = "001"  # module-level default agent id (some tests define their own local AGENT_ID)
class EvaluateTest(unittest.TestCase):
    """Integration tests for ultra's evaluation pipeline.

    Exercises both the in-process entry points (``evaluation_check``,
    ``evaluate.remote``) and the ``ultra/evaluate.py`` CLI, for single- and
    multi-agent experiments.  ``setUpClass`` trains throwaway models once
    that are shared by all the tests below.

    Fixes applied in review:
    * ``raise "<string>"`` is a ``TypeError`` in Python 3 — replaced the four
      occurrences with ``self.fail(...)`` and removed the unreachable
      ``self.assertTrue(False)`` that followed each of them.
    * ``test_a_folders`` re-checked ``path`` in two places where it clearly
      intended ``multiagent_path`` (copy-paste bug).
    """

    # Put generated files and folders in this directory.
    OUTPUT_DIRECTORY = "tests/evaluate_test/"

    @classmethod
    def setUpClass(cls):
        """Generate test scenarios and train the models used by every test."""
        path = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/")
        multiagent_path = os.path.join(
            EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/"
        )
        generate_command = (
            "python ultra/scenarios/interface.py generate "
            "--task 00 --level eval_test --root-dir tests/scenarios "
            " --save-dir tests/task/eval_test/"
        )
        multiagent_generate_command = (
            "python ultra/scenarios/interface.py generate "
            "--task 00-multiagent --level eval_test --root-dir tests/scenarios "
            "--save-dir tests/task/eval_test_multiagent/"
        )
        train_command = (
            "python ultra/train.py "
            "--task 00 --level eval_test --policy sac --headless --episodes 1 "
            f"--eval-rate 1 --eval-episodes 1 --max-episode-steps 2 --log-dir {path}"
        )
        multiagent_train_command = (
            "python ultra/train.py "
            "--task 00-multiagent --level eval_test --policy sac,dqn,ppo --headless --episodes 1 "
            f"--eval-rate 1 --eval-episodes 1 --max-episode-steps 2 --log-dir {multiagent_path}"
        )

        # Generate the scenarios.
        os.system(generate_command)
        os.system(multiagent_generate_command)

        # Remove existing models so training starts from a clean slate.
        if os.path.exists(path):
            shutil.rmtree(path)
        if os.path.exists(multiagent_path):
            shutil.rmtree(multiagent_path)

        # Generate models before evaluation tests.
        if not os.path.exists(path):
            os.system(train_command)
        if not os.path.exists(multiagent_path):
            os.system(multiagent_train_command)

    def test_a_folders(self):
        """Sanity-check that setUpClass produced models and scenarios.

        Named with a leading 'a' so it sorts — and therefore runs — first.
        """
        path = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/")
        if not os.path.exists(path):
            self.assertTrue(False)

        path = glob.glob(
            os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/*/models")
        )[0]
        if len(os.listdir(path)) == 0:
            self.assertTrue(False)

        path = "tests/task/eval_test"
        if len(os.listdir(path)) <= 2:
            self.assertTrue(False)

        multiagent_path = os.path.join(
            EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/"
        )
        # Fixed: previously re-checked `path` instead of the multiagent models dir.
        if not os.path.exists(multiagent_path):
            self.assertTrue(False)

        multiagent_path = glob.glob(
            os.path.join(
                EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/*/models"
            )
        )[0]
        if len(os.listdir(multiagent_path)) < 2:
            self.assertTrue(False)

        multiagent_path = "tests/task/eval_test_multiagent"
        # Fixed: previously re-checked `path` instead of the multiagent scenarios dir.
        if len(os.listdir(multiagent_path)) <= 2:
            self.assertTrue(False)

    def test_evaluation_check(self):
        """evaluation_check() completes inside a single-agent experiment."""
        log_dir = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "output_eval_check_logs/")

        ray.shutdown()
        ray.init()
        try:
            run_experiment(
                scenario_info=("00", "eval_test"), num_agents=1, log_dir=log_dir
            )
            self.assertTrue(True)
        except Exception as err:
            print(err)
            self.assertTrue(False)

        if not os.listdir(log_dir):
            # `raise <str>` is a TypeError in Python 3; fail with a message instead.
            self.fail("Evaluation failed to generate new experiment folder")
        else:
            shutil.rmtree(log_dir)

    def test_evaluation_check_multiagent(self):
        """evaluation_check() completes inside a three-agent experiment."""
        log_dir = os.path.join(
            EvaluateTest.OUTPUT_DIRECTORY, "output_eval_check_multiagent_logs/"
        )

        ray.shutdown()
        ray.init()
        try:
            run_experiment(
                scenario_info=("00-multiagent", "eval_test"),
                num_agents=3,
                log_dir=log_dir,
            )
            self.assertTrue(True)
        except Exception as err:
            print(err)
            self.assertTrue(False)

        if not os.listdir(log_dir):
            self.fail("Evaluation failed to generate new experiment folder")
        else:
            shutil.rmtree(log_dir)

    def test_evaluate_cli(self):
        """The evaluate.py CLI runs against a trained single-agent experiment."""
        log_dir = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "output_eval_cli_logs/")
        experiment_dir = glob.glob(
            os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/*")
        )[0]
        evaluate_command = (
            f"python ultra/evaluate.py "
            f"--task 00 --level eval_test --experiment-dir {experiment_dir} "
            f"--episodes 1 --max-episode-steps 2 --log-dir {log_dir} --headless"
        )
        ray.shutdown()
        try:
            os.system(evaluate_command)
            self.assertTrue(True)
        except Exception as err:
            print(err)
            self.assertTrue(False)

        if not os.listdir(log_dir):
            self.fail("Evaluation failed to generate new experiment folder")
        else:
            shutil.rmtree(log_dir)

    def test_evaluate_cli_multiagent(self):
        """The evaluate.py CLI runs against a trained multi-agent experiment."""
        log_dir = os.path.join(
            EvaluateTest.OUTPUT_DIRECTORY, "output_eval_cli_multiagent_logs/"
        )
        experiment_dir = glob.glob(
            os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/*")
        )[0]
        evaluate_command = (
            f"python ultra/evaluate.py "
            f"--task 00-multiagent --level eval_test --agents 000 --experiment-dir {experiment_dir} "
            f"--episodes 1 --max-episode-steps 2 --log-dir {log_dir} --headless"
        )
        ray.shutdown()
        try:
            os.system(evaluate_command)
            self.assertTrue(True)
        except Exception as err:
            print(err)
            self.assertTrue(False)

        if not os.listdir(log_dir):
            self.fail("Evaluation failed to generate new experiment folder")
        else:
            shutil.rmtree(log_dir)

    def test_evaluate_agent(self):
        """evaluate.remote() accepts checkpoints saved by a training run."""
        seed = 2
        models_directory = glob.glob(
            os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/*/models/")
        )[0]
        log_dir = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "output_eval_agent_logs/")

        with open(
            os.path.join(models_directory, "../agent_metadata.pkl"), "rb"
        ) as metadata_file:
            agent_metadata = pickle.load(metadata_file)

        agent_ids = agent_metadata["agent_ids"]
        policy_classes = agent_metadata["agent_classes"]
        # Checkpoints are numbered directories; sort them numerically.
        checkpoint_directories = {
            agent_id: sorted(
                glob.glob(os.path.join(models_directory, agent_id, "*")),
                key=lambda x: int(x.split("/")[-1]),
            )
            for agent_id in agent_ids
        }

        ray.shutdown()
        ray.init(ignore_reinit_error=True)
        try:
            evaluate.remote(
                experiment_dir=None,
                agent_ids=agent_ids,
                policy_classes=policy_classes,
                seed=seed,
                checkpoint_dirs=checkpoint_directories,
                scenario_info=("00", "eval_test"),
                num_episodes=1,
                max_episode_steps=2,
                timestep_sec=0.1,
                headless=True,
                log_dir=log_dir,
            )
            self.assertTrue(True)
        except Exception as err:
            print(err)
            self.assertTrue(False)

    # This test performs evaluation on multiple agents, but the test map
    # that is created can only support one agent. Skip this for now until
    # we can specify a map to use that supports multiple agents.
    @unittest.skip("Test map does not yet support multiple agents.")
    def test_evaluate_multiagent(self):
        """evaluate.remote() accepts checkpoints from a multi-agent run."""
        seed = 2
        models_directory = glob.glob(
            os.path.join(
                EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/*/models/"
            )
        )[0]
        log_dir = os.path.join(
            EvaluateTest.OUTPUT_DIRECTORY, "output_eval_multiagent_logs/"
        )

        with open(
            os.path.join(models_directory, "../agent_metadata.pkl"), "rb"
        ) as metadata_file:
            agent_metadata = pickle.load(metadata_file)

        agent_ids = agent_metadata["agent_ids"]
        policy_classes = agent_metadata["agent_classes"]
        checkpoint_directories = {
            agent_id: sorted(
                glob.glob(os.path.join(models_directory, agent_id, "*")),
                key=lambda x: int(x.split("/")[-1]),
            )
            for agent_id in agent_ids
        }

        ray.shutdown()
        ray.init(ignore_reinit_error=True)
        try:
            evaluate.remote(
                experiment_dir=None,
                agent_ids=agent_ids,
                policy_classes=policy_classes,
                seed=seed,
                checkpoint_dirs=checkpoint_directories,
                scenario_info=("00-multiagent", "eval_test"),
                num_episodes=1,
                max_episode_steps=2,
                timestep_sec=0.1,
                headless=True,
                log_dir=log_dir,
            )
            self.assertTrue(True)
        except Exception as err:
            print(err)
            self.assertTrue(False)

    def test_record_evaluation_at_proper_episode_indices(self):
        """Due to parallelization, there might arise a situation where the episode
        object at the beginning of an evaluation would not match the episode
        object when recording to tensorboard. This test ensures that the evaluation data
        (for both test and train scenarios) is recorded at the proper episode index.
        """
        AGENT_ID = "000"
        log_dir = os.path.join(
            EvaluateTest.OUTPUT_DIRECTORY, "output_eval_episode_check_log/"
        )

        # Arbitrary values for evaluation rate and number of training episodes.
        eval_rate = 4
        num_episodes = 20

        train_command = (
            "python ultra/train.py "
            f"--task 00 --level eval_test --policy sac --headless --episodes {num_episodes} "
            f"--eval-rate {eval_rate} --eval-episodes 2 --max-episode-steps 2 --log-dir {log_dir}"
        )

        if not os.path.exists(log_dir):
            os.system(train_command)

        with open(
            os.path.join(
                log_dir, os.listdir(log_dir)[0], "pkls/Evaluation/results.pkl"
            ),
            "rb",
        ) as handle:
            evaluation_results = dill.load(handle)

        # Check if the episode indices are divisible by the evaluation rate. If they
        # do, then the evaluation data is properly saved under the results.pkl
        # and also correctly added to the tensorboard
        for index in evaluation_results[AGENT_ID].keys():
            self.assertEqual((index) % eval_rate, 0)

        with open(
            os.path.join(
                log_dir, os.listdir(log_dir)[0], "pkls/Evaluation_Training/results.pkl"
            ),
            "rb",
        ) as handle:
            evaluation_training_results = dill.load(handle)

        # Check if the episode indices are divisible by the evaluation rate. If they
        # do, then the evaluation training data is properly saved under the results.pkl
        # and also correctly added to the tensorboard
        for index in evaluation_training_results[AGENT_ID].keys():
            self.assertEqual((index) % eval_rate, 0)

    def test_extract_policy_from_path(self):
        """The policy-class regex matches the module:name-version patterns used by ultra."""
        paths = [
            "from.ultra.baselines.sac:sac-v0",
            "hello.ultra.ppo:ppo-v1",
            "ultra.custom:custom",
            "a.sb.ultra.c.d.e.sac:sac-v99",
            "a.b.c.d.e.ultra.custom_agent.policy:MBPO-v2",
        ]

        def extract(path):
            m = re.search(
                "ultra(.)*([a-zA-Z0-9_]*.)+([a-zA-Z0-9_])+:[a-zA-Z0-9_]+((-)*[a-zA-Z0-9_]*)*",
                path,
            )
            try:
                policy_class = m.group(0)  # pytype: disable=attribute-error
            except AttributeError:
                self.assertTrue(False)

        for path in paths:
            extract(path)

    @classmethod
    def tearDownClass(cls):
        """Remove every directory generated by this test class."""
        if os.path.exists(EvaluateTest.OUTPUT_DIRECTORY):
            shutil.rmtree(EvaluateTest.OUTPUT_DIRECTORY)
        if os.path.exists("tests/task/eval_test/"):
            shutil.rmtree("tests/task/eval_test/")
        if os.path.exists("tests/task/eval_test_multiagent/"):
            shutil.rmtree("tests/task/eval_test_multiagent/")
def run_experiment(scenario_info, num_agents, log_dir, headless=True):
    """Run a one-episode in-process training experiment with evaluation checks.

    Helper for the tests above: builds `num_agents` SAC baseline agents,
    steps one episode of the ultra gym environment, and triggers
    `evaluation_check` / `collect_evaluations` on every step.
    """
    # Zero-padded three-character agent ids: "000", "001", ...
    agent_ids = ["0" * max(0, 3 - len(str(i))) + str(i) for i in range(num_agents)]
    agent_classes = {agent_id: "ultra.baselines.sac:sac-v0" for agent_id in agent_ids}
    agent_specs = {
        agent_id: BaselineAgentSpec(policy_class=SACPolicy, max_episode_steps=2)
        for agent_id in agent_ids
    }

    env = gym.make(
        "ultra.env:ultra-v0",
        agent_specs=agent_specs,
        scenario_info=scenario_info,
        headless=headless,
        timestep_sec=0.1,
        seed=seed,
    )

    agents = {
        agent_id: agent_spec.build_agent()
        for agent_id, agent_spec in agent_specs.items()
    }

    total_step = 0
    # Build the experiment tag from the policy class names. Fixed: iterating
    # the dict directly yields its keys (the agent ids "000", "001", ...),
    # not the policy class strings the tag is meant to contain.
    etag = ":".join(
        policy_class.split(":")[-1] for policy_class in agent_classes.values()
    )
    evaluation_task_ids = dict()

    for episode in episodes(1, etag=etag, log_dir=log_dir):
        observations = env.reset()
        dones = {"__all__": False}
        infos = None
        episode.reset()
        experiment_dir = episode.experiment_dir

        # Persist the agent metadata once per experiment directory.
        if not os.path.exists(f"{experiment_dir}/agent_metadata.pkl"):
            if not os.path.exists(experiment_dir):
                os.makedirs(experiment_dir)
            with open(f"{experiment_dir}/agent_metadata.pkl", "wb") as metadata_file:
                dill.dump(
                    {
                        "agent_ids": agent_ids,
                        "agent_classes": agent_classes,
                        "agent_specs": agent_specs,
                    },
                    metadata_file,
                    pickle.HIGHEST_PROTOCOL,
                )

        while not dones["__all__"]:
            # Launch (and later harvest) asynchronous evaluation tasks.
            evaluation_check(
                agents=agents,
                agent_ids=agent_ids,
                episode=episode,
                eval_rate=10,
                eval_episodes=1,
                max_episode_steps=2,
                policy_classes=agent_classes,
                scenario_info=scenario_info,
                evaluation_task_ids=evaluation_task_ids,
                timestep_sec=0.1,
                headless=True,
                log_dir=log_dir,
            )
            collect_evaluations(evaluation_task_ids=evaluation_task_ids)

            actions = {
                agent_id: agents[agent_id].act(observation, explore=True)
                for agent_id, observation in observations.items()
            }
            next_observations, rewards, dones, infos = env.step(actions)

            # Only agents present both before and after the step are recorded.
            active_agent_ids = observations.keys() & next_observations.keys()
            # pytype: disable=attribute-error
            loss_outputs = {
                agent_id: agents[agent_id].step(
                    state=observations[agent_id],
                    action=actions[agent_id],
                    reward=rewards[agent_id],
                    next_state=next_observations[agent_id],
                    done=dones[agent_id],
                    info=infos[agent_id],
                )
                for agent_id in active_agent_ids
            }
            # pytype: enable=attribute-error

            episode.record_step(
                agent_ids_to_record=active_agent_ids,
                infos=infos,
                rewards=rewards,
                total_step=total_step,
                loss_outputs=loss_outputs,
            )

            total_step += 1
            observations = next_observations

    # Wait on the remaining evaluations to finish.
    while collect_evaluations(evaluation_task_ids):
        time.sleep(0.1)

    env.close()
| 35.916335 | 101 | 0.59279 |
import glob
import os
import pickle
import re
import shutil
import time
import unittest
import dill
import gym
import ray
from ultra.baselines.agent_spec import BaselineAgentSpec
from ultra.baselines.sac.sac.policy import SACPolicy
from ultra.evaluate import collect_evaluations, evaluate, evaluation_check
from ultra.utils.episode import episodes
seed = 2
AGENT_ID = "001"
class EvaluateTest(unittest.TestCase):
OUTPUT_DIRECTORY = "tests/evaluate_test/"
@classmethod
def setUpClass(cls):
path = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/")
multiagent_path = os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/"
)
generate_command = (
"python ultra/scenarios/interface.py generate "
"--task 00 --level eval_test --root-dir tests/scenarios "
" --save-dir tests/task/eval_test/"
)
multiagent_generate_command = (
"python ultra/scenarios/interface.py generate "
"--task 00-multiagent --level eval_test --root-dir tests/scenarios "
"--save-dir tests/task/eval_test_multiagent/"
)
train_command = (
"python ultra/train.py "
"--task 00 --level eval_test --policy sac --headless --episodes 1 "
f"--eval-rate 1 --eval-episodes 1 --max-episode-steps 2 --log-dir {path}"
)
multiagent_train_command = (
"python ultra/train.py "
"--task 00-multiagent --level eval_test --policy sac,dqn,ppo --headless --episodes 1 "
f"--eval-rate 1 --eval-episodes 1 --max-episode-steps 2 --log-dir {multiagent_path}"
)
os.system(generate_command)
os.system(multiagent_generate_command)
if os.path.exists(path):
shutil.rmtree(path)
if os.path.exists(multiagent_path):
shutil.rmtree(multiagent_path)
if not os.path.exists(path):
os.system(train_command)
if not os.path.exists(multiagent_path):
os.system(multiagent_train_command)
def test_a_folders(self):
path = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/")
if not os.path.exists(path):
self.assertTrue(False)
path = glob.glob(
os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/*/models")
)[0]
if len(os.listdir(path)) == 0:
self.assertTrue(False)
path = "tests/task/eval_test"
if len(os.listdir(path)) <= 2:
self.assertTrue(False)
multiagent_path = os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/"
)
if not os.path.exists(path):
self.assertTrue(False)
multiagent_path = glob.glob(
os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/*/models"
)
)[0]
if len(os.listdir(multiagent_path)) < 2:
self.assertTrue(False)
multiagent_path = "tests/task/eval_test_multiagent"
if len(os.listdir(path)) <= 2:
self.assertTrue(False)
def test_evaluation_check(self):
log_dir = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "output_eval_check_logs/")
ray.shutdown()
ray.init()
try:
run_experiment(
scenario_info=("00", "eval_test"), num_agents=1, log_dir=log_dir
)
self.assertTrue(True)
except Exception as err:
print(err)
self.assertTrue(False)
if not os.listdir(log_dir):
raise "Evaluation failed to generate new experiment folder"
self.assertTrue(False)
else:
shutil.rmtree(log_dir)
def test_evaluation_check_multiagent(self):
log_dir = os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "output_eval_check_multiagent_logs/"
)
ray.shutdown()
ray.init()
try:
run_experiment(
scenario_info=("00-multiagent", "eval_test"),
num_agents=3,
log_dir=log_dir,
)
self.assertTrue(True)
except Exception as err:
print(err)
self.assertTrue(False)
if not os.listdir(log_dir):
raise "Evaluation failed to generate new experiment folder"
self.assertTrue(False)
else:
shutil.rmtree(log_dir)
def test_evaluate_cli(self):
log_dir = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "output_eval_cli_logs/")
experiment_dir = glob.glob(
os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/*")
)[0]
evaluate_command = (
f"python ultra/evaluate.py "
f"--task 00 --level eval_test --experiment-dir {experiment_dir} "
f"--episodes 1 --max-episode-steps 2 --log-dir {log_dir} --headless"
)
ray.shutdown()
try:
os.system(evaluate_command)
self.assertTrue(True)
except Exception as err:
print(err)
self.assertTrue(False)
if not os.listdir(log_dir):
raise "Evaluation failed to generate new experiment folder"
self.assertTrue(False)
else:
shutil.rmtree(log_dir)
def test_evaluate_cli_multiagent(self):
log_dir = os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "output_eval_cli_multiagent_logs/"
)
experiment_dir = glob.glob(
os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/*")
)[0]
evaluate_command = (
f"python ultra/evaluate.py "
f"--task 00-multiagent --level eval_test --agents 000 --experiment-dir {experiment_dir} "
f"--episodes 1 --max-episode-steps 2 --log-dir {log_dir} --headless"
)
ray.shutdown()
try:
os.system(evaluate_command)
self.assertTrue(True)
except Exception as err:
print(err)
self.assertTrue(False)
if not os.listdir(log_dir):
raise "Evaluation failed to generate new experiment folder"
self.assertTrue(False)
else:
shutil.rmtree(log_dir)
def test_evaluate_agent(self):
seed = 2
models_directory = glob.glob(
os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "sac_test_models/*/models/")
)[0]
log_dir = os.path.join(EvaluateTest.OUTPUT_DIRECTORY, "output_eval_agent_logs/")
with open(
os.path.join(models_directory, "../agent_metadata.pkl"), "rb"
) as metadata_file:
agent_metadata = pickle.load(metadata_file)
agent_ids = agent_metadata["agent_ids"]
policy_classes = agent_metadata["agent_classes"]
checkpoint_directories = {
agent_id: sorted(
glob.glob(os.path.join(models_directory, agent_id, "*")),
key=lambda x: int(x.split("/")[-1]),
)
for agent_id in agent_ids
}
ray.shutdown()
ray.init(ignore_reinit_error=True)
try:
evaluate.remote(
experiment_dir=None,
agent_ids=agent_ids,
policy_classes=policy_classes,
seed=seed,
checkpoint_dirs=checkpoint_directories,
scenario_info=("00", "eval_test"),
num_episodes=1,
max_episode_steps=2,
timestep_sec=0.1,
headless=True,
log_dir=log_dir,
)
self.assertTrue(True)
except Exception as err:
print(err)
self.assertTrue(False)
@unittest.skip("Test map does not yet support multiple agents.")
def test_evaluate_multiagent(self):
seed = 2
models_directory = glob.glob(
os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "multiagent_test_models/*/models/"
)
)[0]
log_dir = os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "output_eval_multiagent_logs/"
)
with open(
os.path.join(models_directory, "../agent_metadata.pkl"), "rb"
) as metadata_file:
agent_metadata = pickle.load(metadata_file)
agent_ids = agent_metadata["agent_ids"]
policy_classes = agent_metadata["agent_classes"]
checkpoint_directories = {
agent_id: sorted(
glob.glob(os.path.join(models_directory, agent_id, "*")),
key=lambda x: int(x.split("/")[-1]),
)
for agent_id in agent_ids
}
ray.shutdown()
ray.init(ignore_reinit_error=True)
try:
evaluate.remote(
experiment_dir=None,
agent_ids=agent_ids,
policy_classes=policy_classes,
seed=seed,
checkpoint_dirs=checkpoint_directories,
scenario_info=("00-multiagent", "eval_test"),
num_episodes=1,
max_episode_steps=2,
timestep_sec=0.1,
headless=True,
log_dir=log_dir,
)
self.assertTrue(True)
except Exception as err:
print(err)
self.assertTrue(False)
def test_record_evaluation_at_proper_episode_indices(self):
AGENT_ID = "000"
log_dir = os.path.join(
EvaluateTest.OUTPUT_DIRECTORY, "output_eval_episode_check_log/"
)
eval_rate = 4
num_episodes = 20
train_command = (
"python ultra/train.py "
f"--task 00 --level eval_test --policy sac --headless --episodes {num_episodes} "
f"--eval-rate {eval_rate} --eval-episodes 2 --max-episode-steps 2 --log-dir {log_dir}"
)
if not os.path.exists(log_dir):
os.system(train_command)
with open(
os.path.join(
log_dir, os.listdir(log_dir)[0], "pkls/Evaluation/results.pkl"
),
"rb",
) as handle:
evaluation_results = dill.load(handle)
for index in evaluation_results[AGENT_ID].keys():
self.assertEqual((index) % eval_rate, 0)
with open(
os.path.join(
log_dir, os.listdir(log_dir)[0], "pkls/Evaluation_Training/results.pkl"
),
"rb",
) as handle:
evaluation_training_results = dill.load(handle)
for index in evaluation_training_results[AGENT_ID].keys():
self.assertEqual((index) % eval_rate, 0)
def test_extract_policy_from_path(self):
paths = [
"from.ultra.baselines.sac:sac-v0",
"hello.ultra.ppo:ppo-v1",
"ultra.custom:custom",
"a.sb.ultra.c.d.e.sac:sac-v99",
"a.b.c.d.e.ultra.custom_agent.policy:MBPO-v2",
]
def extract(path):
m = re.search(
"ultra(.)*([a-zA-Z0-9_]*.)+([a-zA-Z0-9_])+:[a-zA-Z0-9_]+((-)*[a-zA-Z0-9_]*)*",
path,
)
try:
policy_class = m.group(0)
except AttributeError as e:
self.assertTrue(False)
for path in paths:
extract(path)
@classmethod
def tearDownClass(cls):
if os.path.exists(EvaluateTest.OUTPUT_DIRECTORY):
shutil.rmtree(EvaluateTest.OUTPUT_DIRECTORY)
if os.path.exists("tests/task/eval_test/"):
shutil.rmtree("tests/task/eval_test/")
if os.path.exists("tests/task/eval_test_multiagent/"):
shutil.rmtree("tests/task/eval_test_multiagent/")
def run_experiment(scenario_info, num_agents, log_dir, headless=True):
agent_ids = ["0" * max(0, 3 - len(str(i))) + str(i) for i in range(num_agents)]
agent_classes = {agent_id: "ultra.baselines.sac:sac-v0" for agent_id in agent_ids}
agent_specs = {
agent_id: BaselineAgentSpec(policy_class=SACPolicy, max_episode_steps=2)
for agent_id in agent_ids
}
env = gym.make(
"ultra.env:ultra-v0",
agent_specs=agent_specs,
scenario_info=scenario_info,
headless=headless,
timestep_sec=0.1,
seed=seed,
)
agents = {
agent_id: agent_spec.build_agent()
for agent_id, agent_spec in agent_specs.items()
}
total_step = 0
etag = ":".join([policy_class.split(":")[-1] for policy_class in agent_classes])
evaluation_task_ids = dict()
for episode in episodes(1, etag=etag, log_dir=log_dir):
observations = env.reset()
dones = {"__all__": False}
infos = None
episode.reset()
experiment_dir = episode.experiment_dir
if not os.path.exists(f"{experiment_dir}/agent_metadata.pkl"):
if not os.path.exists(experiment_dir):
os.makedirs(experiment_dir)
with open(f"{experiment_dir}/agent_metadata.pkl", "wb") as metadata_file:
dill.dump(
{
"agent_ids": agent_ids,
"agent_classes": agent_classes,
"agent_specs": agent_specs,
},
metadata_file,
pickle.HIGHEST_PROTOCOL,
)
while not dones["__all__"]:
evaluation_check(
agents=agents,
agent_ids=agent_ids,
episode=episode,
eval_rate=10,
eval_episodes=1,
max_episode_steps=2,
policy_classes=agent_classes,
scenario_info=scenario_info,
evaluation_task_ids=evaluation_task_ids,
timestep_sec=0.1,
headless=True,
log_dir=log_dir,
)
collect_evaluations(evaluation_task_ids=evaluation_task_ids)
actions = {
agent_id: agents[agent_id].act(observation, explore=True)
for agent_id, observation in observations.items()
}
next_observations, rewards, dones, infos = env.step(actions)
active_agent_ids = observations.keys() & next_observations.keys()
loss_outputs = {
agent_id: agents[agent_id].step(
state=observations[agent_id],
action=actions[agent_id],
reward=rewards[agent_id],
next_state=next_observations[agent_id],
done=dones[agent_id],
info=infos[agent_id],
)
for agent_id in active_agent_ids
}
episode.record_step(
agent_ids_to_record=active_agent_ids,
infos=infos,
rewards=rewards,
total_step=total_step,
loss_outputs=loss_outputs,
)
total_step += 1
observations = next_observations
while collect_evaluations(evaluation_task_ids):
time.sleep(0.1)
env.close()
| true | true |
f71be12c5aef84eac371279bc150835aa8551c7d | 27 | py | Python | tess_py_api/TessPyWrap/__init__.py | orel98/tess_py_api | 538cbf64fa795318366ac2ceeec8b15d5cf9ae84 | [
"MIT"
] | 3 | 2022-03-16T09:11:33.000Z | 2022-03-19T19:43:50.000Z | tess_py_api/TessPyWrap/__init__.py | orel98/tess_py_api | 538cbf64fa795318366ac2ceeec8b15d5cf9ae84 | [
"MIT"
] | null | null | null | tess_py_api/TessPyWrap/__init__.py | orel98/tess_py_api | 538cbf64fa795318366ac2ceeec8b15d5cf9ae84 | [
"MIT"
] | null | null | null | from .CpyAPI import CpyAPI
| 13.5 | 26 | 0.814815 | from .CpyAPI import CpyAPI
| true | true |
f71be1fb130e1061491a135bc6bfa210726ceb66 | 650 | py | Python | PythonDesafios/d076.py | adaatii/Python-Curso-em-Video- | 30b37713b3685469558babb93b557b53210f010c | [
"MIT"
] | null | null | null | PythonDesafios/d076.py | adaatii/Python-Curso-em-Video- | 30b37713b3685469558babb93b557b53210f010c | [
"MIT"
] | null | null | null | PythonDesafios/d076.py | adaatii/Python-Curso-em-Video- | 30b37713b3685469558babb93b557b53210f010c | [
"MIT"
] | null | null | null | # Crie um programa que tenha uma tupla única com nomes de produtos
# Exercise: keep product names and prices in a single alternating tuple,
# then print the data as a tabular price list.
listagem = ('Lápis', 1.75,
            'Borracha', 2,
            'Caderno', 15.90,
            'Estojo', 10,
            'Compasso', 7.95,
            'Mochila', 150.45,
            'Canetas', 22.30,
            'Livro', 34.50)
print('-'*40)
print(f'{"LISTAGEM DE PREÇOS":^40}')
print('-'*40)
# Walk the tuple in (name, price) pairs instead of testing index parity.
for produto, preco in zip(listagem[::2], listagem[1::2]):
    print(f'{produto:.<30}R${preco:>7.2f}')
print('-'*40)
| 28.26087 | 66 | 0.543077 |
listagem = ('Lápis', 1.75,
'Borracha', 2,
'Caderno', 15.90,
'Estojo', 10,
'Compasso', 7.95,
'Mochila', 150.45,
'Canetas', 22.30,
'Livro', 34.50)
print('-'*40)
print(f'{"LISTAGEM DE PREÇOS":^40}')
print('-'*40)
for i in range(0, len(listagem)):
if i % 2 == 0:
print(f'{listagem[i]:.<30}', end='')
else:
print(f'R${listagem[i]:>7.2f}')
print('-'*40)
| true | true |
f71be2a3c566dab4f1526507df28e4b0c96bd528 | 4,961 | py | Python | NMT/dataset.py | MISStingting/NMTmodel | 970115d6f9fcd015d7daf3ad0e4844055e2af5d3 | [
"Apache-2.0"
] | null | null | null | NMT/dataset.py | MISStingting/NMTmodel | 970115d6f9fcd015d7daf3ad0e4844055e2af5d3 | [
"Apache-2.0"
] | null | null | null | NMT/dataset.py | MISStingting/NMTmodel | 970115d6f9fcd015d7daf3ad0e4844055e2af5d3 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
# tf.enable_eager_execution()
class Dataset(object):
    """tf.estimator input pipeline for an NMT model (TF1 graph mode).

    Reads whitespace-tokenized source/target text files named in ``params``
    and yields (features, labels) tensors for an Estimator's input_fn.
    """

    def get_dataset(self, params, mode):
        """Build the input pipeline for the given estimator *mode*.

        params: dict of file paths and pipeline knobs (batch_size,
            buffer_size, repeat, src_max_len, tgt_max_len, ...).
        mode: one of tf.estimator.ModeKeys.{TRAIN, EVAL, PREDICT}.

        Returns (features, labels): features holds "input" (padded token
        strings) and "input_length"; labels is None in PREDICT mode,
        otherwise holds "output_in"/"output_out" (target shifted with
        <s>/</s> markers) and "output_length".
        """
        # Pick the source/target files for this mode.
        if mode == tf.estimator.ModeKeys.TRAIN:
            features_path = params["train_features_file"]
            labels_path = params["train_labels_file"]
        elif mode == tf.estimator.ModeKeys.EVAL:
            features_path = params["eval_features_file"]
            labels_path = params["eval_labels_file"]
        elif mode == tf.estimator.ModeKeys.PREDICT:
            features_path = params["test_features_file"]
            labels_path = params["test_labels_file"]
        else:
            raise ValueError("wrong mode!!!")
        features_dataset, labels_dataset = self._load_dataset(features_path, labels_path, mode)
        if mode == tf.estimator.ModeKeys.PREDICT:
            # Tokenize each line on whitespace.
            dataset = features_dataset.map(lambda x: tf.string_split([x]).values)
            # NOTE(review): shuffling in PREDICT mode scrambles the order of
            # predictions relative to the input file — confirm this is intended.
            dataset = dataset.shuffle(buffer_size=params["buffer_size"],
                                      reshuffle_each_iteration=params["reshuffle_each_iteration"])
            dataset = dataset.prefetch(buffer_size=params["buffer_size"])
            # Pair each sentence with its token count.
            dataset = dataset.map(lambda src: (src, tf.size(src)))
            dataset = dataset.padded_batch(batch_size=params["batch_size"],
                                           padded_shapes=(tf.TensorShape([None]), tf.TensorShape([])),
                                           padding_values=(tf.constant("<blank>"), 0))
            iterator = dataset.make_one_shot_iterator()
            src, src_len = iterator.get_next()
            features = {
                "input": src,
                "input_length": src_len
            }
            labels = None
        else:
            # TRAIN/EVAL: pair source and target lines, then tokenize both.
            dataset = tf.data.Dataset.zip((features_dataset, labels_dataset))
            dataset = dataset.map(lambda x, y: (tf.string_split([x]).values, tf.string_split([y]).values))
            dataset = dataset.repeat(params["repeat"]).shuffle(buffer_size=params["buffer_size"],
                                                               reshuffle_each_iteration=params[
                                                                   "reshuffle_each_iteration"])
            dataset = dataset.prefetch(buffer_size=params["buffer_size"])
            # Optionally truncate overly long sentences (<=0 disables).
            if params["src_max_len"] > 0:
                dataset = dataset.map(
                    lambda src, tgt: (src[:params["src_max_len"]], tgt))
            if params["tgt_max_len"] > 0:
                dataset = dataset.map(
                    lambda src, tgt: (src, tgt[:params["tgt_max_len"]]))
            # Decoder input is <s> + target; decoder output is target + </s>.
            dataset = dataset.map(
                lambda src, tgt: (src,
                                  tf.concat((["<s>"], tgt), 0),
                                  tf.concat((tgt, ["</s>"]), 0)),
                num_parallel_calls=params["num_parallel_calls"])
            dataset = dataset.map(lambda src, tgt_in, tgt_out: (src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_out)))
            dataset = dataset.padded_batch(batch_size=params["batch_size"],
                                           padded_shapes=(
                                               tf.TensorShape([None]),
                                               tf.TensorShape([None]),
                                               tf.TensorShape([None]),
                                               tf.TensorShape([]),
                                               tf.TensorShape([])),
                                           padding_values=(
                                               tf.constant("<blank>", dtype=tf.string),
                                               tf.constant("<s>", dtype=tf.string),
                                               tf.constant("</s>", dtype=tf.string),
                                               0,
                                               0))
            iterator = dataset.make_one_shot_iterator()
            src, tgt_in, tgt_out, input_length, output_length = iterator.get_next()
            features = {
                "input": src,
                "input_length": input_length
            }
            labels = {
                "output_in": tgt_in,
                "output_out": tgt_out,
                "output_length": output_length
            }
        return features, labels

    @staticmethod
    def _load_dataset(features_path, labels_path, mode):
        '''Read the raw line datasets from the text files.

        Returns (features_dataset, labels_dataset); the labels dataset is
        None in PREDICT mode.  Implicitly returns (None, None) for any
        other mode, but get_dataset() validates the mode before calling.
        '''
        if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
            features_dataset = tf.data.TextLineDataset(filenames=features_path)
            labels_dataset = tf.data.TextLineDataset(filenames=labels_path)
            return features_dataset, labels_dataset
        elif mode == tf.estimator.ModeKeys.PREDICT:
            features_dataset = tf.data.TextLineDataset(filenames=features_path)
            return features_dataset, None
# Shared module-level instance; importers use `data_util` rather than
# constructing Dataset themselves.
data_util = Dataset()
| 48.637255 | 118 | 0.504939 | import tensorflow as tf
class Dataset(object):
    """tf.data input-pipeline builder for a seq2seq tf.estimator model.

    Produces padded, batched (features, labels) tensors: `features` carries
    the tokenized source sentence and its length; `labels` (train/eval only)
    carries the decoder input/output token sequences and the output length.
    """

    def get_dataset(self, params, mode):
        """Build the input pipeline for `mode` and return (features, labels).

        Args:
            params: dict of pipeline options (file paths, batch_size,
                buffer_size, repeat, src/tgt max lengths,
                num_parallel_calls, reshuffle_each_iteration, ...).
            mode: a tf.estimator.ModeKeys value.

        Returns:
            (features, labels); labels is None in predict mode.

        Raises:
            ValueError: for an unrecognized mode.
        """
        # Pick the input files for the requested mode.
        if mode == tf.estimator.ModeKeys.TRAIN:
            features_path = params["train_features_file"]
            labels_path = params["train_labels_file"]
        elif mode == tf.estimator.ModeKeys.EVAL:
            features_path = params["eval_features_file"]
            labels_path = params["eval_labels_file"]
        elif mode == tf.estimator.ModeKeys.PREDICT:
            features_path = params["test_features_file"]
            labels_path = params["test_labels_file"]
        else:
            raise ValueError("wrong mode!!!")
        features_dataset, labels_dataset = self._load_dataset(features_path, labels_path, mode)
        if mode == tf.estimator.ModeKeys.PREDICT:
            # Predict: tokenize the source side only (whitespace split).
            dataset = features_dataset.map(lambda x: tf.string_split([x]).values)
            # NOTE(review): shuffling in predict mode reorders outputs
            # relative to the input file — confirm this is intended.
            dataset = dataset.shuffle(buffer_size=params["buffer_size"],
                                      reshuffle_each_iteration=params["reshuffle_each_iteration"])
            dataset = dataset.prefetch(buffer_size=params["buffer_size"])
            # Attach the (pre-padding) sequence length to each example.
            dataset = dataset.map(lambda src: (src, tf.size(src)))
            # Pad variable-length sources with "<blank>" within each batch.
            dataset = dataset.padded_batch(batch_size=params["batch_size"],
                                           padded_shapes=(tf.TensorShape([None]), tf.TensorShape([])),
                                           padding_values=(tf.constant("<blank>"), 0))
            iterator = dataset.make_one_shot_iterator()
            src, src_len = iterator.get_next()
            features = {
                "input": src,
                "input_length": src_len
            }
            labels = None
        else:
            # Train/eval: pair source and target lines, then tokenize both.
            dataset = tf.data.Dataset.zip((features_dataset, labels_dataset))
            dataset = dataset.map(lambda x, y: (tf.string_split([x]).values, tf.string_split([y]).values))
            dataset = dataset.repeat(params["repeat"]).shuffle(buffer_size=params["buffer_size"],
                                                               reshuffle_each_iteration=params[
                                                                   "reshuffle_each_iteration"])
            dataset = dataset.prefetch(buffer_size=params["buffer_size"])
            # Optionally truncate overlong source/target sentences.
            if params["src_max_len"] > 0:
                dataset = dataset.map(
                    lambda src, tgt: (src[:params["src_max_len"]], tgt))
            if params["tgt_max_len"] > 0:
                dataset = dataset.map(
                    lambda src, tgt: (src, tgt[:params["tgt_max_len"]]))
            # Decoder input gets a leading "<s>"; decoder target a trailing "</s>".
            dataset = dataset.map(
                lambda src, tgt: (src,
                                  tf.concat((["<s>"], tgt), 0),
                                  tf.concat((tgt, ["</s>"]), 0)),
                num_parallel_calls=params["num_parallel_calls"])
            # Attach source and target-output lengths for masking/bucketing.
            dataset = dataset.map(lambda src, tgt_in, tgt_out: (src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_out)))
            dataset = dataset.padded_batch(batch_size=params["batch_size"],
                                           padded_shapes=(
                                               tf.TensorShape([None]),
                                               tf.TensorShape([None]),
                                               tf.TensorShape([None]),
                                               tf.TensorShape([]),
                                               tf.TensorShape([])),
                                           padding_values=(
                                               tf.constant("<blank>", dtype=tf.string),
                                               tf.constant("<s>", dtype=tf.string),
                                               tf.constant("</s>", dtype=tf.string),
                                               0,
                                               0))
            iterator = dataset.make_one_shot_iterator()
            src, tgt_in, tgt_out, input_length, output_length = iterator.get_next()
            features = {
                "input": src,
                "input_length": input_length
            }
            labels = {
                "output_in": tgt_in,
                "output_out": tgt_out,
                "output_length": output_length
            }
        return features, labels

    @staticmethod
    def _load_dataset(features_path, labels_path, mode):
        """Open text-line datasets for `mode`.

        Returns (features_dataset, labels_dataset); labels_dataset is None
        in predict mode. Falls through (implicit None) for unknown modes —
        the caller validates `mode` before calling.
        """
        if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
            features_dataset = tf.data.TextLineDataset(filenames=features_path)
            labels_dataset = tf.data.TextLineDataset(filenames=labels_path)
            return features_dataset, labels_dataset
        elif mode == tf.estimator.ModeKeys.PREDICT:
            features_dataset = tf.data.TextLineDataset(filenames=features_path)
            return features_dataset, None
# Shared module-level instance; importers use `data_util` rather than
# constructing Dataset themselves.
data_util = Dataset()
| true | true |
f71be2a65e952c2d97ca924e09c67df982278f0e | 66,367 | py | Python | tools/run_tests/run_tests.py | jtcho/grpc | 99673fcbe341a981c27d2becd572468863bff33b | [
"Apache-2.0"
] | 1 | 2017-09-07T00:48:20.000Z | 2017-09-07T00:48:20.000Z | tools/run_tests/run_tests.py | jtcho/grpc | 99673fcbe341a981c27d2becd572468863bff33b | [
"Apache-2.0"
] | null | null | null | tools/run_tests/run_tests.py | jtcho/grpc | 99673fcbe341a981c27d2becd572468863bff33b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
from python_utils.upload_test_results import upload_results_to_bq
except (ImportError):
pass # It's ok to not import because this is only necessary to upload results to BQ.
# Make the GCP helper modules (e.g. big_query_utils) importable.
gcp_utils_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)

# Run everything relative to the repository root.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)

# Environment forced onto tests that run through wrapper scripts.
_FORCE_ENVIRON_FOR_WRAPPERS = {
    'GRPC_VERBOSITY': 'DEBUG',
}

# Polling engines exercised per platform for tests that use polling.
_POLLING_STRATEGIES = {
    'linux': ['epollex', 'epoll1', 'poll', 'poll-cv'],
    'mac': ['poll'],
}

# Per-test data pulled from BigQuery: test name, observed flakiness, cpu cost.
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
def get_bqtest_data(limit=None):
    """Fetch per-test flakiness and measured cpu cost from BigQuery.

    Aggregates the last week of non-portability Jenkins results for the
    current platform. `limit`, if given, caps the number of rows.

    Returns:
        A list of BigQueryTestData tuples.
    """
    import big_query_utils

    bq = big_query_utils.create_big_query()
    # Legacy-SQL query; `r'/\d+'` strips trailing shard indices so all
    # shards of a test aggregate under one name.
    query = """
SELECT
  filtered_test_name,
  SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
  MAX(cpu_measured) + 0.01 as cpu
FROM (
  SELECT
    REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
    result, cpu_measured
  FROM
    [grpc-testing:jenkins_test_results.aggregate_results]
  WHERE
    timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
    AND platform = '""" + platform_string() + """'
    AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
  filtered_test_name"""
    if limit:
        query += " limit {}".format(limit)
    query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
    page = bq.jobs().getQueryResults(
        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
    # BigQuery REST rows arrive as {'f': [{'v': ...}, ...]}, one 'v' per
    # selected column, all values serialized as strings.
    test_data = [
        BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
                         float(row['f'][2]['v'])) for row in page['rows']
    ]
    return test_data
def platform_string():
    """Return the current platform name as reported by jobset (e.g. 'linux')."""
    return jobset.platform_string()


# Default timeout for a single test job unless the target overrides it.
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
    """Run *cmd* through the shell; raise CalledProcessError on failure.

    Output is discarded on success. On failure the command, exit status and
    captured output are logged before the exception is re-raised.
    """
    try:
        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
    except subprocess.CalledProcessError as exc:
        logging.exception(
            "Error while running command '%s'. Exit status %d. Output:\n%s",
            exc.cmd, exc.returncode, exc.output)
        raise
def max_parallel_tests_for_current_platform():
    """Upper bound on concurrent test jobs for this platform."""
    # Too much test parallelization has only been seen to be a problem
    # so far on windows.
    if jobset.platform_string() != 'windows':
        return 1024
    return 64
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
    """A build configuration (dbg/opt/asan/...) plus per-run job defaults."""

    def __init__(self,
                 config,
                 environ=None,
                 timeout_multiplier=1,
                 tool_prefix=[],
                 iomgr_platform='native'):
        # config: build config name; exported to subprocesses via CONFIG.
        # environ: extra environment variables applied to every test run.
        # timeout_multiplier: scales each job's timeout (sanitizer configs
        #     use values > 1).
        # tool_prefix: command prefix (e.g. a valgrind invocation) prepended
        #     to every test command line.
        # NOTE(review): `tool_prefix=[]` is a mutable default; it is only
        # read, never mutated here, so the usual aliasing hazard does not
        # bite in practice.
        if environ is None:
            environ = {}
        self.build_config = config
        self.environ = environ
        self.environ['CONFIG'] = config
        self.tool_prefix = tool_prefix
        self.timeout_multiplier = timeout_multiplier
        self.iomgr_platform = iomgr_platform

    def job_spec(self,
                 cmdline,
                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None,
                 environ={},
                 cpu_cost=1.0,
                 flaky=False):
        """Construct a jobset.JobSpec for a test under this config

        Args:
          cmdline: a list of strings specifying the command line the test
              would like to run
        """
        actual_environ = self.environ.copy()
        for k, v in environ.items():
            actual_environ[k] = v
        # `flaky_tests` and `shortname_to_cpu` are module-level globals —
        # presumably populated from BigQuery data elsewhere in this file;
        # `args` is the module-level parsed command line.
        if not flaky and shortname and shortname in flaky_tests:
            flaky = True
        if shortname in shortname_to_cpu:
            cpu_cost = shortname_to_cpu[shortname]
        return jobset.JobSpec(
            cmdline=self.tool_prefix + cmdline,
            shortname=shortname,
            environ=actual_environ,
            cpu_cost=cpu_cost,
            # timeout_seconds=None (or 0) disables the timeout entirely.
            timeout_seconds=(self.timeout_multiplier * timeout_seconds
                             if timeout_seconds else None),
            flake_retries=4 if flaky or args.allow_flakes else 0,
            timeout_retries=1 if flaky or args.allow_flakes else 0)
def get_c_tests(travis, test_lang):
    """Return test targets for `test_lang` that run on the current platform.

    Reads the generated tests.json manifest and keeps targets whose language
    matches and whose platform list (ci_platforms under travis) includes the
    current platform; under travis, flaky targets are excluded as well.
    """
    # (fixed) dropped an unused local `out = []` left over from an earlier
    # implementation.
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/generated/tests.json') as f:
        js = json.load(f)
        return [
            tgt for tgt in js
            if tgt['language'] == test_lang and platform_string() in
            tgt[platforms_str] and not (travis and tgt['flaky'])
        ]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception(
'Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
# Everything needed to assemble build/run commands for one python runtime:
# shell wrapper, venv builder script and its prefix args, venv-relative
# interpreter path, toolchain tag, test runner script, make target name,
# and the iomgr platform suffix.
_PythonConfigVars = collections.namedtuple('_ConfigVars', [
    'shell',
    'builder',
    'builder_prefix_arguments',
    'venv_relative_python',
    'toolchain',
    'runner',
    'test_name',
    'iomgr_platform',
])
def _python_config_generator(name, major, minor, bits, config_vars):
    """Build a PythonConfig for a CPython of the given version and bitness."""
    # The venv directory is suffixed with the iomgr platform so different
    # platforms get distinct environments.
    name += '_' + config_vars.iomgr_platform
    interpreter = _python_pattern_function(major=major, minor=minor, bits=bits)
    venv_python = os.path.join(name, config_vars.venv_relative_python[0])
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments + [interpreter] +
                 [name] + config_vars.venv_relative_python +
                 config_vars.toolchain)
    run_cmd = config_vars.shell + config_vars.runner + [
        venv_python, config_vars.test_name
    ]
    return PythonConfig(name, build_cmd, run_cmd)
def _pypy_config_generator(name, major, config_vars):
    """Build a PythonConfig for a PyPy runtime of the given major version."""
    interpreter = _pypy_pattern_function(major=major)
    venv_python = os.path.join(name, config_vars.venv_relative_python[0])
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments + [interpreter] +
                 [name] + config_vars.venv_relative_python +
                 config_vars.toolchain)
    run_cmd = config_vars.shell + config_vars.runner + [venv_python]
    return PythonConfig(name, build_cmd, run_cmd)
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
    """Language runner for the C/C++ test targets (make- or cmake-based)."""

    def __init__(self, make_target, test_lang):
        # make_target: e.g. 'c' or 'cxx'; test_lang: language key in tests.json.
        self.make_target = make_target
        self.platform = platform_string()
        self.test_lang = test_lang

    def configure(self, config, args):
        """Validate compiler/arch for this platform and select build tooling."""
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(
                self.args.compiler,
                ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
            _check_arch(self.args.arch, ['default', 'x64', 'x86'])
            self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
            self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
            self._use_cmake = True
            self._make_options = []
        elif self.args.compiler == 'cmake':
            _check_arch(self.args.arch, ['default'])
            self._use_cmake = True
            self._docker_distro = 'jessie'
            self._make_options = []
        else:
            self._use_cmake = False
            self._docker_distro, self._make_options = self._compiler_options(
                self.args.use_docker, self.args.compiler)
        if args.iomgr_platform == "uv":
            # libuv iomgr: compile with the custom-iomgr defines and link
            # libuv; flags come from pkg-config when available.
            cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
            try:
                cflags += subprocess.check_output(
                    ['pkg-config', '--cflags', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                pass
            try:
                ldflags = subprocess.check_output(
                    ['pkg-config', '--libs', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                ldflags = '-luv '
            self._make_options += [
                'EXTRA_CPPFLAGS={}'.format(cflags),
                'EXTRA_LDLIBS={}'.format(ldflags)
            ]

    def test_specs(self):
        """Expand tests.json targets into one JobSpec per test/poll-engine."""
        out = []
        binaries = get_c_tests(self.args.travis, self.test_lang)
        for target in binaries:
            if self._use_cmake and target.get('boringssl', False):
                # cmake doesn't build boringssl tests
                continue
            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
            polling_strategies = (_POLLING_STRATEGIES.get(
                self.platform, ['all']) if target.get('uses_polling', True)
                                  else ['none'])
            if self.args.iomgr_platform == 'uv':
                polling_strategies = ['all']
            for polling_strategy in polling_strategies:
                env = {
                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
                        _ROOT + '/src/core/tsi/test_creds/ca.pem',
                    'GRPC_POLL_STRATEGY':
                        polling_strategy,
                    'GRPC_VERBOSITY':
                        'DEBUG'
                }
                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
                if resolver:
                    env['GRPC_DNS_RESOLVER'] = resolver
                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
                if polling_strategy in target.get('excluded_poll_engines', []):
                    continue

                timeout_scaling = 1
                if auto_timeout_scaling:
                    config = self.args.config
                    if ('asan' in config or config == 'msan' or
                            config == 'tsan' or config == 'ubsan' or
                            config == 'helgrind' or config == 'memcheck'):
                        # Scale overall test timeout if running under various sanitizers.
                        # scaling value is based on historical data analysis
                        timeout_scaling *= 3
                    elif polling_strategy == 'poll-cv':
                        # scale test timeout if running with poll-cv
                        # sanitizer and poll-cv scaling is not cumulative to ensure
                        # reasonable timeout values.
                        # TODO(jtattermusch): based on historical data and 5min default
                        # test timeout poll-cv scaling is currently not useful.
                        # Leaving here so it can be reintroduced if the default test timeout
                        # is decreased in the future.
                        timeout_scaling *= 1

                if self.config.build_config in target['exclude_configs']:
                    continue
                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
                    continue
                if self.platform == 'windows':
                    binary = 'cmake/build/%s/%s.exe' % (
                        _MSBUILD_CONFIG[self.config.build_config],
                        target['name'])
                else:
                    if self._use_cmake:
                        binary = 'cmake/build/%s' % target['name']
                    else:
                        binary = 'bins/%s/%s' % (self.config.build_config,
                                                 target['name'])
                cpu_cost = target['cpu_cost']
                if cpu_cost == 'capacity':
                    cpu_cost = multiprocessing.cpu_count()
                if os.path.isfile(binary):
                    # NOTE(review): these two locals are never used below.
                    list_test_command = None
                    filter_test_command = None

                    # these are the flag defined by gtest and benchmark framework to list
                    # and filter test runs. We use them to split each individual test
                    # into its own JobSpec, and thus into its own process.
                    if 'benchmark' in target and target['benchmark']:
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--benchmark_list_tests'],
                                stderr=fnull)
                        for line in tests.split('\n'):
                            test = line.strip()
                            if not test: continue
                            cmdline = [binary,
                                       '--benchmark_filter=%s$' % test
                                       ] + target['args']
                            out.append(
                                self.config.job_spec(
                                    cmdline,
                                    shortname='%s %s' % (' '.join(cmdline),
                                                         shortname_ext),
                                    cpu_cost=cpu_cost,
                                    timeout_seconds=target.get(
                                        'timeout_seconds',
                                        _DEFAULT_TIMEOUT_SECONDS) *
                                    timeout_scaling,
                                    environ=env))
                    elif 'gtest' in target and target['gtest']:
                        # here we parse the output of --gtest_list_tests to build up a complete
                        # list of the tests contained in a binary for each test, we then
                        # add a job to run, filtering for just that test.
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--gtest_list_tests'], stderr=fnull)
                        base = None
                        for line in tests.split('\n'):
                            # Strip trailing gtest annotations after '#'.
                            i = line.find('#')
                            if i >= 0: line = line[:i]
                            if not line: continue
                            # Unindented lines name the suite; indented ones
                            # name the individual test within it.
                            if line[0] != ' ':
                                base = line.strip()
                            else:
                                assert base is not None
                                assert line[1] == ' '
                                test = base + line.strip()
                                cmdline = [binary,
                                           '--gtest_filter=%s' % test
                                           ] + target['args']
                                out.append(
                                    self.config.job_spec(
                                        cmdline,
                                        shortname='%s %s' % (' '.join(cmdline),
                                                             shortname_ext),
                                        cpu_cost=cpu_cost,
                                        timeout_seconds=target.get(
                                            'timeout_seconds',
                                            _DEFAULT_TIMEOUT_SECONDS) *
                                        timeout_scaling,
                                        environ=env))
                    else:
                        # Plain binary: one job for the whole executable.
                        cmdline = [binary] + target['args']
                        shortname = target.get('shortname', ' '.join(
                            pipes.quote(arg) for arg in cmdline))
                        shortname += shortname_ext
                        out.append(
                            self.config.job_spec(
                                cmdline,
                                shortname=shortname,
                                cpu_cost=cpu_cost,
                                flaky=target.get('flaky', False),
                                timeout_seconds=target.get(
                                    'timeout_seconds',
                                    _DEFAULT_TIMEOUT_SECONDS)
                                * timeout_scaling,
                                environ=env))
                elif self.args.regex == '.*' or self.platform == 'windows':
                    print('\nWARNING: binary not found, skipping', binary)
        return sorted(out)

    def make_targets(self):
        """Make targets to build for this language."""
        if self.platform == 'windows':
            # don't build tools on windows just yet
            return ['buildtests_%s' % self.make_target]
        return [
            'buildtests_%s' % self.make_target,
            'tools_%s' % self.make_target, 'check_epollexclusive'
        ]

    def make_options(self):
        return self._make_options

    def pre_build_steps(self):
        """Commands to run before the build (cmake configure, if used)."""
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
                self._cmake_generator_option, self._cmake_arch_option
            ]]
        elif self._use_cmake:
            return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
        else:
            return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        if self.platform == 'windows':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]

    def makefile_name(self):
        if self._use_cmake:
            return 'cmake/build/Makefile'
        else:
            return 'Makefile'

    def _clang_make_options(self, version_suffix=''):
        """Make variables selecting a clang toolchain of the given version."""
        if self.args.config == 'ubsan':
            # ubsan needs the clang++ driver at link time as well.
            return [
                'CC=clang%s' % version_suffix,
                'CXX=clang++%s' % version_suffix,
                'LD=clang++%s' % version_suffix,
                'LDXX=clang++%s' % version_suffix
            ]

        return [
            'CC=clang%s' % version_suffix,
            'CXX=clang++%s' % version_suffix,
            'LD=clang%s' % version_suffix,
            'LDXX=clang++%s' % version_suffix
        ]

    def _gcc_make_options(self, version_suffix):
        """Make variables selecting a gcc toolchain of the given version."""
        return [
            'CC=gcc%s' % version_suffix,
            'CXX=g++%s' % version_suffix,
            'LD=gcc%s' % version_suffix,
            'LDXX=g++%s' % version_suffix
        ]

    def _compiler_options(self, use_docker, compiler):
        """Returns docker distro and make options to use for given compiler."""
        if not use_docker and not _is_use_docker_child():
            # not running under docker, so we cannot ensure the right compiler
            _check_compiler(compiler, ['default'])

        if compiler == 'gcc4.9' or compiler == 'default':
            return ('jessie', [])
        elif compiler == 'gcc4.8':
            return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
        elif compiler == 'gcc5.3':
            return ('ubuntu1604', [])
        elif compiler == 'gcc7.2':
            return ('ubuntu1710', [])
        elif compiler == 'gcc_musl':
            return ('alpine', [])
        elif compiler == 'clang3.4':
            # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
            return ('ubuntu1404', self._clang_make_options())
        elif compiler == 'clang3.5':
            return ('jessie', self._clang_make_options(version_suffix='-3.5'))
        elif compiler == 'clang3.6':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.6'))
        elif compiler == 'clang3.7':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.7'))
        elif compiler == 'clang7.0':
            # clang++-7.0 alias doesn't exist and there are no other clang versions
            # installed.
            return ('sanitizers_jessie', self._clang_make_options())
        else:
            raise Exception('Compiler %s not supported.' % compiler)

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/cxx_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return self.make_target
# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
    """Language runner delegating Node tests to the grpc/grpc-node repo."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        """Derive runtime ('node'/'electron') and version from --compiler."""
        self.config = config
        self.args = args
        # Note: electron ABI only depends on major and minor version, so that's all
        # we should specify in the compiler argument
        _check_compiler(self.args.compiler, [
            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
            'electron1.3', 'electron1.6'
        ])
        if self.args.compiler == 'default':
            self.runtime = 'node'
            self.node_version = '8'
        else:
            if self.args.compiler.startswith('electron'):
                self.runtime = 'electron'
                # Strip the 'electron' prefix, keeping the version.
                self.node_version = self.args.compiler[8:]
            else:
                self.runtime = 'node'
                # Take off the word "node"
                self.node_version = self.args.compiler[4:]

    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
    def test_specs(self):
        if self.platform == 'windows':
            return [
                self.config.job_spec(
                    ['tools\\run_tests\\helper_scripts\\run_node.bat'])
            ]
        else:
            return [
                self.config.job_spec(
                    ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
                    None,
                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
            ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'grpc-node'
class PhpLanguage(object):
    """Language runner for the PHP 5 implementation."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        # A single wrapper script runs the whole PHP test suite.
        return [
            self.config.job_spec(
                ['src/php/bin/run_tests.sh'],
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # The PHP extension links against the C core libraries.
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php'
class Php7Language(object):
    """Language runner for the PHP 7 implementation (PHP 7 docker image)."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        # A single wrapper script runs the whole PHP test suite.
        return [
            self.config.job_spec(
                ['src/php/bin/run_tests.sh'],
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # The PHP extension links against the C core libraries.
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php7'
class PythonConfig(
        collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
    """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
    # name: runtime label (e.g. 'py27_native'); build: command creating the
    # venv; run: command executing the test runner inside that venv.
class PythonLanguage(object):
    """Language runner for the Python implementation."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        self.pythons = self._get_pythons(self.args)

    def test_specs(self):
        """One JobSpec per (python runtime, test suite) pair."""
        # load list of known test suites
        with open(
                'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
            tests_json = json.load(tests_json_file)
        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
        return [
            self.config.job_spec(
                config.run,
                timeout_seconds=5 * 60,
                environ=dict(
                    list(environment.items()) + [(
                        'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
                shortname='%s.test.%s' % (config.name, suite_name),
            ) for suite_name in tests_json for config in self.pythons
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        # Each selected runtime builds its own virtualenv.
        return [config.build for config in self.pythons]

    def post_tests_steps(self):
        if self.config.build_config != 'gcov':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/python_%s_%s' % (
            self._python_manager_name(), _docker_arch_suffix(self.args.arch))

    def _python_manager_name(self):
        """Choose the docker image to use based on python version."""
        if self.args.compiler in [
                'python2.7', 'python3.5', 'python3.6', 'python3.7'
        ]:
            return 'stretch_' + self.args.compiler[len('python'):]
        elif self.args.compiler == 'python_alpine':
            return 'alpine'
        elif self.args.compiler == 'python3.4':
            return 'jessie'
        else:
            return 'stretch_3.7'

    def _get_pythons(self, args):
        """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
        if args.arch == 'x86':
            bits = '32'
        else:
            bits = '64'

        if os.name == 'nt':
            # Windows builds run through msys2 bash with a MinGW toolchain.
            shell = ['bash']
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python_msys2.sh')
            ]
            builder_prefix_arguments = ['MINGW{}'.format(bits)]
            venv_relative_python = ['Scripts/python.exe']
            toolchain = ['mingw32']
        else:
            shell = []
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python.sh')
            ]
            builder_prefix_arguments = []
            venv_relative_python = ['bin/python']
            toolchain = ['unix']

        test_command = 'test_lite'
        if args.iomgr_platform == 'gevent':
            test_command = 'test_gevent'
        runner = [
            os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
        ]

        config_vars = _PythonConfigVars(
            shell, builder, builder_prefix_arguments, venv_relative_python,
            toolchain, runner, test_command, args.iomgr_platform)
        python27_config = _python_config_generator(
            name='py27',
            major='2',
            minor='7',
            bits=bits,
            config_vars=config_vars)
        python34_config = _python_config_generator(
            name='py34',
            major='3',
            minor='4',
            bits=bits,
            config_vars=config_vars)
        python35_config = _python_config_generator(
            name='py35',
            major='3',
            minor='5',
            bits=bits,
            config_vars=config_vars)
        python36_config = _python_config_generator(
            name='py36',
            major='3',
            minor='6',
            bits=bits,
            config_vars=config_vars)
        python37_config = _python_config_generator(
            name='py37',
            major='3',
            minor='7',
            bits=bits,
            config_vars=config_vars)
        pypy27_config = _pypy_config_generator(
            name='pypy', major='2', config_vars=config_vars)
        pypy32_config = _pypy_config_generator(
            name='pypy3', major='3', config_vars=config_vars)

        if args.compiler == 'default':
            if os.name == 'nt':
                return (python35_config,)
            else:
                return (
                    python27_config,
                    python37_config,
                )
        elif args.compiler == 'python2.7':
            return (python27_config,)
        elif args.compiler == 'python3.4':
            return (python34_config,)
        elif args.compiler == 'python3.5':
            return (python35_config,)
        elif args.compiler == 'python3.6':
            return (python36_config,)
        elif args.compiler == 'python3.7':
            return (python37_config,)
        elif args.compiler == 'pypy':
            return (pypy27_config,)
        elif args.compiler == 'pypy3':
            return (pypy32_config,)
        elif args.compiler == 'python_alpine':
            return (python27_config,)
        elif args.compiler == 'all_the_cpythons':
            return (
                python27_config,
                python34_config,
                python35_config,
                python36_config,
                python37_config,
            )
        else:
            raise Exception('Compiler %s not supported.' % args.compiler)

    def __str__(self):
        return 'python'
class RubyLanguage(object):
    """Language runner for Ruby."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        # Unit tests plus the separate end-to-end suite.
        tests = [
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby.sh'],
                timeout_seconds=10 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]
        tests.append(
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
                timeout_seconds=20 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return tests

    def pre_build_steps(self):
        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_ruby.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'ruby'
class CSharpLanguage(object):
    """Language runner for C# (mono and .NET Core runtimes)."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(self.args.compiler, ['default', 'coreclr'])
            _check_arch(self.args.arch, ['default'])
            self._cmake_arch_option = 'x64'
        else:
            _check_compiler(self.args.compiler, ['default', 'coreclr'])
            self._docker_distro = 'jessie'

    def test_specs(self):
        """One JobSpec per test (or per assembly under gcov on Windows)."""
        with open('src/csharp/tests.json') as f:
            tests_by_assembly = json.load(f)

        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
        nunit_args = ['--labels=All', '--noresult', '--workers=1']
        assembly_subdir = 'bin/%s' % msbuild_config
        assembly_extension = '.exe'

        if self.args.compiler == 'coreclr':
            assembly_subdir += '/netcoreapp1.0'
            runtime_cmd = ['dotnet', 'exec']
            assembly_extension = '.dll'
        else:
            assembly_subdir += '/net45'
            if self.platform == 'windows':
                runtime_cmd = []
            elif self.platform == 'mac':
                # mono before version 5.2 on MacOS defaults to 32bit runtime
                runtime_cmd = ['mono', '--arch=64']
            else:
                runtime_cmd = ['mono']

        specs = []
        for assembly in six.iterkeys(tests_by_assembly):
            assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
                                                       assembly_subdir,
                                                       assembly,
                                                       assembly_extension)
            if self.config.build_config != 'gcov' or self.platform != 'windows':
                # normally, run each test as a separate process
                for test in tests_by_assembly[assembly]:
                    cmdline = runtime_cmd + [assembly_file,
                                             '--test=%s' % test] + nunit_args
                    specs.append(
                        self.config.job_spec(
                            cmdline,
                            shortname='csharp.%s' % test,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
            else:
                # For C# test coverage, run all tests from the same assembly at once
                # using OpenCover.Console (only works on Windows).
                cmdline = [
                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
                    '-targetargs:%s' % ' '.join(nunit_args),
                    '-filter:+[Grpc.Core]*', '-register:user',
                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
                ]

                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
                # to prevent problems with registering the profiler.
                run_exclusive = 1000000
                specs.append(
                    self.config.job_spec(
                        cmdline,
                        shortname='csharp.coverage.%s' % assembly,
                        cpu_cost=run_exclusive,
                        environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs

    def pre_build_steps(self):
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
                self._cmake_arch_option
            ]]
        else:
            return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]

    def make_targets(self):
        # Only the native extension is built via make.
        return ['grpc_csharp_ext']

    def make_options(self):
        return []

    def build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/build_csharp.sh']]

    def post_tests_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]

    def makefile_name(self):
        if self.platform == 'windows':
            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
        else:
            # no need to set x86 specific flags as run_tests.py
            # currently forbids x86 C# builds on both Linux and MacOS.
            return 'cmake/build/Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/csharp_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return 'csharp'
class ObjCLanguage(object):
    """Objective-C test and example-build driver."""

    def configure(self, config, args):
        """Store run configuration; only the default compiler is supported."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Job specs: unit tests, plugin tests, CFStream tests, and one
        build job per example application."""
        specs = [
            self.config.job_spec(
                ['src/objective-c/tests/run_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['src/objective-c/tests/run_plugin_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-plugin-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
        ]
        # (shortname suffix, environment) per example build job.  Keeping
        # the tuples in this order preserves the original spec ordering.
        # NOTE: 'switftsample' reproduces the historical shortname typo on
        # purpose -- external reporting may key on it.
        example_builds = [
            ('helloworld', {
                'SCHEME': 'HelloWorld',
                'EXAMPLE_PATH': 'examples/objective-c/helloworld'
            }),
            ('routeguide', {
                'SCHEME': 'RouteGuideClient',
                'EXAMPLE_PATH': 'examples/objective-c/route_guide'
            }),
            ('authsample', {
                'SCHEME': 'AuthSample',
                'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
            }),
            ('sample', {
                'SCHEME': 'Sample',
                'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
            }),
            ('sample-frameworks', {
                'SCHEME': 'Sample',
                'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                'FRAMEWORKS': 'YES'
            }),
            ('switftsample', {
                'SCHEME': 'SwiftSample',
                'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
            }),
        ]
        for suffix, environ in example_builds:
            specs.append(
                self.config.job_spec(
                    ['src/objective-c/tests/build_one_example.sh'],
                    timeout_seconds=10 * 60,
                    shortname='objc-build-example-%s' % suffix,
                    cpu_cost=1e6,
                    environ=environ))
        specs.append(
            self.config.job_spec(
                ['test/core/iomgr/ios/CFStreamTests/run_tests.sh'],
                timeout_seconds=10 * 60,
                shortname='cfstream-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs

    def pre_build_steps(self):
        """Nothing to run before the build."""
        return []

    def make_targets(self):
        """interop_server is the only make target ObjC needs."""
        return ['interop_server']

    def make_options(self):
        """No extra make options."""
        return []

    def build_steps(self):
        """Scripts building the ObjC tests and the CFStream tests."""
        return [
            ['src/objective-c/tests/build_tests.sh'],
            ['test/core/iomgr/ios/CFStreamTests/build_tests.sh'],
        ]

    def post_tests_steps(self):
        """No post-test cleanup."""
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        """No docker image is associated with ObjC runs."""
        return None

    def __str__(self):
        return 'objc'
class Sanity(object):
    """Pseudo-language that runs the repository's sanity checks."""

    def configure(self, config, args):
        """Store the run configuration; only the default compiler works."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """One job per entry in sanity_tests.yaml."""
        import yaml
        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
            environ = {'TEST': 'true'}
            if _is_use_docker_child():
                # Already inside docker: the clang checks must not try to
                # spawn their own containers.
                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
                environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
            # FIX: yaml.safe_load -- the config is plain data, and
            # yaml.load without an explicit Loader is unsafe and deprecated
            # in PyYAML (can construct arbitrary Python objects).
            return [
                self.config.job_spec(
                    cmd['script'].split(),
                    timeout_seconds=30 * 60,
                    environ=environ,
                    cpu_cost=cmd.get('cpu_cost', 1))
                for cmd in yaml.safe_load(f)
            ]

    def pre_build_steps(self):
        """No pre-build work."""
        return []

    def make_targets(self):
        """Sanity only needs the dependency-check target."""
        return ['run_dep_checks']

    def make_options(self):
        """No extra make options."""
        return []

    def build_steps(self):
        """No extra build scripts."""
        return []

    def post_tests_steps(self):
        """No post-test cleanup."""
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        """Docker image with the sanity-check tooling installed."""
        return 'tools/dockerfile/test/sanity'

    def __str__(self):
        return 'sanity'
# different configurations we can run under
# configs.json is a list of dicts, each a Config(**kwargs) constructor call.
with open('tools/run_tests/generated/configs.json') as f:
    _CONFIGS = dict(
        (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))

# All languages the script can drive, keyed by their command-line name.
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'grpc-node': RemoteNodeLanguage(),
    'php': PhpLanguage(),
    'php7': Php7Language(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc': ObjCLanguage(),
    'sanity': Sanity()
}

# Maps a build config name to the msbuild configuration used on Windows.
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
}
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
    """Checks that the architecture option is usable on this platform.

    Exits the process with an error message when the requested architecture
    cannot be built here; returns None otherwise.
    """
    if platform_string() == 'windows':
        # _windows_arch_option exits on an unsupported value.
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            return
        elif runtime_arch == '64bit' and arch == 'x64':
            return
        elif runtime_arch == '32bit' and arch == 'x86':
            return
        else:
            print('Architecture %s does not match current runtime architecture.'
                  % arch)
            sys.exit(1)
    else:
        # FIX: previously this branch consulted the global args.arch instead
        # of the arch parameter that was passed in.
        if arch != 'default':
            print('Architecture %s not supported on current platform.' % arch)
            sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
    """Auxilary function to parse the "runs_per_test" flag.

    Returns:
      A positive integer or 0, the latter indicating an infinite number of
      runs.

    Raises:
      argparse.ArgumentTypeError: Upon invalid input.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0:
            raise ValueError
        return n
    except ValueError:
        # FIX: narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; only the conversion/validation
        # error means bad input.
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
def percent_type(arg_str):
    """argparse type function: a float constrained to the [0, 100] range."""
    pct = float(arg_str)
    if 0 <= pct <= 100:
        return pct
    raise argparse.ArgumentTypeError(
        "'%f' is not a valid percentage in the [0, 100] range" % pct)
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """True when a and b differ by at most the relative/absolute tolerance."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
# --- test selection and repetition ---
argp.add_argument(
    '-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
argp.add_argument(
    '-n',
    '--runs_per_test',
    default=1,
    type=runs_per_test_type,
    help='A positive integer or "inf". If "inf", all tests will run in an '
    'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument(
    '-p',
    '--sample_percent',
    default=100.0,
    type=percent_type,
    help='Run a random sample with that percentage of tests')
# --- run-mode flags ---
argp.add_argument(
    '-f', '--forever', default=False, action='store_const', const=True)
argp.add_argument(
    '-t', '--travis', default=False, action='store_const', const=True)
argp.add_argument(
    '--newline_on_success', default=False, action='store_const', const=True)
argp.add_argument(
    '-l',
    '--language',
    choices=sorted(_LANGUAGES.keys()),
    nargs='+',
    required=True)
argp.add_argument(
    '-S', '--stop_on_failure', default=False, action='store_const', const=True)
argp.add_argument(
    '--use_docker',
    default=False,
    action='store_const',
    const=True,
    help='Run all the tests under docker. That provides ' +
    'additional isolation and prevents the need to install ' +
    'language specific prerequisites. Only available on Linux.')
argp.add_argument(
    '--allow_flakes',
    default=False,
    action='store_const',
    const=True,
    help=
    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
# --- toolchain selection ---
argp.add_argument(
    '--arch',
    choices=['default', 'x86', 'x64'],
    default='default',
    help=
    'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
    '--compiler',
    choices=[
        'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc7.2',
        'gcc_musl', 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'clang7.0',
        'python2.7', 'python3.4', 'python3.5', 'python3.6', 'python3.7', 'pypy',
        'pypy3', 'python_alpine', 'all_the_cpythons', 'electron1.3',
        'electron1.6', 'coreclr', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
    ],
    default='default',
    help=
    'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument(
    '--iomgr_platform',
    choices=['native', 'uv', 'gevent'],
    default='native',
    help='Selects iomgr platform to build on')
argp.add_argument(
    '--build_only',
    default=False,
    action='store_const',
    const=True,
    help='Perform all the build steps but don\'t run any tests.')
argp.add_argument(
    '--measure_cpu_costs',
    default=False,
    action='store_const',
    const=True,
    help='Measure the cpu costs of tests')
argp.add_argument(
    '--update_submodules',
    default=[],
    nargs='*',
    help=
    'Update some submodules before building. If any are updated, also run generate_projects. '
    +
    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
# --- reporting ---
argp.add_argument(
    '-x',
    '--xml_report',
    default=None,
    type=str,
    help='Generates a JUnit-compatible XML report')
argp.add_argument(
    '--report_suite_name',
    default='tests',
    type=str,
    help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
    '--quiet_success',
    default=False,
    action='store_const',
    const=True,
    help=
    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
    + 'Useful when running many iterations of each test (argument -n).')
# --- polling-engine control ---
argp.add_argument(
    '--force_default_poller',
    default=False,
    action='store_const',
    const=True,
    help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
    '--force_use_pollers',
    default=None,
    type=str,
    help='Only use the specified comma-delimited list of polling engines. '
    'Example: --force_use_pollers epoll1,poll '
    ' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument(
    '--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
argp.add_argument(
    '--bq_result_table',
    default='',
    type=str,
    nargs='?',
    help='Upload test results to a specified BQ table.')
argp.add_argument(
    '--auto_set_flakes',
    default=False,
    const=True,
    action='store_const',
    help=
    'Allow repeated runs for tests that have been failing recently (based on BQ historical data).'
)
args = argp.parse_args()

# Per-test metadata pulled from BigQuery when --auto_set_flakes is on:
# tests known to flake recently, and measured per-test cpu costs.
flaky_tests = set()
shortname_to_cpu = {}
if args.auto_set_flakes:
    try:
        for test in get_bqtest_data():
            if test.flaky: flaky_tests.add(test.name)
            if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
    except Exception:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; BQ failures remain best-effort (just logged).
        print(
            "Unexpected error getting flaky tests: %s" % traceback.format_exc())

# Polling-strategy overrides from the command line.
if args.force_default_poller:
    _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')

jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
    # spec is "SUBMODULE_NAME[:BRANCH]"; branch defaults to master.
    spec = spec.split(':', 1)
    if len(spec) == 1:
        submodule = spec[0]
        branch = 'master'
    elif len(spec) == 2:
        submodule = spec[0]
        branch = spec[1]
    cwd = 'third_party/%s' % submodule

    def git(cmd, cwd=cwd):
        # cwd bound as a default on purpose: captures this iteration's value.
        print('in %s: git %s' % (cwd, cmd))
        run_shell_command('git %s' % cmd, cwd=cwd)

    git('fetch')
    git('checkout %s' % branch)
    git('pull origin %s' % branch)
    # A submodule with its own gen_build_yaml.py feeds project generation.
    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
        need_to_regenerate_projects = True
if need_to_regenerate_projects:
    if jobset.platform_string() == 'linux':
        run_shell_command('tools/buildgen/generate_projects.sh')
    else:
        print(
            'WARNING: may need to regenerate projects, but since we are not on')
        print(
            '         Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

# Configure every requested language with the chosen config/arguments.
languages = set(_LANGUAGES[l] for l in args.language)
for l in languages:
    l.configure(run_config, args)

language_make_options = []
if any(language.make_options() for language in languages):
    if not 'gcov' in args.config and len(languages) != 1:
        print(
            'languages with custom make options cannot be built simultaneously with other languages'
        )
        sys.exit(1)
    else:
        # Combining make options is not clean and just happens to work. It allows C & C++ to build
        # together, and is only used under gcov. All other configs should build languages individually.
        language_make_options = list(
            set([
                make_option
                for lang in languages
                for make_option in lang.make_options()
            ]))

if args.use_docker:
    # Re-invoke this script inside a docker container and exit: the child
    # sees RUN_TESTS_COMMAND set and runs the actual tests.
    if not args.travis:
        print('Seen --use_docker flag, will run tests under docker.')
        print('')
        print(
            'IMPORTANT: The changes you are testing need to be locally committed'
        )
        print(
            'because only the committed changes in the current branch will be')
        print('copied to the docker environment.')
        time.sleep(5)

    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
    if len(dockerfile_dirs) > 1:
        if 'gcov' in args.config:
            dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
            print(
                'Using multilang_jessie_x64 docker image for code coverage for '
                'all languages.')
        else:
            print(
                'Languages to be tested require running under different docker '
                'images.')
            sys.exit(1)
    else:
        dockerfile_dir = next(iter(dockerfile_dirs))

    # Drop the --use_docker flag from the child command line.
    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
        child_argv[1:])

    env = os.environ.copy()
    env['RUN_TESTS_COMMAND'] = run_tests_cmd
    env['DOCKERFILE_DIR'] = dockerfile_dir
    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
    if args.xml_report:
        env['XML_REPORT'] = args.xml_report
    if not args.travis:
        env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.

    subprocess.check_call(
        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
        shell=True,
        env=env)
    sys.exit(0)

_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
    """Return the build jobs for `targets` under configuration `cfg`.

    On Windows the build goes through `cmake --build` (one job per target);
    elsewhere it is a single make invocation, either inside cmake/build or
    against the legacy top-level makefile.
    """
    if platform_string() == 'windows':
        # cmake drives msbuild; emit one job per target.
        return [
            jobset.JobSpec(
                [
                    'cmake', '--build', '.', '--target',
                    '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
                ],
                cwd=os.path.dirname(makefile),
                timeout_seconds=None) for target in targets
        ]
    if not targets:
        return []
    if makefile.startswith('cmake/build/'):
        # With cmake, we've passed all the build configuration in the pre-build step already
        cmdline = [os.getenv('MAKE', 'make'), '-j', '%d' % args.jobs] + targets
        return [
            jobset.JobSpec(cmdline, cwd='cmake/build', timeout_seconds=None)
        ]
    cmdline = [
        os.getenv('MAKE', 'make'), '-f', makefile, '-j',
        '%d' % args.jobs,
        'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
        'CONFIG=%s' % cfg, 'Q='
    ] + language_make_options
    if args.travis:
        cmdline += ['JENKINS_BUILD=1']
    return [jobset.JobSpec(cmdline + targets, timeout_seconds=None)]
# Aggregate every selected language's build targets, grouped by the
# makefile that builds them.
make_targets = {}
for l in languages:
    makefile = l.makefile_name()
    make_targets[makefile] = make_targets.get(makefile, set()).union(
        set(l.make_targets()))
def build_step_environ(cfg):
    """Environment for build steps: CONFIG, plus MSBUILD_CONFIG when mapped."""
    result = {'CONFIG': cfg}
    mapped = _MSBUILD_CONFIG.get(cfg)
    if mapped:
        result['MSBUILD_CONFIG'] = mapped
    return result
# Assemble the build pipeline: per-language pre-build scripts, then the
# make/cmake invocations, then per-language build scripts (all deduplicated).
build_steps = list(
    set(
        jobset.JobSpec(
            cmdline, environ=build_step_environ(build_config), flake_retries=2)
        for l in languages
        for cmdline in l.pre_build_steps()))
if make_targets:
    make_commands = itertools.chain.from_iterable(
        make_jobspec(build_config, list(targets), makefile)
        for (makefile, targets) in make_targets.items())
    build_steps.extend(set(make_commands))
build_steps.extend(
    set(
        jobset.JobSpec(
            cmdline,
            environ=build_step_environ(build_config),
            timeout_seconds=None)
        for l in languages
        for cmdline in l.build_steps()))

# Cleanup scripts run after the test phase completes.
post_tests_steps = list(
    set(
        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
        for l in languages
        for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
    """Ask a legacy port server listening on `legacy_server_port` to quit.

    Best-effort: if the version probe gets no valid answer, there is no
    legacy server to shut down and the function silently returns.
    """
    try:
        # Probe: any parseable integer response means a server is listening.
        int(
            urllib.request.urlopen(
                'http://localhost:%d/version_number' % legacy_server_port,
                timeout=10).read())
    except Exception:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any other failure means "no server running".
        pass
    else:
        urllib.request.urlopen(
            'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
    """Sentinel error categories returned by _build_and_run()."""
    BUILD = object()  # a build step failed
    TEST = object()  # at least one test failed
    POST_TEST = object()  # a post-test step failed
def _has_epollexclusive():
    """True when the check_epollexclusive helper exists and exits cleanly."""
    binary = 'bins/%s/check_epollexclusive' % args.config
    if not os.path.exists(binary):
        return False
    try:
        subprocess.check_call(binary)
    except subprocess.CalledProcessError:
        return False
    except OSError:
        # For languages other than C and Windows the binary won't exist
        return False
    else:
        return True
# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
                   newline_on_success,
                   xml_report=None,
                   build_only=False):
    """Do one pass of building & running tests.

    Args:
      check_cancelled: zero-argument callable polled by the job runner;
        returning True aborts the test run.
      newline_on_success: forwarded to jobset.run (output formatting).
      xml_report: optional path for a JUnit-compatible XML report.
      build_only: when True, stop after the build phase.

    Returns:
      A list of BuildAndRunError members (empty list on full success).
    """
    # build latest sequentially
    num_failures, resultset = jobset.run(
        build_steps,
        maxjobs=1,
        stop_on_failure=True,
        newline_on_success=newline_on_success,
        travis=args.travis)
    if num_failures:
        return [BuildAndRunError.BUILD]

    if build_only:
        if xml_report:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)
        return []

    # Drop the epollex polling strategy when the helper binary reports the
    # kernel does not support EPOLLEXCLUSIVE.
    if not args.travis and not _has_epollexclusive() and platform_string(
    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
            platform_string()]:
        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
        _POLLING_STRATEGIES[platform_string()].remove('epollex')

    # start antagonists
    antagonists = [
        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
        for _ in range(0, args.antagonists)
    ]
    start_port_server.start_port_server()
    resultset = None
    num_test_failures = 0
    try:
        infinite_runs = runs_per_test == 0
        # Collect every spec matching --regex and not matching --regex_exclude.
        one_run = set(
            spec for language in languages for spec in language.test_specs()
            if (re.search(args.regex, spec.shortname) and
                (args.regex_exclude == '' or
                 not re.search(args.regex_exclude, spec.shortname))))
        # When running on travis, we want out test runs to be as similar as possible
        # for reproducibility purposes.
        if args.travis and args.max_time <= 0:
            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
        else:
            # whereas otherwise, we want to shuffle things up to give all tests a
            # chance to run.
            massaged_one_run = list(
                one_run)  # random.sample needs an indexable seq.
            num_jobs = len(massaged_one_run)
            # for a random sample, get as many as indicated by the 'sample_percent'
            # argument. By default this arg is 100, resulting in a shuffle of all
            # jobs.
            sample_size = int(num_jobs * args.sample_percent / 100.0)
            massaged_one_run = random.sample(massaged_one_run, sample_size)
            if not isclose(args.sample_percent, 100.0):
                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
                print("Running %d tests out of %d (~%d%%)" %
                      (sample_size, num_jobs, args.sample_percent))
        if infinite_runs:
            assert len(massaged_one_run
                       ) > 0, 'Must have at least one test for a -n inf run'
        runs_sequence = (itertools.repeat(massaged_one_run)
                         if infinite_runs else itertools.repeat(
                             massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)

        if args.quiet_success:
            jobset.message(
                'START',
                'Running tests quietly, only failing tests will be reported',
                do_newline=True)
        num_test_failures, resultset = jobset.run(
            all_runs,
            check_cancelled,
            newline_on_success=newline_on_success,
            travis=args.travis,
            maxjobs=args.jobs,
            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
            stop_on_failure=args.stop_on_failure,
            quiet_success=args.quiet_success,
            max_time=args.max_time)
        if resultset:
            # Distinguish hard failures (all runs failed) from flakes.
            for k, v in sorted(resultset.items()):
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures > 0:
                    if num_failures == num_runs:  # what about infinite_runs???
                        jobset.message('FAILED', k, do_newline=True)
                    else:
                        jobset.message(
                            'FLAKE',
                            '%s [%d/%d runs flaked]' % (k, num_failures,
                                                        num_runs),
                            do_newline=True)
    finally:
        for antagonist in antagonists:
            antagonist.kill()
        if args.bq_result_table and resultset:
            upload_extra_fields = {
                'compiler': args.compiler,
                'config': args.config,
                'iomgr_platform': args.iomgr_platform,
                'language': args.language[
                    0],  # args.language is a list but will always have one element when uploading to BQ is enabled.
                'platform': platform_string()
            }
            upload_results_to_bq(resultset, args.bq_result_table,
                                 upload_extra_fields)
        if xml_report and resultset:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)

    number_failures, _ = jobset.run(
        post_tests_steps,
        maxjobs=1,
        stop_on_failure=False,
        newline_on_success=newline_on_success,
        travis=args.travis)

    out = []
    if number_failures:
        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)

    return out
if forever:
    # Watch mode: rebuild and re-run whenever sources change.
    success = True
    while True:
        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
        initial_time = dw.most_recent_change()
        have_files_changed = lambda: dw.most_recent_change() != initial_time
        previous_success = success
        errors = _build_and_run(
            check_cancelled=have_files_changed,
            newline_on_success=False,
            build_only=args.build_only)
        # FIX: the old code wrote `errors = _build_and_run(...) == 0`,
        # comparing the returned *list* with 0 (always False), so `success`
        # was never updated and the recovery message below could never fire.
        success = not errors
        if not previous_success and success:
            jobset.message(
                'SUCCESS',
                'All tests are now passing properly',
                do_newline=True)
        jobset.message('IDLE', 'No change detected')
        # Poll until the watched directories change again.
        while not have_files_changed():
            time.sleep(1)
else:
    # Single pass: build, run, report, and exit with a bitmask status.
    errors = _build_and_run(
        check_cancelled=lambda: False,
        newline_on_success=args.newline_on_success,
        xml_report=args.xml_report,
        build_only=args.build_only)
    if not errors:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
    else:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    # Exit code bits: 1 = build failure, 2 = test failure, 4 = post-test failure.
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
    sys.exit(exit_code)
| 35.096245 | 135 | 0.566773 |
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
# BQ upload support is optional; only needed when --bq_result_table is used.
try:
    from python_utils.upload_test_results import upload_results_to_bq
except (ImportError):
    pass

# Make the shared GCP helper modules importable.
gcp_utils_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)

# All paths in this script are relative to the repository root.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)

# Extra environment passed to wrapper-script based tests.
_FORCE_ENVIRON_FOR_WRAPPERS = {
    'GRPC_VERBOSITY': 'DEBUG',
}

# Polling engines exercised per platform (tests repeat once per strategy).
_POLLING_STRATEGIES = {
    'linux': ['epollex', 'epoll1', 'poll', 'poll-cv'],
    'mac': ['poll'],
}

# One row of per-test data fetched from BigQuery: name, flaky flag, cpu cost.
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
def get_bqtest_data(limit=None):
    """Query BigQuery for last week's per-test flakiness and cpu cost data.

    Args:
      limit: optional row limit appended to the query (None = no limit).

    Returns:
      A list of BigQueryTestData(name, flaky, cpu) tuples.
    """
    import big_query_utils

    bq = big_query_utils.create_big_query()
    query = """
SELECT
  filtered_test_name,
  SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
  MAX(cpu_measured) + 0.01 as cpu
  FROM (
  SELECT
    REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
    result, cpu_measured
  FROM
    [grpc-testing:jenkins_test_results.aggregate_results]
  WHERE
    timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
    AND platform = '""" + platform_string() + """'
    AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
  filtered_test_name"""
    if limit:
        query += " limit {}".format(limit)
    query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
    page = bq.jobs().getQueryResults(
        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
    # BQ rows come back as {'f': [{'v': ...}, ...]}, columns in SELECT order:
    # name, flaky (string 'true'/'false'), cpu.
    test_data = [
        BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
                         float(row['f'][2]['v'])) for row in page['rows']
    ]
    return test_data
def platform_string():
    """Delegates to jobset.platform_string() (host platform name)."""
    return jobset.platform_string()


# Default per-test timeout; Config.job_spec scales it per configuration.
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
    """Run `cmd` through the shell; log and re-raise when it fails."""
    try:
        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
    except subprocess.CalledProcessError as exc:
        logging.exception(
            "Error while running command '%s'. Exit status %d. Output:\n%s",
            exc.cmd, exc.returncode, exc.output)
        raise
def max_parallel_tests_for_current_platform():
    """Cap on cpu-agnostic test parallelism for this host."""
    # Too much test parallelization has only been seen to be a problem
    # so far on windows.
    return 64 if jobset.platform_string() == 'windows' else 1024
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
    """A named build/run configuration (e.g. 'opt', 'dbg', 'asan')."""

    def __init__(self,
                 config,
                 environ=None,
                 timeout_multiplier=1,
                 tool_prefix=None,
                 iomgr_platform='native'):
        """Args:
          config: configuration name; also exported as CONFIG in environ.
          environ: extra environment variables applied to every job.
          timeout_multiplier: scales every job's timeout.
          tool_prefix: command-line prefix prepended to each job's cmdline.
          iomgr_platform: iomgr implementation this config targets.
        """
        if environ is None:
            environ = {}
        # FIX: tool_prefix previously defaulted to a shared mutable list
        # ([]); use a None sentinel instead (same observable behavior).
        if tool_prefix is None:
            tool_prefix = []
        self.build_config = config
        self.environ = environ
        self.environ['CONFIG'] = config
        self.tool_prefix = tool_prefix
        self.timeout_multiplier = timeout_multiplier
        self.iomgr_platform = iomgr_platform

    def job_spec(self,
                 cmdline,
                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None,
                 environ=None,
                 cpu_cost=1.0,
                 flaky=False):
        """Construct a jobset.JobSpec for a test under this config.

        Args:
          cmdline: list of strings; 'tool_prefix' is prepended.
          timeout_seconds: base timeout, scaled by timeout_multiplier
            (None disables the timeout entirely).
          shortname: display name; also keys flakiness/cpu lookups.
          environ: per-job environment overrides merged over self.environ.
          cpu_cost: relative cpu cost (overridden by BQ-measured data).
          flaky: force flake retries for this job.
        """
        # FIX: environ previously defaulted to a shared mutable dict ({}).
        if environ is None:
            environ = {}
        actual_environ = self.environ.copy()
        for k, v in environ.items():
            actual_environ[k] = v
        if not flaky and shortname and shortname in flaky_tests:
            flaky = True
        if shortname in shortname_to_cpu:
            cpu_cost = shortname_to_cpu[shortname]
        return jobset.JobSpec(
            cmdline=self.tool_prefix + cmdline,
            shortname=shortname,
            environ=actual_environ,
            cpu_cost=cpu_cost,
            timeout_seconds=(self.timeout_multiplier * timeout_seconds
                             if timeout_seconds else None),
            flake_retries=4 if flaky or args.allow_flakes else 0,
            timeout_retries=1 if flaky or args.allow_flakes else 0)
def get_c_tests(travis, test_lang):
    """Load the C/C++ test targets runnable for `test_lang` on this platform.

    Args:
      travis: when True, use the 'ci_platforms' field and skip flaky tests.
      test_lang: language tag to match against each target ('c' or 'c++').

    Returns:
      The matching target dicts from tools/run_tests/generated/tests.json.
    """
    # FIX: removed the dead local `out = []` that was never used.
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/generated/tests.json') as f:
        js = json.load(f)
    return [
        tgt for tgt in js
        if tgt['language'] == test_lang and platform_string() in
        tgt[platforms_str] and not (travis and tgt['flaky'])
    ]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception(
'Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
return True if os.getenv('RUN_TESTS_COMMAND') else False
# Bundle of knobs used to generate a PythonConfig per interpreter flavor.
_PythonConfigVars = collections.namedtuple('_ConfigVars', [
    'shell',
    'builder',
    'builder_prefix_arguments',
    'venv_relative_python',
    'toolchain',
    'runner',
    'test_name',
    'iomgr_platform',
])
def _python_config_generator(name, major, minor, bits, config_vars):
    """Build a PythonConfig for the CPython version major.minor/bits."""
    name += '_' + config_vars.iomgr_platform
    interpreter = _python_pattern_function(major=major, minor=minor, bits=bits)
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments + [interpreter] +
                 [name] + config_vars.venv_relative_python +
                 config_vars.toolchain)
    run_cmd = config_vars.shell + config_vars.runner + [
        os.path.join(name, config_vars.venv_relative_python[0]),
        config_vars.test_name
    ]
    return PythonConfig(name, build_cmd, run_cmd)
def _pypy_config_generator(name, major, config_vars):
    """Build a PythonConfig for the PyPy interpreter of `major` version."""
    interpreter = _pypy_pattern_function(major=major)
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments + [interpreter] +
                 [name] + config_vars.venv_relative_python +
                 config_vars.toolchain)
    run_cmd = config_vars.shell + config_vars.runner + [
        os.path.join(name, config_vars.venv_relative_python[0])
    ]
    return PythonConfig(name, build_cmd, run_cmd)
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
    """Driver for the core C and C++ test suites."""

    def __init__(self, make_target, test_lang):
        """Remember the make target / test language; cache the host platform."""
        self.make_target = make_target
        self.test_lang = test_lang
        # configure() branches on the platform, so look it up once here.
        self.platform = platform_string()
def configure(self, config, args):
    """Apply command-line options: choose the build system (cmake vs make),
    docker distro and extra make options for the C/C++ build."""
    self.config = config
    self.args = args
    if self.platform == 'windows':
        # Windows always builds through cmake + Visual Studio.
        _check_compiler(
            self.args.compiler,
            ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
        _check_arch(self.args.arch, ['default', 'x64', 'x86'])
        self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
        self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
        self._use_cmake = True
        self._make_options = []
    elif self.args.compiler == 'cmake':
        _check_arch(self.args.arch, ['default'])
        self._use_cmake = True
        self._docker_distro = 'jessie'
        self._make_options = []
    else:
        # Legacy make build: distro and options depend on the compiler.
        self._use_cmake = False
        self._docker_distro, self._make_options = self._compiler_options(
            self.args.use_docker, self.args.compiler)
    if args.iomgr_platform == "uv":
        cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
        try:
            # pkg-config may be absent; fall back to the defaults below.
            # NOTE(review): check_output returns bytes on Python 3, so this
            # str concatenation assumes Python 2 semantics -- confirm.
            cflags += subprocess.check_output(
                ['pkg-config', '--cflags', 'libuv']).strip() + ' '
        except (subprocess.CalledProcessError, OSError):
            pass
        try:
            ldflags = subprocess.check_output(
                ['pkg-config', '--libs', 'libuv']).strip() + ' '
        except (subprocess.CalledProcessError, OSError):
            ldflags = '-luv '
        self._make_options += [
            'EXTRA_CPPFLAGS={}'.format(cflags),
            'EXTRA_LDLIBS={}'.format(ldflags)
        ]
def test_specs(self):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
if self._use_cmake and target.get('boringssl', False):
continue
auto_timeout_scaling = target.get('auto_timeout_scaling', True)
polling_strategies = (_POLLING_STRATEGIES.get(
self.platform, ['all']) if target.get('uses_polling', True) else
['none'])
if self.args.iomgr_platform == 'uv':
polling_strategies = ['all']
for polling_strategy in polling_strategies:
env = {
'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
_ROOT + '/src/core/tsi/test_creds/ca.pem',
'GRPC_POLL_STRATEGY':
polling_strategy,
'GRPC_VERBOSITY':
'DEBUG'
}
resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
if resolver:
env['GRPC_DNS_RESOLVER'] = resolver
shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
if polling_strategy in target.get('excluded_poll_engines', []):
continue
timeout_scaling = 1
if auto_timeout_scaling:
config = self.args.config
if ('asan' in config or config == 'msan' or
config == 'tsan' or config == 'ubsan' or
config == 'helgrind' or config == 'memcheck'):
# Scale overall test timeout if running under various sanitizers.
# scaling value is based on historical data analysis
timeout_scaling *= 3
elif polling_strategy == 'poll-cv':
# scale test timeout if running with poll-cv
# sanitizer and poll-cv scaling is not cumulative to ensure
# reasonable timeout values.
# TODO(jtattermusch): based on historical data and 5min default
# test timeout poll-cv scaling is currently not useful.
# Leaving here so it can be reintroduced if the default test timeout
# is decreased in the future.
timeout_scaling *= 1
if self.config.build_config in target['exclude_configs']:
continue
if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
continue
if self.platform == 'windows':
binary = 'cmake/build/%s/%s.exe' % (
_MSBUILD_CONFIG[self.config.build_config],
target['name'])
else:
if self._use_cmake:
binary = 'cmake/build/%s' % target['name']
else:
binary = 'bins/%s/%s' % (self.config.build_config,
target['name'])
cpu_cost = target['cpu_cost']
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
list_test_command = None
filter_test_command = None
# these are the flag defined by gtest and benchmark framework to list
# and filter test runs. We use them to split each individual test
# into its own JobSpec, and thus into its own process.
if 'benchmark' in target and target['benchmark']:
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output(
[binary, '--benchmark_list_tests'],
stderr=fnull)
for line in tests.split('\n'):
test = line.strip()
if not test: continue
cmdline = [binary,
'--benchmark_filter=%s$' % test
] + target['args']
out.append(
self.config.job_spec(
cmdline,
shortname='%s %s' % (' '.join(cmdline),
shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=target.get(
'timeout_seconds',
_DEFAULT_TIMEOUT_SECONDS) *
timeout_scaling,
environ=env))
elif 'gtest' in target and target['gtest']:
# here we parse the output of --gtest_list_tests to build up a complete
# list of the tests contained in a binary for each test, we then
# add a job to run, filtering for just that test.
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output(
[binary, '--gtest_list_tests'], stderr=fnull)
base = None
for line in tests.split('\n'):
i = line.find('
if i >= 0: line = line[:i]
if not line: continue
if line[0] != ' ':
base = line.strip()
else:
assert base is not None
assert line[1] == ' '
test = base + line.strip()
cmdline = [binary,
'--gtest_filter=%s' % test
] + target['args']
out.append(
self.config.job_spec(
cmdline,
shortname='%s %s' % (' '.join(cmdline),
shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=target.get(
'timeout_seconds',
_DEFAULT_TIMEOUT_SECONDS) *
timeout_scaling,
environ=env))
else:
cmdline = [binary] + target['args']
shortname = target.get('shortname', ' '.join(
pipes.quote(arg) for arg in cmdline))
shortname += shortname_ext
out.append(
self.config.job_spec(
cmdline,
shortname=shortname,
cpu_cost=cpu_cost,
flaky=target.get('flaky', False),
timeout_seconds=target.get(
'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
* timeout_scaling,
environ=env))
elif self.args.regex == '.*' or self.platform == 'windows':
print('\nWARNING: binary not found, skipping', binary)
return sorted(out)
def make_targets(self):
if self.platform == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
return [
'buildtests_%s' % self.make_target,
'tools_%s' % self.make_target, 'check_epollexclusive'
]
def make_options(self):
return self._make_options
def pre_build_steps(self):
if self.platform == 'windows':
return [[
'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
self._cmake_generator_option, self._cmake_arch_option
]]
elif self._use_cmake:
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
else:
return []
def build_steps(self):
return []
def post_tests_steps(self):
if self.platform == 'windows':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
if self._use_cmake:
return 'cmake/build/Makefile'
else:
return 'Makefile'
def _clang_make_options(self, version_suffix=''):
if self.args.config == 'ubsan':
return [
'CC=clang%s' % version_suffix,
'CXX=clang++%s' % version_suffix,
'LD=clang++%s' % version_suffix,
'LDXX=clang++%s' % version_suffix
]
return [
'CC=clang%s' % version_suffix,
'CXX=clang++%s' % version_suffix,
'LD=clang%s' % version_suffix,
'LDXX=clang++%s' % version_suffix
]
def _gcc_make_options(self, version_suffix):
return [
'CC=gcc%s' % version_suffix,
'CXX=g++%s' % version_suffix,
'LD=gcc%s' % version_suffix,
'LDXX=g++%s' % version_suffix
]
def _compiler_options(self, use_docker, compiler):
if not use_docker and not _is_use_docker_child():
_check_compiler(compiler, ['default'])
if compiler == 'gcc4.9' or compiler == 'default':
return ('jessie', [])
elif compiler == 'gcc4.8':
return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
elif compiler == 'gcc7.2':
return ('ubuntu1710', [])
elif compiler == 'gcc_musl':
return ('alpine', [])
elif compiler == 'clang3.4':
return ('ubuntu1404', self._clang_make_options())
elif compiler == 'clang3.5':
return ('jessie', self._clang_make_options(version_suffix='-3.5'))
elif compiler == 'clang3.6':
return ('ubuntu1604',
self._clang_make_options(version_suffix='-3.6'))
elif compiler == 'clang3.7':
return ('ubuntu1604',
self._clang_make_options(version_suffix='-3.7'))
elif compiler == 'clang7.0':
# clang++-7.0 alias doesn't exist and there are no other clang versions
return ('sanitizers_jessie', self._clang_make_options())
else:
raise Exception('Compiler %s not supported.' % compiler)
def dockerfile_dir(self):
return 'tools/dockerfile/test/cxx_%s_%s' % (
self._docker_distro, _docker_arch_suffix(self.args.arch))
def __str__(self):
return self.make_target
class RemoteNodeLanguage(object):
    """Language abstraction delegating Node.js tests to the grpc-node repo."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        """Record config and derive runtime ('node'/'electron') and version."""
        self.config = config
        self.args = args
        # we should specify in the compiler argument
        _check_compiler(self.args.compiler, [
            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
            'electron1.3', 'electron1.6'
        ])
        compiler = self.args.compiler
        if compiler == 'default':
            self.runtime, self.node_version = 'node', '8'
        elif compiler.startswith('electron'):
            self.runtime = 'electron'
            self.node_version = compiler[len('electron'):]
        else:
            self.runtime = 'node'
            # Take off the word "node"
            self.node_version = compiler[len('node'):]

    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
    def test_specs(self):
        """Single JobSpec delegating to the platform's helper script."""
        if self.platform != 'windows':
            return [
                self.config.job_spec(
                    ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
                    None,
                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
            ]
        return [
            self.config.job_spec(
                ['tools\\run_tests\\helper_scripts\\run_node.bat'])
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'grpc-node'
class PhpLanguage(object):
    """Language abstraction for the PHP 5 test suite."""

    def configure(self, config, args):
        """Record config; PHP builds with embedded OpenSSL/zlib only."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        """Single JobSpec that runs the whole PHP test script."""
        spec = self.config.job_spec(
            ['src/php/bin/run_tests.sh'],
            environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # PHP extension links against the C core libraries.
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php'
class Php7Language(object):
    """Language abstraction for the PHP 7 test suite."""

    def configure(self, config, args):
        """Record config; PHP builds with embedded OpenSSL/zlib only."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        """Single JobSpec that runs the whole PHP test script."""
        spec = self.config.job_spec(
            ['src/php/bin/run_tests.sh'],
            environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # PHP extension links against the C core libraries.
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php7'
class PythonConfig(
        collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
    """One Python interpreter configuration to test against.

    Fields: `name` (display name, e.g. 'py37'), `build` (command building
    the venv/extension for that interpreter), `run` (command executing the
    tests under it).

    NOTE: the original extracted source had no class body (its docstring
    was stripped), which is a SyntaxError; this docstring restores a valid
    body without changing behavior.
    """
class PythonLanguage(object):
    """Language abstraction for building/running the Python test suites."""

    def configure(self, config, args):
        """Record run config/args and resolve the interpreter configs to test."""
        self.config = config
        self.args = args
        self.pythons = self._get_pythons(self.args)

    def test_specs(self):
        """One JobSpec per (interpreter config, test suite) pair."""
        # load list of known test suites
        with open(
                'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
            tests_json = json.load(tests_json_file)
        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
        return [
            self.config.job_spec(
                config.run,
                timeout_seconds=5 * 60,
                # the runner filters to a single suite via this env var
                environ=dict(
                    list(environment.items()) + [(
                        'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
                shortname='%s.test.%s' % (config.name, suite_name),
            ) for suite_name in tests_json for config in self.pythons
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        # one build command per interpreter being tested
        return [config.build for config in self.pythons]

    def post_tests_steps(self):
        # coverage post-processing only applies to the gcov config
        if self.config.build_config != 'gcov':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/python_%s_%s' % (
            self._python_manager_name(), _docker_arch_suffix(self.args.arch))

    def _python_manager_name(self):
        """Choose the docker image flavor matching the requested compiler."""
        if self.args.compiler in [
                'python2.7', 'python3.5', 'python3.6', 'python3.7'
        ]:
            return 'stretch_' + self.args.compiler[len('python'):]
        elif self.args.compiler == 'python_alpine':
            return 'alpine'
        elif self.args.compiler == 'python3.4':
            return 'jessie'
        else:
            return 'stretch_3.7'

    def _get_pythons(self, args):
        """Return the tuple of PythonConfigs selected by --compiler/--arch."""
        if args.arch == 'x86':
            bits = '32'
        else:
            bits = '64'

        if os.name == 'nt':
            # On Windows the build runs through msys2 bash with a MinGW toolchain.
            shell = ['bash']
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python_msys2.sh')
            ]
            builder_prefix_arguments = ['MINGW{}'.format(bits)]
            venv_relative_python = ['Scripts/python.exe']
            toolchain = ['mingw32']
        else:
            shell = []
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python.sh')
            ]
            builder_prefix_arguments = []
            venv_relative_python = ['bin/python']
            toolchain = ['unix']

        test_command = 'test_lite'
        if args.iomgr_platform == 'gevent':
            test_command = 'test_gevent'
        runner = [
            os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
        ]

        config_vars = _PythonConfigVars(
            shell, builder, builder_prefix_arguments, venv_relative_python,
            toolchain, runner, test_command, args.iomgr_platform)
        python27_config = _python_config_generator(
            name='py27',
            major='2',
            minor='7',
            bits=bits,
            config_vars=config_vars)
        python34_config = _python_config_generator(
            name='py34',
            major='3',
            minor='4',
            bits=bits,
            config_vars=config_vars)
        python35_config = _python_config_generator(
            name='py35',
            major='3',
            minor='5',
            bits=bits,
            config_vars=config_vars)
        python36_config = _python_config_generator(
            name='py36',
            major='3',
            minor='6',
            bits=bits,
            config_vars=config_vars)
        python37_config = _python_config_generator(
            name='py37',
            major='3',
            minor='7',
            bits=bits,
            config_vars=config_vars)
        pypy27_config = _pypy_config_generator(
            name='pypy', major='2', config_vars=config_vars)
        pypy32_config = _pypy_config_generator(
            name='pypy3', major='3', config_vars=config_vars)

        if args.compiler == 'default':
            if os.name == 'nt':
                return (python35_config,)
            else:
                return (
                    python27_config,
                    python37_config,
                )
        elif args.compiler == 'python2.7':
            return (python27_config,)
        elif args.compiler == 'python3.4':
            return (python34_config,)
        elif args.compiler == 'python3.5':
            return (python35_config,)
        elif args.compiler == 'python3.6':
            return (python36_config,)
        elif args.compiler == 'python3.7':
            return (python37_config,)
        elif args.compiler == 'pypy':
            return (pypy27_config,)
        elif args.compiler == 'pypy3':
            return (pypy32_config,)
        elif args.compiler == 'python_alpine':
            return (python27_config,)
        elif args.compiler == 'all_the_cpythons':
            return (
                python27_config,
                python34_config,
                python35_config,
                python36_config,
                python37_config,
            )
        else:
            raise Exception('Compiler %s not supported.' % args.compiler)

    def __str__(self):
        return 'python'
class RubyLanguage(object):
    """Language abstraction for the Ruby unit and end-to-end test suites."""

    def configure(self, config, args):
        """Record config; only the default compiler is supported."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Two JobSpecs: the unit tests and the end-to-end tests."""
        return [
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby.sh'],
                timeout_seconds=10 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
                timeout_seconds=20 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
        ]

    def pre_build_steps(self):
        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_ruby.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'ruby'
class CSharpLanguage(object):
    """Language abstraction for building/running the C# test assemblies."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        """Record config; 'coreclr' selects .NET Core, otherwise mono/net45."""
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(self.args.compiler, ['default', 'coreclr'])
            _check_arch(self.args.arch, ['default'])
            self._cmake_arch_option = 'x64'
        else:
            _check_compiler(self.args.compiler, ['default', 'coreclr'])
            self._docker_distro = 'jessie'

    def test_specs(self):
        """One JobSpec per test (or per assembly under gcov on Windows)."""
        with open('src/csharp/tests.json') as f:
            tests_by_assembly = json.load(f)

        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
        nunit_args = ['--labels=All', '--noresult', '--workers=1']
        assembly_subdir = 'bin/%s' % msbuild_config
        assembly_extension = '.exe'

        if self.args.compiler == 'coreclr':
            assembly_subdir += '/netcoreapp1.0'
            runtime_cmd = ['dotnet', 'exec']
            assembly_extension = '.dll'
        else:
            assembly_subdir += '/net45'
            if self.platform == 'windows':
                runtime_cmd = []
            elif self.platform == 'mac':
                # mono before version 5.2 on MacOS defaults to 32bit runtime
                runtime_cmd = ['mono', '--arch=64']
            else:
                runtime_cmd = ['mono']

        specs = []
        for assembly in six.iterkeys(tests_by_assembly):
            assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
                                                       assembly_subdir,
                                                       assembly,
                                                       assembly_extension)
            if self.config.build_config != 'gcov' or self.platform != 'windows':
                # normally, run each test as a separate process
                for test in tests_by_assembly[assembly]:
                    cmdline = runtime_cmd + [assembly_file,
                                             '--test=%s' % test] + nunit_args
                    specs.append(
                        self.config.job_spec(
                            cmdline,
                            shortname='csharp.%s' % test,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
            else:
                # For C# test coverage, run all tests from the same assembly at once
                # using OpenCover.Console (only works on Windows).
                cmdline = [
                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
                    '-targetargs:%s' % ' '.join(nunit_args),
                    '-filter:+[Grpc.Core]*', '-register:user',
                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
                ]

                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
                # to prevent problems with registering the profiler.
                run_exclusive = 1000000
                specs.append(
                    self.config.job_spec(
                        cmdline,
                        shortname='csharp.coverage.%s' % assembly,
                        cpu_cost=run_exclusive,
                        environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs

    def pre_build_steps(self):
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
                self._cmake_arch_option
            ]]
        else:
            return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]

    def make_targets(self):
        # only the native extension is built via make
        return ['grpc_csharp_ext']

    def make_options(self):
        return []

    def build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/build_csharp.sh']]

    def post_tests_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]

    def makefile_name(self):
        if self.platform == 'windows':
            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
        else:
            # no need to set x86 specific flags as run_tests.py
            # currently forbids x86 C# builds on both Linux and MacOS.
            return 'cmake/build/Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/csharp_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return 'csharp'
class ObjCLanguage(object):
    """Language abstraction for the Objective-C tests and example builds."""

    def configure(self, config, args):
        """Record config; only the default compiler is supported."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """JobSpecs for the ObjC test suites plus several example builds.

        Each example build exercises a different sample project via
        build_one_example.sh, parameterized by SCHEME/EXAMPLE_PATH env vars.
        """
        return [
            self.config.job_spec(
                ['src/objective-c/tests/run_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['src/objective-c/tests/run_plugin_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-plugin-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-helloworld',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'HelloWorld',
                    'EXAMPLE_PATH': 'examples/objective-c/helloworld'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-routeguide',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'RouteGuideClient',
                    'EXAMPLE_PATH': 'examples/objective-c/route_guide'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-authsample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'AuthSample',
                    'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-sample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-sample-frameworks',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                    'FRAMEWORKS': 'YES'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-switftsample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'SwiftSample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
                }),
            self.config.job_spec(
                ['test/core/iomgr/ios/CFStreamTests/run_tests.sh'],
                timeout_seconds=10 * 60,
                shortname='cfstream-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # interop_server is needed by the ObjC interop tests
        return ['interop_server']

    def make_options(self):
        return []

    def build_steps(self):
        return [
            ['src/objective-c/tests/build_tests.sh'],
            ['test/core/iomgr/ios/CFStreamTests/build_tests.sh'],
        ]

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        # ObjC tests run directly on macOS, never under docker.
        return None

    def __str__(self):
        return 'objc'
class Sanity(object):
    """Pseudo-language that runs the repository's sanity-check scripts."""

    def configure(self, config, args):
        """Record config; only the default compiler is valid."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """One JobSpec per entry listed in sanity_tests.yaml."""
        import yaml
        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
            environ = {'TEST': 'true'}
            if _is_use_docker_child():
                # already inside docker; skip the nested-docker formatters
                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
                environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
            specs = []
            for cmd in yaml.load(f):
                specs.append(
                    self.config.job_spec(
                        cmd['script'].split(),
                        timeout_seconds=30 * 60,
                        environ=environ,
                        cpu_cost=cmd.get('cpu_cost', 1)))
            return specs

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['run_dep_checks']

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/sanity'

    def __str__(self):
        return 'sanity'
# different configurations we can run under
# Loaded from the generated JSON database; maps config name -> Config object.
with open('tools/run_tests/generated/configs.json') as f:
    _CONFIGS = dict(
        (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))

# Maps --language choice -> language abstraction object.
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'grpc-node': RemoteNodeLanguage(),
    'php': PhpLanguage(),
    'php7': Php7Language(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc': ObjCLanguage(),
    'sanity': Sanity()
}

# Maps build config name -> msbuild/cmake configuration used on Windows.
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
}
def _windows_arch_option(arch):
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
    """Fail (exit) if the requested architecture is unusable on this platform.

    On Windows the check is delegated to the msbuild option mapping; on
    Linux the requested arch must match the interpreter's runtime bitness
    (we are expected to be inside a matching docker image); elsewhere only
    'default' is accepted.
    """
    if platform_string() == 'windows':
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            return
        elif runtime_arch == '64bit' and arch == 'x64':
            return
        elif runtime_arch == '32bit' and arch == 'x86':
            return
        else:
            print('Architecture %s does not match current runtime architecture.'
                  % arch)
            sys.exit(1)
    else:
        # BUGFIX: this branch previously inspected the global `args.arch`
        # instead of the `arch` parameter, silently ignoring the caller's
        # argument.
        if arch != 'default':
            print('Architecture %s not supported on current platform.' % arch)
            sys.exit(1)
def _docker_arch_suffix(arch):
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
    """argparse `type=` callback for the -n/--runs_per_test flag.

    Accepts a positive integer, or the literal 'inf' (returned as 0,
    meaning "run forever"). Raises ArgumentTypeError otherwise.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        # non-positive counts are rejected via the same ValueError path
        # as non-numeric input
        if n <= 0: raise ValueError
        return n
    except ValueError:
        # narrowed from a bare `except:` so unrelated errors
        # (e.g. KeyboardInterrupt) are not swallowed
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
def percent_type(arg_str):
    """argparse `type=` callback parsing a percentage in [0, 100]."""
    pct = float(arg_str)
    if pct < 0 or pct > 100:
        raise argparse.ArgumentTypeError(
            "'%f' is not a valid percentage in the [0, 100] range" % pct)
    return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Backport of math.isclose: True when a and b are within tolerance."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
# parse command line
# All knobs of the test runner: config/language selection, parallelism,
# docker usage, reporting, and polling-engine overrides.
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument(
    '-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
argp.add_argument(
    '-n',
    '--runs_per_test',
    default=1,
    type=runs_per_test_type,
    help='A positive integer or "inf". If "inf", all tests will run in an '
    'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument(
    '-p',
    '--sample_percent',
    default=100.0,
    type=percent_type,
    help='Run a random sample with that percentage of tests')
argp.add_argument(
    '-f', '--forever', default=False, action='store_const', const=True)
argp.add_argument(
    '-t', '--travis', default=False, action='store_const', const=True)
argp.add_argument(
    '--newline_on_success', default=False, action='store_const', const=True)
argp.add_argument(
    '-l',
    '--language',
    choices=sorted(_LANGUAGES.keys()),
    nargs='+',
    required=True)
argp.add_argument(
    '-S', '--stop_on_failure', default=False, action='store_const', const=True)
argp.add_argument(
    '--use_docker',
    default=False,
    action='store_const',
    const=True,
    help='Run all the tests under docker. That provides ' +
    'additional isolation and prevents the need to install ' +
    'language specific prerequisites. Only available on Linux.')
argp.add_argument(
    '--allow_flakes',
    default=False,
    action='store_const',
    const=True,
    help=
    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
argp.add_argument(
    '--arch',
    choices=['default', 'x86', 'x64'],
    default='default',
    help=
    'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
    '--compiler',
    choices=[
        'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc7.2',
        'gcc_musl', 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'clang7.0',
        'python2.7', 'python3.4', 'python3.5', 'python3.6', 'python3.7', 'pypy',
        'pypy3', 'python_alpine', 'all_the_cpythons', 'electron1.3',
        'electron1.6', 'coreclr', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
    ],
    default='default',
    help=
    'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument(
    '--iomgr_platform',
    choices=['native', 'uv', 'gevent'],
    default='native',
    help='Selects iomgr platform to build on')
argp.add_argument(
    '--build_only',
    default=False,
    action='store_const',
    const=True,
    help='Perform all the build steps but don\'t run any tests.')
argp.add_argument(
    '--measure_cpu_costs',
    default=False,
    action='store_const',
    const=True,
    help='Measure the cpu costs of tests')
argp.add_argument(
    '--update_submodules',
    default=[],
    nargs='*',
    help=
    'Update some submodules before building. If any are updated, also run generate_projects. '
    +
    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument(
    '-x',
    '--xml_report',
    default=None,
    type=str,
    help='Generates a JUnit-compatible XML report')
argp.add_argument(
    '--report_suite_name',
    default='tests',
    type=str,
    help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
    '--quiet_success',
    default=False,
    action='store_const',
    const=True,
    help=
    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
    + 'Useful when running many iterations of each test (argument -n).')
argp.add_argument(
    '--force_default_poller',
    default=False,
    action='store_const',
    const=True,
    help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
    '--force_use_pollers',
    default=None,
    type=str,
    help='Only use the specified comma-delimited list of polling engines. '
    'Example: --force_use_pollers epoll1,poll '
    ' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument(
    '--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
argp.add_argument(
    '--bq_result_table',
    default='',
    type=str,
    nargs='?',
    help='Upload test results to a specified BQ table.')
argp.add_argument(
    '--auto_set_flakes',
    default=False,
    const=True,
    action='store_const',
    help=
    'Allow repeated runs for tests that have been failing recently (based on BQ historical data).'
)
args = argp.parse_args()
# Optionally learn flaky tests and per-test cpu costs from BigQuery history.
flaky_tests = set()
shortname_to_cpu = {}
if args.auto_set_flakes:
    try:
        for test in get_bqtest_data():
            if test.flaky: flaky_tests.add(test.name)
            if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
    # NOTE(review): deliberately best-effort — any failure talking to BQ is
    # logged and ignored (bare except also swallows SystemExit/KeyboardInterrupt).
    except:
        print(
            "Unexpected error getting flaky tests: %s" % traceback.format_exc())

# Polling-engine overrides from the command line.
if args.force_default_poller:
    _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')

jobset.measure_cpu_costs = args.measure_cpu_costs
# Update requested submodules (SUBMODULE[:BRANCH]) and regenerate projects
# if any updated submodule ships a gen_build_yaml.py.
need_to_regenerate_projects = False
for spec in args.update_submodules:
    spec = spec.split(':', 1)
    if len(spec) == 1:
        submodule = spec[0]
        branch = 'master'
    elif len(spec) == 2:
        submodule = spec[0]
        branch = spec[1]
    cwd = 'third_party/%s' % submodule

    def git(cmd, cwd=cwd):
        # cwd bound as a default so each loop iteration keeps its own value
        print('in %s: git %s' % (cwd, cmd))
        run_shell_command('git %s' % cmd, cwd=cwd)

    git('fetch')
    git('checkout %s' % branch)
    git('pull origin %s' % branch)
    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
        need_to_regenerate_projects = True
if need_to_regenerate_projects:
    if jobset.platform_string() == 'linux':
        run_shell_command('tools/buildgen/generate_projects.sh')
    else:
        print(
            'WARNING: may need to regenerate projects, but since we are not on')
        print(
            ' Linux this step is being skipped. Compilation MAY fail.')
# Resolve the selected run configuration and configure each chosen language.
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

languages = set(_LANGUAGES[l] for l in args.language)
for l in languages:
    l.configure(run_config, args)

# Languages with custom make options (e.g. PHP) must build alone, because
# their options would leak into the shared make invocation.
language_make_options = []
if any(language.make_options() for language in languages):
    if not 'gcov' in args.config and len(languages) != 1:
        print(
            'languages with custom make options cannot be built simultaneously with other languages'
        )
        sys.exit(1)
    else:
        # dedupe options across languages
        language_make_options = list(
            set([
                make_option
                for lang in languages
                for make_option in lang.make_options()
            ]))
# When --use_docker is given, re-exec this script inside the language's
# docker image (dropping the --use_docker flag) and exit with its status.
if args.use_docker:
    if not args.travis:
        print('Seen --use_docker flag, will run tests under docker.')
        print('')
        print(
            'IMPORTANT: The changes you are testing need to be locally committed'
        )
        print(
            'because only the committed changes in the current branch will be')
        print('copied to the docker environment.')
        time.sleep(5)

    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
    if len(dockerfile_dirs) > 1:
        # multiple images only work for gcov, which has a multi-language image
        if 'gcov' in args.config:
            dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
            print(
                'Using multilang_jessie_x64 docker image for code coverage for '
                'all languages.')
        else:
            print(
                'Languages to be tested require running under different docker '
                'images.')
            sys.exit(1)
    else:
        dockerfile_dir = next(iter(dockerfile_dirs))

    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
        child_argv[1:])

    env = os.environ.copy()
    env['RUN_TESTS_COMMAND'] = run_tests_cmd
    env['DOCKERFILE_DIR'] = dockerfile_dir
    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
    if args.xml_report:
        env['XML_REPORT'] = args.xml_report
    if not args.travis:
        env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.

    subprocess.check_call(
        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
        shell=True,
        env=env)
    sys.exit(0)

_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
    """Build the list of JobSpecs that compile *targets* for config *cfg*.

    On Windows the build goes through cmake/MSBuild (one job per target);
    elsewhere a single make invocation is used, either against a
    cmake-generated Makefile or the top-level one.
    """
    if platform_string() == 'windows':
        return [
            jobset.JobSpec(
                [
                    'cmake', '--build', '.', '--target',
                    '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
                ],
                cwd=os.path.dirname(makefile),
                timeout_seconds=None) for target in targets
        ]
    else:
        if targets and makefile.startswith('cmake/build/'):
            # cmake-generated Makefile: build all targets in one make run.
            return [
                jobset.JobSpec(
                    [os.getenv('MAKE', 'make'), '-j',
                     '%d' % args.jobs] + targets,
                    cwd='cmake/build',
                    timeout_seconds=None)
            ]
        if targets:
            # Top-level Makefile build with per-language make options.
            return [
                jobset.JobSpec(
                    [
                        os.getenv('MAKE', 'make'), '-f', makefile, '-j',
                        '%d' % args.jobs,
                        'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
                        args.slowdown,
                        'CONFIG=%s' % cfg, 'Q='
                    ] + language_make_options +
                    ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
                    timeout_seconds=None)
            ]
        else:
            return []
# Collect the union of make targets per makefile across all languages.
make_targets = {}
for l in languages:
    makefile = l.makefile_name()
    make_targets[makefile] = make_targets.get(makefile, set()).union(
        set(l.make_targets()))
def build_step_environ(cfg):
    """Return the environment dict used when running build steps for *cfg*.

    Always contains CONFIG; adds MSBUILD_CONFIG when the configuration has
    an MSBuild equivalent (i.e. on Windows-capable configs).
    """
    env = {'CONFIG': cfg}
    msbuild = _MSBUILD_CONFIG.get(cfg)
    if msbuild:
        env['MSBUILD_CONFIG'] = msbuild
    return env
# Pre-build steps (flaky ones retry twice), then the make/cmake build
# commands, then per-language extra build steps.
build_steps = list(
    set(
        jobset.JobSpec(
            cmdline, environ=build_step_environ(build_config), flake_retries=2)
        for l in languages
        for cmdline in l.pre_build_steps()))
if make_targets:
    make_commands = itertools.chain.from_iterable(
        make_jobspec(build_config, list(targets), makefile)
        for (makefile, targets) in make_targets.items())
    build_steps.extend(set(make_commands))
build_steps.extend(
    set(
        jobset.JobSpec(
            cmdline,
            environ=build_step_environ(build_config),
            timeout_seconds=None)
        for l in languages
        for cmdline in l.build_steps()))

# Steps to run after the whole test run (e.g. coverage report generation).
post_tests_steps = list(
    set(
        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
        for l in languages
        for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
    """Ask a previously started legacy port server to shut itself down.

    Best-effort: if nothing answers on *legacy_server_port* within the
    timeout this is a no-op.

    Args:
        legacy_server_port: TCP port the legacy port server listens on.
    """
    try:
        version = int(
            urllib.request.urlopen(
                'http://localhost:%d/version_number' % legacy_server_port,
                timeout=10).read())
    except (OSError, ValueError):
        # No server running (URLError/timeout is an OSError subclass) or a
        # non-numeric response body.  The previous bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit, which must propagate.
        pass
    else:
        urllib.request.urlopen(
            'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
    """Enum-like sentinels naming which phase of _build_and_run failed."""

    BUILD = object()
    TEST = object()
    POST_TEST = object()
def _has_epollexclusive():
    """Return True iff the check_epollexclusive helper binary runs cleanly.

    The helper is only built for configurations that produce it; where it
    is missing or fails to execute this returns False.
    """
    binary = 'bins/%s/check_epollexclusive' % args.config
    if not os.path.exists(binary):
        return False
    try:
        subprocess.check_call(binary)
        return True
    except subprocess.CalledProcessError:
        # Binary ran and reported EPOLLEXCLUSIVE is unavailable.
        # (Unused `as e` bindings removed; the exception value was never read.)
        return False
    except OSError:
        # For languages other than C and Windows the binary won't exist
        return False
def _build_and_run(check_cancelled,
                   newline_on_success,
                   xml_report=None,
                   build_only=False):
    """Do one full build-and-test pass.

    Args:
        check_cancelled: zero-arg callable polled by jobset; a truthy return
            cancels the run.
        newline_on_success: forwarded to jobset for output formatting.
        xml_report: optional path for a JUnit XML report.
        build_only: when True, stop after the build phase.

    Returns:
        A list of BuildAndRunError values for the phases that failed
        (empty on full success).
    """
    # Build sequentially (maxjobs=1) and bail out early on build failure.
    num_failures, resultset = jobset.run(
        build_steps,
        maxjobs=1,
        stop_on_failure=True,
        newline_on_success=newline_on_success,
        travis=args.travis)
    if num_failures:
        return [BuildAndRunError.BUILD]

    if build_only:
        if xml_report:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)
        return []

    # Drop the epollex polling strategy when the platform can't support it.
    if not args.travis and not _has_epollexclusive() and platform_string(
    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string(
    )]:
        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
        _POLLING_STRATEGIES[platform_string()].remove('epollex')

    # Antagonist processes generate background load during the test run.
    antagonists = [
        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
        for _ in range(0, args.antagonists)
    ]
    start_port_server.start_port_server()
    resultset = None
    num_test_failures = 0
    try:
        infinite_runs = runs_per_test == 0
        # One "run" is every test spec matching the include/exclude regexes.
        one_run = set(
            spec for language in languages for spec in language.test_specs()
            if (re.search(args.regex, spec.shortname) and
                (args.regex_exclude == '' or
                 not re.search(args.regex_exclude, spec.shortname))))
        if args.travis and args.max_time <= 0:
            # Sort by cost for reproducible ordering on CI.
            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
        else:
            massaged_one_run = list(
                one_run)
        num_jobs = len(massaged_one_run)
        # -p sampling: randomly keep only sample_percent of the tests.
        sample_size = int(num_jobs * args.sample_percent / 100.0)
        massaged_one_run = random.sample(massaged_one_run, sample_size)
        if not isclose(args.sample_percent, 100.0):
            assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
            print("Running %d tests out of %d (~%d%%)" %
                  (sample_size, num_jobs, args.sample_percent))
        if infinite_runs:
            assert len(massaged_one_run
                       ) > 0, 'Must have at least one test for a -n inf run'
        runs_sequence = (itertools.repeat(massaged_one_run)
                         if infinite_runs else itertools.repeat(
                             massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)

        if args.quiet_success:
            jobset.message(
                'START',
                'Running tests quietly, only failing tests will be reported',
                do_newline=True)
        num_test_failures, resultset = jobset.run(
            all_runs,
            check_cancelled,
            newline_on_success=newline_on_success,
            travis=args.travis,
            maxjobs=args.jobs,
            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
            stop_on_failure=args.stop_on_failure,
            quiet_success=args.quiet_success,
            max_time=args.max_time)
        if resultset:
            # Summarize hard failures vs. flakes per test shortname.
            for k, v in sorted(resultset.items()):
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures > 0:
                    if num_failures == num_runs:  # what about infinite_runs???
                        jobset.message('FAILED', k, do_newline=True)
                    else:
                        jobset.message(
                            'FLAKE',
                            '%s [%d/%d runs flaked]' % (k, num_failures,
                                                        num_runs),
                            do_newline=True)
    finally:
        # Always tear down antagonists and flush reports, even on cancel.
        for antagonist in antagonists:
            antagonist.kill()
        if args.bq_result_table and resultset:
            upload_extra_fields = {
                'compiler': args.compiler,
                'config': args.config,
                'iomgr_platform': args.iomgr_platform,
                'language': args.language[
                    0],  # args.language is a list but will always have one element when uploading to BQ is enabled.
                'platform': platform_string()
            }
            upload_results_to_bq(resultset, args.bq_result_table,
                                 upload_extra_fields)
        if xml_report and resultset:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)

    number_failures, _ = jobset.run(
        post_tests_steps,
        maxjobs=1,
        stop_on_failure=False,
        newline_on_success=newline_on_success,
        travis=args.travis)

    out = []
    if number_failures:
        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)

    return out
if forever:
    # Watch-mode: rebuild and rerun whenever watched directories change.
    success = True
    while True:
        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
        initial_time = dw.most_recent_change()
        have_files_changed = lambda: dw.most_recent_change() != initial_time
        previous_success = success
        # BUG FIX: the old code did `_build_and_run(...) == 0`, comparing
        # the returned *list* of errors with the int 0 (always False), and
        # never updated `success`, so the fail->pass transition message
        # below could never fire.
        errors = _build_and_run(
            check_cancelled=have_files_changed,
            newline_on_success=False,
            build_only=args.build_only)
        success = not errors
        if not previous_success and success:
            jobset.message(
                'SUCCESS',
                'All tests are now passing properly',
                do_newline=True)
        jobset.message('IDLE', 'No change detected')
        while not have_files_changed():
            time.sleep(1)
else:
    errors = _build_and_run(
        check_cancelled=lambda: False,
        newline_on_success=args.newline_on_success,
        xml_report=args.xml_report,
        build_only=args.build_only)
    if not errors:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
    else:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    # Exit code is a bitmask over the failed phases.
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
    sys.exit(exit_code)
| true | true |
f71be3b444baa4607d29128c718bb04ae6b2c311 | 32,709 | py | Python | ryu/lib/ofctl_v1_4.py | vinaykothiyal/ryu | 32551989c649311854215df29860ccb272c105c0 | [
"Apache-2.0"
] | null | null | null | ryu/lib/ofctl_v1_4.py | vinaykothiyal/ryu | 32551989c649311854215df29860ccb272c105c0 | [
"Apache-2.0"
] | null | null | null | ryu/lib/ofctl_v1_4.py | vinaykothiyal/ryu | 32551989c649311854215df29860ccb272c105c0 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_4
from ryu.ofproto import ofproto_v1_4_parser
from ryu.lib import ofctl_utils
LOG = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 1.0
UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_4)
str_to_int = ofctl_utils.str_to_int
def to_action(dp, dic):
    """Convert an action description dict into an OFPAction instance.

    Returns None (via ofctl_utils) when the action type is unknown.
    """
    act_type = dic.get('type')
    return ofctl_utils.to_action(
        dic, dp.ofproto, dp.ofproto_parser, act_type, UTIL)
def _get_actions(dp, dics):
    """Convert a list of action dicts; unknown types are logged and skipped."""
    converted = []
    for dic in dics:
        act = to_action(dp, dic)
        if act is None:
            LOG.error('Unknown action type: %s', dic)
        else:
            converted.append(act)
    return converted
def to_instructions(dp, insts):
    """Convert a list of instruction description dicts into
    OFPInstruction* objects for datapath *dp*.

    Unknown instruction types are logged and skipped.
    """
    instructions = []
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    for i in insts:
        inst_type = i.get('type')
        if inst_type in ['APPLY_ACTIONS', 'WRITE_ACTIONS']:
            dics = i.get('actions', [])
            actions = _get_actions(dp, dics)
            # Only emit the instruction when at least one action converted.
            if actions:
                if inst_type == 'APPLY_ACTIONS':
                    instructions.append(
                        parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
                                                     actions))
                else:
                    instructions.append(
                        parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS,
                                                     actions))
        elif inst_type == 'CLEAR_ACTIONS':
            instructions.append(
                parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, []))
        elif inst_type == 'GOTO_TABLE':
            table_id = str_to_int(i.get('table_id'))
            instructions.append(parser.OFPInstructionGotoTable(table_id))
        elif inst_type == 'WRITE_METADATA':
            metadata = str_to_int(i.get('metadata'))
            # Mask defaults to all-ones: write the full 64-bit metadata.
            metadata_mask = (str_to_int(i['metadata_mask'])
                             if 'metadata_mask' in i
                             else parser.UINT64_MAX)
            instructions.append(
                parser.OFPInstructionWriteMetadata(
                    metadata, metadata_mask))
        elif inst_type == 'METER':
            meter_id = str_to_int(i.get('meter_id'))
            instructions.append(parser.OFPInstructionMeter(meter_id))
        else:
            LOG.error('Unknown instruction type: %s', inst_type)

    return instructions
def action_to_str(act):
    """Render an OFPAction as a JSON-friendly dict.

    The numeric action type is replaced by its symbolic name ('UNKNOWN'
    when no name exists); set-field actions get their OXM TLV flattened
    into 'field'/'mask'/'value' keys.
    """
    rendered = act.to_jsondict()[act.__class__.__name__]
    user_type = UTIL.ofp_action_type_to_user(rendered['type'])
    rendered['type'] = ('UNKNOWN' if user_type == rendered['type']
                        else user_type)
    if 'field' in rendered:
        oxm = rendered.pop('field')['OXMTlv']
        rendered['field'] = oxm['field']
        rendered['mask'] = oxm['mask']
        rendered['value'] = oxm['value']
    return rendered
def instructions_to_str(instructions):
    """Render a list of OFPInstruction* objects as JSON-friendly dicts."""
    rendered = []
    for inst in instructions:
        jsondict = inst.to_jsondict()[inst.__class__.__name__]
        user_type = UTIL.ofp_instruction_type_to_user(jsondict['type'])
        jsondict['type'] = (user_type if user_type != jsondict['type']
                            else 'UNKNOWN')
        # apply/write/clear-action instructions carry a nested action list
        if isinstance(inst, ofproto_v1_4_parser.OFPInstructionActions):
            jsondict['actions'] = [action_to_str(a) for a in inst.actions]
        rendered.append(jsondict)
    return rendered
def to_match(dp, attrs):
    """Convert a match description dict into an OFPMatch for datapath *dp*.

    Legacy ofctl field names (dl_dst, nw_src, ...) are translated to their
    OXM equivalents and each value is normalized by a per-field converter.
    Unknown fields are logged and ignored.
    """
    # Per-field value converters (user string/int forms -> OXM values).
    convert = {'in_port': UTIL.ofp_port_from_user,
               'in_phy_port': str_to_int,
               'metadata': ofctl_utils.to_match_masked_int,
               'eth_dst': ofctl_utils.to_match_eth,
               'eth_src': ofctl_utils.to_match_eth,
               'eth_type': str_to_int,
               'vlan_vid': to_match_vid,
               'vlan_pcp': str_to_int,
               'ip_dscp': str_to_int,
               'ip_ecn': str_to_int,
               'ip_proto': str_to_int,
               'ipv4_src': ofctl_utils.to_match_ip,
               'ipv4_dst': ofctl_utils.to_match_ip,
               'tcp_src': str_to_int,
               'tcp_dst': str_to_int,
               'udp_src': str_to_int,
               'udp_dst': str_to_int,
               'sctp_src': str_to_int,
               'sctp_dst': str_to_int,
               'icmpv4_type': str_to_int,
               'icmpv4_code': str_to_int,
               'arp_op': str_to_int,
               'arp_spa': ofctl_utils.to_match_ip,
               'arp_tpa': ofctl_utils.to_match_ip,
               'arp_sha': ofctl_utils.to_match_eth,
               'arp_tha': ofctl_utils.to_match_eth,
               'ipv6_src': ofctl_utils.to_match_ip,
               'ipv6_dst': ofctl_utils.to_match_ip,
               'ipv6_flabel': str_to_int,
               'icmpv6_type': str_to_int,
               'icmpv6_code': str_to_int,
               'ipv6_nd_target': ofctl_utils.to_match_ip,
               'ipv6_nd_sll': ofctl_utils.to_match_eth,
               'ipv6_nd_tll': ofctl_utils.to_match_eth,
               'mpls_label': str_to_int,
               'mpls_tc': str_to_int,
               'mpls_bos': str_to_int,
               'pbb_isid': ofctl_utils.to_match_masked_int,
               'tunnel_id': ofctl_utils.to_match_masked_int,
               'ipv6_exthdr': ofctl_utils.to_match_masked_int,
               'pbb_uca': str_to_int}

    # Old (pre-OXM) field name -> OXM field name aliases.
    keys = {'dl_dst': 'eth_dst',
            'dl_src': 'eth_src',
            'dl_type': 'eth_type',
            'dl_vlan': 'vlan_vid',
            'nw_src': 'ipv4_src',
            'nw_dst': 'ipv4_dst',
            'nw_proto': 'ip_proto'}

    if attrs.get('eth_type') == ether.ETH_TYPE_ARP:
        # For ARP matches, ipv4_src/ipv4_dst are historically accepted in
        # place of arp_spa/arp_tpa; translate unless given explicitly.
        if 'ipv4_src' in attrs and 'arp_spa' not in attrs:
            attrs['arp_spa'] = attrs['ipv4_src']
            del attrs['ipv4_src']
        if 'ipv4_dst' in attrs and 'arp_tpa' not in attrs:
            attrs['arp_tpa'] = attrs['ipv4_dst']
            del attrs['ipv4_dst']

    kwargs = {}
    for key, value in attrs.items():
        if key in keys:
            # For old field name
            key = keys[key]
        if key in convert:
            value = convert[key](value)
            kwargs[key] = value
        else:
            LOG.error('Unknown match field: %s', key)

    return dp.ofproto_parser.OFPMatch(**kwargs)
def to_match_vid(value):
    """Convert a user-supplied VLAN id into OXM vlan_vid form."""
    present_bit = ofproto_v1_4.OFPVID_PRESENT
    return ofctl_utils.to_match_vid(value, present_bit)
def match_to_str(ofmatch):
    """Render an OFPMatch as a {field_name: value} dict.

    Masked fields render as "value/mask"; vlan_vid and in_port get their
    dedicated user-friendly representations.
    """
    rendered = {}
    oxm_fields = ofmatch.to_jsondict()['OFPMatch']['oxm_fields']
    for tlv in oxm_fields:
        oxm = tlv['OXMTlv']
        key, mask, value = oxm['field'], oxm['mask'], oxm['value']
        if key == 'vlan_vid':
            value = match_vid_to_str(value, mask)
        elif key == 'in_port':
            value = UTIL.ofp_port_to_user(value)
        elif mask is not None:
            value = '%s/%s' % (value, mask)
        rendered.setdefault(key, value)
    return rendered
def match_vid_to_str(value, mask):
    """Render a vlan_vid value/mask pair in user-friendly form."""
    present_bit = ofproto_v1_4.OFPVID_PRESENT
    return ofctl_utils.match_vid_to_str(value, mask, present_bit)
def wrap_dpid_dict(dp, value, to_user=True):
    """Wrap *value* in a one-entry dict keyed by the datapath id.

    The key is a string for user-facing (JSON) output, the raw int
    otherwise.
    """
    key = str(dp.id) if to_user else dp.id
    return {key: value}
def get_desc_stats(dp, waiters, to_user=True):
    """Query the switch description (OFPDescStats) of datapath *dp*."""
    req = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
    replies = []
    ofctl_utils.send_stats_request(dp, req, waiters, replies, LOG)

    desc = {}
    for reply in replies:
        body = reply.body
        desc = body.to_jsondict()[body.__class__.__name__]
    return wrap_dpid_dict(dp, desc, to_user)
def get_queue_stats(dp, waiters, port_no=None, queue_id=None, to_user=True):
    """Query per-queue statistics, optionally filtered by port/queue id."""
    ofp_port = (dp.ofproto.OFPP_ANY if port_no is None
                else UTIL.ofp_port_from_user(port_no))
    ofp_queue = (dp.ofproto.OFPQ_ALL if queue_id is None
                 else UTIL.ofp_queue_from_user(queue_id))

    req = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, ofp_port, ofp_queue)
    replies = []
    ofctl_utils.send_stats_request(dp, req, waiters, replies, LOG)

    desc = []
    for reply in replies:
        for stat in reply.body:
            entry = stat.to_jsondict()[stat.__class__.__name__]
            props = []
            for prop in stat.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_queue_stats_prop_type_to_user(prop.type)
                    p['type'] = t if t != p['type'] else 'UNKNOWN'
                props.append(p)
            entry['properties'] = props
            desc.append(entry)
    return wrap_dpid_dict(dp, desc, to_user)
def get_queue_desc(dp, waiters, port_no=None, queue_id=None, to_user=True):
    """Query queue descriptions, optionally filtered by port/queue id."""
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    if queue_id is None:
        queue_id = dp.ofproto.OFPQ_ALL
    else:
        queue_id = UTIL.ofp_queue_from_user(queue_id)

    stats = dp.ofproto_parser.OFPQueueDescStatsRequest(
        dp, 0, port_no, queue_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    configs = []
    for msg in msgs:
        for queue in msg.body:
            q = queue.to_jsondict()[queue.__class__.__name__]
            prop_list = []
            for prop in queue.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    # Replace the numeric property type with its symbolic name.
                    t = UTIL.ofp_queue_desc_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                prop_list.append(p)
            q['properties'] = prop_list
            configs.append(q)

    return wrap_dpid_dict(dp, configs, to_user)
def get_flow_stats(dp, waiters, flow=None, to_user=True):
    """Query flow statistics matching the optional *flow* filter dict."""
    flow = flow if flow else {}
    table_id = UTIL.ofp_table_from_user(
        flow.get('table_id', dp.ofproto.OFPTT_ALL))
    flags = str_to_int(flow.get('flags', 0))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))
    # Note: OpenFlow does not allow to filter flow entries by priority,
    # but for efficiency, ofctl provides the way to do it.
    priority = str_to_int(flow.get('priority', -1))

    stats = dp.ofproto_parser.OFPFlowStatsRequest(
        dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
        match)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    flows = []
    for msg in msgs:
        for stats in msg.body:
            # Chained comparison: skip when a priority filter (>= 0) was
            # given and this entry's priority differs.
            if 0 <= priority != stats.priority:
                continue
            s = stats.to_jsondict()[stats.__class__.__name__]
            s['instructions'] = instructions_to_str(stats.instructions)
            s['match'] = match_to_str(stats.match)
            flows.append(s)

    return wrap_dpid_dict(dp, flows, to_user)
def get_aggregate_flow_stats(dp, waiters, flow=None, to_user=True):
    """Query aggregate flow statistics matching the optional filter dict."""
    flow = flow or {}
    table_id = UTIL.ofp_table_from_user(
        flow.get('table_id', dp.ofproto.OFPTT_ALL))
    flags = str_to_int(flow.get('flags', 0))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))

    req = dp.ofproto_parser.OFPAggregateStatsRequest(
        dp, flags, table_id, out_port, out_group, cookie, cookie_mask, match)
    replies = []
    ofctl_utils.send_stats_request(dp, req, waiters, replies, LOG)

    results = []
    for reply in replies:
        body = reply.body
        results.append(body.to_jsondict()[body.__class__.__name__])
    return wrap_dpid_dict(dp, results, to_user)
def get_table_stats(dp, waiters, to_user=True):
    """Query per-table statistics for every flow table."""
    req = dp.ofproto_parser.OFPTableStatsRequest(dp, 0)
    replies = []
    ofctl_utils.send_stats_request(dp, req, waiters, replies, LOG)

    tables = []
    for reply in replies:
        for stat in reply.body:
            entry = stat.to_jsondict()[stat.__class__.__name__]
            if to_user:
                entry['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
            tables.append(entry)
    return wrap_dpid_dict(dp, tables, to_user)
def get_table_features(dp, waiters, to_user=True):
    """Query table features and decode each property by its category."""
    stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, [])
    msgs = []
    ofproto = dp.ofproto
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    # Property type groups determine which payload field to serialize.
    p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS,
                           ofproto.OFPTFPT_INSTRUCTIONS_MISS]

    p_type_next_tables = [ofproto.OFPTFPT_NEXT_TABLES,
                          ofproto.OFPTFPT_NEXT_TABLES_MISS,
                          ofproto.OFPTFPT_TABLE_SYNC_FROM]

    p_type_actions = [ofproto.OFPTFPT_WRITE_ACTIONS,
                      ofproto.OFPTFPT_WRITE_ACTIONS_MISS,
                      ofproto.OFPTFPT_APPLY_ACTIONS,
                      ofproto.OFPTFPT_APPLY_ACTIONS_MISS]

    p_type_oxms = [ofproto.OFPTFPT_MATCH,
                   ofproto.OFPTFPT_WILDCARDS,
                   ofproto.OFPTFPT_WRITE_SETFIELD,
                   ofproto.OFPTFPT_WRITE_SETFIELD_MISS,
                   ofproto.OFPTFPT_APPLY_SETFIELD,
                   ofproto.OFPTFPT_APPLY_SETFIELD_MISS]

    p_type_experimenter = [ofproto.OFPTFPT_EXPERIMENTER,
                           ofproto.OFPTFPT_EXPERIMENTER_MISS]

    tables = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            s = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = {}
                t = UTIL.ofp_table_feature_prop_type_to_user(prop.type)
                p['type'] = t if t != prop.type else 'UNKNOWN'
                if prop.type in p_type_instructions:
                    instruction_ids = []
                    for i in prop.instruction_ids:
                        inst = {'len': i.len,
                                'type': i.type}
                        instruction_ids.append(inst)
                    p['instruction_ids'] = instruction_ids
                elif prop.type in p_type_next_tables:
                    table_ids = []
                    for i in prop.table_ids:
                        table_ids.append(i)
                    p['table_ids'] = table_ids
                elif prop.type in p_type_actions:
                    action_ids = []
                    for i in prop.action_ids:
                        act = i.to_jsondict()[i.__class__.__name__]
                        action_ids.append(act)
                    p['action_ids'] = action_ids
                elif prop.type in p_type_oxms:
                    oxm_ids = []
                    for i in prop.oxm_ids:
                        oxm = i.to_jsondict()[i.__class__.__name__]
                        oxm_ids.append(oxm)
                    p['oxm_ids'] = oxm_ids
                elif prop.type in p_type_experimenter:
                    # Experimenter properties carry no decoded payload here.
                    pass
                properties.append(p)
            s['name'] = stat.name.decode('utf-8')
            s['properties'] = properties
            if to_user:
                s['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
            tables.append(s)

    return wrap_dpid_dict(dp, tables, to_user)
def get_port_stats(dp, waiters, port_no=None, to_user=True):
    """Query per-port statistics (all ports when *port_no* is None)."""
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)

    stats = dp.ofproto_parser.OFPPortStatsRequest(dp, 0, port_no)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    ports = []
    for msg in msgs:
        for stats in msg.body:
            s = stats.to_jsondict()[stats.__class__.__name__]
            properties = []
            for prop in stats.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                # Consistency fix: every other stats getter in this module
                # only converts the numeric property type into its symbolic
                # name when to_user is set; this one did it unconditionally.
                if to_user:
                    t = UTIL.ofp_port_stats_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                properties.append(p)
            s['properties'] = properties
            if to_user:
                s['port_no'] = UTIL.ofp_port_to_user(stats.port_no)
            ports.append(s)

    return wrap_dpid_dict(dp, ports, to_user)
def get_meter_stats(dp, waiters, meter_id=None, to_user=True):
    """Query meter statistics (all meters when *meter_id* is None)."""
    ofp_meter = (dp.ofproto.OFPM_ALL if meter_id is None
                 else UTIL.ofp_meter_from_user(meter_id))

    req = dp.ofproto_parser.OFPMeterStatsRequest(dp, 0, ofp_meter)
    replies = []
    ofctl_utils.send_stats_request(dp, req, waiters, replies, LOG)

    meters = []
    for reply in replies:
        for stat in reply.body:
            entry = stat.to_jsondict()[stat.__class__.__name__]
            entry['band_stats'] = [
                band.to_jsondict()[band.__class__.__name__]
                for band in stat.band_stats
            ]
            if to_user:
                entry['meter_id'] = UTIL.ofp_meter_to_user(stat.meter_id)
            meters.append(entry)
    return wrap_dpid_dict(dp, meters, to_user)
def get_meter_features(dp, waiters, to_user=True):
    """Query meter features, decoding band-type and capability bitmasks."""
    ofp = dp.ofproto
    type_convert = {ofp.OFPMBT_DROP: 'DROP',
                    ofp.OFPMBT_DSCP_REMARK: 'DSCP_REMARK'}

    capa_convert = {ofp.OFPMF_KBPS: 'KBPS',
                    ofp.OFPMF_PKTPS: 'PKTPS',
                    ofp.OFPMF_BURST: 'BURST',
                    ofp.OFPMF_STATS: 'STATS'}

    stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    features = []
    for msg in msgs:
        for feature in msg.body:
            band_types = []
            # band_types is a bitmask indexed by band-type value.
            for k, v in type_convert.items():
                if (1 << k) & feature.band_types:
                    if to_user:
                        band_types.append(v)

                    else:
                        band_types.append(k)

            capabilities = []
            # capabilities is a plain OR of OFPMF_* flags.
            for k, v in sorted(capa_convert.items()):
                if k & feature.capabilities:
                    if to_user:
                        capabilities.append(v)

                    else:
                        capabilities.append(k)

            f = {'max_meter': feature.max_meter,
                 'band_types': band_types,
                 'capabilities': capabilities,
                 'max_bands': feature.max_bands,
                 'max_color': feature.max_color}
            features.append(f)

    return wrap_dpid_dict(dp, features, to_user)
def get_meter_config(dp, waiters, meter_id=None, to_user=True):
    """Query meter configurations (all meters when *meter_id* is None)."""
    flags = {dp.ofproto.OFPMF_KBPS: 'KBPS',
             dp.ofproto.OFPMF_PKTPS: 'PKTPS',
             dp.ofproto.OFPMF_BURST: 'BURST',
             dp.ofproto.OFPMF_STATS: 'STATS'}

    if meter_id is None:
        meter_id = dp.ofproto.OFPM_ALL
    else:
        meter_id = UTIL.ofp_meter_from_user(meter_id)

    stats = dp.ofproto_parser.OFPMeterConfigStatsRequest(
        dp, 0, meter_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    configs = []
    for msg in msgs:
        for config in msg.body:
            c = config.to_jsondict()[config.__class__.__name__]
            bands = []
            for band in config.bands:
                b = band.to_jsondict()[band.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_meter_band_type_to_user(band.type)
                    b['type'] = t if t != band.type else 'UNKNOWN'
                bands.append(b)
            c_flags = []
            # Decode the OFPMF_* flag bitmask into names (or raw bits).
            for k, v in sorted(flags.items()):
                if k & config.flags:
                    if to_user:
                        c_flags.append(v)

                    else:
                        c_flags.append(k)

            c['flags'] = c_flags
            c['bands'] = bands
            if to_user:
                c['meter_id'] = UTIL.ofp_meter_to_user(config.meter_id)

            configs.append(c)

    return wrap_dpid_dict(dp, configs, to_user)
def get_group_stats(dp, waiters, group_id=None, to_user=True):
    """Query group statistics (all groups when *group_id* is None)."""
    ofp_group = (dp.ofproto.OFPG_ALL if group_id is None
                 else UTIL.ofp_group_from_user(group_id))

    req = dp.ofproto_parser.OFPGroupStatsRequest(dp, 0, ofp_group)
    replies = []
    ofctl_utils.send_stats_request(dp, req, waiters, replies, LOG)

    groups = []
    for reply in replies:
        for stat in reply.body:
            entry = stat.to_jsondict()[stat.__class__.__name__]
            entry['bucket_stats'] = [
                bucket.to_jsondict()[bucket.__class__.__name__]
                for bucket in stat.bucket_stats
            ]
            if to_user:
                entry['group_id'] = UTIL.ofp_group_to_user(stat.group_id)
            groups.append(entry)
    return wrap_dpid_dict(dp, groups, to_user)
def get_group_features(dp, waiters, to_user=True):
    """Query group features, decoding type/capability/action bitmasks."""
    ofp = dp.ofproto
    type_convert = {ofp.OFPGT_ALL: 'ALL',
                    ofp.OFPGT_SELECT: 'SELECT',
                    ofp.OFPGT_INDIRECT: 'INDIRECT',
                    ofp.OFPGT_FF: 'FF'}
    cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
                   ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
                   ofp.OFPGFC_CHAINING: 'CHAINING',
                   ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
    act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
                   ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
                   ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
                   ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
                   ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
                   ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
                   ofp.OFPAT_POP_VLAN: 'POP_VLAN',
                   ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
                   ofp.OFPAT_POP_MPLS: 'POP_MPLS',
                   ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
                   ofp.OFPAT_GROUP: 'GROUP',
                   ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
                   ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
                   ofp.OFPAT_SET_FIELD: 'SET_FIELD',
                   ofp.OFPAT_PUSH_PBB: 'PUSH_PBB',
                   ofp.OFPAT_POP_PBB: 'POP_PBB',
                   ofp.OFPAT_EXPERIMENTER: 'EXPERIMENTER'}

    stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    features = []
    for msg in msgs:
        feature = msg.body
        types = []
        # types is a bitmask indexed by group-type value.
        for k, v in type_convert.items():
            if (1 << k) & feature.types:
                if to_user:
                    types.append(v)

                else:
                    types.append(k)

        capabilities = []
        # capabilities is a plain OR of OFPGFC_* flags.
        for k, v in cap_convert.items():
            if k & feature.capabilities:
                if to_user:
                    capabilities.append(v)

                else:
                    capabilities.append(k)

        if to_user:
            max_groups = []
            for k, v in type_convert.items():
                max_groups.append({v: feature.max_groups[k]})

        else:
            max_groups = feature.max_groups

        actions = []
        # feature.actions holds, per group type, a bitmask of allowed
        # action types.
        for k1, v1 in type_convert.items():
            acts = []
            for k2, v2 in act_convert.items():
                if (1 << k2) & feature.actions[k1]:
                    if to_user:
                        acts.append(v2)

                    else:
                        acts.append(k2)

            if to_user:
                actions.append({v1: acts})

            else:
                actions.append({k1: acts})

        f = {'types': types,
             'capabilities': capabilities,
             'max_groups': max_groups,
             'actions': actions}
        features.append(f)

    return wrap_dpid_dict(dp, features, to_user)
def get_group_desc(dp, waiters, to_user=True):
    """Query group descriptions (type, id and buckets with actions)."""
    stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    descs = []
    for msg in msgs:
        for stats in msg.body:
            d = stats.to_jsondict()[stats.__class__.__name__]
            buckets = []
            for bucket in stats.buckets:
                b = bucket.to_jsondict()[bucket.__class__.__name__]
                actions = []
                for action in bucket.actions:
                    if to_user:
                        actions.append(action_to_str(action))

                    else:
                        # NOTE(review): the raw OFPAction object is appended
                        # here (not a dict) when to_user is False.
                        actions.append(action)
                b['actions'] = actions
                buckets.append(b)

            d['buckets'] = buckets
            if to_user:
                d['group_id'] = UTIL.ofp_group_to_user(stats.group_id)
                t = UTIL.ofp_group_type_to_user(stats.type)
                d['type'] = t if t != stats.type else 'UNKNOWN'

            descs.append(d)

    return wrap_dpid_dict(dp, descs, to_user)
def get_port_desc(dp, waiters, port_no=None, to_user=True):
    """Query port descriptions (all ports when *port_no* is None)."""
    ofp_port = (dp.ofproto.OFPP_ANY if port_no is None
                else UTIL.ofp_port_from_user(port_no))

    req = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0, ofp_port)
    replies = []
    ofctl_utils.send_stats_request(dp, req, waiters, replies, LOG)

    descs = []
    for reply in replies:
        for stat in reply.body:
            entry = stat.to_jsondict()[stat.__class__.__name__]
            props = []
            for prop in stat.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_port_desc_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                props.append(p)
            entry['name'] = stat.name.decode('utf-8')
            entry['properties'] = props
            if to_user:
                entry['port_no'] = UTIL.ofp_port_to_user(stat.port_no)
            descs.append(entry)
    return wrap_dpid_dict(dp, descs, to_user)
def mod_flow_entry(dp, flow, cmd):
    """Build an OFPFlowMod from a flow description dict and send it."""
    ofproto = dp.ofproto
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0))
    idle_timeout = str_to_int(flow.get('idle_timeout', 0))
    hard_timeout = str_to_int(flow.get('hard_timeout', 0))
    priority = str_to_int(flow.get('priority', 0))
    buffer_id = UTIL.ofp_buffer_from_user(
        flow.get('buffer_id', ofproto.OFP_NO_BUFFER))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', ofproto.OFPG_ANY))
    importance = str_to_int(flow.get('importance', 0))
    flags = str_to_int(flow.get('flags', 0))
    match = to_match(dp, flow.get('match', {}))
    inst = to_instructions(dp, flow.get('instructions', []))

    flow_mod = dp.ofproto_parser.OFPFlowMod(
        dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
        hard_timeout, priority, buffer_id, out_port, out_group,
        flags, importance, match, inst)
    ofctl_utils.send_msg(dp, flow_mod, LOG)
def mod_meter_entry(dp, meter, cmd):
    """Build an OFPMeterMod from a meter description dict and send it.

    Unknown flags and band types are logged and skipped.
    """
    flags = 0
    if 'flags' in meter:
        # 'flags' may be a single value or a list; OR them together.
        meter_flags = meter['flags']
        if not isinstance(meter_flags, list):
            meter_flags = [meter_flags]
        for flag in meter_flags:
            t = UTIL.ofp_meter_flags_from_user(flag)
            f = t if t != flag else None
            if f is None:
                LOG.error('Unknown meter flag: %s', flag)
                continue
            flags |= f

    meter_id = UTIL.ofp_meter_from_user(meter.get('meter_id', 0))

    bands = []
    for band in meter.get('bands', []):
        band_type = band.get('type')
        rate = str_to_int(band.get('rate', 0))
        burst_size = str_to_int(band.get('burst_size', 0))
        if band_type == 'DROP':
            bands.append(
                dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size))
        elif band_type == 'DSCP_REMARK':
            prec_level = str_to_int(band.get('prec_level', 0))
            bands.append(
                dp.ofproto_parser.OFPMeterBandDscpRemark(
                    rate, burst_size, prec_level))
        elif band_type == 'EXPERIMENTER':
            experimenter = str_to_int(band.get('experimenter', 0))
            bands.append(
                dp.ofproto_parser.OFPMeterBandExperimenter(
                    rate, burst_size, experimenter))
        else:
            LOG.error('Unknown band type: %s', band_type)

    meter_mod = dp.ofproto_parser.OFPMeterMod(
        dp, cmd, flags, meter_id, bands)
    ofctl_utils.send_msg(dp, meter_mod, LOG)
def mod_group_entry(dp, group, cmd):
    """Build an OFPGroupMod from a group description dict and send it.

    NOTE: following existing behavior, an unknown group type is logged and
    the message is still sent with group_type=None.
    """
    group_type = str(group.get('type', 'ALL'))
    t = UTIL.ofp_group_type_from_user(group_type)
    group_type = t if t != group_type else None
    if group_type is None:
        LOG.error('Unknown group type: %s', group.get('type'))

    group_id = UTIL.ofp_group_from_user(group.get('group_id', 0))

    buckets = []
    for bucket in group.get('buckets', []):
        weight = str_to_int(bucket.get('weight', 0))
        watch_port = str_to_int(
            bucket.get('watch_port', dp.ofproto.OFPP_ANY))
        watch_group = str_to_int(
            bucket.get('watch_group', dp.ofproto.OFPG_ANY))
        # Consistency fix: reuse _get_actions so that unknown bucket action
        # types are logged instead of being dropped silently.
        actions = _get_actions(dp, bucket.get('actions', []))
        buckets.append(dp.ofproto_parser.OFPBucket(
            weight, watch_port, watch_group, actions))

    group_mod = dp.ofproto_parser.OFPGroupMod(
        dp, cmd, group_type, group_id, buckets)
    ofctl_utils.send_msg(dp, group_mod, LOG)
def mod_port_behavior(dp, port_config):
    """Build an OFPPortMod from a port configuration dict and send it.

    Each entry in port_config['properties'] is converted to the matching
    OFPPortModProp* class; unknown property types are logged and skipped.
    """
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0))
    hw_addr = str(port_config.get('hw_addr'))
    config = str_to_int(port_config.get('config', 0))
    mask = str_to_int(port_config.get('mask', 0))
    properties = port_config.get('properties')

    prop = []
    for p in properties:
        type_ = UTIL.ofp_port_mod_prop_type_from_user(p['type'])
        # length=None lets the parser compute the property length itself.
        length = None
        if type_ == ofp.OFPPDPT_ETHERNET:
            advertise = UTIL.ofp_port_features_from_user(p['advertise'])
            prop.append(
                parser.OFPPortModPropEthernet(type_, length, advertise))
        elif type_ == ofp.OFPPDPT_OPTICAL:
            prop.append(
                parser.OFPPortModPropOptical(
                    type_, length, p['configure'], p['freq_lmda'],
                    p['fl_offset'], p['grid_span'], p['tx_pwr']))
        elif type_ == ofp.OFPPDPT_EXPERIMENTER:
            prop.append(
                parser.OFPPortModPropExperimenter(
                    type_, length, p['experimenter'], p['exp_type'],
                    p['data']))
        else:
            LOG.error('Unknown port desc prop type: %s', type_)

    port_mod = dp.ofproto_parser.OFPPortMod(
        dp, port_no, hw_addr, config, mask, prop)
    ofctl_utils.send_msg(dp, port_mod, LOG)
def set_role(dp, role):
    """Send an OFPRoleRequest setting the controller role on *dp*."""
    requested = role.get('role', dp.ofproto.OFPCR_ROLE_EQUAL)
    msg = dp.ofproto_parser.OFPRoleRequest(
        dp, UTIL.ofp_role_from_user(requested), 0)
    ofctl_utils.send_msg(dp, msg, LOG)
# NOTE(jkoelker) Alias common functions
send_experimenter = ofctl_utils.send_experimenter
| 33.965732 | 78 | 0.575897 |
import logging
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_4
from ryu.ofproto import ofproto_v1_4_parser
from ryu.lib import ofctl_utils
LOG = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 1.0
UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_4)
str_to_int = ofctl_utils.str_to_int
def to_action(dp, dic):
    """Build an OFPAction* object from the action dict *dic*.

    Returns None when the action type is not recognized (the shared
    ofctl helper decides that).
    """
    return ofctl_utils.to_action(
        dic, dp.ofproto, dp.ofproto_parser, dic.get('type'), UTIL)
def _get_actions(dp, dics):
    """Convert a list of action dicts, logging any unknown types."""
    converted = []
    for entry in dics:
        act = to_action(dp, entry)
        if act is None:
            LOG.error('Unknown action type: %s', entry)
        else:
            converted.append(act)
    return converted
def to_instructions(dp, insts):
    """Convert a list of instruction dicts into OFPInstruction* objects.

    Unknown instruction types are logged and skipped.
    """
    instructions = []
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    for i in insts:
        inst_type = i.get('type')
        if inst_type in ['APPLY_ACTIONS', 'WRITE_ACTIONS']:
            dics = i.get('actions', [])
            actions = _get_actions(dp, dics)
            # If no action converted successfully the whole instruction
            # is dropped instead of sending an empty action list.
            if actions:
                if inst_type == 'APPLY_ACTIONS':
                    instructions.append(
                        parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
                                                     actions))
                else:
                    instructions.append(
                        parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS,
                                                     actions))
        elif inst_type == 'CLEAR_ACTIONS':
            instructions.append(
                parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, []))
        elif inst_type == 'GOTO_TABLE':
            table_id = str_to_int(i.get('table_id'))
            instructions.append(parser.OFPInstructionGotoTable(table_id))
        elif inst_type == 'WRITE_METADATA':
            metadata = str_to_int(i.get('metadata'))
            # A missing mask means "write all bits".
            metadata_mask = (str_to_int(i['metadata_mask'])
                             if 'metadata_mask' in i
                             else parser.UINT64_MAX)
            instructions.append(
                parser.OFPInstructionWriteMetadata(
                    metadata, metadata_mask))
        elif inst_type == 'METER':
            meter_id = str_to_int(i.get('meter_id'))
            instructions.append(parser.OFPInstructionMeter(meter_id))
        else:
            LOG.error('Unknown instruction type: %s', inst_type)
    return instructions
def action_to_str(act):
    """Render an OFPAction* object as a user-facing dict.

    SET_FIELD actions have their nested OXMTlv flattened into
    'field'/'mask'/'value' keys.
    """
    rendered = act.to_jsondict()[act.__class__.__name__]
    user_type = UTIL.ofp_action_type_to_user(rendered['type'])
    rendered['type'] = 'UNKNOWN' if user_type == rendered['type'] else user_type
    if 'field' in rendered:
        oxm = rendered.pop('field')['OXMTlv']
        rendered['field'] = oxm['field']
        rendered['mask'] = oxm['mask']
        rendered['value'] = oxm['value']
    return rendered
def instructions_to_str(instructions):
    """Render a list of OFPInstruction* objects as user-facing dicts."""
    rendered = []
    for inst in instructions:
        entry = inst.to_jsondict()[inst.__class__.__name__]
        user_type = UTIL.ofp_instruction_type_to_user(entry['type'])
        entry['type'] = user_type if user_type != entry['type'] else 'UNKNOWN'
        # Action-carrying instructions additionally expand their actions.
        if isinstance(inst, ofproto_v1_4_parser.OFPInstructionActions):
            entry['actions'] = [action_to_str(a) for a in inst.actions]
        rendered.append(entry)
    return rendered
def to_match(dp, attrs):
    """Convert a match dict from the REST API into an OFPMatch.

    Fix: the ARP field translation below used to ``del`` keys from the
    caller-supplied dict; this now works on a shallow copy so the
    caller's match dict is never mutated.
    """
    convert = {'in_port': UTIL.ofp_port_from_user,
               'in_phy_port': str_to_int,
               'metadata': ofctl_utils.to_match_masked_int,
               'eth_dst': ofctl_utils.to_match_eth,
               'eth_src': ofctl_utils.to_match_eth,
               'eth_type': str_to_int,
               'vlan_vid': to_match_vid,
               'vlan_pcp': str_to_int,
               'ip_dscp': str_to_int,
               'ip_ecn': str_to_int,
               'ip_proto': str_to_int,
               'ipv4_src': ofctl_utils.to_match_ip,
               'ipv4_dst': ofctl_utils.to_match_ip,
               'tcp_src': str_to_int,
               'tcp_dst': str_to_int,
               'udp_src': str_to_int,
               'udp_dst': str_to_int,
               'sctp_src': str_to_int,
               'sctp_dst': str_to_int,
               'icmpv4_type': str_to_int,
               'icmpv4_code': str_to_int,
               'arp_op': str_to_int,
               'arp_spa': ofctl_utils.to_match_ip,
               'arp_tpa': ofctl_utils.to_match_ip,
               'arp_sha': ofctl_utils.to_match_eth,
               'arp_tha': ofctl_utils.to_match_eth,
               'ipv6_src': ofctl_utils.to_match_ip,
               'ipv6_dst': ofctl_utils.to_match_ip,
               'ipv6_flabel': str_to_int,
               'icmpv6_type': str_to_int,
               'icmpv6_code': str_to_int,
               'ipv6_nd_target': ofctl_utils.to_match_ip,
               'ipv6_nd_sll': ofctl_utils.to_match_eth,
               'ipv6_nd_tll': ofctl_utils.to_match_eth,
               'mpls_label': str_to_int,
               'mpls_tc': str_to_int,
               'mpls_bos': str_to_int,
               'pbb_isid': ofctl_utils.to_match_masked_int,
               'tunnel_id': ofctl_utils.to_match_masked_int,
               'ipv6_exthdr': ofctl_utils.to_match_masked_int,
               'pbb_uca': str_to_int}
    # Legacy (OF1.0-style) field names accepted for convenience.
    keys = {'dl_dst': 'eth_dst',
            'dl_src': 'eth_src',
            'dl_type': 'eth_type',
            'dl_vlan': 'vlan_vid',
            'nw_src': 'ipv4_src',
            'nw_dst': 'ipv4_dst',
            'nw_proto': 'ip_proto'}
    # Work on a copy so the ARP key rewriting does not leak back to the
    # caller's dict.
    attrs = dict(attrs)
    if attrs.get('eth_type') == ether.ETH_TYPE_ARP:
        # For ARP matches the IPv4 fields actually mean the ARP
        # sender/target protocol addresses.
        if 'ipv4_src' in attrs and 'arp_spa' not in attrs:
            attrs['arp_spa'] = attrs['ipv4_src']
            del attrs['ipv4_src']
        if 'ipv4_dst' in attrs and 'arp_tpa' not in attrs:
            attrs['arp_tpa'] = attrs['ipv4_dst']
            del attrs['ipv4_dst']
    kwargs = {}
    for key, value in attrs.items():
        if key in keys:
            key = keys[key]
        if key in convert:
            value = convert[key](value)
            kwargs[key] = value
        else:
            LOG.error('Unknown match field: %s', key)
    return dp.ofproto_parser.OFPMatch(**kwargs)
def to_match_vid(value):
    """Convert a user VLAN id (possibly 'vid/mask') for OF1.4 matching."""
    return ofctl_utils.to_match_vid(value, ofproto_v1_4.OFPVID_PRESENT)
def match_to_str(ofmatch):
    """Render an OFPMatch as a flat field-name -> value dict."""
    match = {}
    oxm_fields = ofmatch.to_jsondict()['OFPMatch']['oxm_fields']
    for entry in oxm_fields:
        tlv = entry['OXMTlv']
        key = tlv['field']
        mask = tlv['mask']
        value = tlv['value']
        if key == 'vlan_vid':
            value = match_vid_to_str(value, mask)
        elif key == 'in_port':
            value = UTIL.ofp_port_to_user(value)
        elif mask is not None:
            # Masked fields are rendered as "value/mask".
            value = '{}/{}'.format(value, mask)
        match.setdefault(key, value)
    return match
def match_vid_to_str(value, mask):
    """Render a matched VLAN id (and optional mask) back to user form."""
    return ofctl_utils.match_vid_to_str(
        value, mask, ofproto_v1_4.OFPVID_PRESENT)
def wrap_dpid_dict(dp, value, to_user=True):
    """Wrap *value* in a one-entry dict keyed by the datapath id.

    The key is the stringified id when *to_user* is true, the raw id
    otherwise.
    """
    key = str(dp.id) if to_user else dp.id
    return {key: value}
def get_desc_stats(dp, waiters, to_user=True):
    """Query the switch description (OFPDescStats) from *dp*."""
    request = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, request, waiters, msgs, LOG)
    body = {}
    for msg in msgs:
        # The desc reply body is a single stats object, not a list.
        body = msg.body.to_jsondict()[msg.body.__class__.__name__]
    return wrap_dpid_dict(dp, body, to_user)
def get_queue_stats(dp, waiters, port_no=None, queue_id=None, to_user=True):
    """Query OFPQueueStats; None selects all ports / all queues."""
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    if queue_id is None:
        queue_id = dp.ofproto.OFPQ_ALL
    else:
        queue_id = UTIL.ofp_queue_from_user(queue_id)
    stats = dp.ofproto_parser.OFPQueueStatsRequest(
        dp, 0, port_no, queue_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    desc = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            s = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_queue_stats_prop_type_to_user(prop.type)
                    p['type'] = t if t != p['type'] else 'UNKNOWN'
                properties.append(p)
            s['properties'] = properties
            desc.append(s)
    return wrap_dpid_dict(dp, desc, to_user)
def get_queue_desc(dp, waiters, port_no=None, queue_id=None, to_user=True):
    """Query OFPQueueDescStats; None selects all ports / all queues."""
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    if queue_id is None:
        queue_id = dp.ofproto.OFPQ_ALL
    else:
        queue_id = UTIL.ofp_queue_from_user(queue_id)
    stats = dp.ofproto_parser.OFPQueueDescStatsRequest(
        dp, 0, port_no, queue_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    configs = []
    for msg in msgs:
        for queue in msg.body:
            q = queue.to_jsondict()[queue.__class__.__name__]
            prop_list = []
            for prop in queue.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_queue_desc_prop_type_to_user(prop.type)
                    # NOTE(review): compares against prop.type here while
                    # get_queue_stats compares against p['type'] — confirm
                    # both dicts carry the raw numeric type.
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                prop_list.append(p)
            q['properties'] = prop_list
            configs.append(q)
    return wrap_dpid_dict(dp, configs, to_user)
def get_flow_stats(dp, waiters, flow=None, to_user=True):
    """Query flow stats matching the optional *flow* filter dict.

    A non-negative 'priority' in *flow* additionally filters the replies
    client-side; the default of -1 means no priority filtering.
    """
    flow = flow if flow else {}
    table_id = UTIL.ofp_table_from_user(
        flow.get('table_id', dp.ofproto.OFPTT_ALL))
    flags = str_to_int(flow.get('flags', 0))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))
    # Priority is not part of the wire-level request filter; filtered below.
    priority = str_to_int(flow.get('priority', -1))
    stats = dp.ofproto_parser.OFPFlowStatsRequest(
        dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
        match)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    flows = []
    for msg in msgs:
        for stats in msg.body:
            # Skip entries whose priority differs when one was requested.
            if 0 <= priority != stats.priority:
                continue
            s = stats.to_jsondict()[stats.__class__.__name__]
            s['instructions'] = instructions_to_str(stats.instructions)
            s['match'] = match_to_str(stats.match)
            flows.append(s)
    return wrap_dpid_dict(dp, flows, to_user)
def get_aggregate_flow_stats(dp, waiters, flow=None, to_user=True):
    """Query aggregate flow stats matching the optional *flow* filter."""
    flow = flow if flow else {}
    table_id = UTIL.ofp_table_from_user(
        flow.get('table_id', dp.ofproto.OFPTT_ALL))
    flags = str_to_int(flow.get('flags', 0))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))
    stats = dp.ofproto_parser.OFPAggregateStatsRequest(
        dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
        match)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    flows = []
    for msg in msgs:
        # The aggregate reply body is a single stats object.
        stats = msg.body
        s = stats.to_jsondict()[stats.__class__.__name__]
        flows.append(s)
    return wrap_dpid_dict(dp, flows, to_user)
def get_table_stats(dp, waiters, to_user=True):
    """Query per-table statistics (OFPTableStats) from *dp*."""
    request = dp.ofproto_parser.OFPTableStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, request, waiters, msgs, LOG)
    tables = []
    for msg in msgs:
        for stat in msg.body:
            entry = stat.to_jsondict()[stat.__class__.__name__]
            if to_user:
                entry['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
            tables.append(entry)
    return wrap_dpid_dict(dp, tables, to_user)
def get_table_features(dp, waiters, to_user=True):
    """Query OFPTableFeaturesStats and render each property per its type."""
    stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, [])
    msgs = []
    ofproto = dp.ofproto
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    # Property-type groups deciding which payload attribute to render.
    p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS,
                           ofproto.OFPTFPT_INSTRUCTIONS_MISS]
    p_type_next_tables = [ofproto.OFPTFPT_NEXT_TABLES,
                          ofproto.OFPTFPT_NEXT_TABLES_MISS,
                          ofproto.OFPTFPT_TABLE_SYNC_FROM]
    p_type_actions = [ofproto.OFPTFPT_WRITE_ACTIONS,
                      ofproto.OFPTFPT_WRITE_ACTIONS_MISS,
                      ofproto.OFPTFPT_APPLY_ACTIONS,
                      ofproto.OFPTFPT_APPLY_ACTIONS_MISS]
    p_type_oxms = [ofproto.OFPTFPT_MATCH,
                   ofproto.OFPTFPT_WILDCARDS,
                   ofproto.OFPTFPT_WRITE_SETFIELD,
                   ofproto.OFPTFPT_WRITE_SETFIELD_MISS,
                   ofproto.OFPTFPT_APPLY_SETFIELD,
                   ofproto.OFPTFPT_APPLY_SETFIELD_MISS]
    p_type_experimenter = [ofproto.OFPTFPT_EXPERIMENTER,
                           ofproto.OFPTFPT_EXPERIMENTER_MISS]
    tables = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            s = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = {}
                t = UTIL.ofp_table_feature_prop_type_to_user(prop.type)
                p['type'] = t if t != prop.type else 'UNKNOWN'
                if prop.type in p_type_instructions:
                    instruction_ids = []
                    for i in prop.instruction_ids:
                        inst = {'len': i.len,
                                'type': i.type}
                        instruction_ids.append(inst)
                    p['instruction_ids'] = instruction_ids
                elif prop.type in p_type_next_tables:
                    table_ids = []
                    for i in prop.table_ids:
                        table_ids.append(i)
                    p['table_ids'] = table_ids
                elif prop.type in p_type_actions:
                    action_ids = []
                    for i in prop.action_ids:
                        act = i.to_jsondict()[i.__class__.__name__]
                        action_ids.append(act)
                    p['action_ids'] = action_ids
                elif prop.type in p_type_oxms:
                    oxm_ids = []
                    for i in prop.oxm_ids:
                        oxm = i.to_jsondict()[i.__class__.__name__]
                        oxm_ids.append(oxm)
                    p['oxm_ids'] = oxm_ids
                elif prop.type in p_type_experimenter:
                    # Experimenter payloads are intentionally not rendered.
                    pass
                properties.append(p)
            s['name'] = stat.name.decode('utf-8')
            s['properties'] = properties
            if to_user:
                s['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
            tables.append(s)
    return wrap_dpid_dict(dp, tables, to_user)
def get_port_stats(dp, waiters, port_no=None, to_user=True):
    """Query OFPPortStats; None selects all ports."""
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    stats = dp.ofproto_parser.OFPPortStatsRequest(dp, 0, port_no)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    ports = []
    for msg in msgs:
        for stats in msg.body:
            s = stats.to_jsondict()[stats.__class__.__name__]
            properties = []
            for prop in stats.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                # NOTE(review): unlike the other getters, the property
                # type is converted even when to_user is False — confirm
                # this asymmetry is intended.
                t = UTIL.ofp_port_stats_prop_type_to_user(prop.type)
                p['type'] = t if t != prop.type else 'UNKNOWN'
                properties.append(p)
            s['properties'] = properties
            if to_user:
                s['port_no'] = UTIL.ofp_port_to_user(stats.port_no)
            ports.append(s)
    return wrap_dpid_dict(dp, ports, to_user)
def get_meter_stats(dp, waiters, meter_id=None, to_user=True):
    """Query OFPMeterStats; None selects all meters."""
    if meter_id is None:
        meter_id = dp.ofproto.OFPM_ALL
    else:
        meter_id = UTIL.ofp_meter_from_user(meter_id)
    stats = dp.ofproto_parser.OFPMeterStatsRequest(
        dp, 0, meter_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    meters = []
    for msg in msgs:
        for stats in msg.body:
            s = stats.to_jsondict()[stats.__class__.__name__]
            bands = []
            for band in stats.band_stats:
                b = band.to_jsondict()[band.__class__.__name__]
                bands.append(b)
            s['band_stats'] = bands
            if to_user:
                s['meter_id'] = UTIL.ofp_meter_to_user(stats.meter_id)
            meters.append(s)
    return wrap_dpid_dict(dp, meters, to_user)
def get_meter_features(dp, waiters, to_user=True):
    """Query OFPMeterFeaturesStats, decoding band-type and capability bits."""
    ofp = dp.ofproto
    type_convert = {ofp.OFPMBT_DROP: 'DROP',
                    ofp.OFPMBT_DSCP_REMARK: 'DSCP_REMARK'}
    capa_convert = {ofp.OFPMF_KBPS: 'KBPS',
                    ofp.OFPMF_PKTPS: 'PKTPS',
                    ofp.OFPMF_BURST: 'BURST',
                    ofp.OFPMF_STATS: 'STATS'}
    stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    features = []
    for msg in msgs:
        for feature in msg.body:
            band_types = []
            for k, v in type_convert.items():
                # band_types is a bitmap indexed by the band-type value.
                if (1 << k) & feature.band_types:
                    if to_user:
                        band_types.append(v)
                    else:
                        band_types.append(k)
            capabilities = []
            for k, v in sorted(capa_convert.items()):
                # capabilities is a plain OFPMF_* flag bitmask.
                if k & feature.capabilities:
                    if to_user:
                        capabilities.append(v)
                    else:
                        capabilities.append(k)
            f = {'max_meter': feature.max_meter,
                 'band_types': band_types,
                 'capabilities': capabilities,
                 'max_bands': feature.max_bands,
                 'max_color': feature.max_color}
            features.append(f)
    return wrap_dpid_dict(dp, features, to_user)
def get_meter_config(dp, waiters, meter_id=None, to_user=True):
    """Query OFPMeterConfigStats; None selects all meters."""
    flags = {dp.ofproto.OFPMF_KBPS: 'KBPS',
             dp.ofproto.OFPMF_PKTPS: 'PKTPS',
             dp.ofproto.OFPMF_BURST: 'BURST',
             dp.ofproto.OFPMF_STATS: 'STATS'}
    if meter_id is None:
        meter_id = dp.ofproto.OFPM_ALL
    else:
        meter_id = UTIL.ofp_meter_from_user(meter_id)
    stats = dp.ofproto_parser.OFPMeterConfigStatsRequest(
        dp, 0, meter_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    configs = []
    for msg in msgs:
        for config in msg.body:
            c = config.to_jsondict()[config.__class__.__name__]
            bands = []
            for band in config.bands:
                b = band.to_jsondict()[band.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_meter_band_type_to_user(band.type)
                    b['type'] = t if t != band.type else 'UNKNOWN'
                bands.append(b)
            c_flags = []
            for k, v in sorted(flags.items()):
                # Decode the OFPMF_* flag bitmask.
                if k & config.flags:
                    if to_user:
                        c_flags.append(v)
                    else:
                        c_flags.append(k)
            c['flags'] = c_flags
            c['bands'] = bands
            if to_user:
                c['meter_id'] = UTIL.ofp_meter_to_user(config.meter_id)
            configs.append(c)
    return wrap_dpid_dict(dp, configs, to_user)
def get_group_stats(dp, waiters, group_id=None, to_user=True):
    """Query OFPGroupStats; None selects all groups."""
    if group_id is None:
        group_id = dp.ofproto.OFPG_ALL
    else:
        group_id = UTIL.ofp_group_from_user(group_id)
    stats = dp.ofproto_parser.OFPGroupStatsRequest(
        dp, 0, group_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    groups = []
    for msg in msgs:
        for stats in msg.body:
            g = stats.to_jsondict()[stats.__class__.__name__]
            bucket_stats = []
            for bucket_stat in stats.bucket_stats:
                c = bucket_stat.to_jsondict()[bucket_stat.__class__.__name__]
                bucket_stats.append(c)
            g['bucket_stats'] = bucket_stats
            if to_user:
                g['group_id'] = UTIL.ofp_group_to_user(stats.group_id)
            groups.append(g)
    return wrap_dpid_dict(dp, groups, to_user)
def get_group_features(dp, waiters, to_user=True):
    """Query OFPGroupFeaturesStats, decoding type/capability/action bitmaps."""
    ofp = dp.ofproto
    type_convert = {ofp.OFPGT_ALL: 'ALL',
                    ofp.OFPGT_SELECT: 'SELECT',
                    ofp.OFPGT_INDIRECT: 'INDIRECT',
                    ofp.OFPGT_FF: 'FF'}
    cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
                   ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
                   ofp.OFPGFC_CHAINING: 'CHAINING',
                   ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
    act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
                   ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
                   ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
                   ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
                   ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
                   ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
                   ofp.OFPAT_POP_VLAN: 'POP_VLAN',
                   ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
                   ofp.OFPAT_POP_MPLS: 'POP_MPLS',
                   ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
                   ofp.OFPAT_GROUP: 'GROUP',
                   ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
                   ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
                   ofp.OFPAT_SET_FIELD: 'SET_FIELD',
                   ofp.OFPAT_PUSH_PBB: 'PUSH_PBB',
                   ofp.OFPAT_POP_PBB: 'POP_PBB',
                   ofp.OFPAT_EXPERIMENTER: 'EXPERIMENTER'}
    stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    features = []
    for msg in msgs:
        feature = msg.body
        types = []
        for k, v in type_convert.items():
            # feature.types is a bitmap indexed by group-type value.
            if (1 << k) & feature.types:
                if to_user:
                    types.append(v)
                else:
                    types.append(k)
        capabilities = []
        for k, v in cap_convert.items():
            # feature.capabilities is a plain OFPGFC_* flag bitmask.
            if k & feature.capabilities:
                if to_user:
                    capabilities.append(v)
                else:
                    capabilities.append(k)
        if to_user:
            max_groups = []
            for k, v in type_convert.items():
                max_groups.append({v: feature.max_groups[k]})
        else:
            max_groups = feature.max_groups
        actions = []
        for k1, v1 in type_convert.items():
            acts = []
            for k2, v2 in act_convert.items():
                # Per-group-type bitmap of supported action types.
                if (1 << k2) & feature.actions[k1]:
                    if to_user:
                        acts.append(v2)
                    else:
                        acts.append(k2)
            if to_user:
                actions.append({v1: acts})
            else:
                actions.append({k1: acts})
        f = {'types': types,
             'capabilities': capabilities,
             'max_groups': max_groups,
             'actions': actions}
        features.append(f)
    return wrap_dpid_dict(dp, features, to_user)
def get_group_desc(dp, waiters, to_user=True):
    """Query OFPGroupDescStats, rendering each group's buckets/actions."""
    stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    descs = []
    for msg in msgs:
        for stats in msg.body:
            d = stats.to_jsondict()[stats.__class__.__name__]
            buckets = []
            for bucket in stats.buckets:
                b = bucket.to_jsondict()[bucket.__class__.__name__]
                actions = []
                for action in bucket.actions:
                    if to_user:
                        actions.append(action_to_str(action))
                    else:
                        # Raw parser objects are returned as-is.
                        actions.append(action)
                b['actions'] = actions
                buckets.append(b)
            d['buckets'] = buckets
            if to_user:
                d['group_id'] = UTIL.ofp_group_to_user(stats.group_id)
                t = UTIL.ofp_group_type_to_user(stats.type)
                d['type'] = t if t != stats.type else 'UNKNOWN'
            descs.append(d)
    return wrap_dpid_dict(dp, descs, to_user)
def get_port_desc(dp, waiters, port_no=None, to_user=True):
    """Query OFPPortDescStats; None selects all ports."""
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    stats = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0, port_no)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    descs = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            d = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_port_desc_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                properties.append(p)
            d['name'] = stat.name.decode('utf-8')
            d['properties'] = properties
            if to_user:
                d['port_no'] = UTIL.ofp_port_to_user(stat.port_no)
            descs.append(d)
    return wrap_dpid_dict(dp, descs, to_user)
def mod_flow_entry(dp, flow, cmd):
    """Send an OFPFlowMod built from the *flow* dict to datapath *dp*.

    *cmd* is the OFPFC_* flow-mod command constant.
    """
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0))
    idle_timeout = str_to_int(flow.get('idle_timeout', 0))
    hard_timeout = str_to_int(flow.get('hard_timeout', 0))
    priority = str_to_int(flow.get('priority', 0))
    buffer_id = UTIL.ofp_buffer_from_user(
        flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    importance = str_to_int(flow.get('importance', 0))
    flags = str_to_int(flow.get('flags', 0))
    match = to_match(dp, flow.get('match', {}))
    inst = to_instructions(dp, flow.get('instructions', []))
    flow_mod = dp.ofproto_parser.OFPFlowMod(
        dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
        hard_timeout, priority, buffer_id, out_port, out_group,
        flags, importance, match, inst)
    ofctl_utils.send_msg(dp, flow_mod, LOG)
def mod_meter_entry(dp, meter, cmd):
    """Send an OFPMeterMod built from the *meter* dict to datapath *dp*.

    *cmd* is the OFPMC_* meter-mod command constant.  Unknown flag names
    and band types are logged and skipped.
    """
    flags = 0
    if 'flags' in meter:
        meter_flags = meter['flags']
        # A scalar flag is accepted as a convenience.
        if not isinstance(meter_flags, list):
            meter_flags = [meter_flags]
        for flag in meter_flags:
            t = UTIL.ofp_meter_flags_from_user(flag)
            # Unchanged value means the flag name was not recognized.
            f = t if t != flag else None
            if f is None:
                LOG.error('Unknown meter flag: %s', flag)
                continue
            flags |= f
    meter_id = UTIL.ofp_meter_from_user(meter.get('meter_id', 0))
    bands = []
    for band in meter.get('bands', []):
        band_type = band.get('type')
        rate = str_to_int(band.get('rate', 0))
        burst_size = str_to_int(band.get('burst_size', 0))
        if band_type == 'DROP':
            bands.append(
                dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size))
        elif band_type == 'DSCP_REMARK':
            prec_level = str_to_int(band.get('prec_level', 0))
            bands.append(
                dp.ofproto_parser.OFPMeterBandDscpRemark(
                    rate, burst_size, prec_level))
        elif band_type == 'EXPERIMENTER':
            experimenter = str_to_int(band.get('experimenter', 0))
            bands.append(
                dp.ofproto_parser.OFPMeterBandExperimenter(
                    rate, burst_size, experimenter))
        else:
            LOG.error('Unknown band type: %s', band_type)
    meter_mod = dp.ofproto_parser.OFPMeterMod(
        dp, cmd, flags, meter_id, bands)
    ofctl_utils.send_msg(dp, meter_mod, LOG)
def mod_group_entry(dp, group, cmd):
    """Send an OFPGroupMod built from the *group* dict to datapath *dp*.

    *cmd* is the OFPGC_* group-mod command constant.  If the group type
    cannot be converted, the error is only logged and the message is
    still sent with ``group_type=None``.
    """
    group_type = str(group.get('type', 'ALL'))
    t = UTIL.ofp_group_type_from_user(group_type)
    # If the converter returned the input unchanged, the type string was
    # not recognized; group_type then becomes None.
    group_type = t if t != group_type else None
    if group_type is None:
        LOG.error('Unknown group type: %s', group.get('type'))
    group_id = UTIL.ofp_group_from_user(group.get('group_id', 0))
    buckets = []
    for bucket in group.get('buckets', []):
        weight = str_to_int(bucket.get('weight', 0))
        watch_port = str_to_int(
            bucket.get('watch_port', dp.ofproto.OFPP_ANY))
        watch_group = str_to_int(
            bucket.get('watch_group', dp.ofproto.OFPG_ANY))
        actions = []
        for dic in bucket.get('actions', []):
            action = to_action(dp, dic)
            # Unrecognized actions are dropped silently here (no log).
            if action is not None:
                actions.append(action)
        buckets.append(dp.ofproto_parser.OFPBucket(
            weight, watch_port, watch_group, actions))
    group_mod = dp.ofproto_parser.OFPGroupMod(
        dp, cmd, group_type, group_id, buckets)
    ofctl_utils.send_msg(dp, group_mod, LOG)
def mod_port_behavior(dp, port_config):
    """Send an OFPPortMod built from the *port_config* dict.

    Fix: a request without a 'properties' key previously crashed with
    ``TypeError: 'NoneType' object is not iterable``; it now defaults to
    an empty property list.
    """
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0))
    hw_addr = str(port_config.get('hw_addr'))
    config = str_to_int(port_config.get('config', 0))
    mask = str_to_int(port_config.get('mask', 0))
    # Default to [] so a port-mod with no properties is still valid.
    properties = port_config.get('properties', [])
    prop = []
    for p in properties:
        type_ = UTIL.ofp_port_mod_prop_type_from_user(p['type'])
        length = None  # filled in by the parser during serialization
        if type_ == ofp.OFPPDPT_ETHERNET:
            advertise = UTIL.ofp_port_features_from_user(p['advertise'])
            prop.append(
                parser.OFPPortModPropEthernet(type_, length, advertise))
        elif type_ == ofp.OFPPDPT_OPTICAL:
            prop.append(
                parser.OFPPortModPropOptical(
                    type_, length, p['configure'], p['freq_lmda'],
                    p['fl_offset'], p['grid_span'], p['tx_pwr']))
        elif type_ == ofp.OFPPDPT_EXPERIMENTER:
            prop.append(
                parser.OFPPortModPropExperimenter(
                    type_, length, p['experimenter'], p['exp_type'],
                    p['data']))
        else:
            LOG.error('Unknown port desc prop type: %s', type_)
    port_mod = dp.ofproto_parser.OFPPortMod(
        dp, port_no, hw_addr, config, mask, prop)
    ofctl_utils.send_msg(dp, port_mod, LOG)
def set_role(dp, role):
    """Send an OFPRoleRequest setting the controller role on *dp*."""
    requested = role.get('role', dp.ofproto.OFPCR_ROLE_EQUAL)
    msg = dp.ofproto_parser.OFPRoleRequest(
        dp, UTIL.ofp_role_from_user(requested), 0)
    ofctl_utils.send_msg(dp, msg, LOG)
# Alias the shared experimenter-message helper at module level.
send_experimenter = ofctl_utils.send_experimenter
| true | true |
f71be3d981254befffd5928f1197ba90e0eb5617 | 15,646 | py | Python | venv/Lib/site-packages/pooch/downloaders.py | Terrathaw/ba21_loma_2_py | eebf5104dd054cef1ab61f0b257933ff679e75ec | [
"MIT"
] | 4 | 2021-03-29T19:15:29.000Z | 2021-06-08T05:34:00.000Z | venv/Lib/site-packages/pooch/downloaders.py | Terrathaw/ba21_loma_2_py | eebf5104dd054cef1ab61f0b257933ff679e75ec | [
"MIT"
] | 1 | 2021-06-08T06:03:51.000Z | 2021-06-08T06:03:51.000Z | venv/Lib/site-packages/pooch/downloaders.py | Terrathaw/ba21_loma_2_py | eebf5104dd054cef1ab61f0b257933ff679e75ec | [
"MIT"
] | 1 | 2021-01-31T18:58:54.000Z | 2021-01-31T18:58:54.000Z | # Copyright (c) 2018 The Pooch Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
The classes that actually handle the downloads.
"""
import sys
import ftplib
import requests
from .utils import parse_url
try:
from tqdm import tqdm
except ImportError:
tqdm = None
try:
import paramiko
except ImportError:
paramiko = None
def choose_downloader(url):
    """
    Choose the appropriate downloader for the given URL based on the protocol.

    Parameters
    ----------
    url : str
        A URL (including protocol).

    Returns
    -------
    downloader
        A downloader instance (:class:`pooch.HTTPDownloader`,
        :class:`pooch.FTPDownloader`, or :class:`pooch.SFTPDownloader`).

    Examples
    --------
    >>> downloader = choose_downloader("https://something.com")
    >>> print(downloader.__class__.__name__)
    HTTPDownloader

    """
    # Insertion order of this dict determines the order shown in the
    # error message below.
    known_downloaders = {
        "ftp": FTPDownloader,
        "https": HTTPDownloader,
        "http": HTTPDownloader,
        "sftp": SFTPDownloader,
    }
    protocol = parse_url(url)["protocol"]
    if protocol not in known_downloaders:
        raise ValueError(
            f"Unrecognized URL protocol '{protocol}' in '{url}'. "
            f"Must be one of {known_downloaders.keys()}."
        )
    return known_downloaders[protocol]()
class HTTPDownloader:  # pylint: disable=too-few-public-methods
    """
    Download manager for fetching files over HTTP/HTTPS.

    When called, downloads the given file URL into the specified local file.
    Uses the :mod:`requests` library to manage downloads.

    Use with :meth:`pooch.Pooch.fetch` or :func:`pooch.retrieve` to customize
    the download of files (for example, to use authentication or print a
    progress bar).

    Parameters
    ----------
    progressbar : bool
        If True, will print a progress bar of the download to standard error
        (stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to be
        installed.
    chunk_size : int
        Files are streamed *chunk_size* bytes at a time instead of loading
        everything into memory at one. Usually doesn't need to be changed.
    **kwargs
        All keyword arguments given when creating an instance of this class
        will be passed to :func:`requests.get`.

    Examples
    --------
    Download one of the data files from the Pooch repository:

    >>> import os
    >>> from pooch import version, check_version
    >>> url = "https://github.com/fatiando/pooch/raw/{}/data/tiny-data.txt"
    >>> url = url.format(check_version(version.full_version))
    >>> downloader = HTTPDownloader()
    >>> # Not using with Pooch.fetch so no need to pass an instance of Pooch
    >>> downloader(url=url, output_file="tiny-data.txt", pooch=None)
    >>> os.path.exists("tiny-data.txt")
    True
    >>> with open("tiny-data.txt") as f:
    ...     print(f.read().strip())
    # A tiny data file for test purposes only
    1  2  3  4  5  6
    >>> os.remove("tiny-data.txt")

    Authentication can be handled by passing a user name and password to
    :func:`requests.get`. All arguments provided when creating an instance of
    the class are forwarded to :func:`requests.get`. We'll use
    ``auth=(username, password)`` to use basic HTTPS authentication. The
    https://httpbin.org website allows us to make a fake a login request using
    whatever username and password we provide to it:

    >>> user = "doggo"
    >>> password = "goodboy"
    >>> # httpbin will ask for the user and password we provide in the URL
    >>> url = f"https://httpbin.org/basic-auth/{user}/{password}"
    >>> # Trying without the login credentials causes an error
    >>> downloader = HTTPDownloader()
    >>> try:
    ...     downloader(url=url, output_file="tiny-data.txt", pooch=None)
    ... except Exception:
    ...     print("There was an error!")
    There was an error!
    >>> # Pass in the credentials to HTTPDownloader
    >>> downloader = HTTPDownloader(auth=(user, password))
    >>> downloader(url=url, output_file="tiny-data.txt", pooch=None)
    >>> with open("tiny-data.txt") as f:
    ...     for line in f:
    ...         print(line.rstrip())
    {
      "authenticated": true,
      "user": "doggo"
    }
    >>> os.remove("tiny-data.txt")

    """

    def __init__(self, progressbar=False, chunk_size=1024, **kwargs):
        self.kwargs = kwargs
        self.progressbar = progressbar
        self.chunk_size = chunk_size
        # Fail early: the bar is only needed in __call__, but a missing
        # tqdm should surface at construction time.
        if self.progressbar and tqdm is None:
            raise ValueError("Missing package 'tqdm' required for progress bars.")

    def __call__(self, url, output_file, pooch):
        """
        Download the given URL over HTTP to the given output file.

        Uses :func:`requests.get`.

        Parameters
        ----------
        url : str
            The URL to the file you want to download.
        output_file : str or file-like object
            Path (and file name) to which the file will be downloaded.
        pooch : :class:`~pooch.Pooch`
            The instance of :class:`~pooch.Pooch` that is calling this method.

        """
        kwargs = self.kwargs.copy()
        # Stream the body so large files are not held in memory at once;
        # user-supplied kwargs may still override this.
        kwargs.setdefault("stream", True)
        ispath = not hasattr(output_file, "write")
        if ispath:
            output_file = open(output_file, "w+b")
        try:
            response = requests.get(url, **kwargs)
            response.raise_for_status()
            content = response.iter_content(chunk_size=self.chunk_size)
            if self.progressbar:
                total = int(response.headers.get("content-length", 0))
                # Need to use ascii characters on Windows because there isn't
                # always full unicode support
                # (see https://github.com/tqdm/tqdm/issues/454)
                use_ascii = bool(sys.platform == "win32")
                progress = tqdm(
                    total=total,
                    ncols=79,
                    ascii=use_ascii,
                    unit="B",
                    unit_scale=True,
                    leave=True,
                )
            for chunk in content:
                if chunk:
                    output_file.write(chunk)
                    output_file.flush()
                    if self.progressbar:
                        # Use the chunk size here because chunk may be much
                        # larger if the data are decompressed by requests after
                        # reading (happens with text files).
                        progress.update(self.chunk_size)
            # Make sure the progress bar gets filled even if the actual number
            # of chunks is smaller than expected. This happens when streaming
            # text files that are compressed by the server when sending (gzip).
            # Binary files don't experience this.
            if self.progressbar:
                progress.reset()
                progress.update(total)
                progress.close()
        finally:
            # Only close files we opened ourselves; caller-provided
            # file objects stay open.
            if ispath:
                output_file.close()
class FTPDownloader:  # pylint: disable=too-few-public-methods
    """
    Download manager for fetching files over FTP.

    When called, downloads the given file URL into the specified local file.
    Uses the :mod:`ftplib` module to manage downloads.

    Use with :meth:`pooch.Pooch.fetch` or :func:`pooch.retrieve` to customize
    the download of files (for example, to use authentication or print a
    progress bar).

    Parameters
    ----------
    port : int
        Port used for the FTP connection.
    username : str
        User name used to login to the server. Only needed if the server
        requires authentication (i.e., no anonymous FTP).
    password : str
        Password used to login to the server. Only needed if the server
        requires authentication (i.e., no anonymous FTP). Use the empty string
        to indicate no password is required.
    account : str
        Some servers also require an "account" name for authentication.
    timeout : int
        Timeout in seconds for ftp socket operations, use None to mean no
        timeout.
    progressbar : bool
        If True, will print a progress bar of the download to standard error
        (stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to be
        installed.
    chunk_size : int
        Files are streamed *chunk_size* bytes at a time instead of loading
        everything into memory at one. Usually doesn't need to be changed.

    """

    def __init__(
        self,
        port=21,
        username="anonymous",
        password="",
        account="",
        timeout=None,
        progressbar=False,
        chunk_size=1024,
    ):
        self.port = port
        self.username = username
        self.password = password
        self.account = account
        self.timeout = timeout
        self.progressbar = progressbar
        self.chunk_size = chunk_size
        # Fail early if the progress bar was requested without tqdm.
        if self.progressbar and tqdm is None:
            raise ValueError("Missing package 'tqdm' required for progress bars.")

    def __call__(self, url, output_file, pooch):
        """
        Download the given URL over FTP to the given output file.

        Parameters
        ----------
        url : str
            The URL to the file you want to download.
        output_file : str or file-like object
            Path (and file name) to which the file will be downloaded.
        pooch : :class:`~pooch.Pooch`
            The instance of :class:`~pooch.Pooch` that is calling this method.

        """
        parsed_url = parse_url(url)
        ftp = ftplib.FTP(timeout=self.timeout)
        ftp.connect(host=parsed_url["netloc"], port=self.port)
        ispath = not hasattr(output_file, "write")
        if ispath:
            output_file = open(output_file, "w+b")
        try:
            ftp.login(user=self.username, passwd=self.password, acct=self.account)
            command = f"RETR {parsed_url['path']}"
            if self.progressbar:
                # Make sure the file is set to binary mode, otherwise we can't
                # get the file size. See: https://stackoverflow.com/a/22093848
                ftp.voidcmd("TYPE I")
                size = int(ftp.size(parsed_url["path"]))
                use_ascii = bool(sys.platform == "win32")
                progress = tqdm(
                    total=size,
                    ncols=79,
                    ascii=use_ascii,
                    unit="B",
                    unit_scale=True,
                    leave=True,
                )
                with progress:

                    def callback(data):
                        "Update the progress bar and write to output"
                        progress.update(len(data))
                        output_file.write(data)

                    ftp.retrbinary(command, callback, blocksize=self.chunk_size)
            else:
                ftp.retrbinary(command, output_file.write, blocksize=self.chunk_size)
        finally:
            ftp.quit()
            # Only close files we opened ourselves.
            if ispath:
                output_file.close()
class SFTPDownloader:  # pylint: disable=too-few-public-methods
    """
    Download manager for fetching files over SFTP.

    When called, downloads the given file URL into the specified local file.
    Requires `paramiko <https://github.com/paramiko/paramiko>`__ to be
    installed.

    Use with :meth:`pooch.Pooch.fetch` or :func:`pooch.retrieve` to customize
    the download of files (for example, to use authentication or print a
    progress bar).

    Parameters
    ----------
    port : int
        Port used for the SFTP connection.
    username : str
        User name used to login to the server. Only needed if the server
        requires authentication (i.e., no anonymous SFTP).
    password : str
        Password used to login to the server. Only needed if the server
        requires authentication (i.e., no anonymous SFTP). Use the empty
        string to indicate no password is required.
    timeout : int
        Timeout in seconds for sftp socket operations, use None to mean no
        timeout.
    progressbar : bool
        If True, will print a progress bar of the download to standard
        error (stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to
        be installed.
    """

    def __init__(
        self,
        port=22,
        username="anonymous",
        password="",
        account="",
        timeout=None,
        progressbar=False,
    ):
        self.port = port
        self.username = username
        self.password = password
        self.account = account
        self.timeout = timeout
        self.progressbar = progressbar
        # Collect errors and raise only once so that both missing packages are
        # captured. Otherwise, the user is only warned of one of them at a
        # time (and we can't test properly when they are both missing).
        errors = []
        if self.progressbar and tqdm is None:
            errors.append("Missing package 'tqdm' required for progress bars.")
        if paramiko is None:
            errors.append("Missing package 'paramiko' required for SFTP downloads.")
        if errors:
            raise ValueError(" ".join(errors))

    def __call__(self, url, output_file, pooch):
        """
        Download the given URL over SFTP to the given output file.

        The output file must be given as a string (file name/path) and not an
        open file object! Otherwise, paramiko cannot save to that file.

        Parameters
        ----------
        url : str
            The URL to the file you want to download.
        output_file : str
            Path (and file name) to which the file will be downloaded. **Cannot
            be a file object**.
        pooch : :class:`~pooch.Pooch`
            The instance of :class:`~pooch.Pooch` that is calling this method.
        """
        parsed_url = parse_url(url)
        connection = paramiko.Transport(sock=(parsed_url["netloc"], self.port))
        sftp = None
        try:
            connection.connect(username=self.username, password=self.password)
            sftp = paramiko.SFTPClient.from_transport(connection)
            # BUG FIX: `settimeout` is a method of the underlying channel. The
            # original code assigned to the attribute
            # (`...settimeout = self.timeout`), silently replacing the method
            # without ever applying the timeout. Call it instead.
            sftp.get_channel().settimeout(self.timeout)
            if self.progressbar:
                size = int(sftp.stat(parsed_url["path"]).st_size)
                use_ascii = bool(sys.platform == "win32")
                progress = tqdm(
                    total=size,
                    ncols=79,
                    ascii=use_ascii,
                    unit="B",
                    unit_scale=True,
                    leave=True,
                )
                with progress:

                    def callback(current, total):
                        "Update the progress bar and write to output"
                        progress.total = int(total)
                        progress.update(int(current - progress.n))

                    sftp.get(parsed_url["path"], output_file, callback=callback)
            else:
                sftp.get(parsed_url["path"], output_file)
        finally:
            # Close the SFTP client before the transport that carries it, and
            # make sure the transport is closed even if sftp.close() raises.
            try:
                if sftp is not None:
                    sftp.close()
            finally:
                connection.close()
| 35.803204 | 85 | 0.592612 |
import sys
import ftplib
import requests
from .utils import parse_url
try:
from tqdm import tqdm
except ImportError:
tqdm = None
try:
import paramiko
except ImportError:
paramiko = None
def choose_downloader(url):
    """Return a downloader instance appropriate for the URL's protocol.

    Raises ValueError for protocols without a registered downloader.
    """
    handlers = {
        "ftp": FTPDownloader,
        "https": HTTPDownloader,
        "http": HTTPDownloader,
        "sftp": SFTPDownloader,
    }
    protocol = parse_url(url)["protocol"]
    if protocol not in handlers:
        raise ValueError(
            f"Unrecognized URL protocol '{protocol}' in '{url}'. "
            f"Must be one of {handlers.keys()}."
        )
    return handlers[protocol]()
class HTTPDownloader:
    """
    Download manager for fetching files over HTTP/HTTPS.

    When called, downloads the given file URL into the specified local file.
    Extra keyword arguments given at construction are forwarded to
    :func:`requests.get`.

    Parameters
    ----------
    progressbar : bool
        If True, print a progress bar of the download to stderr. Requires
        `tqdm <https://github.com/tqdm/tqdm>`__ to be installed.
    chunk_size : int
        Files are streamed *chunk_size* bytes at a time instead of loading
        everything into memory at once.
    **kwargs
        Keyword arguments passed on to :func:`requests.get`.
    """

    def __init__(self, progressbar=False, chunk_size=1024, **kwargs):
        self.kwargs = kwargs
        self.progressbar = progressbar
        self.chunk_size = chunk_size
        if self.progressbar and tqdm is None:
            raise ValueError("Missing package 'tqdm' required for progress bars.")

    def __call__(self, url, output_file, pooch):
        """
        Download the given URL over HTTP to the given output file.

        Parameters
        ----------
        url : str
            The URL to the file you want to download.
        output_file : str or file-like object
            Path (and file name) to which the file will be downloaded.
        pooch : :class:`~pooch.Pooch`
            The instance of :class:`~pooch.Pooch` that is calling this method.
        """
        kwargs = self.kwargs.copy()
        # Stream by default so large files are not read into memory at once.
        kwargs.setdefault("stream", True)
        ispath = not hasattr(output_file, "write")
        if ispath:
            output_file = open(output_file, "w+b")
        try:
            response = requests.get(url, **kwargs)
            response.raise_for_status()
            content = response.iter_content(chunk_size=self.chunk_size)
            if self.progressbar:
                total = int(response.headers.get("content-length", 0))
                # always full unicode support
                # (see https://github.com/tqdm/tqdm/issues/454)
                use_ascii = bool(sys.platform == "win32")
                progress = tqdm(
                    total=total,
                    ncols=79,
                    ascii=use_ascii,
                    unit="B",
                    unit_scale=True,
                    leave=True,
                )
            for chunk in content:
                if chunk:
                    output_file.write(chunk)
                    output_file.flush()
                    if self.progressbar:
                        # Use the chunk size here because chunk may be much
                        # larger if the data are decompressed by requests after
                        # reading (happens with text files).
                        progress.update(self.chunk_size)
            # Make sure the progress bar gets filled even if the actual number
            # of chunks is smaller than expected. This happens when streaming
            # text files that are compressed by the server when sending (gzip).
            # Binary files don't experience this.
            if self.progressbar:
                progress.reset()
                progress.update(total)
                progress.close()
        finally:
            # Only close handles this method opened itself.
            if ispath:
                output_file.close()
class FTPDownloader:
    """
    Download manager for fetching files over FTP.

    When called, downloads the given file URL into the specified local file
    using the :mod:`ftplib` module.

    Parameters
    ----------
    port : int
        Port used for the FTP connection.
    username : str
        User name used to login to the server (default is anonymous FTP).
    password : str
        Password used to login; the empty string means no password.
    account : str
        Optional "account" name some servers require for authentication.
    timeout : int
        Timeout in seconds for ftp socket operations; None means no timeout.
    progressbar : bool
        If True, print a progress bar of the download to stderr. Requires
        `tqdm <https://github.com/tqdm/tqdm>`__ to be installed.
    chunk_size : int
        Files are streamed *chunk_size* bytes at a time.
    """

    def __init__(
        self,
        port=21,
        username="anonymous",
        password="",
        account="",
        timeout=None,
        progressbar=False,
        chunk_size=1024,
    ):
        self.port = port
        self.username = username
        self.password = password
        self.account = account
        self.timeout = timeout
        self.progressbar = progressbar
        self.chunk_size = chunk_size
        if self.progressbar and tqdm is None:
            raise ValueError("Missing package 'tqdm' required for progress bars.")

    def __call__(self, url, output_file, pooch):
        """
        Download the given URL over FTP to the given output file.

        Parameters
        ----------
        url : str
            The URL to the file you want to download.
        output_file : str or file-like object
            Path (and file name) to which the file will be downloaded.
        pooch : :class:`~pooch.Pooch`
            The instance of :class:`~pooch.Pooch` that is calling this method.
        """
        parsed_url = parse_url(url)
        ftp = ftplib.FTP(timeout=self.timeout)
        ftp.connect(host=parsed_url["netloc"], port=self.port)
        ispath = not hasattr(output_file, "write")
        if ispath:
            output_file = open(output_file, "w+b")
        try:
            ftp.login(user=self.username, passwd=self.password, acct=self.account)
            command = f"RETR {parsed_url['path']}"
            if self.progressbar:
                # Make sure the file is set to binary mode, otherwise we can't
                # get the file size. See: https://stackoverflow.com/a/22093848
                ftp.voidcmd("TYPE I")
                size = int(ftp.size(parsed_url["path"]))
                use_ascii = bool(sys.platform == "win32")
                progress = tqdm(
                    total=size,
                    ncols=79,
                    ascii=use_ascii,
                    unit="B",
                    unit_scale=True,
                    leave=True,
                )
                with progress:
                    def callback(data):
                        "Update the progress bar and write to output"
                        progress.update(len(data))
                        output_file.write(data)
                    ftp.retrbinary(command, callback, blocksize=self.chunk_size)
            else:
                ftp.retrbinary(command, output_file.write, blocksize=self.chunk_size)
        finally:
            ftp.quit()
            # Only close handles this method opened itself.
            if ispath:
                output_file.close()
class SFTPDownloader:  # pylint: disable=too-few-public-methods
    """
    Download manager for fetching files over SFTP.

    When called, downloads the given file URL into the specified local file.
    Requires `paramiko <https://github.com/paramiko/paramiko>`__ to be
    installed.

    Parameters
    ----------
    port : int
        Port used for the SFTP connection.
    username : str
        User name used to login to the server.
    password : str
        Password used to login; the empty string means no password.
    timeout : int
        Timeout in seconds for sftp socket operations; None means no timeout.
    progressbar : bool
        If True, print a progress bar of the download to stderr. Requires
        `tqdm <https://github.com/tqdm/tqdm>`__ to be installed.
    """

    def __init__(
        self,
        port=22,
        username="anonymous",
        password="",
        account="",
        timeout=None,
        progressbar=False,
    ):
        self.port = port
        self.username = username
        self.password = password
        self.account = account
        self.timeout = timeout
        self.progressbar = progressbar
        # Collect errors and raise only once so that both missing packages
        # are reported together.
        errors = []
        if self.progressbar and tqdm is None:
            errors.append("Missing package 'tqdm' required for progress bars.")
        if paramiko is None:
            errors.append("Missing package 'paramiko' required for SFTP downloads.")
        if errors:
            raise ValueError(" ".join(errors))

    def __call__(self, url, output_file, pooch):
        """
        Download the given URL over SFTP to *output_file*.

        *output_file* must be a path string, not an open file object,
        because paramiko saves to a named file.
        """
        parsed_url = parse_url(url)
        connection = paramiko.Transport(sock=(parsed_url["netloc"], self.port))
        sftp = None
        try:
            connection.connect(username=self.username, password=self.password)
            sftp = paramiko.SFTPClient.from_transport(connection)
            # BUG FIX: `settimeout` is a method; the original assignment
            # (`...settimeout = self.timeout`) replaced the method and never
            # actually set the timeout. Call it instead.
            sftp.get_channel().settimeout(self.timeout)
            if self.progressbar:
                size = int(sftp.stat(parsed_url["path"]).st_size)
                use_ascii = bool(sys.platform == "win32")
                progress = tqdm(
                    total=size,
                    ncols=79,
                    ascii=use_ascii,
                    unit="B",
                    unit_scale=True,
                    leave=True,
                )
                with progress:

                    def callback(current, total):
                        "Update the progress bar and write to output"
                        progress.total = int(total)
                        progress.update(int(current - progress.n))

                    sftp.get(parsed_url["path"], output_file, callback=callback)
            else:
                sftp.get(parsed_url["path"], output_file)
        finally:
            # Close the SFTP client before its transport; always close both.
            try:
                if sftp is not None:
                    sftp.close()
            finally:
                connection.close()
| true | true |
f71be436acf919b617f27f823476c8b4531b4b98 | 7,043 | py | Python | homeassistant/auth/providers/trusted_networks.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 3 | 2020-01-21T18:09:09.000Z | 2022-01-17T08:06:03.000Z | homeassistant/auth/providers/trusted_networks.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 39 | 2016-12-16T12:40:34.000Z | 2017-02-13T17:53:42.000Z | homeassistant/auth/providers/trusted_networks.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 6 | 2020-04-10T06:21:11.000Z | 2021-07-01T08:53:38.000Z | """Trusted Networks auth provider.
It shows list of users if access from trusted network.
Abort login flow if not access from trusted network.
"""
from ipaddress import ip_network, IPv4Address, IPv6Address, IPv4Network, IPv6Network
from typing import Any, Dict, List, Optional, Union, cast
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from . import AuthProvider, AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, LoginFlow
from ..models import Credentials, UserMeta
IPAddress = Union[IPv4Address, IPv6Address]
IPNetwork = Union[IPv4Network, IPv6Network]
CONF_TRUSTED_NETWORKS = "trusted_networks"
CONF_TRUSTED_USERS = "trusted_users"
CONF_GROUP = "group"
CONF_ALLOW_BYPASS_LOGIN = "allow_bypass_login"
CONFIG_SCHEMA = AUTH_PROVIDER_SCHEMA.extend(
{
vol.Required(CONF_TRUSTED_NETWORKS): vol.All(cv.ensure_list, [ip_network]),
vol.Optional(CONF_TRUSTED_USERS, default={}): vol.Schema(
# we only validate the format of user_id or group_id
{
ip_network: vol.All(
cv.ensure_list,
[
vol.Or(
cv.uuid4_hex,
vol.Schema({vol.Required(CONF_GROUP): cv.uuid4_hex}),
)
],
)
}
),
vol.Optional(CONF_ALLOW_BYPASS_LOGIN, default=False): cv.boolean,
},
extra=vol.PREVENT_EXTRA,
)
class InvalidAuthError(HomeAssistantError):
"""Raised when try to access from untrusted networks."""
class InvalidUserError(HomeAssistantError):
"""Raised when try to login as invalid user."""
@AUTH_PROVIDERS.register("trusted_networks")
class TrustedNetworksAuthProvider(AuthProvider):
"""Trusted Networks auth provider.
Allow passwordless access from trusted network.
"""
DEFAULT_TITLE = "Trusted Networks"
@property
def trusted_networks(self) -> List[IPNetwork]:
"""Return trusted networks."""
return cast(List[IPNetwork], self.config[CONF_TRUSTED_NETWORKS])
@property
def trusted_users(self) -> Dict[IPNetwork, Any]:
"""Return trusted users per network."""
return cast(Dict[IPNetwork, Any], self.config[CONF_TRUSTED_USERS])
@property
def support_mfa(self) -> bool:
"""Trusted Networks auth provider does not support MFA."""
return False
    async def async_login_flow(self, context: Optional[Dict]) -> LoginFlow:
        """Return a flow to login."""
        assert context is not None
        ip_addr = cast(IPAddress, context.get("ip_address"))
        users = await self.store.async_get_users()
        # Start from every active, non-system-generated user; a matching
        # trusted_users entry (below) narrows this list further.
        available_users = [
            user for user in users if not user.system_generated and user.is_active
        ]
        for ip_net, user_or_group_list in self.trusted_users.items():
            if ip_addr in ip_net:
                # Plain strings are user ids; dicts carry a group id under
                # CONF_GROUP (see CONFIG_SCHEMA).
                user_list = [
                    user_id
                    for user_id in user_or_group_list
                    if isinstance(user_id, str)
                ]
                group_list = [
                    group[CONF_GROUP]
                    for group in user_or_group_list
                    if isinstance(group, dict)
                ]
                # NOTE(review): group_list elements are uuid hex *strings*
                # (per CONFIG_SCHEMA), so this flatten iterates over the
                # characters of each id and the membership test below would
                # compare group ids against single characters. Looks wrong —
                # confirm the intended element type before relying on
                # group-based trusted_users entries.
                flattened_group_list = [
                    group for sublist in group_list for group in sublist
                ]
                available_users = [
                    user
                    for user in available_users
                    if (
                        user.id in user_list
                        or any(
                            [group.id in flattened_group_list for group in user.groups]
                        )
                    )
                ]
                # Only the first network containing the caller applies.
                break
        return TrustedNetworksLoginFlow(
            self,
            ip_addr,
            {user.id: user.name for user in available_users},
            self.config[CONF_ALLOW_BYPASS_LOGIN],
        )
async def async_get_or_create_credentials(
self, flow_result: Dict[str, str]
) -> Credentials:
"""Get credentials based on the flow result."""
user_id = flow_result["user"]
users = await self.store.async_get_users()
for user in users:
if not user.system_generated and user.is_active and user.id == user_id:
for credential in await self.async_credentials():
if credential.data["user_id"] == user_id:
return credential
cred = self.async_create_credentials({"user_id": user_id})
await self.store.async_link_user(user, cred)
return cred
# We only allow login as exist user
raise InvalidUserError
async def async_user_meta_for_credentials(
self, credentials: Credentials
) -> UserMeta:
"""Return extra user metadata for credentials.
Trusted network auth provider should never create new user.
"""
raise NotImplementedError
@callback
def async_validate_access(self, ip_addr: IPAddress) -> None:
"""Make sure the access from trusted networks.
Raise InvalidAuthError if not.
Raise InvalidAuthError if trusted_networks is not configured.
"""
if not self.trusted_networks:
raise InvalidAuthError("trusted_networks is not configured")
if not any(
ip_addr in trusted_network for trusted_network in self.trusted_networks
):
raise InvalidAuthError("Not in trusted_networks")
class TrustedNetworksLoginFlow(LoginFlow):
    """Login flow handler for the trusted networks auth provider."""

    def __init__(
        self,
        auth_provider: TrustedNetworksAuthProvider,
        ip_addr: IPAddress,
        available_users: Dict[str, Optional[str]],
        allow_bypass_login: bool,
    ) -> None:
        """Store flow state: provider, caller IP and selectable users."""
        super().__init__(auth_provider)
        self._ip_address = ip_addr
        self._available_users = available_users
        self._allow_bypass_login = allow_bypass_login

    async def async_step_init(
        self, user_input: Optional[Dict[str, str]] = None
    ) -> Dict[str, Any]:
        """Show the user-selection form, or finish/abort the flow."""
        provider = cast(TrustedNetworksAuthProvider, self._auth_provider)
        try:
            provider.async_validate_access(self._ip_address)
        except InvalidAuthError:
            return self.async_abort(reason="not_whitelisted")

        if user_input is not None:
            return await self.async_finish(user_input)

        # When there is exactly one candidate user, skip the form if allowed.
        if self._allow_bypass_login and len(self._available_users) == 1:
            only_user = next(iter(self._available_users))
            return await self.async_finish({"user": only_user})

        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema({"user": vol.In(self._available_users)}),
        )
| 34.18932 | 87 | 0.609115 | from ipaddress import ip_network, IPv4Address, IPv6Address, IPv4Network, IPv6Network
from typing import Any, Dict, List, Optional, Union, cast
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from . import AuthProvider, AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, LoginFlow
from ..models import Credentials, UserMeta
IPAddress = Union[IPv4Address, IPv6Address]
IPNetwork = Union[IPv4Network, IPv6Network]
CONF_TRUSTED_NETWORKS = "trusted_networks"
CONF_TRUSTED_USERS = "trusted_users"
CONF_GROUP = "group"
CONF_ALLOW_BYPASS_LOGIN = "allow_bypass_login"
CONFIG_SCHEMA = AUTH_PROVIDER_SCHEMA.extend(
{
vol.Required(CONF_TRUSTED_NETWORKS): vol.All(cv.ensure_list, [ip_network]),
vol.Optional(CONF_TRUSTED_USERS, default={}): vol.Schema(
{
ip_network: vol.All(
cv.ensure_list,
[
vol.Or(
cv.uuid4_hex,
vol.Schema({vol.Required(CONF_GROUP): cv.uuid4_hex}),
)
],
)
}
),
vol.Optional(CONF_ALLOW_BYPASS_LOGIN, default=False): cv.boolean,
},
extra=vol.PREVENT_EXTRA,
)
class InvalidAuthError(HomeAssistantError):
    """Raised when access is attempted from an untrusted network."""
class InvalidUserError(HomeAssistantError):
    """Raised when login is attempted as an invalid user."""
@AUTH_PROVIDERS.register("trusted_networks")
class TrustedNetworksAuthProvider(AuthProvider):
DEFAULT_TITLE = "Trusted Networks"
@property
def trusted_networks(self) -> List[IPNetwork]:
return cast(List[IPNetwork], self.config[CONF_TRUSTED_NETWORKS])
@property
def trusted_users(self) -> Dict[IPNetwork, Any]:
return cast(Dict[IPNetwork, Any], self.config[CONF_TRUSTED_USERS])
@property
def support_mfa(self) -> bool:
return False
async def async_login_flow(self, context: Optional[Dict]) -> LoginFlow:
assert context is not None
ip_addr = cast(IPAddress, context.get("ip_address"))
users = await self.store.async_get_users()
available_users = [
user for user in users if not user.system_generated and user.is_active
]
for ip_net, user_or_group_list in self.trusted_users.items():
if ip_addr in ip_net:
user_list = [
user_id
for user_id in user_or_group_list
if isinstance(user_id, str)
]
group_list = [
group[CONF_GROUP]
for group in user_or_group_list
if isinstance(group, dict)
]
flattened_group_list = [
group for sublist in group_list for group in sublist
]
available_users = [
user
for user in available_users
if (
user.id in user_list
or any(
[group.id in flattened_group_list for group in user.groups]
)
)
]
break
return TrustedNetworksLoginFlow(
self,
ip_addr,
{user.id: user.name for user in available_users},
self.config[CONF_ALLOW_BYPASS_LOGIN],
)
async def async_get_or_create_credentials(
self, flow_result: Dict[str, str]
) -> Credentials:
user_id = flow_result["user"]
users = await self.store.async_get_users()
for user in users:
if not user.system_generated and user.is_active and user.id == user_id:
for credential in await self.async_credentials():
if credential.data["user_id"] == user_id:
return credential
cred = self.async_create_credentials({"user_id": user_id})
await self.store.async_link_user(user, cred)
return cred
raise InvalidUserError
async def async_user_meta_for_credentials(
self, credentials: Credentials
) -> UserMeta:
raise NotImplementedError
@callback
def async_validate_access(self, ip_addr: IPAddress) -> None:
if not self.trusted_networks:
raise InvalidAuthError("trusted_networks is not configured")
if not any(
ip_addr in trusted_network for trusted_network in self.trusted_networks
):
raise InvalidAuthError("Not in trusted_networks")
class TrustedNetworksLoginFlow(LoginFlow):
def __init__(
self,
auth_provider: TrustedNetworksAuthProvider,
ip_addr: IPAddress,
available_users: Dict[str, Optional[str]],
allow_bypass_login: bool,
) -> None:
super().__init__(auth_provider)
self._available_users = available_users
self._ip_address = ip_addr
self._allow_bypass_login = allow_bypass_login
async def async_step_init(
self, user_input: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
try:
cast(
TrustedNetworksAuthProvider, self._auth_provider
).async_validate_access(self._ip_address)
except InvalidAuthError:
return self.async_abort(reason="not_whitelisted")
if user_input is not None:
return await self.async_finish(user_input)
if self._allow_bypass_login and len(self._available_users) == 1:
return await self.async_finish(
{"user": next(iter(self._available_users.keys()))}
)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema({"user": vol.In(self._available_users)}),
)
| true | true |
f71be5558aa2a08dbbb15648b144ea54b9b317ff | 854 | py | Python | graphgallery/nn/layers/tensorflow/dropout/dropout.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 300 | 2020-08-09T04:27:41.000Z | 2022-03-30T07:43:41.000Z | graphgallery/nn/layers/tensorflow/dropout/dropout.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 5 | 2020-11-05T06:16:50.000Z | 2021-12-11T05:05:22.000Z | graphgallery/nn/layers/tensorflow/dropout/dropout.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 51 | 2020-09-23T15:37:12.000Z | 2022-03-05T01:28:56.000Z | import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer, Dropout
class SparseDropout(Layer):
    """Dropout applied to the non-zero values of a ``tf.SparseTensor``."""

    def __init__(self, p=0.5):
        super().__init__()
        # Probability parameter forwarded to tf.nn.dropout in call().
        self.p = p

    def call(self, x, training=None):
        if training is None:
            training = K.learning_phase()
        # No-op when dropout is disabled or we are not training.
        if not (self.p and training):
            return x
        dropped_values = tf.nn.dropout(x.values, self.p)
        return tf.SparseTensor(x.indices, dropped_values, x.dense_shape)
class MixedDropout(Layer):
    """Dropout that accepts both dense tensors and ``tf.SparseTensor``s.

    Dispatches to a standard Keras ``Dropout`` for dense inputs and to
    ``SparseDropout`` for sparse inputs.
    """

    def __init__(self, p=0.5):
        super().__init__()
        self.dense_dropout = Dropout(p)
        self.sparse_dropout = SparseDropout(p)

    def call(self, x):
        dropout = self.sparse_dropout if K.is_sparse(x) else self.dense_dropout
        return dropout(x)
| 26.6875 | 69 | 0.591335 | import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer, Dropout
class SparseDropout(Layer):
    """Dropout applied to the non-zero values of a ``tf.SparseTensor``."""

    def __init__(self, p=0.5):
        super().__init__()
        # Probability parameter forwarded to tf.nn.dropout in call().
        self.p = p

    def call(self, x, training=None):
        if training is None:
            training = K.learning_phase()
        if self.p and training:
            values = tf.nn.dropout(x.values, self.p)
            return tf.SparseTensor(x.indices, values, x.dense_shape)
        # Disabled dropout or inference: pass the input through unchanged.
        return x
class MixedDropout(Layer):
    """Dropout that accepts both dense tensors and ``tf.SparseTensor``s.

    Dispatches to a standard Keras ``Dropout`` for dense inputs and to
    ``SparseDropout`` for sparse inputs.
    """

    def __init__(self, p=0.5):
        super().__init__()
        self.dense_dropout = Dropout(p)
        self.sparse_dropout = SparseDropout(p)

    def call(self, x):
        if K.is_sparse(x):
            return self.sparse_dropout(x)
        else:
            return self.dense_dropout(x)
| true | true |
f71be5cc21797fff7f87730b2e377bc2fe651fa1 | 14,118 | py | Python | clinica/pipelines/deeplearning_prepare_data/deeplearning_prepare_data_pipeline.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | null | null | null | clinica/pipelines/deeplearning_prepare_data/deeplearning_prepare_data_pipeline.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | null | null | null | clinica/pipelines/deeplearning_prepare_data/deeplearning_prepare_data_pipeline.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Use hash instead of parameters for iterables folder names
# Otherwise path will be too long and generate OSError
from nipype import config
import clinica.pipelines.engine as cpe
cfg = dict(execution={"parameterize_dirs": False})
config.update_config(cfg)
class DeepLearningPrepareData(cpe.Pipeline):
"""Deeplearning prepare data - MRI in nifti format are transformed into
PyTorch tensors. The transformation is applied to: the whole volume, a
selection of 3D patches, or slices extracted from the 3D volume. By default
it uses the cropped version of the MRI (see option "--use_uncropper_image")
Returns:
A clinica pipeline object containing the Deeplearning prepare data pipeline.
"""
    def check_custom_dependencies(self):
        """Check dependencies that can not be listed in the `info.json` file.

        This pipeline has nothing extra to verify, so this is a no-op.
        """
def get_input_fields(self):
"""Specify the list of possible inputs of this pipeline.
Returns:
A list of (string) input fields name.
"""
return ["input_nifti"]
def get_output_fields(self):
"""Specify the list of possible outputs of this pipeline.
Returns:
A list of (string) output fields name.
"""
return ["image_id"]
def build_input_node(self):
"""Build and connect an input node to the pipeline."""
from os import path
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from clinica.utils.exceptions import (
ClinicaBIDSError,
ClinicaCAPSError,
ClinicaException,
)
from clinica.utils.input_files import (
T1W_EXTENSIVE,
T1W_LINEAR,
T1W_LINEAR_CROPPED,
pet_linear_nii,
)
from clinica.utils.inputs import clinica_file_reader
from clinica.utils.stream import cprint
from clinica.utils.ux import print_images_to_process
from .deeplearning_prepare_data_utils import check_mask_list
# Select the correct filetype corresponding to modality
if self.parameters.get("modality") == "t1-linear":
if self.parameters.get("use_uncropped_image"):
FILE_TYPE = T1W_LINEAR
else:
FILE_TYPE = T1W_LINEAR_CROPPED
if self.parameters.get("modality") == "t1-extensive":
FILE_TYPE = T1W_EXTENSIVE
if self.parameters.get("modality") == "pet-linear":
FILE_TYPE = pet_linear_nii(
self.parameters.get("acq_label"),
self.parameters.get("suvr_reference_region"),
self.parameters.get("use_uncropped_image"),
)
if self.parameters.get("modality") == "custom":
FILE_TYPE = {
"pattern": f"*{self.parameters.get('custom_suffix')}",
"description": "Custom suffix",
}
# Input file:
try:
input_files = clinica_file_reader(
self.subjects, self.sessions, self.caps_directory, FILE_TYPE
)
except ClinicaException as e:
err = (
"Clinica faced error(s) while trying to read files in your CAPS directory.\n"
+ str(e)
)
raise ClinicaBIDSError(err)
if len(self.subjects):
print_images_to_process(self.subjects, self.sessions)
cprint("The pipeline will last approximately 30 seconds per image.")
if self.parameters.get("extract_method") == "slice":
self.slice_direction = self.parameters.get("slice_direction")
self.slice_mode = self.parameters.get("slice_mode")
else:
self.slice_direction = "axial"
self.slice_mode = "rgb"
if self.parameters.get("extract_method") == "patch":
self.patch_size = self.parameters.get("patch_size")
self.stride_size = self.parameters.get("stride_size")
else:
self.patch_size = 50
self.stride_size = 50
# Load the corresponding masks
if self.parameters.get("extract_method") == "roi":
self.roi_list = self.parameters.get("roi_list")
if self.parameters.get("modality") == "custom":
self.mask_pattern = self.parameters.get("custom_mask_pattern")
self.template = self.parameters.get("custom_template")
if not self.template:
raise ValueError(
"A custom template must be defined when the modality is set to custom."
)
else:
self.mask_pattern = None
from .deeplearning_prepare_data_utils import TEMPLATE_DICT
self.template = TEMPLATE_DICT[self.parameters.get("modality")]
self.masks_location = path.join(
self.caps_directory, "masks", f"tpl-{self.template}"
)
if not self.roi_list:
raise ValueError("A list of regions must be given.")
else:
check_mask_list(
self.masks_location,
self.roi_list,
self.mask_pattern,
not self.parameters.get("use_uncropped_image"),
)
else:
self.masks_location = ""
self.mask_pattern = None
self.roi_list = []
# The reading node
# -------------------------
read_node = npe.Node(
name="ReadingFiles",
iterables=[
("input_nifti", input_files),
],
synchronize=True,
interface=nutil.IdentityInterface(fields=self.get_input_fields()),
)
self.connect(
[
(read_node, self.input_node, [("input_nifti", "input_nifti")]),
]
)
def build_output_node(self):
"""Build and connect an output node to the pipeline."""
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from nipype.interfaces.io import DataSink
from clinica.utils.filemanip import get_subject_id
from clinica.utils.nipype import container_from_filename, fix_join
# Write node
# ----------------------
write_node = npe.Node(name="WriteCaps", interface=DataSink())
write_node.inputs.base_directory = self.caps_directory
write_node.inputs.parameterization = False
# Get subject ID node
# ----------------------
image_id_node = npe.Node(
interface=nutil.Function(
input_names=["bids_or_caps_file"],
output_names=["image_id"],
function=get_subject_id,
),
name="ImageID",
)
# Find container path from input filename
# ----------------------
container_path = npe.Node(
nutil.Function(
input_names=["bids_or_caps_filename"],
output_names=["container"],
function=container_from_filename,
),
name="ContainerPath",
)
# fmt: off
self.connect(
[
(self.input_node, image_id_node, [("input_nifti", "bids_or_caps_file")]),
(self.input_node, container_path, [("input_nifti", "bids_or_caps_filename")]),
# (image_id_node, write_node, [('image_id', '@image_id')]),
(image_id_node, write_node, [("image_id", "@image_id")]),
]
)
# fmt: on
subfolder = "image_based"
if self.parameters.get("extract_method") == "slice":
subfolder = "slice_based"
# fmt: off
self.connect(
[
(self.output_node, write_node, [("slices_rgb_output", "@slices_rgb_output")]),
(self.output_node, write_node, [("slices_original_output", "@slices_original_output")]),
]
)
# fmt: on
elif self.parameters.get("extract_method") == "patch":
subfolder = "patch_based"
# fmt: off
self.connect(
[
(self.output_node, write_node, [("patches_output", "@patches_output")])
]
)
# fmt: on
elif self.parameters.get("extract_method") == "roi":
subfolder = "roi_based"
# fmt: off
self.connect(
[
(self.output_node, write_node, [("roi_output", "@roi_output")])
]
)
# fmt: on
else:
# fmt: off
self.connect(
[
(self.output_node, write_node, [("output_pt_file", "@output_pt_file")])
]
)
# fmt: on
mod_subfolder = ""
if self.parameters.get("modality") == "t1-linear":
mod_subfolder = "t1_linear"
if self.parameters.get("modality") == "t1-extensive":
mod_subfolder = "t1_extensive"
if self.parameters.get("modality") == "pet-linear":
mod_subfolder = "pet_linear"
if self.parameters.get("modality") == "custom":
mod_subfolder = "custom"
# fmt: off
self.connect(
[
(container_path, write_node, [
(("container", fix_join, "deeplearning_prepare_data", subfolder, mod_subfolder), "container")]),
]
)
# fmt: on
def build_core_nodes(self):
"""Build and connect the core nodes of the pipeline."""
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from .deeplearning_prepare_data_utils import (
extract_patches,
extract_roi,
extract_slices,
save_as_pt,
)
# The processing nodes
# Node to save input in nii.gz format into pytorch .pt format
# ----------------------
save_as_pt = npe.MapNode(
name="save_as_pt",
iterfield=["input_img"],
interface=nutil.Function(
function=save_as_pt,
input_names=["input_img"],
output_names=["output_file"],
),
)
# Extract slices node (options: 3 directions, mode)
# ----------------------
extract_slices_node = npe.MapNode(
name="extract_slices",
iterfield=["input_tensor"],
interface=nutil.Function(
function=extract_slices,
input_names=["input_tensor", "slice_direction", "slice_mode"],
output_names=["output_file_rgb", "output_file_original"],
),
)
extract_slices_node.inputs.slice_direction = self.slice_direction
extract_slices_node.inputs.slice_mode = self.slice_mode
# Extract patches node (options, patch size and stride size)
# ----------------------
extract_patches_node = npe.MapNode(
name="extract_patches",
iterfield=["input_tensor"],
interface=nutil.Function(
function=extract_patches,
input_names=["input_tensor", "patch_size", "stride_size"],
output_names=["output_patch"],
),
)
extract_patches_node.inputs.patch_size = self.patch_size
extract_patches_node.inputs.stride_size = self.stride_size
# Extract ROi node
extract_roi_node = npe.MapNode(
name="extract_ROI",
iterfield=["input_tensor"],
interface=nutil.Function(
function=extract_roi,
input_names=[
"input_tensor",
"masks_location",
"mask_pattern",
"cropped_input",
"roi_list",
"uncrop_output",
],
output_names=["output_roi"],
),
)
extract_roi_node.inputs.masks_location = self.masks_location
extract_roi_node.inputs.mask_pattern = self.mask_pattern
extract_roi_node.inputs.cropped_input = not self.parameters.get(
"use_uncropped_image"
)
extract_roi_node.inputs.roi_list = self.roi_list
extract_roi_node.inputs.uncrop_output = self.parameters.get("roi_uncrop_output")
# Connections
# ----------------------
# fmt: off
self.connect(
[
(self.input_node, save_as_pt, [("input_nifti", "input_img")]),
]
)
if self.parameters.get("extract_method") == "slice":
self.connect(
[
(save_as_pt, extract_slices_node, [("output_file", "input_tensor")]),
(extract_slices_node, self.output_node, [("output_file_rgb", "slices_rgb_output")]),
(extract_slices_node, self.output_node, [("output_file_original", "slices_original_output")]),
]
)
elif self.parameters.get("extract_method") == "patch":
self.connect(
[
(save_as_pt, extract_patches_node, [("output_file", "input_tensor")]),
(extract_patches_node, self.output_node, [("output_patch", "patches_output")]),
]
)
elif self.parameters.get("extract_method") == "roi":
self.connect(
[
(save_as_pt, extract_roi_node, [("output_file", "input_tensor")]),
(extract_roi_node, self.output_node, [("output_roi", "roi_output")]),
]
)
else:
self.connect(
[
(save_as_pt, self.output_node, [("output_file", "output_pt_file")]),
]
)
# fmt: on
| 35.741772 | 116 | 0.541507 |
from nipype import config
import clinica.pipelines.engine as cpe
cfg = dict(execution={"parameterize_dirs": False})
config.update_config(cfg)
class DeepLearningPrepareData(cpe.Pipeline):
    """Pipeline converting CAPS NIfTI images into PyTorch ``.pt`` tensors.

    ``parameters["modality"]`` selects which preprocessed image is read from
    CAPS (t1-linear, t1-extensive, pet-linear or custom);
    ``parameters["extract_method"]`` decides whether the saved tensor is the
    full image or is further decomposed into 2D slices, 3D patches or
    regions of interest (ROI).
    """

    def check_custom_dependencies(self):
        """Nothing to check: this pipeline has no non-Python dependency."""

    def get_input_fields(self):
        """Return the pipeline input field names (a single NIfTI path)."""
        return ["input_nifti"]

    def get_output_fields(self):
        """Return the pipeline output field names."""
        return ["image_id"]

    def build_input_node(self):
        """Read input files from CAPS and wire them into the input node.

        Also derives and stores on ``self`` the extraction parameters
        (slice / patch / ROI settings) consumed later by
        :meth:`build_core_nodes`.

        Raises:
            ClinicaBIDSError: if files cannot be read from CAPS.
            ValueError: if ROI extraction is requested without a template
                or without a region list.
        """
        from os import path
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.utils.exceptions import (
            ClinicaBIDSError,
            ClinicaCAPSError,  # NOTE(review): imported but not used below
            ClinicaException,
        )
        from clinica.utils.input_files import (
            T1W_EXTENSIVE,
            T1W_LINEAR,
            T1W_LINEAR_CROPPED,
            pet_linear_nii,
        )
        from clinica.utils.inputs import clinica_file_reader
        from clinica.utils.stream import cprint
        from clinica.utils.ux import print_images_to_process
        from .deeplearning_prepare_data_utils import check_mask_list

        # Pick the CAPS file pattern matching the requested modality.
        if self.parameters.get("modality") == "t1-linear":
            if self.parameters.get("use_uncropped_image"):
                FILE_TYPE = T1W_LINEAR
            else:
                FILE_TYPE = T1W_LINEAR_CROPPED
        if self.parameters.get("modality") == "t1-extensive":
            FILE_TYPE = T1W_EXTENSIVE
        if self.parameters.get("modality") == "pet-linear":
            FILE_TYPE = pet_linear_nii(
                self.parameters.get("acq_label"),
                self.parameters.get("suvr_reference_region"),
                self.parameters.get("use_uncropped_image"),
            )
        if self.parameters.get("modality") == "custom":
            FILE_TYPE = {
                "pattern": f"*{self.parameters.get('custom_suffix')}",
                "description": "Custom suffix",
            }
        try:
            input_files = clinica_file_reader(
                self.subjects, self.sessions, self.caps_directory, FILE_TYPE
            )
        except ClinicaException as e:
            err = (
                "Clinica faced error(s) while trying to read files in your CAPS directory.\n"
                + str(e)
            )
            raise ClinicaBIDSError(err)
        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("The pipeline will last approximately 30 seconds per image.")

        # Slice parameters (defaults apply for the other extraction modes).
        if self.parameters.get("extract_method") == "slice":
            self.slice_direction = self.parameters.get("slice_direction")
            self.slice_mode = self.parameters.get("slice_mode")
        else:
            self.slice_direction = "axial"
            self.slice_mode = "rgb"
        # Patch parameters.
        if self.parameters.get("extract_method") == "patch":
            self.patch_size = self.parameters.get("patch_size")
            self.stride_size = self.parameters.get("stride_size")
        else:
            self.patch_size = 50
            self.stride_size = 50
        # ROI parameters: a template and its mask directory are mandatory.
        if self.parameters.get("extract_method") == "roi":
            self.roi_list = self.parameters.get("roi_list")
            if self.parameters.get("modality") == "custom":
                self.mask_pattern = self.parameters.get("custom_mask_pattern")
                self.template = self.parameters.get("custom_template")
                if not self.template:
                    raise ValueError(
                        "A custom template must be defined when the modality is set to custom."
                    )
            else:
                self.mask_pattern = None
                from .deeplearning_prepare_data_utils import TEMPLATE_DICT
                self.template = TEMPLATE_DICT[self.parameters.get("modality")]
            self.masks_location = path.join(
                self.caps_directory, "masks", f"tpl-{self.template}"
            )
            if not self.roi_list:
                raise ValueError("A list of regions must be given.")
            else:
                # Validate that one mask file exists per requested region.
                check_mask_list(
                    self.masks_location,
                    self.roi_list,
                    self.mask_pattern,
                    not self.parameters.get("use_uncropped_image"),
                )
        else:
            self.masks_location = ""
            self.mask_pattern = None
            self.roi_list = []

        # One pipeline iteration per input image.
        read_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ("input_nifti", input_files),
            ],
            synchronize=True,
            interface=nutil.IdentityInterface(fields=self.get_input_fields()),
        )
        self.connect(
            [
                (read_node, self.input_node, [("input_nifti", "input_nifti")]),
            ]
        )

    def build_output_node(self):
        """Write outputs back into CAPS via a DataSink.

        The destination container is
        ``<subject>/<session>/deeplearning_prepare_data/<subfolder>/<modality>``
        where ``subfolder`` depends on the extraction method.
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from nipype.interfaces.io import DataSink
        from clinica.utils.filemanip import get_subject_id
        from clinica.utils.nipype import container_from_filename, fix_join

        write_node = npe.Node(name="WriteCaps", interface=DataSink())
        write_node.inputs.base_directory = self.caps_directory
        write_node.inputs.parameterization = False
        # Derive the image identifier from the input path.
        image_id_node = npe.Node(
            interface=nutil.Function(
                input_names=["bids_or_caps_file"],
                output_names=["image_id"],
                function=get_subject_id,
            ),
            name="ImageID",
        )
        # Derive the subject/session container path from the input path.
        container_path = npe.Node(
            nutil.Function(
                input_names=["bids_or_caps_filename"],
                output_names=["container"],
                function=container_from_filename,
            ),
            name="ContainerPath",
        )
        self.connect(
            [
                (self.input_node, image_id_node, [("input_nifti", "bids_or_caps_file")]),
                (self.input_node, container_path, [("input_nifti", "bids_or_caps_filename")]),
                (image_id_node, write_node, [("image_id", "@image_id")]),
            ]
        )
        subfolder = "image_based"
        if self.parameters.get("extract_method") == "slice":
            subfolder = "slice_based"
            self.connect(
                [
                    (self.output_node, write_node, [("slices_rgb_output", "@slices_rgb_output")]),
                    (self.output_node, write_node, [("slices_original_output", "@slices_original_output")]),
                ]
            )
        elif self.parameters.get("extract_method") == "patch":
            subfolder = "patch_based"
            self.connect(
                [
                    (self.output_node, write_node, [("patches_output", "@patches_output")])
                ]
            )
        elif self.parameters.get("extract_method") == "roi":
            subfolder = "roi_based"
            self.connect(
                [
                    (self.output_node, write_node, [("roi_output", "@roi_output")])
                ]
            )
        else:
            # No extraction: write the full-image tensor.
            self.connect(
                [
                    (self.output_node, write_node, [("output_pt_file", "@output_pt_file")])
                ]
            )
        mod_subfolder = ""
        if self.parameters.get("modality") == "t1-linear":
            mod_subfolder = "t1_linear"
        if self.parameters.get("modality") == "t1-extensive":
            mod_subfolder = "t1_extensive"
        if self.parameters.get("modality") == "pet-linear":
            mod_subfolder = "pet_linear"
        if self.parameters.get("modality") == "custom":
            mod_subfolder = "custom"
        self.connect(
            [
                (container_path, write_node, [
                    (("container", fix_join, "deeplearning_prepare_data", subfolder, mod_subfolder), "container")]),
            ]
        )

    def build_core_nodes(self):
        """Build the conversion/extraction nodes and wire them together."""
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from .deeplearning_prepare_data_utils import (
            extract_patches,
            extract_roi,
            extract_slices,
            save_as_pt,
        )

        # NIfTI -> .pt conversion. NOTE: the local node name rebinds (shadows)
        # the imported ``save_as_pt`` function after it is captured below.
        save_as_pt = npe.MapNode(
            name="save_as_pt",
            iterfield=["input_img"],
            interface=nutil.Function(
                function=save_as_pt,
                input_names=["input_img"],
                output_names=["output_file"],
            ),
        )
        # Slice extraction node.
        extract_slices_node = npe.MapNode(
            name="extract_slices",
            iterfield=["input_tensor"],
            interface=nutil.Function(
                function=extract_slices,
                input_names=["input_tensor", "slice_direction", "slice_mode"],
                output_names=["output_file_rgb", "output_file_original"],
            ),
        )
        extract_slices_node.inputs.slice_direction = self.slice_direction
        extract_slices_node.inputs.slice_mode = self.slice_mode
        # Patch extraction node.
        extract_patches_node = npe.MapNode(
            name="extract_patches",
            iterfield=["input_tensor"],
            interface=nutil.Function(
                function=extract_patches,
                input_names=["input_tensor", "patch_size", "stride_size"],
                output_names=["output_patch"],
            ),
        )
        extract_patches_node.inputs.patch_size = self.patch_size
        extract_patches_node.inputs.stride_size = self.stride_size
        # ROI extraction node.
        extract_roi_node = npe.MapNode(
            name="extract_ROI",
            iterfield=["input_tensor"],
            interface=nutil.Function(
                function=extract_roi,
                input_names=[
                    "input_tensor",
                    "masks_location",
                    "mask_pattern",
                    "cropped_input",
                    "roi_list",
                    "uncrop_output",
                ],
                output_names=["output_roi"],
            ),
        )
        extract_roi_node.inputs.masks_location = self.masks_location
        extract_roi_node.inputs.mask_pattern = self.mask_pattern
        extract_roi_node.inputs.cropped_input = not self.parameters.get(
            "use_uncropped_image"
        )
        extract_roi_node.inputs.roi_list = self.roi_list
        extract_roi_node.inputs.uncrop_output = self.parameters.get("roi_uncrop_output")
        # Wire the conversion node, then the extraction node matching the
        # requested method (or the full tensor when no extraction applies).
        self.connect(
            [
                (self.input_node, save_as_pt, [("input_nifti", "input_img")]),
            ]
        )
        if self.parameters.get("extract_method") == "slice":
            self.connect(
                [
                    (save_as_pt, extract_slices_node, [("output_file", "input_tensor")]),
                    (extract_slices_node, self.output_node, [("output_file_rgb", "slices_rgb_output")]),
                    (extract_slices_node, self.output_node, [("output_file_original", "slices_original_output")]),
                ]
            )
        elif self.parameters.get("extract_method") == "patch":
            self.connect(
                [
                    (save_as_pt, extract_patches_node, [("output_file", "input_tensor")]),
                    (extract_patches_node, self.output_node, [("output_patch", "patches_output")]),
                ]
            )
        elif self.parameters.get("extract_method") == "roi":
            self.connect(
                [
                    (save_as_pt, extract_roi_node, [("output_file", "input_tensor")]),
                    (extract_roi_node, self.output_node, [("output_roi", "roi_output")]),
                ]
            )
        else:
            self.connect(
                [
                    (save_as_pt, self.output_node, [("output_file", "output_pt_file")]),
                ]
            )
| true | true |
f71be64382d9c7cd419601ae2aa3d28d776baa3a | 2,261 | py | Python | scripts/xml2txt.py | o8r/pytorch_cpp | 70ba1e64270da6d870847c074ce33afb154f1ef8 | [
"MIT"
] | 181 | 2020-03-26T12:33:25.000Z | 2022-03-28T04:04:25.000Z | scripts/xml2txt.py | o8r/pytorch_cpp | 70ba1e64270da6d870847c074ce33afb154f1ef8 | [
"MIT"
] | 11 | 2020-07-26T13:18:50.000Z | 2022-01-09T10:04:10.000Z | scripts/xml2txt.py | o8r/pytorch_cpp | 70ba1e64270da6d870847c074ce33afb154f1ef8 | [
"MIT"
] | 38 | 2020-05-04T05:06:55.000Z | 2022-03-29T19:10:51.000Z | import os
import glob
import argparse
import xml.etree.ElementTree as ET
parser = argparse.ArgumentParser()
# Command-line options: directory of VOC XML annotations to convert,
# directory where YOLO TXT files are written, and the class-name list file.
parser.add_argument('--input_dir', type=str)
parser.add_argument('--output_dir', type=str)
parser.add_argument('--class_list', type=str)
args = parser.parse_args()
# Set Class Names
def set_class_names(class_list):
    """Read class names from *class_list*, one name per line.

    Surrounding whitespace is stripped and blank lines are skipped, so a
    stray empty line in the middle of the file no longer truncates the
    class list (the previous readline() loop stopped at the first line
    that stripped to ''). The file is closed even if reading raises.

    Args:
        class_list (str): path to a text file with one class name per line.

    Returns:
        list[str]: the class names in file order.
    """
    with open(class_list, mode='r') as f:
        return [line.strip() for line in f if line.strip()]
# Normalize Bounding Box
def normalizeBB(x_min, x_max, y_min, y_max, width, height):
    """Convert an absolute pixel bounding box to normalized coordinates.

    Returns a ``(x_center, y_center, x_range, y_range)`` tuple, each value
    expressed as a fraction of the image width/height (YOLO convention).
    """
    w = float(width)
    h = float(height)
    return (
        (x_min + x_max) * 0.5 / w,
        (y_min + y_max) * 0.5 / h,
        (x_max - x_min) / w,
        (y_max - y_min) / h,
    )
# Convert XML into TXT
def convertXML2TXT(class_names, pathI, pathO):
    """Convert one Pascal-VOC XML annotation file into a YOLO TXT file.

    Each ``<object>`` becomes one line ``class_id xc yc w h`` with
    coordinates normalized by the image size from the ``<size>`` element.

    Fixes: the original opened both files with bare ``open`` and only
    closed them at the end, leaking the handles if parsing raised;
    ``ET.parse`` accepts a path directly, and the output file is now
    managed by a ``with`` block.

    Args:
        class_names (list[str]): ordered class names; index = YOLO class id.
        pathI (str): input XML path.
        pathO (str): output TXT path.

    Raises:
        ValueError: if an object's class name is not in *class_names*.
    """
    tree = ET.parse(pathI)  # ET.parse takes a filename; no handle to manage
    root = tree.getroot()
    size = root.find('size')
    width = int(size.find('width').text)
    height = int(size.find('height').text)
    with open(pathO, mode='w') as fileO:
        for obj in root.iter('object'):
            class_name = obj.find('name').text
            class_id = class_names.index(class_name)
            BB = obj.find('bndbox')
            x_min = float(BB.find('xmin').text)
            x_max = float(BB.find('xmax').text)
            y_min = float(BB.find('ymin').text)
            y_max = float(BB.find('ymax').text)
            x_center, y_center, x_range, y_range = normalizeBB(x_min, x_max, y_min, y_max, width, height)
            fileO.write(f'{class_id} {x_center} {y_center} {x_range} {y_range}\n')
if __name__ == '__main__':
    # Collect the stem (basename without extension) of every XML annotation.
    xml_paths = glob.glob(f'{args.input_dir}/*.xml')
    fnames = [os.path.splitext(os.path.split(p)[1])[0] for p in xml_paths]
    # Load the ordered class-name list (index = YOLO class id).
    class_names = set_class_names(args.class_list)
    # Refuse to overwrite an existing output directory (exist_ok=False).
    os.makedirs(f'{args.output_dir}', exist_ok=False)
    # Convert every annotation, mirroring the input file names as .txt.
    for stem in fnames:
        convertXML2TXT(
            class_names,
            f'{args.input_dir}/{stem}.xml',
            f'{args.output_dir}/{stem}.txt',
        )
| 27.91358 | 101 | 0.632906 | import os
import glob
import argparse
import xml.etree.ElementTree as ET
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', type=str)
parser.add_argument('--output_dir', type=str)
parser.add_argument('--class_list', type=str)
args = parser.parse_args()
def set_class_names(class_list):
    """Read class names (one per line) from *class_list* into a list."""
    f = open(class_list, mode='r')
    class_names = []
    while True:
        line = f.readline().strip()
        if not line:
            # NOTE(review): readline() returns '' at EOF, but a blank line in
            # the middle of the file also strips to '' and ends the loop
            # early, silently dropping every class name after it. The handle
            # is also not closed if an exception fires before f.close().
            break
        class_names += [line]
    f.close()
    return class_names
def normalizeBB(x_min, x_max, y_min, y_max, width, height):
    """Convert an absolute pixel box to YOLO-style normalized coordinates.

    Returns (x_center, y_center, x_range, y_range), each a fraction of the
    image width/height.
    """
    x_center = (x_min + x_max) * 0.5 / float(width)
    y_center = (y_min + y_max) * 0.5 / float(height)
    x_range = (x_max - x_min) / float(width)
    y_range = (y_max - y_min) / float(height)
    return x_center, y_center, x_range, y_range
def convertXML2TXT(class_names, pathI, pathO):
    """Convert one Pascal-VOC XML annotation (*pathI*) into YOLO TXT (*pathO*).

    Each <object> element becomes one 'class_id xc yc w h' line, normalized
    by the image <size>. class_id is the index of the object's <name> in
    *class_names* (list.index raises ValueError on unknown names).
    """
    # NOTE(review): neither handle is closed if parsing/writing raises before
    # the trailing close() calls -- a `with` block would be safer.
    fileI = open(pathI, mode='r')
    fileO = open(pathO, mode='w')
    tree = ET.parse(fileI)
    root = tree.getroot()
    size = root.find('size')
    width = int(size.find('width').text)
    height = int(size.find('height').text)
    for obj in root.iter('object'):
        class_name = obj.find('name').text
        class_id = class_names.index(class_name)
        BB = obj.find('bndbox')
        x_min = float(BB.find('xmin').text)
        x_max = float(BB.find('xmax').text)
        y_min = float(BB.find('ymin').text)
        y_max = float(BB.find('ymax').text)
        x_center, y_center, x_range, y_range = normalizeBB(x_min, x_max, y_min, y_max, width, height)
        fileO.write(f'{class_id} {x_center} {y_center} {x_range} {y_range}\n')
    fileI.close()
    fileO.close()
if __name__ == '__main__':
    # Collect the stem of every XML annotation in the input directory.
    fnames = []
    for f in glob.glob(f'{args.input_dir}/*.xml'):
        fnames.append(os.path.splitext(os.path.split(f)[1])[0])
    # Load the ordered class-name list (index = YOLO class id).
    class_names = set_class_names(args.class_list)
    # exist_ok=False: deliberately fail rather than overwrite existing output.
    os.makedirs(f'{args.output_dir}', exist_ok=False)
    # Convert each annotation, mirroring the input file names as .txt.
    for f in fnames:
        pathI = f'{args.input_dir}/{f}.xml'
        pathO = f'{args.output_dir}/{f}.txt'
        convertXML2TXT(class_names, pathI, pathO)
| true | true |
f71be6b44b2393e0c5756152ad6854307836bc3b | 20,079 | py | Python | tests/test_data/test_datasets/test_kitti_dataset.py | ammaryasirnaich/mmdetection3d | 5e549546abbb2a7b43aab59e40e87599f61dcc4a | [
"Apache-2.0"
] | null | null | null | tests/test_data/test_datasets/test_kitti_dataset.py | ammaryasirnaich/mmdetection3d | 5e549546abbb2a7b43aab59e40e87599f61dcc4a | [
"Apache-2.0"
] | null | null | null | tests/test_data/test_datasets/test_kitti_dataset.py | ammaryasirnaich/mmdetection3d | 5e549546abbb2a7b43aab59e40e87599f61dcc4a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import math
import os
import tempfile
import numpy as np
import pytest
import torch
from mmdet3d.core.bbox import LiDARInstance3DBoxes, limit_period
from mmdet3d.datasets import KittiDataset
def _generate_kitti_dataset_config():
data_root = 'tests/data/kitti'
ann_file = 'tests/data/kitti/kitti_infos_train.pkl'
classes = ['Pedestrian', 'Cyclist', 'Car']
pts_prefix = 'velodyne_reduced'
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
file_client_args=dict(backend='disk')),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1.0, 1.0],
translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'),
dict(
type='PointsRangeFilter',
point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
dict(
type='DefaultFormatBundle3D',
class_names=classes,
with_label=False),
dict(type='Collect3D', keys=['points'])
])
]
modality = dict(use_lidar=True, use_camera=False)
split = 'training'
return data_root, ann_file, classes, pts_prefix, pipeline, modality, split
def _generate_kitti_multi_modality_dataset_config():
data_root = 'tests/data/kitti'
ann_file = 'tests/data/kitti/kitti_infos_train.pkl'
classes = ['Pedestrian', 'Cyclist', 'Car']
pts_prefix = 'velodyne_reduced'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
file_client_args=dict(backend='disk')),
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(type='Resize', multiscale_mode='value', keep_ratio=True),
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(
type='PointsRangeFilter',
point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
dict(
type='DefaultFormatBundle3D',
class_names=classes,
with_label=False),
dict(type='Collect3D', keys=['points', 'img'])
])
]
modality = dict(use_lidar=True, use_camera=True)
split = 'training'
return data_root, ann_file, classes, pts_prefix, pipeline, modality, split
def test_getitem():
    """Check KittiDataset.__getitem__ for LiDAR-only and multi-modality setups.

    Uses a fixed numpy seed so the random augmentations (ObjectNoise,
    RandomFlip3D, GlobalRotScaleTrans) are deterministic, then compares the
    augmented ground truth against pre-computed expected values.
    """
    np.random.seed(0)
    data_root, ann_file, classes, pts_prefix, \
        _, modality, split = _generate_kitti_dataset_config()
    # Training pipeline with augmentations (unlike the shared TTA pipeline).
    pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4,
            file_client_args=dict(backend='disk')),
        dict(
            type='LoadAnnotations3D',
            with_bbox_3d=True,
            with_label_3d=True,
            file_client_args=dict(backend='disk')),
        dict(
            type='ObjectSample',
            db_sampler=dict(
                data_root='tests/data/kitti/',
                # in coordinate system refactor, this test file is modified
                info_path='tests/data/kitti/kitti_dbinfos_train.pkl',
                rate=1.0,
                prepare=dict(
                    filter_by_difficulty=[-1],
                    filter_by_min_points=dict(Pedestrian=10)),
                classes=['Pedestrian', 'Cyclist', 'Car'],
                sample_groups=dict(Pedestrian=6))),
        dict(
            type='ObjectNoise',
            num_try=100,
            translation_std=[1.0, 1.0, 0.5],
            global_rot_range=[0.0, 0.0],
            rot_range=[-0.78539816, 0.78539816]),
        dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.78539816, 0.78539816],
            scale_ratio_range=[0.95, 1.05]),
        dict(
            type='PointsRangeFilter',
            point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
        dict(
            type='ObjectRangeFilter',
            point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
        dict(type='PointShuffle'),
        dict(
            type='DefaultFormatBundle3D',
            class_names=['Pedestrian', 'Cyclist', 'Car']),
        dict(
            type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
    ]
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    data = kitti_dataset[0]
    points = data['points']._data
    gt_bboxes_3d = data['gt_bboxes_3d']._data
    gt_labels_3d = data['gt_labels_3d']._data
    expected_gt_bboxes_3d = torch.tensor(
        [[9.5081, -5.2269, -1.1370, 1.2288, 0.4915, 1.9353, 1.9988]])
    expected_gt_labels_3d = torch.tensor([0])
    rot_matrix = data['img_metas']._data['pcd_rotation']
    rot_angle = data['img_metas']._data['pcd_rotation_angle']
    horizontal_flip = data['img_metas']._data['pcd_horizontal_flip']
    vertical_flip = data['img_metas']._data['pcd_vertical_flip']
    expected_rot_matrix = torch.tensor([[0.8018, 0.5976, 0.0000],
                                        [-0.5976, 0.8018, 0.0000],
                                        [0.0000, 0.0000, 1.0000]])
    expected_rot_angle = 0.6404654291602163
    noise_angle = 0.20247319
    assert torch.allclose(expected_rot_matrix, rot_matrix, atol=1e-4)
    assert math.isclose(expected_rot_angle, rot_angle, abs_tol=1e-4)
    assert horizontal_flip is True
    assert vertical_flip is False
    # after coord system refactor: replay the recorded rotation/flip on the
    # expected boxes before comparing with the augmented ground truth.
    expected_gt_bboxes_3d[:, :3] = \
        expected_gt_bboxes_3d[:, :3] @ rot_matrix @ rot_matrix
    expected_gt_bboxes_3d[:, -1:] = -np.pi - expected_gt_bboxes_3d[:, -1:] \
        + 2 * rot_angle - 2 * noise_angle
    expected_gt_bboxes_3d[:, -1:] = limit_period(
        expected_gt_bboxes_3d[:, -1:], period=np.pi * 2)
    assert points.shape == (780, 4)
    assert torch.allclose(
        gt_bboxes_3d.tensor, expected_gt_bboxes_3d, atol=1e-4)
    assert torch.all(gt_labels_3d == expected_gt_labels_3d)
    # test multi-modality KITTI dataset
    np.random.seed(0)
    point_cloud_range = [0, -40, -3, 70.4, 40, 1]
    img_norm_cfg = dict(
        mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
    multi_modality_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4),
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
        dict(
            type='Resize',
            img_scale=[(640, 192), (2560, 768)],
            multiscale_mode='range',
            keep_ratio=True),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.78539816, 0.78539816],
            scale_ratio_range=[0.95, 1.05],
            translation_std=[0.2, 0.2, 0.2]),
        dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
        dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
        dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
        dict(type='PointShuffle'),
        dict(type='Normalize', **img_norm_cfg),
        dict(type='Pad', size_divisor=32),
        dict(type='DefaultFormatBundle3D', class_names=classes),
        dict(
            type='Collect3D',
            keys=['points', 'img', 'gt_bboxes_3d', 'gt_labels_3d']),
    ]
    modality = dict(use_lidar=True, use_camera=True)
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 multi_modality_pipeline, classes, modality)
    data = kitti_dataset[0]
    img = data['img']._data
    lidar2img = data['img_metas']._data['lidar2img']
    expected_lidar2img = np.array(
        [[6.02943726e+02, -7.07913330e+02, -1.22748432e+01, -1.70942719e+02],
         [1.76777252e+02, 8.80879879e+00, -7.07936157e+02, -1.02568634e+02],
         [9.99984801e-01, -1.52826728e-03, -5.29071223e-03, -3.27567995e-01],
         [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
    assert img.shape[:] == (3, 416, 1344)
    assert np.allclose(lidar2img, expected_lidar2img)
def test_evaluate():
    """Check KittiDataset.evaluate with a single detection and mAP metric.

    Skipped on CPU-only machines: evaluation requires CUDA here.
    """
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    # One Pedestrian detection in LiDAR coordinates: (x, y, z, dx, dy, dz, yaw).
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]]))
    labels_3d = torch.tensor([
        0,
    ])
    scores_3d = torch.tensor([0.5])
    metric = ['mAP']
    result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    ap_dict = kitti_dataset.evaluate([result], metric)
    # Expected AP11 values precomputed for this fixture.
    assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_easy'],
                      3.0303030303030307)
    assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_moderate'],
                      3.0303030303030307)
    assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_hard'],
                      3.0303030303030307)
def test_show():
    """Check KittiDataset.show writes the expected visualization files.

    Covers four variants: LiDAR-only with/without an explicit eval pipeline,
    and multi-modality (adds projected-image PNGs) with/without one.
    """
    from os import path as osp
    import mmcv
    from mmdet3d.core.bbox import LiDARInstance3DBoxes
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(
        data_root, ann_file, split=split, modality=modality, pipeline=pipeline)
    # Five detections spanning all three classes.
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[46.1218, -4.6496, -0.9275, 0.5316, 1.4442, 1.7450, 1.1749],
             [33.3189, 0.1981, 0.3136, 0.5656, 1.2301, 1.7985, 1.5723],
             [46.1366, -4.6404, -0.9510, 0.5162, 1.6501, 1.7540, 1.3778],
             [33.2646, 0.2297, 0.3446, 0.5746, 1.3365, 1.7947, 1.5430],
             [58.9079, 16.6272, -1.5829, 1.5656, 3.9313, 1.4899, 1.5505]]))
    scores_3d = torch.tensor([0.1815, 0.1663, 0.5792, 0.2194, 0.2780])
    labels_3d = torch.tensor([0, 0, 1, 1, 2])
    result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d)
    results = [result]
    kitti_dataset.show(results, temp_dir, show=False)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()
    # test show with pipeline
    eval_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4),
        dict(
            type='DefaultFormatBundle3D',
            class_names=classes,
            with_label=False),
        dict(type='Collect3D', keys=['points'])
    ]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()
    # test multi-modality show
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    _, _, _, _, multi_modality_pipeline, modality, _ = \
        _generate_kitti_multi_modality_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 multi_modality_pipeline, classes, modality)
    kitti_dataset.show(results, temp_dir, show=False)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    img_file_path = osp.join(temp_dir, '000000', '000000_img.png')
    img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')
    img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    mmcv.check_file_exist(img_file_path)
    mmcv.check_file_exist(img_pred_path)
    mmcv.check_file_exist(img_gt_file)
    tmp_dir.cleanup()
    # test multi-modality show with pipeline
    eval_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4),
        dict(type='LoadImageFromFile'),
        dict(
            type='DefaultFormatBundle3D',
            class_names=classes,
            with_label=False),
        dict(type='Collect3D', keys=['points', 'img'])
    ]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    img_file_path = osp.join(temp_dir, '000000', '000000_img.png')
    img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')
    img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    mmcv.check_file_exist(img_file_path)
    mmcv.check_file_exist(img_pred_path)
    mmcv.check_file_exist(img_gt_file)
    tmp_dir.cleanup()
def test_format_results():
    """Check KittiDataset.format_results converts LiDAR boxes to KITTI format.

    Compares every field of the produced annotation (name, truncation,
    occlusion, alpha, 2D bbox, dimensions, camera-frame location, rotation_y,
    score, sample index) against precomputed expected values.
    """
    from mmdet3d.core.bbox import LiDARInstance3DBoxes
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    # coord system refactor
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))
    labels_3d = torch.tensor([
        0,
    ])
    scores_3d = torch.tensor([0.5])
    result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    results = [result]
    result_files, tmp_dir = kitti_dataset.format_results(results)
    expected_name = np.array(['Pedestrian'])
    expected_truncated = np.array([0.])
    expected_occluded = np.array([0])
    # coord sys refactor
    expected_alpha = np.array(-3.3410306 + np.pi)
    expected_bbox = np.array([[710.443, 144.00221, 820.29114, 307.58667]])
    expected_dimensions = np.array([[1.2, 1.89, 0.48]])
    expected_location = np.array([[1.8399826, 1.4700007, 8.410018]])
    expected_rotation_y = np.array([0.0100])
    expected_score = np.array([0.5])
    expected_sample_idx = np.array([0])
    assert np.all(result_files[0]['name'] == expected_name)
    assert np.allclose(result_files[0]['truncated'], expected_truncated)
    assert np.all(result_files[0]['occluded'] == expected_occluded)
    assert np.allclose(result_files[0]['alpha'], expected_alpha, 1e-3)
    assert np.allclose(result_files[0]['bbox'], expected_bbox)
    assert np.allclose(result_files[0]['dimensions'], expected_dimensions)
    assert np.allclose(result_files[0]['location'], expected_location)
    assert np.allclose(result_files[0]['rotation_y'], expected_rotation_y,
                       1e-3)
    assert np.allclose(result_files[0]['score'], expected_score)
    assert np.allclose(result_files[0]['sample_idx'], expected_sample_idx)
    tmp_dir.cleanup()
def test_bbox2result_kitti():
    """Check bbox2result_kitti writes submission files for 3D results.

    Also checks the empty-detection case still produces a (blank) per-sample
    submission file.
    """
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))
    labels_3d = torch.tensor([
        0,
    ])
    scores_3d = torch.tensor([0.5])
    result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    results = [result]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_kitti_result_dir = tmp_dir.name
    det_annos = kitti_dataset.bbox2result_kitti(
        results, classes, submission_prefix=temp_kitti_result_dir)
    expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')
    expected_name = np.array(['Pedestrian'])
    expected_dimensions = np.array([1.2000, 1.8900, 0.4800])
    # coord system refactor (reverse sign)
    expected_rotation_y = 0.0100
    expected_score = np.array([0.5])
    assert np.all(det_annos[0]['name'] == expected_name)
    assert np.allclose(det_annos[0]['rotation_y'], expected_rotation_y, 1e-3)
    assert np.allclose(det_annos[0]['score'], expected_score)
    assert np.allclose(det_annos[0]['dimensions'], expected_dimensions)
    assert os.path.exists(expected_file_path)
    tmp_dir.cleanup()
    # Empty detections must still produce a per-sample submission file.
    tmp_dir = tempfile.TemporaryDirectory()
    temp_kitti_result_dir = tmp_dir.name
    boxes_3d = LiDARInstance3DBoxes(torch.tensor([]))
    labels_3d = torch.tensor([])
    scores_3d = torch.tensor([])
    empty_result = dict(
        boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    results = [empty_result]
    det_annos = kitti_dataset.bbox2result_kitti(
        results, classes, submission_prefix=temp_kitti_result_dir)
    expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')
    assert os.path.exists(expected_file_path)
    tmp_dir.cleanup()
def test_bbox2result_kitti2d():
    """Check bbox2result_kitti2d flattens per-class 2D boxes to KITTI annos.

    Input is (num_classes=2, num_boxes=2, 5) where the last column is the
    score; the output concatenates classes in order.
    """
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    bboxes = np.array([[[46.1218, -4.6496, -0.9275, 0.5316, 0.5],
                        [33.3189, 0.1981, 0.3136, 0.5656, 0.5]],
                       [[46.1366, -4.6404, -0.9510, 0.5162, 0.5],
                        [33.2646, 0.2297, 0.3446, 0.5746, 0.5]]])
    det_annos = kitti_dataset.bbox2result_kitti2d([bboxes], classes)
    expected_name = np.array(
        ['Pedestrian', 'Pedestrian', 'Cyclist', 'Cyclist'])
    expected_bbox = np.array([[46.1218, -4.6496, -0.9275, 0.5316],
                              [33.3189, 0.1981, 0.3136, 0.5656],
                              [46.1366, -4.6404, -0.951, 0.5162],
                              [33.2646, 0.2297, 0.3446, 0.5746]])
    expected_score = np.array([0.5, 0.5, 0.5, 0.5])
    assert np.all(det_annos[0]['name'] == expected_name)
    assert np.allclose(det_annos[0]['bbox'], expected_bbox)
    assert np.allclose(det_annos[0]['score'], expected_score)
| 41.91858 | 79 | 0.619852 |
import math
import os
import tempfile
import numpy as np
import pytest
import torch
from mmdet3d.core.bbox import LiDARInstance3DBoxes, limit_period
from mmdet3d.datasets import KittiDataset
def _generate_kitti_dataset_config():
    """Return (data_root, ann_file, classes, pts_prefix, pipeline, modality,
    split) for the LiDAR-only KittiDataset tests."""
    data_root = 'tests/data/kitti'
    ann_file = 'tests/data/kitti/kitti_infos_train.pkl'
    classes = ['Pedestrian', 'Cyclist', 'Car']
    pts_prefix = 'velodyne_reduced'
    # Identity test-time-augmentation pipeline (no rotation/scale/flip).
    pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4,
            file_client_args=dict(backend='disk')),
        dict(
            type='MultiScaleFlipAug3D',
            img_scale=(1333, 800),
            pts_scale_ratio=1,
            flip=False,
            transforms=[
                dict(
                    type='GlobalRotScaleTrans',
                    rot_range=[0, 0],
                    scale_ratio_range=[1.0, 1.0],
                    translation_std=[0, 0, 0]),
                dict(type='RandomFlip3D'),
                dict(
                    type='PointsRangeFilter',
                    point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
                dict(
                    type='DefaultFormatBundle3D',
                    class_names=classes,
                    with_label=False),
                dict(type='Collect3D', keys=['points'])
            ])
    ]
    modality = dict(use_lidar=True, use_camera=False)
    split = 'training'
    return data_root, ann_file, classes, pts_prefix, pipeline, modality, split
def _generate_kitti_multi_modality_dataset_config():
    """Return (data_root, ann_file, classes, pts_prefix, pipeline, modality,
    split) for the LiDAR + camera KittiDataset tests."""
    data_root = 'tests/data/kitti'
    ann_file = 'tests/data/kitti/kitti_infos_train.pkl'
    classes = ['Pedestrian', 'Cyclist', 'Car']
    pts_prefix = 'velodyne_reduced'
    img_norm_cfg = dict(
        mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
    # Identity TTA pipeline with an added image branch (load/normalize/pad).
    pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4,
            file_client_args=dict(backend='disk')),
        dict(type='LoadImageFromFile'),
        dict(
            type='MultiScaleFlipAug3D',
            img_scale=(1333, 800),
            pts_scale_ratio=1,
            flip=False,
            transforms=[
                dict(type='Resize', multiscale_mode='value', keep_ratio=True),
                dict(
                    type='GlobalRotScaleTrans',
                    rot_range=[0, 0],
                    scale_ratio_range=[1., 1.],
                    translation_std=[0, 0, 0]),
                dict(type='RandomFlip3D'),
                dict(type='Normalize', **img_norm_cfg),
                dict(type='Pad', size_divisor=32),
                dict(
                    type='PointsRangeFilter',
                    point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
                dict(
                    type='DefaultFormatBundle3D',
                    class_names=classes,
                    with_label=False),
                dict(type='Collect3D', keys=['points', 'img'])
            ])
    ]
    modality = dict(use_lidar=True, use_camera=True)
    split = 'training'
    return data_root, ann_file, classes, pts_prefix, pipeline, modality, split
def test_getitem():
    """End-to-end check of ``KittiDataset.__getitem__``.

    First runs the lidar-only training pipeline (with GT-sampling, object
    noise, flip and global rot/scale augmentations) and verifies the
    augmentation metadata plus the augmented boxes/labels/points against
    values pinned for ``np.random.seed(0)``.  Then runs a multi-modality
    (points + image) pipeline and checks the padded image shape and the
    ``lidar2img`` projection matrix.
    """
    np.random.seed(0)
    data_root, ann_file, classes, pts_prefix, \
        _, modality, split = _generate_kitti_dataset_config()
    # lidar-only training pipeline with stochastic augmentations
    pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4,
            file_client_args=dict(backend='disk')),
        dict(
            type='LoadAnnotations3D',
            with_bbox_3d=True,
            with_label_3d=True,
            file_client_args=dict(backend='disk')),
        dict(
            type='ObjectSample',
            db_sampler=dict(
                data_root='tests/data/kitti/',
                info_path='tests/data/kitti/kitti_dbinfos_train.pkl',
                rate=1.0,
                prepare=dict(
                    filter_by_difficulty=[-1],
                    filter_by_min_points=dict(Pedestrian=10)),
                classes=['Pedestrian', 'Cyclist', 'Car'],
                sample_groups=dict(Pedestrian=6))),
        dict(
            type='ObjectNoise',
            num_try=100,
            translation_std=[1.0, 1.0, 0.5],
            global_rot_range=[0.0, 0.0],
            rot_range=[-0.78539816, 0.78539816]),
        dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.78539816, 0.78539816],
            scale_ratio_range=[0.95, 1.05]),
        dict(
            type='PointsRangeFilter',
            point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
        dict(
            type='ObjectRangeFilter',
            point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
        dict(type='PointShuffle'),
        dict(
            type='DefaultFormatBundle3D',
            class_names=['Pedestrian', 'Cyclist', 'Car']),
        dict(
            type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
    ]
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    data = kitti_dataset[0]
    points = data['points']._data
    gt_bboxes_3d = data['gt_bboxes_3d']._data
    gt_labels_3d = data['gt_labels_3d']._data
    # expected values pinned for the fixed random seed above
    expected_gt_bboxes_3d = torch.tensor(
        [[9.5081, -5.2269, -1.1370, 1.2288, 0.4915, 1.9353, 1.9988]])
    expected_gt_labels_3d = torch.tensor([0])
    rot_matrix = data['img_metas']._data['pcd_rotation']
    rot_angle = data['img_metas']._data['pcd_rotation_angle']
    horizontal_flip = data['img_metas']._data['pcd_horizontal_flip']
    vertical_flip = data['img_metas']._data['pcd_vertical_flip']
    expected_rot_matrix = torch.tensor([[0.8018, 0.5976, 0.0000],
                                        [-0.5976, 0.8018, 0.0000],
                                        [0.0000, 0.0000, 1.0000]])
    expected_rot_angle = 0.6404654291602163
    noise_angle = 0.20247319
    assert torch.allclose(expected_rot_matrix, rot_matrix, atol=1e-4)
    assert math.isclose(expected_rot_angle, rot_angle, abs_tol=1e-4)
    assert horizontal_flip is True
    assert vertical_flip is False
    # undo/redo the recorded augmentations on the expected box so it can be
    # compared against the augmented ground truth
    expected_gt_bboxes_3d[:, :3] = \
        expected_gt_bboxes_3d[:, :3] @ rot_matrix @ rot_matrix
    expected_gt_bboxes_3d[:, -1:] = -np.pi - expected_gt_bboxes_3d[:, -1:] \
        + 2 * rot_angle - 2 * noise_angle
    expected_gt_bboxes_3d[:, -1:] = limit_period(
        expected_gt_bboxes_3d[:, -1:], period=np.pi * 2)
    assert points.shape == (780, 4)
    assert torch.allclose(
        gt_bboxes_3d.tensor, expected_gt_bboxes_3d, atol=1e-4)
    assert torch.all(gt_labels_3d == expected_gt_labels_3d)
    # --- multi-modality (points + image) pipeline ---
    np.random.seed(0)
    point_cloud_range = [0, -40, -3, 70.4, 40, 1]
    img_norm_cfg = dict(
        mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
    multi_modality_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4),
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
        dict(
            type='Resize',
            img_scale=[(640, 192), (2560, 768)],
            multiscale_mode='range',
            keep_ratio=True),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.78539816, 0.78539816],
            scale_ratio_range=[0.95, 1.05],
            translation_std=[0.2, 0.2, 0.2]),
        dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
        dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
        dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
        dict(type='PointShuffle'),
        dict(type='Normalize', **img_norm_cfg),
        dict(type='Pad', size_divisor=32),
        dict(type='DefaultFormatBundle3D', class_names=classes),
        dict(
            type='Collect3D',
            keys=['points', 'img', 'gt_bboxes_3d', 'gt_labels_3d']),
    ]
    modality = dict(use_lidar=True, use_camera=True)
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 multi_modality_pipeline, classes, modality)
    data = kitti_dataset[0]
    img = data['img']._data
    lidar2img = data['img_metas']._data['lidar2img']
    # projection matrix from the KITTI calibration of sample 000000
    expected_lidar2img = np.array(
        [[6.02943726e+02, -7.07913330e+02, -1.22748432e+01, -1.70942719e+02],
         [1.76777252e+02, 8.80879879e+00, -7.07936157e+02, -1.02568634e+02],
         [9.99984801e-01, -1.52826728e-03, -5.29071223e-03, -3.27567995e-01],
         [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
    assert img.shape[:] == (3, 416, 1344)
    assert np.allclose(lidar2img, expected_lidar2img)
def test_evaluate():
    """Run ``KittiDataset.evaluate`` on a single detection and check the
    resulting 3D AP11 values (requires the CUDA evaluation kernels, hence
    the GPU skip).
    """
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    # one Pedestrian detection with a moderate confidence score
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]]))
    labels_3d = torch.tensor([
        0,
    ])
    scores_3d = torch.tensor([0.5])
    metric = ['mAP']
    result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    ap_dict = kitti_dataset.evaluate([result], metric)
    # 1/33 ~= 3.03: the single hit under 11-point interpolation
    assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_easy'],
                      3.0303030303030307)
    assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_moderate'],
                      3.0303030303030307)
    assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_hard'],
                      3.0303030303030307)
def test_show():
    """Check ``KittiDataset.show`` writes the expected visualization files.

    Exercises four variants: lidar-only with the default pipeline, lidar-only
    with an explicit eval pipeline, and the same pair for the multi-modality
    dataset (which additionally writes image png files).
    """
    from os import path as osp
    import mmcv
    from mmdet3d.core.bbox import LiDARInstance3DBoxes
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(
        data_root, ann_file, split=split, modality=modality, pipeline=pipeline)
    # five detections spanning all three classes
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[46.1218, -4.6496, -0.9275, 0.5316, 1.4442, 1.7450, 1.1749],
             [33.3189, 0.1981, 0.3136, 0.5656, 1.2301, 1.7985, 1.5723],
             [46.1366, -4.6404, -0.9510, 0.5162, 1.6501, 1.7540, 1.3778],
             [33.2646, 0.2297, 0.3446, 0.5746, 1.3365, 1.7947, 1.5430],
             [58.9079, 16.6272, -1.5829, 1.5656, 3.9313, 1.4899, 1.5505]]))
    scores_3d = torch.tensor([0.1815, 0.1663, 0.5792, 0.2194, 0.2780])
    labels_3d = torch.tensor([0, 0, 1, 1, 2])
    result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d)
    results = [result]
    # 1) lidar-only, default pipeline
    kitti_dataset.show(results, temp_dir, show=False)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()
    # 2) lidar-only, explicit eval pipeline
    eval_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4),
        dict(
            type='DefaultFormatBundle3D',
            class_names=classes,
            with_label=False),
        dict(type='Collect3D', keys=['points'])
    ]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()
    # 3) multi-modality, default pipeline — also writes image pngs
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    _, _, _, _, multi_modality_pipeline, modality, _ = \
        _generate_kitti_multi_modality_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 multi_modality_pipeline, classes, modality)
    kitti_dataset.show(results, temp_dir, show=False)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    img_file_path = osp.join(temp_dir, '000000', '000000_img.png')
    img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')
    img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    mmcv.check_file_exist(img_file_path)
    mmcv.check_file_exist(img_pred_path)
    mmcv.check_file_exist(img_gt_file)
    tmp_dir.cleanup()
    # 4) multi-modality, explicit eval pipeline
    eval_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='LIDAR',
            load_dim=4,
            use_dim=4),
        dict(type='LoadImageFromFile'),
        dict(
            type='DefaultFormatBundle3D',
            class_names=classes,
            with_label=False),
        dict(type='Collect3D', keys=['points', 'img'])
    ]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
    pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
    gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
    pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
    img_file_path = osp.join(temp_dir, '000000', '000000_img.png')
    img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')
    img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    mmcv.check_file_exist(img_file_path)
    mmcv.check_file_exist(img_pred_path)
    mmcv.check_file_exist(img_gt_file)
    tmp_dir.cleanup()
def test_format_results():
    """Check ``KittiDataset.format_results`` converts one lidar-frame
    detection into the KITTI camera-frame annotation format (name, alpha,
    2D bbox, dimensions, location, rotation_y, ...).
    """
    from mmdet3d.core.bbox import LiDARInstance3DBoxes
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    # single Pedestrian box in lidar coordinates (x, y, z, dx, dy, dz, yaw)
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))
    labels_3d = torch.tensor([
        0,
    ])
    scores_3d = torch.tensor([0.5])
    result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    results = [result]
    result_files, tmp_dir = kitti_dataset.format_results(results)
    # expected values after projecting into the camera frame of sample 000000
    expected_name = np.array(['Pedestrian'])
    expected_truncated = np.array([0.])
    expected_occluded = np.array([0])
    expected_alpha = np.array(-3.3410306 + np.pi)
    expected_bbox = np.array([[710.443, 144.00221, 820.29114, 307.58667]])
    expected_dimensions = np.array([[1.2, 1.89, 0.48]])
    expected_location = np.array([[1.8399826, 1.4700007, 8.410018]])
    expected_rotation_y = np.array([0.0100])
    expected_score = np.array([0.5])
    expected_sample_idx = np.array([0])
    assert np.all(result_files[0]['name'] == expected_name)
    assert np.allclose(result_files[0]['truncated'], expected_truncated)
    assert np.all(result_files[0]['occluded'] == expected_occluded)
    assert np.allclose(result_files[0]['alpha'], expected_alpha, 1e-3)
    assert np.allclose(result_files[0]['bbox'], expected_bbox)
    assert np.allclose(result_files[0]['dimensions'], expected_dimensions)
    assert np.allclose(result_files[0]['location'], expected_location)
    assert np.allclose(result_files[0]['rotation_y'], expected_rotation_y,
                       1e-3)
    assert np.allclose(result_files[0]['score'], expected_score)
    assert np.allclose(result_files[0]['sample_idx'], expected_sample_idx)
    tmp_dir.cleanup()
def test_bbox2result_kitti():
    """Check ``KittiDataset.bbox2result_kitti`` writes per-frame submission
    txt files and returns annotation dicts, both for a single detection and
    for an empty detection result.
    """
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    boxes_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))
    labels_3d = torch.tensor([
        0,
    ])
    scores_3d = torch.tensor([0.5])
    result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    results = [result]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_kitti_result_dir = tmp_dir.name
    det_annos = kitti_dataset.bbox2result_kitti(
        results, classes, submission_prefix=temp_kitti_result_dir)
    expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')
    expected_name = np.array(['Pedestrian'])
    expected_dimensions = np.array([1.2000, 1.8900, 0.4800])
    expected_rotation_y = 0.0100
    expected_score = np.array([0.5])
    assert np.all(det_annos[0]['name'] == expected_name)
    assert np.allclose(det_annos[0]['rotation_y'], expected_rotation_y, 1e-3)
    assert np.allclose(det_annos[0]['score'], expected_score)
    assert np.allclose(det_annos[0]['dimensions'], expected_dimensions)
    assert os.path.exists(expected_file_path)
    tmp_dir.cleanup()
    # an empty detection result must still produce the submission file
    tmp_dir = tempfile.TemporaryDirectory()
    temp_kitti_result_dir = tmp_dir.name
    boxes_3d = LiDARInstance3DBoxes(torch.tensor([]))
    labels_3d = torch.tensor([])
    scores_3d = torch.tensor([])
    empty_result = dict(
        boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
    results = [empty_result]
    det_annos = kitti_dataset.bbox2result_kitti(
        results, classes, submission_prefix=temp_kitti_result_dir)
    expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')
    assert os.path.exists(expected_file_path)
    tmp_dir.cleanup()
def test_bbox2result_kitti2d():
    """Check ``KittiDataset.bbox2result_kitti2d`` flattens per-class 2D
    detections (rows of ``[x1, y1, x2, y2, score]``) into one annotation
    dict with class names repeated per detection.
    """
    data_root, ann_file, classes, pts_prefix, \
        pipeline, modality, split = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
                                 pipeline, classes, modality)
    # shape (num_classes=2, num_dets=2, 5): two boxes each for the first
    # two classes (Pedestrian, Cyclist)
    bboxes = np.array([[[46.1218, -4.6496, -0.9275, 0.5316, 0.5],
                        [33.3189, 0.1981, 0.3136, 0.5656, 0.5]],
                       [[46.1366, -4.6404, -0.9510, 0.5162, 0.5],
                        [33.2646, 0.2297, 0.3446, 0.5746, 0.5]]])
    det_annos = kitti_dataset.bbox2result_kitti2d([bboxes], classes)
    expected_name = np.array(
        ['Pedestrian', 'Pedestrian', 'Cyclist', 'Cyclist'])
    expected_bbox = np.array([[46.1218, -4.6496, -0.9275, 0.5316],
                              [33.3189, 0.1981, 0.3136, 0.5656],
                              [46.1366, -4.6404, -0.951, 0.5162],
                              [33.2646, 0.2297, 0.3446, 0.5746]])
    expected_score = np.array([0.5, 0.5, 0.5, 0.5])
    assert np.all(det_annos[0]['name'] == expected_name)
    assert np.allclose(det_annos[0]['bbox'], expected_bbox)
    assert np.allclose(det_annos[0]['score'], expected_score)
| true | true |
f71be6eab3547a134de4d6606f48330f24f83fbe | 8,080 | py | Python | scripts/build_load_data.py | millingermarkus/pypsa-eur | 2e39a21299036c0cec86fe4707de06a42ec15d62 | [
"MIT"
] | null | null | null | scripts/build_load_data.py | millingermarkus/pypsa-eur | 2e39a21299036c0cec86fe4707de06a42ec15d62 | [
"MIT"
] | null | null | null | scripts/build_load_data.py | millingermarkus/pypsa-eur | 2e39a21299036c0cec86fe4707de06a42ec15d62 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
This rule downloads the load data from `Open Power System Data Time series <https://data.open-power-system-data.org/time_series/>`_. For all countries in the network, the per country load timeseries with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. After filling small gaps linearly and large gaps by copying time-slice of a given period, the load data is exported to a ``.csv`` file.
Relevant Settings
-----------------
.. code:: yaml
snapshots:
load:
interpolate_limit:
time_shift_for_large_gaps:
manual_adjustments:
.. seealso::
Documentation of the configuration file ``config.yaml`` at
:ref:`load_cf`
Inputs
------
Outputs
-------
- ``resource/time_series_60min_singleindex_filtered.csv``:
"""
import logging
logger = logging.getLogger(__name__)
from _helpers import configure_logging
import pandas as pd
import numpy as np
import dateutil
from pandas import Timedelta as Delta
def load_timeseries(fn, years, countries, powerstatistics=True):
"""
Read load data from OPSD time-series package version 2020-10-06.
Parameters
----------
years : None or slice()
Years for which to read load data (defaults to
slice("2018","2019"))
fn : str
File name or url location (file format .csv)
countries : listlike
Countries for which to read load data.
powerstatistics: bool
Whether the electricity consumption data of the ENTSOE power
statistics (if true) or of the ENTSOE transparency map (if false)
should be parsed.
Returns
-------
load : pd.DataFrame
Load time-series with UTC timestamps x ISO-2 countries
"""
logger.info(f"Retrieving load data from '{fn}'.")
pattern = 'power_statistics' if powerstatistics else '_transparency'
pattern = f'_load_actual_entsoe_{pattern}'
rename = lambda s: s[:-len(pattern)]
date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True)
return (pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)
.filter(like=pattern)
.rename(columns=rename)
.dropna(how="all", axis=0)
.rename(columns={'GB_UKM' : 'GB'})
.filter(items=countries)
.loc[years])
def consecutive_nans(ds):
    """For each entry of ``ds``, return the length of the run of consecutive
    NaNs it belongs to (0 for valid entries)."""
    is_gap = ds.isnull()
    # valid entries advance the counter, so every NaN run shares one group id
    run_ids = ds.notnull().astype(int).cumsum()[is_gap]
    run_lengths = is_gap.astype(int).groupby(run_ids).transform('sum')
    return run_lengths.fillna(0)
def fill_large_gaps(ds, shift):
    """Fill gaps in ``ds`` with the values recorded ``shift`` earlier.

    Warns when a gap is longer than ``shift`` (such gaps cannot be filled
    completely by a single copied time slice).
    """
    shift = Delta(shift)
    shift_hours = shift / np.timedelta64(1, 'h')
    if (consecutive_nans(ds) > shift_hours).any():
        logger.warning('There exist gaps larger then the time shift used for '
                       'copying time slices.')
    # align the series with itself, displaced forward in time by `shift`
    displaced = pd.Series(ds.values, ds.index + shift).reindex_like(ds)
    return ds.where(ds.notnull(), displaced)
def nan_statistics(df):
    """Summarise missing data per column of a datetime-indexed DataFrame.

    Returns
    -------
    pd.DataFrame
        Indexed by column name, with columns:
        - ``total``: overall number of NaNs,
        - ``consecutive``: length of the longest consecutive NaN run,
        - ``max_total_per_month``: largest NaN count in any calendar month.
    """
    def max_consecutive_nans(ds):
        # valid entries advance the counter, so each NaN run forms one group
        return (ds.isnull().astype(int)
                .groupby(ds.notnull().astype(int).cumsum())
                .sum().max())
    consecutive = df.apply(max_consecutive_nans)
    total = df.isnull().sum()
    # 'MS' (month start) replaces the deprecated lowercase 'm' alias; only the
    # maximum over calendar months is used, so the bin labels do not matter.
    max_total_per_month = df.isnull().resample('MS').sum().max()
    return pd.concat([total, consecutive, max_total_per_month],
                     keys=['total', 'consecutive', 'max_total_per_month'], axis=1)
def copy_timeslice(load, cntry, start, stop, delta):
    """In-place: overwrite ``load[cntry]`` between ``start`` and ``stop``
    (inclusive) with the values recorded ``delta`` earlier.

    Silently does nothing when the source slice, the target timestamp or
    the country column is missing.
    """
    start, stop = pd.Timestamp(start), pd.Timestamp(stop)
    source_available = (start - delta) in load.index
    target_available = stop in load.index and cntry in load
    if source_available and target_available:
        donor = load.loc[start - delta:stop - delta, cntry]
        load.loc[start:stop, cntry] = donor.values
def manual_adjustment(load, powerstatistics, countries=None):
    """
    Adjust gaps manual for load data from OPSD time-series package.

    1. For the ENTSOE power statistics load data (if powerstatistics is True)

    Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the
    same load curve as Serbia and Albania the same as Macdedonia, both scaled
    by the corresponding ratio of total energy consumptions reported by
    IEA Data browser [0] for the year 2013.

    2. For the ENTSOE transparency load data (if powerstatistics is False)

    Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the
    same load curve as Montenegro, scaled by the corresponding ratio of total
    energy consumptions reported by IEA Data browser [0] for the year 2016.

    [0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons

    Parameters
    ----------
    load : pd.DataFrame
        Load time-series with UTC timestamps x ISO-2 countries
    powerstatistics: bool
        Whether argument load comprises the electricity consumption data of
        the ENTSOE power statistics or of the ENTSOE transparency map
    countries : listlike, optional
        Countries that should appear in the output. Previously this was read
        from a module-level global, which raised NameError when the function
        was imported as a library; if not given, fall back to that global
        (set by the ``__main__`` block) or to the columns of ``load``.

    Returns
    -------
    load : pd.DataFrame
        Manual adjusted and interpolated load time-series with UTC
        timestamps x ISO-2 countries
    """
    if countries is None:
        # backwards-compatible fallback: the snakemake __main__ block defines
        # a module-level `countries`; as a library, use the present columns
        countries = globals().get('countries', list(load.columns))

    if powerstatistics:
        if 'MK' in load.columns:
            if 'AL' not in load.columns or load.AL.isnull().values.all():
                load['AL'] = load['MK'] * (4.1 / 7.4)
        if 'RS' in load.columns:
            if 'KV' not in load.columns or load.KV.isnull().values.all():
                load['KV'] = load['RS'] * (4.8 / 27.)
        # patch known outages / DST artefacts by copying nearby time slices
        copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1))
        copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2))
        copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1))
        copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1))
        # is a WE, so take WE before
        copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1))
        copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1))
        copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1))
        # whole january missing
        copy_timeslice(load, 'GB', '2009-12-31 23:00', '2010-01-31 23:00', Delta(days=-364))
    else:
        if 'ME' in load:
            if 'AL' not in load and 'AL' in countries:
                load['AL'] = load.ME * (5.7/2.9)
            if 'MK' not in load and 'MK' in countries:
                load['MK'] = load.ME * (6.7/2.9)
        copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1))
    return load
if __name__ == "__main__":
if 'snakemake' not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_load_data')
configure_logging(snakemake)
config = snakemake.config
powerstatistics = config['load']['power_statistics']
interpolate_limit = config['load']['interpolate_limit']
countries = config['countries']
snapshots = pd.date_range(freq='h', **config['snapshots'])
years = slice(snapshots[0], snapshots[-1])
time_shift = config['load']['time_shift_for_large_gaps']
load = load_timeseries(snakemake.input[0], years, countries, powerstatistics)
if config['load']['manual_adjustments']:
load = manual_adjustment(load, powerstatistics)
logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
load = load.interpolate(method='linear', limit=interpolate_limit)
logger.info("Filling larger gaps by copying time-slices of period "
f"'{time_shift}'.")
load = load.apply(fill_large_gaps, shift=time_shift)
assert not load.isna().any().any(), (
'Load data contains nans. Adjust the parameters '
'`time_shift_for_large_gaps` or modify the `manual_adjustment` function '
'for implementing the needed load data modifications.')
load.to_csv(snakemake.output[0])
| 35.752212 | 415 | 0.653713 |
import logging
logger = logging.getLogger(__name__)
from _helpers import configure_logging
import pandas as pd
import numpy as np
import dateutil
from pandas import Timedelta as Delta
def load_timeseries(fn, years, countries, powerstatistics=True):
logger.info(f"Retrieving load data from '{fn}'.")
pattern = 'power_statistics' if powerstatistics else '_transparency'
pattern = f'_load_actual_entsoe_{pattern}'
rename = lambda s: s[:-len(pattern)]
date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True)
return (pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)
.filter(like=pattern)
.rename(columns=rename)
.dropna(how="all", axis=0)
.rename(columns={'GB_UKM' : 'GB'})
.filter(items=countries)
.loc[years])
def consecutive_nans(ds):
return (ds.isnull().astype(int)
.groupby(ds.notnull().astype(int).cumsum()[ds.isnull()])
.transform('sum').fillna(0))
def fill_large_gaps(ds, shift):
shift = Delta(shift)
nhours = shift / np.timedelta64(1, 'h')
if (consecutive_nans(ds) > nhours).any():
logger.warning('There exist gaps larger then the time shift used for '
'copying time slices.')
time_shift = pd.Series(ds.values, ds.index + shift)
return ds.where(ds.notnull(), time_shift.reindex_like(ds))
def nan_statistics(df):
def max_consecutive_nans(ds):
return (ds.isnull().astype(int)
.groupby(ds.notnull().astype(int).cumsum())
.sum().max())
consecutive = df.apply(max_consecutive_nans)
total = df.isnull().sum()
max_total_per_month = df.isnull().resample('m').sum().max()
return pd.concat([total, consecutive, max_total_per_month],
keys=['total', 'consecutive', 'max_total_per_month'], axis=1)
def copy_timeslice(load, cntry, start, stop, delta):
start = pd.Timestamp(start)
stop = pd.Timestamp(stop)
if start-delta in load.index and stop in load.index and cntry in load:
load.loc[start:stop, cntry] = load.loc[start-delta:stop-delta, cntry].values
def manual_adjustment(load, powerstatistics):
if powerstatistics:
if 'MK' in load.columns:
if 'AL' not in load.columns or load.AL.isnull().values.all():
load['AL'] = load['MK'] * (4.1 / 7.4)
if 'RS' in load.columns:
if 'KV' not in load.columns or load.KV.isnull().values.all():
load['KV'] = load['RS'] * (4.8 / 27.)
copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1))
copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2))
copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1))
copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1))
copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1))
copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1))
copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1))
copy_timeslice(load, 'GB', '2009-12-31 23:00', '2010-01-31 23:00', Delta(days=-364))
else:
if 'ME' in load:
if 'AL' not in load and 'AL' in countries:
load['AL'] = load.ME * (5.7/2.9)
if 'MK' not in load and 'MK' in countries:
load['MK'] = load.ME * (6.7/2.9)
copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1))
return load
if __name__ == "__main__":
if 'snakemake' not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_load_data')
configure_logging(snakemake)
config = snakemake.config
powerstatistics = config['load']['power_statistics']
interpolate_limit = config['load']['interpolate_limit']
countries = config['countries']
snapshots = pd.date_range(freq='h', **config['snapshots'])
years = slice(snapshots[0], snapshots[-1])
time_shift = config['load']['time_shift_for_large_gaps']
load = load_timeseries(snakemake.input[0], years, countries, powerstatistics)
if config['load']['manual_adjustments']:
load = manual_adjustment(load, powerstatistics)
logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
load = load.interpolate(method='linear', limit=interpolate_limit)
logger.info("Filling larger gaps by copying time-slices of period "
f"'{time_shift}'.")
load = load.apply(fill_large_gaps, shift=time_shift)
assert not load.isna().any().any(), (
'Load data contains nans. Adjust the parameters '
'`time_shift_for_large_gaps` or modify the `manual_adjustment` function '
'for implementing the needed load data modifications.')
load.to_csv(snakemake.output[0])
| true | true |
f71be7330e1eaec34340d30bcdded8315ca8fdf3 | 5,076 | py | Python | tests/unit/driver/generic/test_generic_sync_driver.py | haccht/scrapli | 89589ee78c36296ee67813fcbedebee9b41b6bca | [
"MIT"
] | 1 | 2020-02-09T17:43:43.000Z | 2020-02-09T17:43:43.000Z | tests/unit/driver/generic/test_generic_sync_driver.py | haccht/scrapli | 89589ee78c36296ee67813fcbedebee9b41b6bca | [
"MIT"
] | null | null | null | tests/unit/driver/generic/test_generic_sync_driver.py | haccht/scrapli | 89589ee78c36296ee67813fcbedebee9b41b6bca | [
"MIT"
] | null | null | null | import pytest
from scrapli.driver.generic.base_driver import ReadCallback
from scrapli.exceptions import ScrapliValueError
def test_get_prompt(monkeypatch, sync_generic_driver):
    """Driver.get_prompt should pass through the channel's prompt string."""
    # stupid test w/ the patch, but want coverage and in the future maybe the driver actually
    # does something to the prompt it gets from the channel
    monkeypatch.setattr("scrapli.channel.sync_channel.Channel.get_prompt", lambda x: "scrapli>")
    assert sync_generic_driver.get_prompt() == "scrapli>"
def test__send_command(monkeypatch, sync_generic_driver):
    """_send_command should wrap the channel's (raw, processed) output in a
    successful Response."""
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver._send_command(command="nada")
    assert actual_response.failed is False
    assert actual_response.result == "processed"
    assert actual_response.raw_result == b"raw"
def test__send_command_no_base_transport_args(sync_generic_driver):
    """_send_command must raise when the driver has no base transport args."""
    sync_generic_driver._base_transport_args = None
    with pytest.raises(ScrapliValueError):
        sync_generic_driver._send_command(command="nada")
def test_send_command(monkeypatch, sync_generic_driver):
    """Public send_command should return a successful Response built from the
    channel's (raw, processed) output."""
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_command(command="nada")
    assert actual_response.failed is False
    assert actual_response.result == "processed"
    assert actual_response.raw_result == b"raw"
def test_send_commands(monkeypatch, sync_generic_driver):
    """send_commands should return a MultiResponse with one successful
    Response per command."""
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_commands(commands=["nada", "nada2"])
    assert len(actual_response) == 2
    assert actual_response.failed is False
    assert actual_response[0].failed is False
    assert actual_response[0].result == "processed"
    assert actual_response[0].raw_result == b"raw"
def test_send_commands_from_file(fs, monkeypatch, real_ssh_commands_file_path, sync_generic_driver):
    """send_commands_from_file should read commands from a file (served here
    via the pyfakefs `fs` fixture) and return successful Responses."""
    fs.add_real_file(source_path=real_ssh_commands_file_path, target_path="/commands")
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_commands_from_file(file="commands")
    assert actual_response.failed is False
    assert actual_response[0].result == "processed"
    assert actual_response[0].raw_result == b"raw"
def test_send_and_read(monkeypatch, sync_generic_driver):
    """send_and_read should wrap the channel's send_input_and_read output in
    a successful Response."""
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_input_and_read",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_and_read(channel_input="nada")
    assert actual_response.failed is False
    assert actual_response.result == "processed"
    assert actual_response.raw_result == b"raw"
def test_send_and_read_no_base_transport_args(sync_generic_driver):
    """send_and_read must raise when the driver has no base transport args."""
    sync_generic_driver._base_transport_args = None
    with pytest.raises(ScrapliValueError):
        sync_generic_driver.send_and_read(channel_input="nada")
def test_send_interactive(monkeypatch, sync_generic_driver):
    """send_interactive should wrap the channel's send_inputs_interact output
    in a successful Response."""
    monkeypatch.setattr(
        "scrapli.channel.sync_channel.Channel.send_inputs_interact",
        lambda _, **kwargs: (b"raw", b"processed"),
    )
    actual_response = sync_generic_driver.send_interactive(interact_events=[("nada", "scrapli>")])
    assert actual_response.failed is False
    assert actual_response.result == "processed"
    assert actual_response.raw_result == b"raw"
def test_send_interact_no_base_transport_args(sync_generic_driver):
    """send_interactive must raise when the driver has no base transport args."""
    sync_generic_driver._base_transport_args = None
    with pytest.raises(ScrapliValueError):
        sync_generic_driver.send_interactive(interact_events=[])
def test_readcallback_basic(monkeypatch, sync_generic_driver):
    """read_callback should fire each matching ReadCallback exactly once
    here: the first is `only_once`, the second is marked `complete` and ends
    the read loop after it fires."""
    def _read(cls):
        # channel always "reads" the same prompt
        return b"rtr1#"

    def _write(cls, channel_input, redacted=False):
        # swallow writes; nothing to send anywhere
        return

    monkeypatch.setattr("scrapli.channel.sync_channel.Channel.read", _read)
    monkeypatch.setattr("scrapli.channel.sync_channel.Channel.write", _write)

    # counters let us assert how many times each callback actually ran
    callback_one_counter = 0
    callback_two_counter = 0

    def callback_one(cls, read_output):
        nonlocal callback_one_counter
        callback_one_counter += 1

    def callback_two(cls, read_output):
        nonlocal callback_two_counter
        callback_two_counter += 1

    callbacks = [
        ReadCallback(
            contains="rtr1#",
            callback=callback_one,
            name="call1",
            case_insensitive=False,
            only_once=True,
        ),
        ReadCallback(
            contains_re=r"^rtr1#",
            callback=callback_two,
            complete=True,
        ),
    ]
    sync_generic_driver.read_callback(callbacks=callbacks, initial_input="nada")
    assert callback_one_counter == 1
    assert callback_two_counter == 1
| 35.496503 | 100 | 0.731678 | import pytest
from scrapli.driver.generic.base_driver import ReadCallback
from scrapli.exceptions import ScrapliValueError
def test_get_prompt(monkeypatch, sync_generic_driver):
monkeypatch.setattr("scrapli.channel.sync_channel.Channel.get_prompt", lambda x: "scrapli>")
assert sync_generic_driver.get_prompt() == "scrapli>"
def test__send_command(monkeypatch, sync_generic_driver):
monkeypatch.setattr(
"scrapli.channel.sync_channel.Channel.send_input",
lambda _, **kwargs: (b"raw", b"processed"),
)
actual_response = sync_generic_driver._send_command(command="nada")
assert actual_response.failed is False
assert actual_response.result == "processed"
assert actual_response.raw_result == b"raw"
def test__send_command_no_base_transport_args(sync_generic_driver):
sync_generic_driver._base_transport_args = None
with pytest.raises(ScrapliValueError):
sync_generic_driver._send_command(command="nada")
def test_send_command(monkeypatch, sync_generic_driver):
monkeypatch.setattr(
"scrapli.channel.sync_channel.Channel.send_input",
lambda _, **kwargs: (b"raw", b"processed"),
)
actual_response = sync_generic_driver.send_command(command="nada")
assert actual_response.failed is False
assert actual_response.result == "processed"
assert actual_response.raw_result == b"raw"
def test_send_commands(monkeypatch, sync_generic_driver):
monkeypatch.setattr(
"scrapli.channel.sync_channel.Channel.send_input",
lambda _, **kwargs: (b"raw", b"processed"),
)
actual_response = sync_generic_driver.send_commands(commands=["nada", "nada2"])
assert len(actual_response) == 2
assert actual_response.failed is False
assert actual_response[0].failed is False
assert actual_response[0].result == "processed"
assert actual_response[0].raw_result == b"raw"
def test_send_commands_from_file(fs, monkeypatch, real_ssh_commands_file_path, sync_generic_driver):
fs.add_real_file(source_path=real_ssh_commands_file_path, target_path="/commands")
monkeypatch.setattr(
"scrapli.channel.sync_channel.Channel.send_input",
lambda _, **kwargs: (b"raw", b"processed"),
)
actual_response = sync_generic_driver.send_commands_from_file(file="commands")
assert actual_response.failed is False
assert actual_response[0].result == "processed"
assert actual_response[0].raw_result == b"raw"
def test_send_and_read(monkeypatch, sync_generic_driver):
monkeypatch.setattr(
"scrapli.channel.sync_channel.Channel.send_input_and_read",
lambda _, **kwargs: (b"raw", b"processed"),
)
actual_response = sync_generic_driver.send_and_read(channel_input="nada")
assert actual_response.failed is False
assert actual_response.result == "processed"
assert actual_response.raw_result == b"raw"
def test_send_and_read_no_base_transport_args(sync_generic_driver):
sync_generic_driver._base_transport_args = None
with pytest.raises(ScrapliValueError):
sync_generic_driver.send_and_read(channel_input="nada")
def test_send_interactive(monkeypatch, sync_generic_driver):
monkeypatch.setattr(
"scrapli.channel.sync_channel.Channel.send_inputs_interact",
lambda _, **kwargs: (b"raw", b"processed"),
)
actual_response = sync_generic_driver.send_interactive(interact_events=[("nada", "scrapli>")])
assert actual_response.failed is False
assert actual_response.result == "processed"
assert actual_response.raw_result == b"raw"
def test_send_interact_no_base_transport_args(sync_generic_driver):
sync_generic_driver._base_transport_args = None
with pytest.raises(ScrapliValueError):
sync_generic_driver.send_interactive(interact_events=[])
def test_readcallback_basic(monkeypatch, sync_generic_driver):
def _read(cls):
return b"rtr1#"
def _write(cls, channel_input, redacted=False):
return
monkeypatch.setattr("scrapli.channel.sync_channel.Channel.read", _read)
monkeypatch.setattr("scrapli.channel.sync_channel.Channel.write", _write)
callback_one_counter = 0
callback_two_counter = 0
def callback_one(cls, read_output):
nonlocal callback_one_counter
callback_one_counter += 1
def callback_two(cls, read_output):
nonlocal callback_two_counter
callback_two_counter += 1
callbacks = [
ReadCallback(
contains="rtr1#",
callback=callback_one,
name="call1",
case_insensitive=False,
only_once=True,
),
ReadCallback(
contains_re=r"^rtr1#",
callback=callback_two,
complete=True,
),
]
sync_generic_driver.read_callback(callbacks=callbacks, initial_input="nada")
assert callback_one_counter == 1
assert callback_two_counter == 1
| true | true |
f71be7afa0a994c58e2f0076275e35e23af1c2a5 | 16,119 | py | Python | utils_twc/kg.py | daiki-kimura/commonsense-rl | 5513926957b6501ce9cfa46f77f8f2c1c4892fa5 | [
"Apache-2.0"
] | null | null | null | utils_twc/kg.py | daiki-kimura/commonsense-rl | 5513926957b6501ce9cfa46f77f8f2c1c4892fa5 | [
"Apache-2.0"
] | null | null | null | utils_twc/kg.py | daiki-kimura/commonsense-rl | 5513926957b6501ce9cfa46f77f8f2c1c4892fa5 | [
"Apache-2.0"
] | null | null | null | import sys
import networkx as nx
import logging
import json
import requests
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from utils_twc.generic import escape_entities
# Logging formatting
FORMAT = '%(asctime)s %(message)s'  # timestamp followed by the raw message
logging.basicConfig(format=FORMAT, level='INFO', stream=sys.stdout)
# Module-level cache used by construct_kg(): keys 'graph', 'triplets',
# 'entities' are populated on first load and returned on later calls.
kg = {}
# Cache of per-source path data keyed by source entity.
# NOTE(review): not referenced anywhere in this file — presumably used by
# other modules importing it; confirm before removing.
source_paths= defaultdict(dict)
def shortest_path_subgraph(kg_graph, prev_graph, nodes, inventory_entities=None, command_entities=None, path_len=2, add_all_path=False):
    """Grow ``prev_graph`` with ``nodes`` and short-path 'relatedTo' links.

    Builds a working graph from the union of ``prev_graph`` and the induced
    subgraph of ``kg_graph`` over the previous nodes plus ``nodes``, strips
    self-loops, then adds a ``relatedTo`` edge between inventory and command
    entities whose shortest-path distance in ``kg_graph`` is between 1 and
    ``path_len`` (inclusive).

    :param kg_graph: full background knowledge graph (networkx graph)
    :param prev_graph: previously accumulated world graph
    :param nodes: newly observed entity names to include
    :param inventory_entities: entities currently held by the agent
    :param command_entities: entities mentioned in admissible commands
    :param path_len: maximum shortest-path distance to link across
    :param add_all_path: unused; kept for interface compatibility
    :return: the composed world graph
    """
    if inventory_entities is None:
        inventory_entities = []
    if command_entities is None:
        command_entities = []
    # Get non-neighbor nodes: nodes without edges between them
    world_graph = kg_graph.subgraph(list(prev_graph.nodes)+nodes).copy()
    world_graph = nx.compose(prev_graph,world_graph)
    world_graph.remove_edges_from(nx.selfloop_edges(world_graph))
    if path_len < 2:
        # Distance-1 neighbours are already connected by the subgraph above.
        return world_graph
    triplets = []
    # Add command related relations
    pruned_entities = list(set(command_entities)-set(inventory_entities))
    if pruned_entities:
        for src_et in inventory_entities:
            for tgt_et in pruned_entities:
                if src_et != tgt_et:
                    try:
                        pair_dist = nx.shortest_path_length(kg_graph, source=src_et, target=tgt_et)
                    except nx.NetworkXNoPath:
                        # Unreachable pair: distance 0 fails the >= 1 test below.
                        pair_dist = 0
                    if pair_dist >= 1 and pair_dist <= path_len:
                        triplets.append([src_et, tgt_et, 'relatedTo'])
    else: # no items in the pruned entities, won't happen
        # NOTE(review): this branch iterates command_entities as a list of
        # entity *lists* (nested loops over each element), unlike the flat
        # iteration above — confirm the intended element type if this path
        # ever becomes reachable.
        for entities in command_entities:
            for src_et in entities:
                for tgt_et in entities:
                    if src_et != tgt_et:
                        try:
                            pair_dist = nx.shortest_path_length(kg_graph, source=src_et, target=tgt_et)
                        except nx.NetworkXNoPath:
                            pair_dist=0
                        if pair_dist >= 1 and pair_dist <= path_len:
                            triplets.append([src_et, tgt_et, 'relatedTo'])
    world_graph, _= add_triplets_to_graph(world_graph, triplets)
    return world_graph
def construct_graph(triplets):
    """Build a directed graph from ``[head, tail, relation]`` triplets.

    Entity and relation strings are lower-cased and stripped. Parallel
    relations between the same pair are accumulated space-separated in the
    edge's ``relation`` attribute.

    :param triplets: iterable of ``[e1, e2, r]`` string triples
    :return: ``(graph, entities)`` where ``entities`` maps each normalized
        entity name to itself
    """
    # The loop here was a verbatim copy of add_triplets_to_graph starting
    # from an empty graph; delegate so the two code paths stay consistent.
    return add_triplets_to_graph(nx.DiGraph(), triplets)
def add_triplets_to_graph(graph, triplets):
    """Insert ``[head, tail, relation]`` triplets into an existing graph.

    Entity and relation strings are lower-cased and stripped. A new relation
    between an already-connected pair is appended (space-separated) to the
    edge's ``relation`` attribute instead of creating a parallel edge.

    :param graph: networkx directed graph to mutate
    :param triplets: iterable of ``[e1, e2, r]`` string triples
    :return: ``(graph, entities)`` where ``entities`` maps each known
        entity name to itself
    """
    entities = dict(graph.nodes.data())
    for head, tail, relation in triplets:
        head = head.lower().strip()
        tail = tail.lower().strip()
        relation = relation.lower().strip()
        # Register both endpoints as nodes on first sight.
        for node in (head, tail):
            if node not in entities:
                graph.add_node(node)
                entities[node] = node
        if graph.has_edge(head, tail):
            existing = graph.edges[head, tail]['relation']
            if relation not in existing:
                graph.edges[head, tail]['relation'] = existing + ' ' + relation
        else:
            graph.add_edge(head, tail, relation=relation)
    return graph, entities
def draw_graph(graph, title="cleanup", show_relation=True, weights=None, pos=None):
    """Render ``graph`` with matplotlib via networkx.

    :param graph: networkx graph to draw
    :param title: label passed through to ``nx.draw``
    :param show_relation: when True, annotate edges with their ``relation``
        attribute
    :param weights: optional array-like of per-node colour values (the code
        calls ``.tolist()`` / ``np.min`` on it, so an ndarray is expected)
    :param pos: optional precomputed node layout; a spring layout is used
        otherwise
    """
    if not pos:
        pos = nx.spring_layout(graph, k=0.95)
    # BUGFIX: `if weights:` on a numpy array of length > 1 raises
    # "ValueError: The truth value of an array ... is ambiguous"; test
    # against None instead.
    if weights is not None:
        nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000, node_color=weights.tolist(),
                vmin=np.min(weights), vmax=np.max(weights), node_shape='o', alpha=0.9, font_size=8, with_labels=True,
                label=title, cmap='Blues')
    else:
        nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000, node_color='pink',
                node_shape='o', alpha=0.9, font_size=8, with_labels=True, label=title)
    if show_relation:
        # Edge labels come from the 'relation' attribute set at build time.
        nx.draw_networkx_edge_labels(graph, pos, font_size=6, font_color='red',
                                     edge_labels=nx.get_edge_attributes(graph, 'relation'))
def draw_graph_colormap(graph,node_weights, showbar=False, cmap='YlGnBu'):
    """Draw ``graph`` with nodes coloured by attention weight.

    :param graph: networkx graph to draw
    :param node_weights: mapping from node id/name to a scalar weight; every
        node in ``graph`` must be a key
    :param showbar: when True, attach a colourbar for the weight scale
    :param cmap: matplotlib colormap name
    """
    # node_weights: maps node id/name to attention weights
    pos = nx.spring_layout(graph, k=0.95)
    weights = []
    for node in graph.nodes:
        weights.append(node_weights[node])
    # cmap = plt.cm.YlGnBu#RdBu
    cmap = plt.get_cmap(cmap)
    vmin = np.min(weights)
    vmax = np.max(weights)
    nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000,
            node_color=weights, vmin=vmin, vmax=vmax, cmap=cmap,
            node_shape='o', alpha=0.9, font_size=8, with_labels=True, label='Attention')
    # ScalarMappable gives the colourbar the same normalisation as the nodes;
    # the empty _A array is the standard workaround for colorbar() requiring
    # a mappable with data.
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
    sm._A = []
    if showbar:
        plt.colorbar(sm)
    plt.show()
def construct_kg(filename: str, print_every=1e6, cache_load=True, logger=logging.getLogger(__name__)) -> (nx.DiGraph, list, set):
    """Load a whitespace-separated ``e1 r e2`` triplet file into a graph.

    Results are memoised in the module-level ``kg`` dict, so with
    ``cache_load=True`` the file is read at most once per process and later
    calls return the cached graph regardless of ``filename``.

    :param filename: path to the triplet file; falls back to
        ``./kg/conceptnet/kg.txt`` if it does not exist
    :param print_every: emit a '*' progress marker every this many lines
    :param logger: unused; kept for interface compatibility
    :return: ``(graph, triplets, entities)`` — an undirected graph *view*,
        the raw triplet list, and the entity-name dict
    """
    # access edges with graph.edges.data('relation')
    if 'graph' in kg and cache_load:
        return kg['graph'], kg['triplets'], kg['entities']
    path = Path(filename)
    if not path.exists():
        filename = './kg/conceptnet/kg.txt'
    triplets = []
    with open(filename, 'r') as fp:
        for idx, line in enumerate(fp):
            # rsplit() with no separator splits on any whitespace run.
            e1, r, e2 = line.rstrip("\n").rsplit()
            triplets.append([e1.lower().strip(), e2.lower().strip(), r.lower().strip()])
            if idx % print_every == 0:
                print("*",end='')
    [graph, entities] = construct_graph(triplets)
    graph = graph.to_undirected(as_view=True) # Since a->b ==> b->a
    if cache_load:
        kg['graph'] = graph
        kg['triplets'] = triplets
        kg['entities'] = entities
    return graph, triplets, entities
class RelationExtractor:
    """Extracts (subject, relation, object) triplets from game observations
    using a Stanford CoreNLP OpenIE server and folds them into a graph."""

    def __init__(self, tokenizer, openie_url="http://localhost:9000/"):
        """
        :param tokenizer: project tokenizer providing ``clean_string``,
            ``nlp_eval`` and ``ignore_list``
        :param openie_url: server url for Stanford Core NLP Open IE
        """
        self.tokenizer = tokenizer
        self.openie_url = openie_url
        self.kg_vocab = {}    # entity token -> integer id
        self.agent_loc = ''   # room the agent was last placed in

    def call_stanford_openie(self, sentence):
        """POST ``sentence`` to the OpenIE server and return the parsed
        JSON response (a dict with a 'sentences' list)."""
        # URL-encoded form of {"annotators": "openie"}.
        querystring = {
            "properties": "%7B%22annotators%22%3A%20%22openie%22%7D",
            "pipelineLanguage": "en"}
        response = requests.request("POST", self.openie_url, data=sentence, params=querystring)
        response = json.JSONDecoder().decode(response.text)
        return response

    def fetch_triplets(self, text, current_graph, prev_action=None):
        """Update ``current_graph`` with triplets extracted from ``text``.

        Tracks the agent's location across calls (``self.agent_loc``) so that
        room-to-room direction edges can be added, prunes edges describing
        the previous room, and re-attaches the previous room's subgraph.

        :param text: raw observation text
        :param current_graph: networkx DiGraph to mutate
        :param prev_action: last action string, used to infer direction edges
        :return: ``(current_graph, add_rules)`` — the mutated graph and the
            list of triplets that were considered for insertion
        """
        triplets = []
        remove = []
        prev_remove = []
        link = []
        c_id = len(self.kg_vocab.keys())
        obs = self.tokenizer.clean_string(text, preprocess=True)
        dirs = ['north', 'south', 'east', 'west']
        obs = str(obs)
        doc = self.tokenizer.nlp_eval(obs)
        sents = {}
        try:
            sents = self.call_stanford_openie(doc.text)['sentences']
        # BUGFIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; catch Exception so connection,
        # JSON-decode and key errors still degrade gracefully.
        except Exception:
            print("Error in connecting to Stanford CoreNLP OpenIE Server")
        for ov in sents:
            triple = ov['openie']
            for tr in triple:
                h, r, t = tr['subject'].lower(), tr['relation'].lower(), tr['object'].lower()
                # Normalize first-person / location phrasing.
                if h == 'we':
                    h = 'you'
                if r == 'are in':
                    r = "'ve entered"
                if h == 'it':
                    break
                triplets.append((h, r, t))
        # First pass over triplets: detect the current room, mark transient
        # relations for removal, and collect "see"-style links to the room.
        room = ""
        room_set = False
        for rule in triplets:
            h, r, t = rule
            if 'entered' in r or 'are in' in r or 'walked' in r:
                prev_remove.append(r)
                if not room_set:
                    room = t
                    room_set = True
            if 'should' in r:
                prev_remove.append(r)
            if 'see' in r or 'make out' in r:
                link.append((r, t))
                remove.append(r)
        prev_room = self.agent_loc
        self.agent_loc = room
        add_rules = []
        # If the last action moved in a compass direction, connect the rooms.
        if prev_action is not None:
            for d in dirs:
                if d in prev_action and room != "":
                    add_rules.append((prev_room, d + ' of', room))
        prev_room_subgraph = None
        prev_you_subgraph = None
        # Sentence pass: record exits mentioned alongside a direction.
        for sent in doc.sents:
            sent = sent.text
            if sent == ',' or sent == 'hm .':
                continue
            if 'exit' in sent or 'entranceway' in sent:
                for d in dirs:
                    if d in sent:
                        triplets.append((room, 'has', 'exit to ' + d))
        if prev_room != "":
            # Detach 'you' from the previous room and capture the components
            # containing each, so the room's facts survive while the agent's
            # stale links can be dropped.
            # NOTE(review): remove_edge raises if the ('you', prev_room) edge
            # is absent — appears to rely on it always existing; confirm.
            graph_copy = current_graph.copy()
            graph_copy.remove_edge('you', prev_room)
            con_cs = [graph_copy.subgraph(c) for c in nx.weakly_connected_components(graph_copy)]
            for con_c in con_cs:
                if prev_room in con_c.nodes:
                    prev_room_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)
                if 'you' in con_c.nodes:
                    prev_you_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)
        for l in link:
            add_rules.append((room, l[0], l[1]))
        for rule in triplets:
            h, r, t = rule
            if r == 'is in':
                if t == 'room':
                    t = room
            if r not in remove:
                add_rules.append((h, r, t))
        # Drop edges whose relation was flagged as stale for this turn.
        edges = list(current_graph.edges)
        for edge in edges:
            r = 'relatedTo'
            if 'relation' in current_graph[edge[0]][edge[1]]:
                r = current_graph[edge[0]][edge[1]]['relation']
            if r in prev_remove:
                current_graph.remove_edge(*edge)
        if prev_you_subgraph is not None:
            current_graph.remove_edges_from(prev_you_subgraph.edges)
        # Insert the surviving rules, assigning vocab ids to new entities and
        # skipping anything containing a tokenizer ignore token.
        for rule in add_rules:
            u = '_'.join(str(rule[0]).split())
            v = '_'.join(str(rule[2]).split())
            if u != 'it' and u not in self.kg_vocab:
                self.kg_vocab[u] = c_id
                c_id += 1
            if v != 'it' and v not in self.kg_vocab:
                self.kg_vocab[v] = c_id
                c_id += 1
            skip_flag = False
            for skip_token in self.tokenizer.ignore_list:
                if skip_token in u or skip_token in v:
                    skip_flag = True
            if u != 'it' and v != 'it' and not skip_flag:
                r = str(rule[1]).lower()
                if not rule[1] or rule[1] == '':
                    r = 'relatedTo'
                current_graph.add_edge(str(rule[0]).lower(), str(rule[2]).lower(), relation=r)
        # Re-attach the previous room's facts after the pruning above.
        if prev_room_subgraph is not None:
            current_graph.add_edges_from(prev_room_subgraph.edges)
        return current_graph, add_rules
def khop_neighbor_graph(graph, entities, cutoff=1, max_khop_degree=None):
    """Return the subgraph induced by ``entities`` and everything reachable
    from each of them within ``cutoff`` hops.

    An entity's neighbourhood is skipped entirely when ``max_khop_degree``
    is set (non-zero) and the neighbourhood exceeds that size.
    """
    reachable = []
    for entity in entities:
        neighbourhood = nx.single_source_shortest_path(graph, entity, cutoff=cutoff).keys()
        # Falsy max_khop_degree (None or 0) means "no size limit".
        if not max_khop_degree or len(neighbourhood) <= max_khop_degree:
            reachable.extend(neighbourhood)
    return graph.subgraph(set(entities) | set(reachable))
def ego_graph_seed_expansion(graph, seed, radius, undirected=True, max_degree=None):
    """Breadth-first expansion of ``seed`` out to ``radius`` hops.

    Nodes whose neighbour set exceeds ``max_degree`` (when given) do not
    contribute their neighbours to the expansion.

    :return: subgraph of ``graph`` induced by the collected nodes
    """
    search_graph = graph.to_undirected() if undirected else graph
    frontier = set(seed)
    collected = set(seed)
    for _ in range(radius):
        next_frontier = set()
        for node in frontier:
            neighbours = {n for n in search_graph[node]}
            if max_degree is None or len(neighbours) <= max_degree:
                next_frontier |= neighbours
        collected |= next_frontier
        frontier = next_frontier
    return graph.subgraph(collected)
def shortest_path_seed_expansion(graph, seed, cutoff=None, undirected=True, keep_all=True):
    """Expand ``seed`` with the nodes on shortest paths between seed pairs.

    :param cutoff: keep only paths with at most this many nodes (None = all)
    :param undirected: search an undirected view and skip symmetric pairs
    :param keep_all: collect every shortest path per pair, not just one
    :return: subgraph of ``graph`` induced by the collected nodes
    """
    nodes = set(seed)
    seed = list(seed)
    search_graph = graph.to_undirected() if undirected else graph
    for i in range(len(seed)):
        # Undirected: each unordered pair once; directed: all ordered pairs.
        lo = i + 1 if undirected else 0
        for j in range(lo, len(seed)):
            try:
                if keep_all:
                    for path in nx.all_shortest_paths(search_graph, seed[i], seed[j]):
                        if cutoff is None or len(path) <= cutoff:
                            nodes.update(path)
                else:
                    path = nx.shortest_path(search_graph, seed[i], seed[j])
                    if cutoff is None or len(path) <= cutoff:
                        nodes.update(path)
            except nx.NetworkXNoPath:
                continue
    return graph.subgraph(nodes)
def load_manual_graphs(path):
    """Load every ``conceptnet_manual_subgraph-<game_id>.tsv`` under ``path``.

    :param path: directory searched recursively
    :return: dict mapping game id to ``{'graph', 'triplets', 'entities'}``;
        empty (after printing 'None Found.') when the directory is missing
    """
    path = Path(path)
    manual_world_graphs = {}
    if not path.exists():
        print('None Found.')
        return manual_world_graphs
    for file in path.rglob("conceptnet_manual_subgraph-*.tsv"):
        # Game id is the token between the last '-' and the extension.
        game_id = str(file).split('-')[-1].split('.')[0]
        graph, triplets, entities = construct_kg(file, cache_load=False)
        manual_world_graphs[game_id] = {
            'graph': graph,
            'triplets': triplets,
            'entities': entities,
        }
    print(' DONE')
    return manual_world_graphs
def kg_match(extractor, target_entities, kg_entities):
    """Union of ``extractor`` matches for each target entity against the
    escaped knowledge-graph entity set.

    :param extractor: callable ``(entity, kg_entities) -> set`` of matches
    """
    kg_entities = escape_entities(kg_entities)
    matched = set()
    for entity in target_entities:
        matched |= extractor(entity.lower().strip(), kg_entities)
    return matched
def save_graph_tsv(graph, path):
    """Write ``graph`` to ``path`` as tab-separated ``e1<TAB>r<TAB>e2`` rows,
    one row per space-separated relation on each edge."""
    relation_map = nx.get_edge_attributes(graph, 'relation')
    with open(path, 'w') as handle:
        for n1, n2 in graph.edges:
            for rel in relation_map[n1, n2].split():
                handle.write(f'{n1}\t{rel}\t{n2}\n')
if __name__ == '__main__':
    # Manual smoke test: extract triplets from a sample TextWorld
    # observation via a remote OpenIE server.
    from utils_twc import extractor
    from utils_twc.nlp import Tokenizer

    tk_extractor = extractor.get_extractor('max')
    tokenizer = Tokenizer(extractor=tk_extractor)
    # NOTE(review): hard-coded internal server URL; requires that host to be
    # reachable for this script to produce triplets.
    rel_extract = RelationExtractor(tokenizer,openie_url='http://iqa962.sl.cloud9.ibm.com:9000/')
    # text = 'On the table, you see an apple, a hat, a key and an umbrella. '
    text = "You've just walked into a Living Room. You try to gain information on your " \
           "surroundings by using a technique you call looking. You can see a closet. " \
           "You idly wonder how they came up with the name TextWorld for this place. " \
           "It's pretty fitting. A closed standard looking antique trunk is in the room. " \
           "You can see a table. The table is usual. On the table you see an apple, a mug, " \
           "a newspaper, a note, a hat and a pencil. You smell a sickening smell, and follow " \
           "it to a couch. The couch is standard. But the thing is empty. Hm. Oh well You see a " \
           "gleam over in a corner, where you can see a tv stand. The tv stand is ordinary. " \
           "On the tv stand you can make out a tv. You don't like doors? Why not try going east, " \
           "that entranceway is unguarded. You are carrying nothing."
    sents = text
    # clauses = clausie.clausie(text)
    # propositions = clausie.extract_propositions(clauses)
    # sents = ''
    # for prop in propositions:
    #     sent = clausie.proposition_text_str(prop)
    #     sents += sent
    #     print(sent)
    graph, add_rules = rel_extract.fetch_triplets(sents, nx.DiGraph())
    print(add_rules)
| 37.927059 | 136 | 0.578262 | import sys
import networkx as nx
import logging
import json
import requests
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from utils_twc.generic import escape_entities
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT, level='INFO', stream=sys.stdout)
kg = {}
source_paths= defaultdict(dict)
def shortest_path_subgraph(kg_graph, prev_graph, nodes, inventory_entities=None, command_entities=None, path_len=2, add_all_path=False):
if inventory_entities is None:
inventory_entities = []
if command_entities is None:
command_entities = []
world_graph = kg_graph.subgraph(list(prev_graph.nodes)+nodes).copy()
world_graph = nx.compose(prev_graph,world_graph)
world_graph.remove_edges_from(nx.selfloop_edges(world_graph))
if path_len < 2:
return world_graph
triplets = []
pruned_entities = list(set(command_entities)-set(inventory_entities))
if pruned_entities:
for src_et in inventory_entities:
for tgt_et in pruned_entities:
if src_et != tgt_et:
try:
pair_dist = nx.shortest_path_length(kg_graph, source=src_et, target=tgt_et)
except nx.NetworkXNoPath:
pair_dist = 0
if pair_dist >= 1 and pair_dist <= path_len:
triplets.append([src_et, tgt_et, 'relatedTo'])
else:
for entities in command_entities:
for src_et in entities:
for tgt_et in entities:
if src_et != tgt_et:
try:
pair_dist = nx.shortest_path_length(kg_graph, source=src_et, target=tgt_et)
except nx.NetworkXNoPath:
pair_dist=0
if pair_dist >= 1 and pair_dist <= path_len:
triplets.append([src_et, tgt_et, 'relatedTo'])
world_graph, _= add_triplets_to_graph(world_graph, triplets)
return world_graph
def construct_graph(triplets):
graph = nx.DiGraph()
entities = {}
for [e1, e2, r] in triplets:
e1 = e1.lower().strip()
e2 = e2.lower().strip()
r = r.lower().strip()
if e1 not in entities:
graph.add_node(e1)
entities[e1] = e1
if e2 not in entities:
graph.add_node(e2)
entities[e2] = e2
# Add Edge information
if graph.has_edge(e1, e2):
if r not in graph.edges[e1, e2]['relation']:
graph.edges[e1, e2]['relation'] += ' ' + r
else:
graph.add_edge(e1, e2, relation=r)
return graph, entities
def add_triplets_to_graph(graph, triplets):
entities = dict(graph.nodes.data())
for [e1, e2, r] in triplets:
e1 = e1.lower().strip()
e2 = e2.lower().strip()
r = r.lower().strip()
if e1 not in entities:
graph.add_node(e1)
entities[e1] = e1
if e2 not in entities:
graph.add_node(e2)
entities[e2] = e2
# Add Edge information
if graph.has_edge(e1, e2):
if r not in graph.edges[e1, e2]['relation']:
graph.edges[e1, e2]['relation'] += ' ' + r
else:
graph.add_edge(e1, e2, relation=r)
return graph, entities
def draw_graph(graph, title="cleanup", show_relation=True, weights=None, pos=None):
if not pos:
pos = nx.spring_layout(graph, k=0.95)
if weights:
nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000, node_color=weights.tolist(),
vmin=np.min(weights), vmax=np.max(weights), node_shape='o', alpha=0.9, font_size=8, with_labels=True,
label=title,cmap='Blues')
else:
nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000, node_color='pink',
node_shape='o', alpha=0.9, font_size=8, with_labels=True, label=title)
if show_relation:
p_edge = nx.draw_networkx_edge_labels(graph, pos, font_size=6, font_color='red',
edge_labels=nx.get_edge_attributes(graph, 'relation'))
def draw_graph_colormap(graph,node_weights, showbar=False, cmap='YlGnBu'):
# node_weights: maps node id/name to attention weights
pos = nx.spring_layout(graph, k=0.95)
weights = []
for node in graph.nodes:
weights.append(node_weights[node])
# cmap = plt.cm.YlGnBu#RdBu
cmap = plt.get_cmap(cmap)
vmin = np.min(weights)
vmax = np.max(weights)
nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000,
node_color=weights, vmin=vmin, vmax=vmax, cmap=cmap,
node_shape='o', alpha=0.9, font_size=8, with_labels=True, label='Attention')
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
if showbar:
plt.colorbar(sm)
plt.show()
def construct_kg(filename: str, print_every=1e6, cache_load=True, logger=logging.getLogger(__name__)) -> (nx.DiGraph, list, set):
# access edges with graph.edges.data('relation')
if 'graph' in kg and cache_load:
return kg['graph'], kg['triplets'], kg['entities']
path = Path(filename)
if not path.exists():
filename = './kg/conceptnet/kg.txt'
triplets = []
with open(filename, 'r') as fp:
for idx, line in enumerate(fp):
e1, r, e2 = line.rstrip("\n").rsplit()
triplets.append([e1.lower().strip(), e2.lower().strip(), r.lower().strip()])
if idx % print_every == 0:
print("*",end='')
[graph, entities] = construct_graph(triplets)
graph = graph.to_undirected(as_view=True) # Since a->b ==> b->a
if cache_load:
kg['graph'] = graph
kg['triplets'] = triplets
kg['entities'] = entities
return graph, triplets, entities
class RelationExtractor:
def __init__(self, tokenizer, openie_url="http://localhost:9000/"):
self.tokenizer = tokenizer
self.openie_url = openie_url
self.kg_vocab = {}
self.agent_loc = ''
def call_stanford_openie(self,sentence):
querystring = {
"properties": "%7B%22annotators%22%3A%20%22openie%22%7D",
"pipelineLanguage": "en"}
response = requests.request("POST", self.openie_url, data=sentence, params=querystring)
response = json.JSONDecoder().decode(response.text)
return response
def fetch_triplets(self,text, current_graph, prev_action=None):
triplets = []
remove = []
prev_remove = []
link = []
c_id = len(self.kg_vocab.keys())
obs = self.tokenizer.clean_string(text, preprocess=True)
dirs = ['north', 'south', 'east', 'west']
obs = str(obs)
doc = self.tokenizer.nlp_eval(obs)
sents = {}
try:
sents = self.call_stanford_openie(doc.text)['sentences']
except:
print("Error in connecting to Stanford CoreNLP OpenIE Server")
for ov in sents:
tokens = ov["tokens"]
triple = ov['openie']
for tr in triple:
h, r, t = tr['subject'].lower(), tr['relation'].lower(), tr['object'].lower()
if h == 'we':
h = 'you'
if r == 'are in':
r = "'ve entered"
if h == 'it':
break
triplets.append((h, r, t))
room = ""
room_set = False
for rule in triplets:
h, r, t = rule
if 'entered' in r or 'are in' in r or 'walked' in r:
prev_remove.append(r)
if not room_set:
room = t
room_set = True
if 'should' in r:
prev_remove.append(r)
if 'see' in r or 'make out' in r:
link.append((r, t))
remove.append(r)
prev_room = self.agent_loc
self.agent_loc = room
add_rules = []
if prev_action is not None:
for d in dirs:
if d in prev_action and room != "":
add_rules.append((prev_room, d + ' of', room))
prev_room_subgraph = None
prev_you_subgraph = None
for sent in doc.sents:
sent = sent.text
if sent == ',' or sent == 'hm .':
continue
if 'exit' in sent or 'entranceway' in sent:
for d in dirs:
if d in sent:
triplets.append((room, 'has', 'exit to ' + d))
if prev_room != "":
graph_copy = current_graph.copy()
graph_copy.remove_edge('you', prev_room)
con_cs = [graph_copy.subgraph(c) for c in nx.weakly_connected_components(graph_copy)]
for con_c in con_cs:
if prev_room in con_c.nodes:
prev_room_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)
if 'you' in con_c.nodes:
prev_you_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)
for l in link:
add_rules.append((room, l[0], l[1]))
for rule in triplets:
h, r, t = rule
if r == 'is in':
if t == 'room':
t = room
if r not in remove:
add_rules.append((h, r, t))
edges = list(current_graph.edges)
for edge in edges:
r = 'relatedTo'
if 'relation' in current_graph[edge[0]][edge[1]]:
r = current_graph[edge[0]][edge[1]]['relation']
if r in prev_remove:
current_graph.remove_edge(*edge)
if prev_you_subgraph is not None:
current_graph.remove_edges_from(prev_you_subgraph.edges)
for rule in add_rules:
u = '_'.join(str(rule[0]).split())
v = '_'.join(str(rule[2]).split())
if u != 'it' and u not in self.kg_vocab:
self.kg_vocab[u] = c_id
c_id += 1
if v != 'it' and v not in self.kg_vocab:
self.kg_vocab[v] = c_id
c_id += 1
skip_flag = False
for skip_token in self.tokenizer.ignore_list:
if skip_token in u or skip_token in v:
skip_flag = True
if u != 'it' and v != 'it' and not skip_flag:
r = str(rule[1]).lower()
if not rule[1] or rule[1] == '':
r = 'relatedTo'
current_graph.add_edge(str(rule[0]).lower(), str(rule[2]).lower(), relation=r)
prev_edges = current_graph.edges
if prev_room_subgraph is not None:
current_graph.add_edges_from(prev_room_subgraph.edges)
current_edges = current_graph.edges
return current_graph, add_rules
def khop_neighbor_graph(graph, entities, cutoff=1, max_khop_degree=None):
all_entities = []
for et in entities:
candidates = nx.single_source_shortest_path(graph, et, cutoff=cutoff).keys()
if not max_khop_degree or len(candidates)<=max_khop_degree:
all_entities.extend(list(candidates))
return graph.subgraph(set(entities)|set(all_entities))
def ego_graph_seed_expansion(graph, seed, radius, undirected=True, max_degree=None):
working_graph = graph
if undirected:
working_graph = graph.to_undirected()
marked = set(seed)
nodes = set(seed)
for _ in range(radius):
border = set()
for node in marked:
neighbors = {n for n in working_graph[node]}
if max_degree is None or len(neighbors) <= max_degree:
border |= neighbors
nodes |= border
marked = border
return graph.subgraph(nodes)
def shortest_path_seed_expansion(graph, seed, cutoff=None, undirected=True, keep_all=True):
nodes = set(seed)
seed = list(seed)
working_graph = graph
if undirected:
working_graph = graph.to_undirected()
for i in range(len(seed)):
start = i + 1 if undirected else 0
for j in range(start, len(seed)):
try:
if not keep_all:
path = nx.shortest_path(working_graph, seed[i], seed[j])
if cutoff is None or len(path) <= cutoff:
nodes |= set(path)
else:
paths = nx.all_shortest_paths(working_graph, seed[i], seed[j])
for p in paths:
if cutoff is None or len(p) <= cutoff:
nodes |= set(p)
except nx.NetworkXNoPath:
continue
return graph.subgraph(nodes)
def load_manual_graphs(path):
path = Path(path)
manual_world_graphs = {}
if not path.exists():
print('None Found.')
return manual_world_graphs
files = path.rglob("conceptnet_manual_subgraph-*.tsv")
for file in files:
game_id = str(file).split('-')[-1].split('.')[0]
graph, triplets, entities = construct_kg(file, cache_load=False)
manual_world_graphs[game_id]={}
manual_world_graphs[game_id]['graph'] = graph
manual_world_graphs[game_id]['triplets'] = triplets
manual_world_graphs[game_id]['entities'] = entities
print(' DONE')
return manual_world_graphs
def kg_match(extractor, target_entities, kg_entities):
result = set()
kg_entities = escape_entities(kg_entities)
for e in target_entities:
e = e.lower().strip()
result |= extractor(e, kg_entities)
return result
def save_graph_tsv(graph, path):
relation_map = nx.get_edge_attributes(graph, 'relation')
lines = []
for n1, n2 in graph.edges:
relations = relation_map[n1, n2].split()
for r in relations:
lines.append(f'{n1}\t{r}\t{n2}\n')
with open(path, 'w') as f:
f.writelines(lines)
if __name__ == '__main__':
from utils_twc import extractor
from utils_twc.nlp import Tokenizer
tk_extractor = extractor.get_extractor('max')
tokenizer = Tokenizer(extractor=tk_extractor)
rel_extract = RelationExtractor(tokenizer,openie_url='http://iqa962.sl.cloud9.ibm.com:9000/')
text = "You've just walked into a Living Room. You try to gain information on your " \
"surroundings by using a technique you call looking. You can see a closet. " \
"You idly wonder how they came up with the name TextWorld for this place. " \
"It's pretty fitting. A closed standard looking antique trunk is in the room. " \
"You can see a table. The table is usual. On the table you see an apple, a mug, " \
"a newspaper, a note, a hat and a pencil. You smell a sickening smell, and follow " \
"it to a couch. The couch is standard. But the thing is empty. Hm. Oh well You see a " \
"gleam over in a corner, where you can see a tv stand. The tv stand is ordinary. " \
"On the tv stand you can make out a tv. You don't like doors? Why not try going east, " \
"that entranceway is unguarded. You are carrying nothing."
sents = text
# clauses = clausie.clausie(text)
# propositions = clausie.extract_propositions(clauses)
# sents = ''
# for prop in propositions:
# sent = clausie.proposition_text_str(prop)
# sents += sent
# print(sent)
graph, add_rules = rel_extract.fetch_triplets(sents, nx.DiGraph())
print(add_rules)
| true | true |
f71be86a3b944cd3f5b50d8b2127cb921b32bfb6 | 1,048 | py | Python | lhc/io/bed/__main__.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | lhc/io/bed/__main__.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | lhc/io/bed/__main__.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | import argparse
from .iterator import BedEntryIterator
from lhc.io.bed.tools import depth, sort, filter
from lhc.io.txt.tools import compress
def iter_bed(fname):
    """Yield entries from the BED file at ``fname``.

    :param fname: path to the BED file, passed to ``BedEntryIterator``
    """
    it = BedEntryIterator(fname)
    try:
        for entry in it:
            yield entry
    finally:
        # BUGFIX: close even when the consumer abandons the generator early
        # (GeneratorExit) or iteration raises; previously the underlying
        # handle leaked in those cases.
        it.close()
def main():
    """Entry point: parse command-line arguments and dispatch to the
    sub-command handler bound by the chosen subparser."""
    parsed = get_parser().parse_args()
    parsed.func(parsed)
def get_parser():
    """Create and fully configure the top-level argument parser."""
    parser = argparse.ArgumentParser()
    return define_parser(parser)
def define_parser(parser):
    """Attach the bed sub-commands (compress, depth, filter, sort) to
    ``parser`` and return it."""
    subparsers = parser.add_subparsers()

    # Compress is special-cased: it additionally fixes the block delimiter.
    compress_parser = subparsers.add_parser('compress')
    compress.define_parser(compress_parser)
    compress_parser.set_defaults(block_delimiter='\n')

    # The remaining sub-commands are wired identically.
    for name, module in (('depth', depth), ('filter', filter), ('sort', sort)):
        module.define_parser(subparsers.add_parser(name))

    return parser
if __name__ == '__main__':
    import sys
    # main() returns None, so this exits with status 0 on success.
    sys.exit(main())
| 23.818182 | 55 | 0.719466 | import argparse
from .iterator import BedEntryIterator
from lhc.io.bed.tools import depth, sort, filter
from lhc.io.txt.tools import compress
def iter_bed(fname):
it = BedEntryIterator(fname)
for entry in it:
yield entry
it.close()
def main():
args = get_parser().parse_args()
args.func(args)
def get_parser():
return define_parser(argparse.ArgumentParser())
def define_parser(parser):
subparsers = parser.add_subparsers()
compress_parser = subparsers.add_parser('compress')
compress.define_parser(compress_parser)
compress_parser.set_defaults(block_delimiter='\n')
depth_parser = subparsers.add_parser('depth')
depth.define_parser(depth_parser)
filter_parser = subparsers.add_parser('filter')
filter.define_parser(filter_parser)
sort_parser = subparsers.add_parser('sort')
sort.define_parser(sort_parser)
return parser
if __name__ == '__main__':
import sys
sys.exit(main())
| true | true |
f71be8fe9b1bed16fe43ad8a1ea794cdbcec63d2 | 7,605 | py | Python | tests/test_bootstrap.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | tests/test_bootstrap.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 6 | 2021-02-08T20:59:36.000Z | 2022-03-12T00:52:11.000Z | tests/test_bootstrap.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z | """Test the bootstrapping."""
# pylint: disable=protected-access
import asyncio
import logging
import os
from unittest.mock import Mock, patch
from homeassistant import bootstrap
import homeassistant.config as config_util
import homeassistant.util.dt as dt_util
from tests.common import (
MockModule,
get_test_config_dir,
mock_coro,
mock_integration,
patch_yaml_files,
)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
_LOGGER = logging.getLogger(__name__)
# prevent .HA_VERSION file from being written
@patch("homeassistant.bootstrap.conf_util.process_ha_config_upgrade", Mock())
@patch(
    "homeassistant.util.location.async_detect_location_info",
    Mock(return_value=mock_coro(None)),
)
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
@patch("homeassistant.bootstrap.async_enable_logging", Mock(return_value=True))
def test_from_config_file(hass):
    """Test with configuration file."""
    components = {"browser", "conversation", "script"}
    component_lines = ["{}:\n".format(component) for component in components]
    files = {"config.yaml": "".join(component_lines)}
    with patch_yaml_files(files, True):
        yield from bootstrap.async_from_config_file("config.yaml", hass)

    # Every component listed in the config file must have been set up.
    assert components == hass.config.components
@patch("homeassistant.bootstrap.async_enable_logging", Mock())
@asyncio.coroutine
def test_home_assistant_core_config_validation(hass):
    """Test if we pass in wrong information for HA conf."""
    # Extensive HA conf validation testing is done
    # An invalid latitude must make async_from_config_dict bail out with None.
    result = yield from bootstrap.async_from_config_dict(
        {"homeassistant": {"latitude": "some string"}}, hass
    )
    assert result is None
async def test_async_from_config_file_not_mount_deps_folder(loop):
    """Test that we not mount the deps folder inside async_from_config_file."""
    hass = Mock(async_add_executor_job=Mock(side_effect=lambda *args: mock_coro()))
    # Outside a virtualenv the deps folder must be mounted exactly once.
    with patch("homeassistant.bootstrap.is_virtual_env", return_value=False), patch(
        "homeassistant.bootstrap.async_enable_logging", return_value=mock_coro()
    ), patch(
        "homeassistant.bootstrap.async_mount_local_lib_path", return_value=mock_coro()
    ) as mock_mount, patch(
        "homeassistant.bootstrap.async_from_config_dict", return_value=mock_coro()
    ):
        await bootstrap.async_from_config_file("mock-path", hass)
        assert len(mock_mount.mock_calls) == 1
    # Inside a virtualenv the deps folder must never be mounted.
    with patch("homeassistant.bootstrap.is_virtual_env", return_value=True), patch(
        "homeassistant.bootstrap.async_enable_logging", return_value=mock_coro()
    ), patch(
        "homeassistant.bootstrap.async_mount_local_lib_path", return_value=mock_coro()
    ) as mock_mount, patch(
        "homeassistant.bootstrap.async_from_config_dict", return_value=mock_coro()
    ):
        await bootstrap.async_from_config_file("mock-path", hass)
        assert len(mock_mount.mock_calls) == 0
async def test_load_hassio(hass):
    """Verify the hassio domain is added only when the HASSIO env var is set."""
    # Without the supervisor environment variable, no extra domain is loaded.
    with patch.dict(os.environ, {}, clear=True):
        domains = bootstrap._get_domains(hass, {})
        assert domains == set()
    # With HASSIO set, the hassio integration is injected automatically.
    with patch.dict(os.environ, {"HASSIO": "1"}):
        domains = bootstrap._get_domains(hass, {})
        assert domains == {"hassio"}
async def test_empty_setup(hass):
    """An empty configuration must still bring up every core integration."""
    await bootstrap._async_set_up_integrations(hass, {})
    missing = [d for d in bootstrap.CORE_INTEGRATIONS
               if d not in hass.config.components]
    assert not missing, missing
async def test_core_failure_aborts(hass, caplog):
    """A failing homeassistant core setup must stop the remaining setup."""
    failing_core_setup = patch(
        "homeassistant.components.homeassistant.async_setup",
        return_value=mock_coro(False),
    )
    with failing_core_setup:
        await bootstrap._async_set_up_integrations(hass, {"group": {}})
    assert "core failed to initialize" in caplog.text
    # Setup aborted early, so the group integration never got loaded.
    assert "group" not in hass.config.components
async def test_setting_up_config(hass, caplog):
    """Domains listed in the config (even with a label suffix) are set up."""
    config = {"group hello": {}, "homeassistant": {}}
    await bootstrap._async_set_up_integrations(hass, config)
    assert "group" in hass.config.components
async def test_setup_after_deps_all_present(hass, caplog):
    """Integrations linked via after_dependencies set up in dependency order."""
    caplog.set_level(logging.DEBUG)
    setup_order = []

    def make_setup(domain):
        # Record the order in which each domain finishes its setup.
        async def async_setup(hass, config):
            setup_order.append(domain)
            return True

        return async_setup

    mock_integration(
        hass, MockModule(domain="root", async_setup=make_setup("root"))
    )
    mock_integration(
        hass,
        MockModule(
            domain="first_dep",
            async_setup=make_setup("first_dep"),
            partial_manifest={"after_dependencies": ["root"]},
        ),
    )
    mock_integration(
        hass,
        MockModule(
            domain="second_dep",
            async_setup=make_setup("second_dep"),
            partial_manifest={"after_dependencies": ["first_dep"]},
        ),
    )
    await bootstrap._async_set_up_integrations(
        hass, {"root": {}, "first_dep": {}, "second_dep": {}}
    )
    for domain in ("root", "first_dep", "second_dep"):
        assert domain in hass.config.components
    # after_dependencies form a chain, so setup must run root-first.
    assert setup_order == ["root", "first_dep", "second_dep"]
async def test_setup_after_deps_not_trigger_load(hass, caplog):
    """after_dependencies alone must not pull an unconfigured domain in."""
    caplog.set_level(logging.DEBUG)
    setup_order = []

    def make_setup(domain):
        # Record the order in which each domain finishes its setup.
        async def async_setup(hass, config):
            setup_order.append(domain)
            return True

        return async_setup

    mock_integration(
        hass, MockModule(domain="root", async_setup=make_setup("root"))
    )
    mock_integration(
        hass,
        MockModule(
            domain="first_dep",
            async_setup=make_setup("first_dep"),
            partial_manifest={"after_dependencies": ["root"]},
        ),
    )
    mock_integration(
        hass,
        MockModule(
            domain="second_dep",
            async_setup=make_setup("second_dep"),
            partial_manifest={"after_dependencies": ["first_dep"]},
        ),
    )
    # first_dep is registered but absent from the config, so it stays unloaded.
    await bootstrap._async_set_up_integrations(hass, {"root": {}, "second_dep": {}})
    assert "root" in hass.config.components
    assert "first_dep" not in hass.config.components
    assert "second_dep" in hass.config.components
    assert setup_order == ["root", "second_dep"]
async def test_setup_after_deps_not_present(hass, caplog):
    """A missing after_dependencies target must not break the other setups."""
    caplog.set_level(logging.DEBUG)
    setup_order = []

    def make_setup(domain):
        # Record the order in which each domain finishes its setup.
        async def async_setup(hass, config):
            setup_order.append(domain)
            return True

        return async_setup

    mock_integration(
        hass, MockModule(domain="root", async_setup=make_setup("root"))
    )
    # first_dep is configured below but never registered as an integration,
    # so only root and second_dep can actually be set up.
    mock_integration(
        hass,
        MockModule(
            domain="second_dep",
            async_setup=make_setup("second_dep"),
            partial_manifest={"after_dependencies": ["first_dep"]},
        ),
    )
    await bootstrap._async_set_up_integrations(
        hass, {"root": {}, "first_dep": {}, "second_dep": {}}
    )
    assert "root" in hass.config.components
    assert "first_dep" not in hass.config.components
    assert "second_dep" in hass.config.components
    assert setup_order == ["root", "second_dep"]
| 32.224576 | 86 | 0.682446 |
import asyncio
import logging
import os
from unittest.mock import Mock, patch
from homeassistant import bootstrap
import homeassistant.config as config_util
import homeassistant.util.dt as dt_util
from tests.common import (
MockModule,
get_test_config_dir,
mock_coro,
mock_integration,
patch_yaml_files,
)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
_LOGGER = logging.getLogger(__name__)
@patch("homeassistant.bootstrap.conf_util.process_ha_config_upgrade", Mock())
@patch(
"homeassistant.util.location.async_detect_location_info",
Mock(return_value=mock_coro(None)),
)
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
@patch("homeassistant.bootstrap.async_enable_logging", Mock(return_value=True))
def test_from_config_file(hass):
components = set(["browser", "conversation", "script"])
files = {"config.yaml": "".join("{}:\n".format(comp) for comp in components)}
with patch_yaml_files(files, True):
yield from bootstrap.async_from_config_file("config.yaml", hass)
assert components == hass.config.components
@patch("homeassistant.bootstrap.async_enable_logging", Mock())
@asyncio.coroutine
def test_home_assistant_core_config_validation(hass):
result = yield from bootstrap.async_from_config_dict(
{"homeassistant": {"latitude": "some string"}}, hass
)
assert result is None
async def test_async_from_config_file_not_mount_deps_folder(loop):
hass = Mock(async_add_executor_job=Mock(side_effect=lambda *args: mock_coro()))
with patch("homeassistant.bootstrap.is_virtual_env", return_value=False), patch(
"homeassistant.bootstrap.async_enable_logging", return_value=mock_coro()
), patch(
"homeassistant.bootstrap.async_mount_local_lib_path", return_value=mock_coro()
) as mock_mount, patch(
"homeassistant.bootstrap.async_from_config_dict", return_value=mock_coro()
):
await bootstrap.async_from_config_file("mock-path", hass)
assert len(mock_mount.mock_calls) == 1
with patch("homeassistant.bootstrap.is_virtual_env", return_value=True), patch(
"homeassistant.bootstrap.async_enable_logging", return_value=mock_coro()
), patch(
"homeassistant.bootstrap.async_mount_local_lib_path", return_value=mock_coro()
) as mock_mount, patch(
"homeassistant.bootstrap.async_from_config_dict", return_value=mock_coro()
):
await bootstrap.async_from_config_file("mock-path", hass)
assert len(mock_mount.mock_calls) == 0
async def test_load_hassio(hass):
with patch.dict(os.environ, {}, clear=True):
assert bootstrap._get_domains(hass, {}) == set()
with patch.dict(os.environ, {"HASSIO": "1"}):
assert bootstrap._get_domains(hass, {}) == {"hassio"}
async def test_empty_setup(hass):
await bootstrap._async_set_up_integrations(hass, {})
for domain in bootstrap.CORE_INTEGRATIONS:
assert domain in hass.config.components, domain
async def test_core_failure_aborts(hass, caplog):
with patch(
"homeassistant.components.homeassistant.async_setup",
return_value=mock_coro(False),
):
await bootstrap._async_set_up_integrations(hass, {"group": {}})
assert "core failed to initialize" in caplog.text
assert "group" not in hass.config.components
async def test_setting_up_config(hass, caplog):
await bootstrap._async_set_up_integrations(
hass, {"group hello": {}, "homeassistant": {}}
)
assert "group" in hass.config.components
async def test_setup_after_deps_all_present(hass, caplog):
caplog.set_level(logging.DEBUG)
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="first_dep",
async_setup=gen_domain_setup("first_dep"),
partial_manifest={"after_dependencies": ["root"]},
),
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(
hass, {"root": {}, "first_dep": {}, "second_dep": {}}
)
assert "root" in hass.config.components
assert "first_dep" in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["root", "first_dep", "second_dep"]
async def test_setup_after_deps_not_trigger_load(hass, caplog):
caplog.set_level(logging.DEBUG)
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="first_dep",
async_setup=gen_domain_setup("first_dep"),
partial_manifest={"after_dependencies": ["root"]},
),
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(hass, {"root": {}, "second_dep": {}})
assert "root" in hass.config.components
assert "first_dep" not in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["root", "second_dep"]
async def test_setup_after_deps_not_present(hass, caplog):
caplog.set_level(logging.DEBUG)
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(
hass, {"root": {}, "first_dep": {}, "second_dep": {}}
)
assert "root" in hass.config.components
assert "first_dep" not in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["root", "second_dep"]
| true | true |
f71beb6536594f288089770f787a036271ea7c72 | 1,832 | py | Python | lite/tests/unittest_py/op/common/test_unique_with_counts_op_base.py | 714627034/Paddle-Lite | 015ba88a4d639db0b73603e37f83e47be041a4eb | [
"Apache-2.0"
] | 808 | 2018-04-17T17:43:12.000Z | 2019-08-18T07:39:13.000Z | lite/tests/unittest_py/op/common/test_unique_with_counts_op_base.py | 714627034/Paddle-Lite | 015ba88a4d639db0b73603e37f83e47be041a4eb | [
"Apache-2.0"
] | 728 | 2018-04-18T08:15:25.000Z | 2019-08-16T07:14:43.000Z | lite/tests/unittest_py/op/common/test_unique_with_counts_op_base.py | 714627034/Paddle-Lite | 015ba88a4d639db0b73603e37f83e47be041a4eb | [
"Apache-2.0"
] | 364 | 2018-04-18T17:05:02.000Z | 2019-08-18T03:25:38.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
import hypothesis.strategies as st
def sample_program_configs(draw):
    """Draw a random ProgramConfig exercising the unique_with_counts op."""
    # A 1-D input whose single dimension lies in [2, 100].
    in_shape = draw(
        st.lists(
            st.integers(
                min_value=2, max_value=100), min_size=1, max_size=1))

    def generate_index_tensor():
        # Random int32 index values in [1, 5) matching the input shape.
        return np.random.randint(1, 5, size=in_shape).astype(np.int32)

    op_config = OpConfig(
        type="unique_with_counts",
        inputs={"X": ["input_data"]},
        outputs={
            "Out": ["output_data"],
            "Index": ["Index_data"],
            "Count": ["Count_data"]
        },
        attrs={"dtype": 2})
    return ProgramConfig(
        ops=[op_config],
        weights={
            "Index_data": TensorConfig(data_gen=partial(generate_index_tensor))
        },
        inputs={"input_data": TensorConfig(shape=in_shape), },
        outputs=["output_data", "Index_data", "Count_data"])
| 34.566038 | 125 | 0.68559 |
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
import hypothesis.strategies as st
def sample_program_configs(draw):
in_shape = draw(
st.lists(
st.integers(
min_value=2, max_value=100), min_size=1, max_size=1))
def generate_IndexTensor():
return np.random.randint(1, 5, size=in_shape).astype(np.int32)
unique_with_counts_op = OpConfig(
type="unique_with_counts",
inputs={"X": ["input_data"]},
outputs={
"Out": ["output_data"],
"Index": ["Index_data"],
"Count": ["Count_data"]
},
attrs={"dtype": 2})
program_config = ProgramConfig(
ops=[unique_with_counts_op],
weights={
"Index_data": TensorConfig(data_gen=partial(generate_IndexTensor))
},
inputs={"input_data": TensorConfig(shape=in_shape), },
outputs=["output_data", "Index_data", "Count_data"])
return program_config
| true | true |
f71bebf8420c687bf788c8fe852634cda08565c9 | 54,956 | py | Python | scikit-learn-master/sklearn/linear_model/ridge.py | lqkweb/learnMLflow | 13c5decaebba95b1b90f92021be35e343b4764af | [
"Apache-2.0"
] | 2 | 2019-02-21T10:43:16.000Z | 2019-07-30T04:56:37.000Z | scikit-learn-master/sklearn/linear_model/ridge.py | lqkweb/learnMLflow | 13c5decaebba95b1b90f92021be35e343b4764af | [
"Apache-2.0"
] | null | null | null | scikit-learn-master/sklearn/linear_model/ridge.py | lqkweb/learnMLflow | 13c5decaebba95b1b90f92021be35e343b4764af | [
"Apache-2.0"
] | null | null | null | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..metrics.scorer import check_scoring
from ..exceptions import ConvergenceWarning
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
    """Solve the ridge normal equations with scipy's conjugate gradient.

    One CG solve is run per target column of ``y`` with its own penalty
    ``alpha[i]``; returns the coefficients as (n_targets, n_features).
    """
    n_samples, n_features = X.shape
    # LinearOperator lets CG use X mat-vec products without densifying it.
    X1 = sp_linalg.aslinearoperator(X)
    coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
    if n_features > n_samples:
        # Wide problem: operator for the (n_samples x n_samples) kernel system.
        def create_mv(curr_alpha):
            def _mv(x):
                return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
            return _mv
    else:
        # Tall problem: operator for the (n_features x n_features) normal system.
        def create_mv(curr_alpha):
            def _mv(x):
                return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
            return _mv
    for i in range(y.shape[1]):
        y_column = y[:, i]
        mv = create_mv(alpha[i])
        if n_features > n_samples:
            # kernel ridge
            # w = X.T * inv(X X^t + alpha*Id) y
            C = sp_linalg.LinearOperator(
                (n_samples, n_samples), matvec=mv, dtype=X.dtype)
            # FIXME atol
            try:
                coef, info = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')
            except TypeError:
                # old scipy
                coef, info = sp_linalg.cg(C, y_column, tol=tol)
            coefs[i] = X1.rmatvec(coef)
        else:
            # linear ridge
            # w = inv(X^t X + alpha*Id) * X.T y
            y_column = X1.rmatvec(y_column)
            C = sp_linalg.LinearOperator(
                (n_features, n_features), matvec=mv, dtype=X.dtype)
            # FIXME atol
            try:
                coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
                                              tol=tol, atol='legacy')
            except TypeError:
                # old scipy
                coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
                                              tol=tol)
        # info < 0 signals illegal input or breakdown inside scipy's CG.
        if info < 0:
            raise ValueError("Failed with error code %d" % info)
        # info > 0 means CG stopped at the iteration cap without converging.
        if max_iter is None and info > 0 and verbose:
            warnings.warn("sparse_cg did not converge after %d iterations." %
                          info, ConvergenceWarning)
    return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
    """Closed-form ridge via a symmetric solve of the normal equations.

    Computes w = inv(X^t X + alpha*Id) * X.T y per target and returns the
    coefficients as an array of shape (n_targets, n_features).
    """
    # w = inv(X^t X + alpha*Id) * X.T y
    n_samples, n_features = X.shape
    n_targets = y.shape[1]
    A = safe_sparse_dot(X.T, X, dense_output=True)
    Xy = safe_sparse_dot(X.T, y, dense_output=True)
    one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
    if one_alpha:
        # Single shared penalty: add it to A's diagonal once and solve all
        # targets in one call (A may be overwritten, it is not reused).
        A.flat[::n_features + 1] += alpha[0]
        return linalg.solve(A, Xy, sym_pos=True,
                            overwrite_a=True).T
    else:
        coefs = np.empty([n_targets, n_features], dtype=X.dtype)
        for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
            # Add this target's penalty to the diagonal in place, solve,
            # then subtract it back so A can be reused for the next target.
            A.flat[::n_features + 1] += current_alpha
            coef[:] = linalg.solve(A, target, sym_pos=True,
                                   overwrite_a=False).ravel()
            A.flat[::n_features + 1] -= current_alpha
        return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
    """Solve the dual (kernel) ridge system dual_coef = inv(K + alpha*Id) y.

    ``K`` is the (n_samples, n_samples) Gram matrix and may be modified in
    place unless ``copy=True``; its diagonal is restored before returning
    in the shared-alpha branch.
    """
    # dual_coef = inv(X X^t + alpha*Id) y
    n_samples = K.shape[0]
    n_targets = y.shape[1]
    if copy:
        K = K.copy()
    alpha = np.atleast_1d(alpha)
    one_alpha = (alpha == alpha[0]).all()
    has_sw = isinstance(sample_weight, np.ndarray) \
        or sample_weight not in [1.0, None]
    if has_sw:
        # Unlike other solvers, we need to support sample_weight directly
        # because K might be a pre-computed kernel.
        sw = np.sqrt(np.atleast_1d(sample_weight))
        y = y * sw[:, np.newaxis]
        K *= np.outer(sw, sw)
    if one_alpha:
        # Only one penalty, we can solve multi-target problems in one time.
        K.flat[::n_samples + 1] += alpha[0]
        try:
            # Note: we must use overwrite_a=False in order to be able to
            # use the fall-back solution below in case a LinAlgError
            # is raised
            dual_coef = linalg.solve(K, y, sym_pos=True,
                                     overwrite_a=False)
        except np.linalg.LinAlgError:
            warnings.warn("Singular matrix in solving dual problem. Using "
                          "least-squares solution instead.")
            dual_coef = linalg.lstsq(K, y)[0]
        # K is expensive to compute and store in memory so change it back in
        # case it was user-given.
        K.flat[::n_samples + 1] -= alpha[0]
        if has_sw:
            # Undo the sqrt(sample_weight) rescaling on the returned duals.
            dual_coef *= sw[:, np.newaxis]
        return dual_coef
    else:
        # One penalty per target. We need to solve each target separately.
        dual_coefs = np.empty([n_targets, n_samples], K.dtype)
        for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
            # In-place diagonal shift, solve, then undo for the next target.
            K.flat[::n_samples + 1] += current_alpha
            dual_coef[:] = linalg.solve(K, target, sym_pos=True,
                                        overwrite_a=False).ravel()
            K.flat[::n_samples + 1] -= current_alpha
        if has_sw:
            dual_coefs *= sw[np.newaxis, :]
        return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
                     max_iter=None, tol=1e-3, verbose=0, random_state=None,
                     return_n_iter=False, return_intercept=False):
    """Solve the ridge equation by the method of normal equations.

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    X : {array-like, sparse matrix, LinearOperator},
        shape = [n_samples, n_features]
        Training data

    y : array-like, shape = [n_samples] or [n_samples, n_targets]
        Target values

    alpha : {float, array-like},
        shape = [n_targets] if array-like
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC. If an array is passed, penalties are
        assumed to be specific to the targets. Hence they must correspond in
        number.

    sample_weight : float or numpy array of shape [n_samples]
        Individual weights for each sample. If sample_weight is not None and
        solver='auto', the solver will be set to 'cholesky'.

        .. versionadded:: 0.17

    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.
        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.
        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution via a Cholesky decomposition of
          dot(X.T, X)
        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).
        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
          procedure.
        - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
          its improved, unbiased version named SAGA. Both methods also use an
          iterative procedure, and are often faster than other solvers when
          both n_samples and n_features are large. Note that 'sag' and
          'saga' fast convergence is only guaranteed on features with
          approximately the same scale. You can preprocess the data with a
          scaler from sklearn.preprocessing.

        All last five solvers support both dense and sparse data. However, only
        'sag' and 'saga' supports sparse input when`fit_intercept` is True.

        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
        .. versionadded:: 0.19
           SAGA solver.

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' and saga solver, the default value is
        1000.

    tol : float
        Precision of the solution.

    verbose : int
        Verbosity level. Setting verbose > 0 will display additional
        information depending on the solver used.

    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag'.

    return_n_iter : boolean, default False
        If True, the method also returns `n_iter`, the actual number of
        iteration performed by the solver.

        .. versionadded:: 0.17

    return_intercept : boolean, default False
        If True and if X is sparse, the method also returns the intercept,
        and the solver is automatically changed to 'sag'. This is only a
        temporary fix for fitting the intercept with sparse data. For dense
        data, use sklearn.linear_model._preprocess_data before your regression.

        .. versionadded:: 0.17

    Returns
    -------
    coef : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    n_iter : int, optional
        The actual number of iteration performed by the solver.
        Only returned if `return_n_iter` is True.

    intercept : float or array, shape = [n_targets]
        The intercept of the model. Only returned if `return_intercept`
        is True and if X is a scipy sparse array.

    Notes
    -----
    This function won't compute the intercept.
    """
    # Only 'sag' can recover an intercept on sparse X; force it if needed.
    if return_intercept and sparse.issparse(X) and solver != 'sag':
        if solver != 'auto':
            warnings.warn("In Ridge, only 'sag' solver can currently fit the "
                          "intercept when X is sparse. Solver has been "
                          "automatically changed into 'sag'.")
        solver = 'sag'
    _dtype = [np.float64, np.float32]
    # SAG needs X and y columns to be C-contiguous and np.float64
    if solver in ['sag', 'saga']:
        X = check_array(X, accept_sparse=['csr'],
                        dtype=np.float64, order='C')
        y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
    else:
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=_dtype)
        y = check_array(y, dtype=X.dtype, ensure_2d=False)
    check_consistent_length(X, y)
    n_samples, n_features = X.shape
    if y.ndim > 2:
        raise ValueError("Target y has the wrong shape %s" % str(y.shape))
    # Internally always work with 2-D y; remember to flatten again at the end.
    ravel = False
    if y.ndim == 1:
        y = y.reshape(-1, 1)
        ravel = True
    n_samples_, n_targets = y.shape
    if n_samples != n_samples_:
        raise ValueError("Number of samples in X and y does not correspond:"
                         " %d != %d" % (n_samples, n_samples_))
    has_sw = sample_weight is not None
    if solver == 'auto':
        # cholesky if it's a dense array and cg in any other case
        if not sparse.issparse(X) or has_sw:
            solver = 'cholesky'
        else:
            solver = 'sparse_cg'
    if has_sw:
        if np.atleast_1d(sample_weight).ndim > 1:
            raise ValueError("Sample weights must be 1D array or scalar")
        if solver not in ['sag', 'saga']:
            # SAG supports sample_weight directly. For other solvers,
            # we implement sample_weight via a simple rescaling.
            X, y = _rescale_data(X, y, sample_weight)
    # There should be either 1 or n_targets penalties
    alpha = np.asarray(alpha, dtype=X.dtype).ravel()
    if alpha.size not in [1, n_targets]:
        raise ValueError("Number of targets and number of penalties "
                         "do not correspond: %d != %d"
                         % (alpha.size, n_targets))
    # Broadcast a single penalty to every target.
    if alpha.size == 1 and n_targets > 1:
        alpha = np.repeat(alpha, n_targets)
    if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga'):
        raise ValueError('Solver %s not understood' % solver)
    n_iter = None
    if solver == 'sparse_cg':
        coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
    elif solver == 'lsqr':
        coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
    elif solver == 'cholesky':
        if n_features > n_samples:
            # Wide problem: solve in the dual (kernel) space instead.
            K = safe_sparse_dot(X, X.T, dense_output=True)
            try:
                dual_coef = _solve_cholesky_kernel(K, y, alpha)
                coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'
        else:
            try:
                coef = _solve_cholesky(X, y, alpha)
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'
    elif solver in ['sag', 'saga']:
        # precompute max_squared_sum for all targets
        max_squared_sum = row_norms(X, squared=True).max()
        coef = np.empty((y.shape[1], n_features))
        n_iter = np.empty(y.shape[1], dtype=np.int32)
        intercept = np.zeros((y.shape[1], ))
        for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
            # One extra coefficient slot holds the intercept when requested.
            init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
            coef_, n_iter_, _ = sag_solver(
                X, target.ravel(), sample_weight, 'squared', alpha_i, 0,
                max_iter, tol, verbose, random_state, False, max_squared_sum,
                init,
                is_saga=solver == 'saga')
            if return_intercept:
                coef[i] = coef_[:-1]
                intercept[i] = coef_[-1]
            else:
                coef[i] = coef_
            n_iter[i] = n_iter_
        if intercept.shape[0] == 1:
            intercept = intercept[0]
        coef = np.asarray(coef)
    # Handled last (not in the elif chain) so the cholesky branch can fall
    # back here after a LinAlgError.
    if solver == 'svd':
        if sparse.issparse(X):
            raise TypeError('SVD solver does not support sparse'
                            ' inputs currently')
        coef = _solve_svd(X, y, alpha)
    if ravel:
        # When y was passed as a 1d-array, we flatten the coefficients.
        coef = coef.ravel()
    if return_n_iter and return_intercept:
        return coef, n_iter, intercept
    elif return_intercept:
        return coef, intercept
    elif return_n_iter:
        return coef, n_iter
    else:
        return coef
class _BaseRidge(LinearModel, metaclass=ABCMeta):
    """Shared fitting logic for the Ridge regressor/classifier subclasses."""
    @abstractmethod
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.solver = solver
        self.random_state = random_state
    def fit(self, X, y, sample_weight=None):
        """Fit the ridge model to X, y; sets coef_, intercept_ and n_iter_."""
        if self.solver in ('sag', 'saga'):
            # SAG/SAGA only operate on float64 data.
            _dtype = np.float64
        else:
            # all other solvers work at both float precision levels
            _dtype = [np.float64, np.float32]
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=_dtype,
                         multi_output=True, y_numeric=True)
        if ((sample_weight is not None) and
                np.atleast_1d(sample_weight).ndim > 1):
            raise ValueError("Sample weights must be 1D array or scalar")
        # Center/scale the data per the estimator's options before solving.
        X, y, X_offset, y_offset, X_scale = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)
        # temporary fix for fitting the intercept with sparse data using 'sag'
        if sparse.issparse(X) and self.fit_intercept:
            self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
                X, y, alpha=self.alpha, sample_weight=sample_weight,
                max_iter=self.max_iter, tol=self.tol, solver=self.solver,
                random_state=self.random_state, return_n_iter=True,
                return_intercept=True)
            # The solver's intercept is relative to centered y; shift back.
            self.intercept_ += y_offset
        else:
            self.coef_, self.n_iter_ = ridge_regression(
                X, y, alpha=self.alpha, sample_weight=sample_weight,
                max_iter=self.max_iter, tol=self.tol, solver=self.solver,
                random_state=self.random_state, return_n_iter=True,
                return_intercept=False)
            self._set_intercept(X_offset, y_offset, X_scale)
        return self
class Ridge(_BaseRidge, RegressorMixin):
    """Linear least squares with l2 regularization.

    Minimizes the objective function::

        ||y - Xw||^2_2 + alpha * ||w||^2_2

    This model solves a regression model where the loss function is
    the linear least squares function and regularization is given by
    the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
    This estimator has built-in support for multi-variate regression
    (i.e., when y is a 2d-array of shape [n_samples, n_targets]).

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    alpha : {float, array-like}, shape (n_targets)
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC. If an array is passed, penalties are
        assumed to be specific to the targets. Hence they must correspond in
        number.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        For 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.

    tol : float
        Precision of the solution.

    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.

        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.

        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.

        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).

        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
          procedure.

        - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
          its improved, unbiased version named SAGA. Both methods also use an
          iterative procedure, and are often faster than other solvers when
          both n_samples and n_features are large. Note that 'sag' and
          'saga' fast convergence is only guaranteed on features with
          approximately the same scale. You can preprocess the data with a
          scaler from sklearn.preprocessing.

        All last five solvers support both dense and sparse data. However,
        only 'sag' and 'saga' support sparse input when `fit_intercept` is
        True.

        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
        .. versionadded:: 0.19
           SAGA solver.

    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag'.

        .. versionadded:: 0.17
           *random_state* to support Stochastic Average Gradient.

    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Weight vector(s).

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    n_iter_ : array or None, shape (n_targets,)
        Actual number of iterations for each target. Available only for
        sag and lsqr solvers. Other solvers will return None.

        .. versionadded:: 0.17

    See also
    --------
    RidgeClassifier : Ridge classifier
    RidgeCV : Ridge regression with built-in cross validation
    :class:`sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression
        combines ridge regression with the kernel trick

    Examples
    --------
    >>> from sklearn.linear_model import Ridge
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = Ridge(alpha=1.0)
    >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
          normalize=False, random_state=None, solver='auto', tol=0.001)
    """

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        super().__init__(
            alpha=alpha, fit_intercept=fit_intercept,
            normalize=normalize, copy_X=copy_X,
            max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model.

        All the actual work is delegated to ``_BaseRidge.fit``.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values

        sample_weight : float or numpy array of shape [n_samples]
            Individual weights for each sample

        Returns
        -------
        self : returns an instance of self.
        """
        return super().fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
    """Classifier using Ridge regression.

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    alpha : float
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set to false, no
        intercept will be used in calculations (e.g. data is expected to be
        already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        The default value is determined by scipy.sparse.linalg.

    tol : float
        Precision of the solution.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.

        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.

        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.

        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).

        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
          procedure.

        - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
          its unbiased and more flexible version named SAGA. Both methods
          use an iterative procedure, and are often faster than other solvers
          when both n_samples and n_features are large. Note that 'sag' and
          'saga' fast convergence is only guaranteed on features with
          approximately the same scale. You can preprocess the data with a
          scaler from sklearn.preprocessing.

        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
        .. versionadded:: 0.19
           SAGA solver.

    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_classes, n_features)
        Weight vector(s).

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    n_iter_ : array or None, shape (n_targets,)
        Actual number of iterations for each target. Available only for
        sag and lsqr solvers. Other solvers will return None.

    Examples
    --------
    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.linear_model import RidgeClassifier
    >>> X, y = load_breast_cancer(return_X_y=True)
    >>> clf = RidgeClassifier().fit(X, y)
    >>> clf.score(X, y) # doctest: +ELLIPSIS
    0.9595...

    See also
    --------
    Ridge : Ridge regression
    RidgeClassifierCV : Ridge classifier with built-in cross validation

    Notes
    -----
    For multi-class classification, n_class classifiers are trained in
    a one-versus-all approach. Concretely, this is implemented by taking
    advantage of the multi-variate response support in Ridge.
    """

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
                 solver="auto", random_state=None):
        super().__init__(
            alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
            copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)
        self.class_weight = class_weight

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge classifier model.

        Labels are binarized to {-1, +1} (one column per class) and the
        multi-output Ridge regression machinery is reused on the result.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples,n_features]
            Training data

        y : array-like, shape = [n_samples]
            Target values

        sample_weight : float or numpy array of shape (n_samples,)
            Sample weight.

            .. versionadded:: 0.17
               *sample_weight* support to Classifier.

        Returns
        -------
        self : returns an instance of self.
        """
        check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                  multi_output=True)

        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)
        else:
            # we don't (yet) support multi-label classification in Ridge
            raise ValueError(
                "%s doesn't support multi-label classification" % (
                    self.__class__.__name__))

        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))

        super().fit(X, Y, sample_weight=sample_weight)
        return self

    @property
    def classes_(self):
        # Classes seen during fit, as recorded by the label binarizer.
        return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
    """Ridge regression with built-in Generalized Cross-Validation

    It allows efficient Leave-One-Out cross-validation.

    This class is not intended to be used directly. Use RidgeCV instead.

    Notes
    -----

    We want to solve (K + alpha*Id)c = y,
    where K = X X^T is the kernel matrix.

    Let G = (K + alpha*Id)^-1.

    Dual solution: c = Gy
    Primal solution: w = X^T c

    Compute eigendecomposition K = Q V Q^T.
    Then G = Q (V + alpha*Id)^-1 Q^T,
    where (V + alpha*Id) is diagonal.
    It is thus inexpensive to inverse for many alphas.

    Let loov be the vector of prediction values for each example
    when the model was fitted with all examples but this example.

    loov = (KGY - diag(KG)Y) / diag(I-KG)

    Let looe be the vector of prediction errors for each example
    when the model was fitted with all examples but this example.

    looe = y - loov = c / diag(G)

    References
    ----------
    http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf
    https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
    """

    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False,
                 scoring=None, copy_X=True,
                 gcv_mode=None, store_cv_values=False):
        # NOTE(review): converting `alphas` here mutates the constructor
        # argument rather than storing it unmodified — confirm this is
        # intentional (it deviates from the usual estimator convention).
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.copy_X = copy_X
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values

    def _pre_compute(self, X, y, centered_kernel=True):
        """Eigendecompose the kernel K = X X^T (plus a constant for the
        intercept) and project y onto the eigenbasis."""
        # even if X is very sparse, K is usually very dense
        K = safe_sparse_dot(X, X.T, dense_output=True)
        # the following emulates an additional constant regressor
        # corresponding to fit_intercept=True
        # but this is done only when the features have been centered
        if centered_kernel:
            K += np.ones_like(K)
        v, Q = linalg.eigh(K)
        QT_y = np.dot(Q.T, y)
        return v, Q, QT_y

    def _decomp_diag(self, v_prime, Q):
        # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
        return (v_prime * Q ** 2).sum(axis=-1)

    def _diag_dot(self, D, B):
        # compute dot(diag(D), B)
        if len(B.shape) > 1:
            # handle case where B is > 1-d
            D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
        return D * B

    def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
        """Helper function to avoid code duplication between self._errors and
        self._values.

        Notes
        -----
        We don't construct matrix G, instead compute action on y & diagonal.
        """
        w = 1. / (v + alpha)
        constant_column = np.var(Q, 0) < 1.e-12
        # detect constant columns
        w[constant_column] = 0  # cancel the regularization for the intercept

        c = np.dot(Q, self._diag_dot(w, QT_y))
        G_diag = self._decomp_diag(w, Q)
        # handle case where y is 2-d
        if len(y.shape) != 1:
            G_diag = G_diag[:, np.newaxis]
        return G_diag, c

    def _errors(self, alpha, y, v, Q, QT_y):
        """Squared LOO errors (looe**2) for one alpha, eigen path."""
        G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
        return (c / G_diag) ** 2, c

    def _values(self, alpha, y, v, Q, QT_y):
        """LOO predicted values (loov) for one alpha, eigen path."""
        G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
        return y - (c / G_diag), c

    def _pre_compute_svd(self, X, y, centered_kernel=True):
        """SVD-based analogue of _pre_compute (dense X only)."""
        if sparse.issparse(X):
            raise TypeError("SVD not supported for sparse matrices")
        if centered_kernel:
            X = np.hstack((X, np.ones((X.shape[0], 1))))
        # to emulate fit_intercept=True situation, add a column on ones
        # Note that by centering, the other columns are orthogonal to that one
        U, s, _ = linalg.svd(X, full_matrices=0)
        v = s ** 2
        UT_y = np.dot(U.T, y)
        return v, U, UT_y

    def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
        """Helper function to avoid code duplication between self._errors_svd
        and self._values_svd.
        """
        constant_column = np.var(U, 0) < 1.e-12
        # detect columns colinear to ones
        w = ((v + alpha) ** -1) - (alpha ** -1)
        w[constant_column] = - (alpha ** -1)
        # cancel the regularization for the intercept
        c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
        G_diag = self._decomp_diag(w, U) + (alpha ** -1)
        if len(y.shape) != 1:
            # handle case where y is 2-d
            G_diag = G_diag[:, np.newaxis]
        return G_diag, c

    def _errors_svd(self, alpha, y, v, U, UT_y):
        """Squared LOO errors for one alpha, SVD path."""
        G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
        return (c / G_diag) ** 2, c

    def _values_svd(self, alpha, y, v, U, UT_y):
        """LOO predicted values for one alpha, SVD path."""
        G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
        return y - (c / G_diag), c

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values. Will be cast to X's dtype if necessary

        sample_weight : float or array-like of shape [n_samples]
            Sample weight

        Returns
        -------
        self : object
        """
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
                         multi_output=True, y_numeric=True)
        if sample_weight is not None and not isinstance(sample_weight, float):
            sample_weight = check_array(sample_weight, ensure_2d=False)
        n_samples, n_features = X.shape

        X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)

        gcv_mode = self.gcv_mode
        with_sw = len(np.shape(sample_weight))

        # Pick the decomposition strategy: SVD needs dense X, uniform
        # weights, and works on the n_features x n_features problem;
        # everything else falls back to the eigen (kernel) path.
        if gcv_mode is None or gcv_mode == 'auto':
            if sparse.issparse(X) or n_features > n_samples or with_sw:
                gcv_mode = 'eigen'
            else:
                gcv_mode = 'svd'
        elif gcv_mode == "svd" and with_sw:
            # FIXME non-uniform sample weights not yet supported
            warnings.warn("non-uniform sample weights unsupported for svd, "
                          "forcing usage of eigen")
            gcv_mode = 'eigen'

        if gcv_mode == 'eigen':
            _pre_compute = self._pre_compute
            _errors = self._errors
            _values = self._values
        elif gcv_mode == 'svd':
            # assert n_samples >= n_features
            _pre_compute = self._pre_compute_svd
            _errors = self._errors_svd
            _values = self._values_svd
        else:
            raise ValueError('bad gcv_mode "%s"' % gcv_mode)

        if sample_weight is not None:
            X, y = _rescale_data(X, y, sample_weight)

        centered_kernel = not sparse.issparse(X) and self.fit_intercept

        # Decompose once; each alpha is then cheap to evaluate.
        v, Q, QT_y = _pre_compute(X, y, centered_kernel)
        n_y = 1 if len(y.shape) == 1 else y.shape[1]
        cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
        C = []

        scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
        error = scorer is None

        if np.any(self.alphas < 0):
            raise ValueError("alphas cannot be negative. "
                             "Got {} containing some "
                             "negative value instead.".format(self.alphas))

        for i, alpha in enumerate(self.alphas):
            if error:
                out, c = _errors(float(alpha), y, v, Q, QT_y)
            else:
                out, c = _values(float(alpha), y, v, Q, QT_y)
            cv_values[:, i] = out.ravel()
            C.append(c)

        if error:
            # No scorer: pick the alpha with the smallest mean LOO error.
            best = cv_values.mean(axis=0).argmin()
        else:
            # The scorer want an object that will make the predictions but
            # they are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them
            def identity_estimator():
                pass
            identity_estimator.decision_function = lambda y_predict: y_predict
            identity_estimator.predict = lambda y_predict: y_predict

            out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
                   for i in range(len(self.alphas))]
            best = np.argmax(out)

        self.alpha_ = self.alphas[best]
        self.dual_coef_ = C[best]
        # Primal coefficients from the dual solution: w = X^T c.
        self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)

        self._set_intercept(X_offset, y_offset, X_scale)

        if self.store_cv_values:
            if len(y.shape) == 1:
                cv_values_shape = n_samples, len(self.alphas)
            else:
                cv_values_shape = n_samples, n_y, len(self.alphas)
            self.cv_values_ = cv_values.reshape(cv_values_shape)

        return self
class _BaseRidgeCV(LinearModel):
    """Shared fitting logic for RidgeCV and RidgeClassifierCV."""

    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False, scoring=None,
                 cv=None, gcv_mode=None,
                 store_cv_values=False):
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.cv = cv
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model, selecting alpha by cross-validation.

        With ``cv=None`` the efficient Leave-One-Out path (_RidgeGCV) is
        used; otherwise alpha is chosen by grid search over ``self.alphas``.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values. Will be cast to X's dtype if necessary

        sample_weight : float or array-like of shape [n_samples]
            Sample weight

        Returns
        -------
        self : object
        """
        if self.cv is not None:
            # Explicit CV strategy: grid-search alpha over plain Ridge.
            if self.store_cv_values:
                raise ValueError("cv!=None and store_cv_values=True "
                                 " are incompatible")
            grid = {'alpha': self.alphas}
            search = GridSearchCV(Ridge(fit_intercept=self.fit_intercept,
                                        normalize=self.normalize),
                                  grid, cv=self.cv, scoring=self.scoring)
            search.fit(X, y, sample_weight=sample_weight)
            fitted = search.best_estimator_
            self.alpha_ = search.best_estimator_.alpha
        else:
            # Efficient generalized (leave-one-out) cross-validation.
            fitted = _RidgeGCV(self.alphas,
                               fit_intercept=self.fit_intercept,
                               normalize=self.normalize,
                               scoring=self.scoring,
                               gcv_mode=self.gcv_mode,
                               store_cv_values=self.store_cv_values)
            fitted.fit(X, y, sample_weight=sample_weight)
            self.alpha_ = fitted.alpha_
            if self.store_cv_values:
                self.cv_values_ = fitted.cv_values_

        self.coef_ = fitted.coef_
        self.intercept_ = fitted.intercept_

        return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
    """Ridge regression with built-in cross-validation.

    See glossary entry for :term:`cross-validation estimator`.

    By default, it performs Generalized Cross-Validation, which is a form of
    efficient Leave-One-Out cross-validation.

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    alphas : numpy array of shape [n_alphas]
        Array of alpha values to try.
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the efficient Leave-One-Out cross-validation
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used, else,
        :class:`sklearn.model_selection.KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
        Flag indicating which strategy to use when performing
        Generalized Cross-Validation. Options are::

            'auto' : use 'svd' if n_samples > n_features and X is not sparse,
                otherwise use 'eigen'
            'svd' : force computation via singular value decomposition of X
                (does not work for sparse matrices)
            'eigen' : force computation via eigendecomposition of X^T X

        The 'auto' mode is the default and is intended to pick the cheaper
        option of the two depending upon the shape and format of the training
        data.

    store_cv_values : boolean, default=False
        Flag indicating if the cross-validation values corresponding to
        each alpha should be stored in the ``cv_values_`` attribute (see
        below). This flag is only compatible with ``cv=None`` (i.e. using
        Generalized Cross-Validation).

    Attributes
    ----------
    cv_values_ : array, shape = [n_samples, n_alphas] or \
        shape = [n_samples, n_targets, n_alphas], optional
        Cross-validation values for each alpha (if ``store_cv_values=True``\
        and ``cv=None``). After ``fit()`` has been called, this attribute \
        will contain the mean squared errors (by default) or the values \
        of the ``{loss,score}_func`` function (if provided in the constructor).

    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    alpha_ : float
        Estimated regularization parameter.

    Examples
    --------
    >>> from sklearn.datasets import load_diabetes
    >>> from sklearn.linear_model import RidgeCV
    >>> X, y = load_diabetes(return_X_y=True)
    >>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
    >>> clf.score(X, y) # doctest: +ELLIPSIS
    0.5166...

    See also
    --------
    Ridge : Ridge regression
    RidgeClassifier : Ridge classifier
    RidgeClassifierCV : Ridge classifier with built-in cross validation
    """
    # All behavior is inherited from _BaseRidgeCV / RegressorMixin.
    pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
    """Ridge classifier with built-in cross-validation.

    See glossary entry for :term:`cross-validation estimator`.

    By default, it performs Generalized Cross-Validation, which is a form of
    efficient Leave-One-Out cross-validation. Currently, only the n_features >
    n_samples case is handled efficiently.

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    alphas : numpy array of shape [n_alphas]
        Array of alpha values to try.
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the efficient Leave-One-Out cross-validation
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    store_cv_values : boolean, default=False
        Flag indicating if the cross-validation values corresponding to
        each alpha should be stored in the ``cv_values_`` attribute (see
        below). This flag is only compatible with ``cv=None`` (i.e. using
        Generalized Cross-Validation).

    Attributes
    ----------
    cv_values_ : array, shape = [n_samples, n_targets, n_alphas], optional
        Cross-validation values for each alpha (if ``store_cv_values=True`` and
        ``cv=None``). After ``fit()`` has been called, this attribute will
        contain the mean squared errors (by default) or the values of the
        ``{loss,score}_func`` function (if provided in the constructor).

    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    alpha_ : float
        Estimated regularization parameter

    Examples
    --------
    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.linear_model import RidgeClassifierCV
    >>> X, y = load_breast_cancer(return_X_y=True)
    >>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
    >>> clf.score(X, y) # doctest: +ELLIPSIS
    0.9630...

    See also
    --------
    Ridge : Ridge regression
    RidgeClassifier : Ridge classifier
    RidgeCV : Ridge regression with built-in cross validation

    Notes
    -----
    For multi-class classification, n_class classifiers are trained in
    a one-versus-all approach. Concretely, this is implemented by taking
    advantage of the multi-variate response support in Ridge.
    """

    def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
                 normalize=False, scoring=None, cv=None, class_weight=None,
                 store_cv_values=False):
        super().__init__(
            alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
            scoring=scoring, cv=cv, store_cv_values=store_cv_values)
        self.class_weight = class_weight

    def fit(self, X, y, sample_weight=None):
        """Fit the ridge classifier.

        Labels are binarized to {-1, +1} and the multi-output regression
        path of ``_BaseRidgeCV`` is reused on the binarized targets.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values. Will be cast to X's dtype if necessary

        sample_weight : float or numpy array of shape (n_samples,)
            Sample weight.

        Returns
        -------
        self : object
        """
        check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                  multi_output=True)

        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        # NOTE(review): unlike RidgeClassifier.fit, multilabel input is not
        # rejected here — it is passed through to the regression path.
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)

        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))

        _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
        return self

    @property
    def classes_(self):
        # Classes seen during fit, as recorded by the label binarizer.
        return self._label_binarizer.classes_
| 38.32357 | 79 | 0.619951 |
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..metrics.scorer import check_scoring
from ..exceptions import ConvergenceWarning
def _cg_compat(C, b, tol, maxiter):
    """Call scipy's conjugate gradient solver across SciPy versions.

    SciPy < 1.1 has no ``atol`` parameter, 1.1--1.11 wants ``atol='legacy'``
    to keep the historical stopping criterion, and SciPy >= 1.14 removed
    ``tol`` entirely in favor of ``rtol``.  The original fallback still
    passed ``tol=``, which raises TypeError on modern SciPy.
    """
    try:
        return sp_linalg.cg(C, b, maxiter=maxiter, tol=tol, atol='legacy')
    except TypeError:
        pass
    try:
        # old scipy: no `atol` keyword at all
        return sp_linalg.cg(C, b, maxiter=maxiter, tol=tol)
    except TypeError:
        # scipy >= 1.14: `tol` was renamed to `rtol`
        return sp_linalg.cg(C, b, maxiter=maxiter, rtol=tol)


def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
    """Solve the ridge problem by conjugate gradient, one target at a time.

    Parameters
    ----------
    X : {array-like, sparse matrix or LinearOperator},
        shape (n_samples, n_features)
        Training data.
    y : ndarray, shape (n_samples, n_targets)
        Target values (always 2-d here).
    alpha : array-like, shape (n_targets,)
        One l2 penalty per target.
    max_iter : int, optional
        Maximum number of CG iterations (primal system only, as before).
    tol : float
        Convergence tolerance passed to scipy's ``cg``.
    verbose : int
        If non-zero, warn when CG did not converge.

    Returns
    -------
    coefs : ndarray, shape (n_targets, n_features)
    """
    n_samples, n_features = X.shape
    X1 = sp_linalg.aslinearoperator(X)
    coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)

    if n_features > n_samples:
        # wide problem: solve the n_samples x n_samples dual system
        def create_mv(curr_alpha):
            def _mv(x):
                return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
            return _mv
    else:
        # tall problem: solve the n_features x n_features normal equations
        def create_mv(curr_alpha):
            def _mv(x):
                return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
            return _mv

    for i in range(y.shape[1]):
        y_column = y[:, i]

        mv = create_mv(alpha[i])
        if n_features > n_samples:
            # kernel ridge: solve (X X^T + alpha*I) c = y, then w = X^T c
            C = sp_linalg.LinearOperator(
                (n_samples, n_samples), matvec=mv, dtype=X.dtype)
            coef, info = _cg_compat(C, y_column, tol, None)
            coefs[i] = X1.rmatvec(coef)
        else:
            # linear ridge: solve (X^T X + alpha*I) w = X^T y
            y_column = X1.rmatvec(y_column)
            C = sp_linalg.LinearOperator(
                (n_features, n_features), matvec=mv, dtype=X.dtype)
            coefs[i], info = _cg_compat(C, y_column, tol, max_iter)

        if info < 0:
            raise ValueError("Failed with error code %d" % info)

        if max_iter is None and info > 0 and verbose:
            warnings.warn("sparse_cg did not converge after %d iterations." %
                          info, ConvergenceWarning)

    return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
    """Solve the ridge problem with scipy's LSQR, one target at a time.

    LSQR minimizes ||X w - y||^2 + damp^2 ||w||^2, so the per-target
    ridge penalty is passed as damp = sqrt(alpha).

    Returns
    -------
    coefs : ndarray, shape (n_targets, n_features)
    n_iter : ndarray of int32, shape (n_targets,)
        Iteration count reported by LSQR for each target.
    """
    n_samples, n_features = X.shape
    n_targets = y.shape[1]
    coefs = np.empty((n_targets, n_features), dtype=X.dtype)
    n_iter = np.empty(n_targets, dtype=np.int32)

    # According to the lsqr documentation, alpha = damp^2.
    sqrt_alpha = np.sqrt(alpha)

    for j in range(n_targets):
        result = sp_linalg.lsqr(X, y[:, j], damp=sqrt_alpha[j],
                                atol=tol, btol=tol, iter_lim=max_iter)
        coefs[j] = result[0]
        n_iter[j] = result[2]

    return coefs, n_iter
def _solve_cholesky(X, y, alpha):
    """Closed-form ridge solve via the normal equations.

    Builds the Gram matrix A = X^T X once; when all targets share one
    penalty a single positive-definite solve handles every target,
    otherwise the diagonal is shifted per target.

    Returns coefs of shape (n_targets, n_features).
    """
    # w = inv(X^t X + alpha*Id) * X.T y
    n_samples, n_features = X.shape
    n_targets = y.shape[1]

    A = safe_sparse_dot(X.T, X, dense_output=True)
    Xy = safe_sparse_dot(X.T, y, dense_output=True)

    one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])

    if one_alpha:
        A.flat[::n_features + 1] += alpha[0]
        # assume_a='pos' replaces the `sym_pos=True` flag that was
        # deprecated and then removed in SciPy 1.11.
        return linalg.solve(A, Xy, assume_a='pos',
                            overwrite_a=True).T
    else:
        coefs = np.empty([n_targets, n_features], dtype=X.dtype)
        for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
            # shift the diagonal in place, solve, then undo the shift so
            # A can be reused for the next target's penalty
            A.flat[::n_features + 1] += current_alpha
            coef[:] = linalg.solve(A, target, assume_a='pos',
                                   overwrite_a=False).ravel()
            A.flat[::n_features + 1] -= current_alpha
        return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
K.flat[::n_samples + 1] += alpha[0]
try:
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
dual_coefs = np.empty([n_targets, n_samples], K.dtype)
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
                     max_iter=None, tol=1e-3, verbose=0, random_state=None,
                     return_n_iter=False, return_intercept=False):
    """Solve min ||Xw - y||^2_2 + alpha ||w||^2_2 with a chosen solver.

    Dispatches to one of 'sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag' or
    'saga'; 'auto' picks 'cholesky' for dense or sample-weighted input and
    'sparse_cg' otherwise.  ``alpha`` may be a scalar or one penalty per
    target.  Returns the coefficient array, optionally followed by the
    per-target iteration counts (``return_n_iter``) and/or the fitted
    intercept (``return_intercept``, only meaningful with the 'sag' path).
    Raises ValueError for shape mismatches, negative-info CG failures or an
    unknown solver name.
    """
    # Fitting an intercept on sparse X is only implemented in SAG, so the
    # solver is silently (or with a warning) switched when needed.
    if return_intercept and sparse.issparse(X) and solver != 'sag':
        if solver != 'auto':
            warnings.warn("In Ridge, only 'sag' solver can currently fit the "
                          "intercept when X is sparse. Solver has been "
                          "automatically changed into 'sag'.")
        solver = 'sag'
    _dtype = [np.float64, np.float32]
    # SAG/SAGA need float64, C-ordered X and F-ordered y.
    if solver in ['sag', 'saga']:
        X = check_array(X, accept_sparse=['csr'],
                        dtype=np.float64, order='C')
        y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
    else:
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=_dtype)
        y = check_array(y, dtype=X.dtype, ensure_2d=False)
    check_consistent_length(X, y)
    n_samples, n_features = X.shape
    if y.ndim > 2:
        raise ValueError("Target y has the wrong shape %s" % str(y.shape))
    # Work on 2-d y internally; remember to flatten the result on exit.
    ravel = False
    if y.ndim == 1:
        y = y.reshape(-1, 1)
        ravel = True
    n_samples_, n_targets = y.shape
    if n_samples != n_samples_:
        raise ValueError("Number of samples in X and y does not correspond:"
                         " %d != %d" % (n_samples, n_samples_))
    has_sw = sample_weight is not None
    if solver == 'auto':
        if not sparse.issparse(X) or has_sw:
            solver = 'cholesky'
        else:
            solver = 'sparse_cg'
    if has_sw:
        if np.atleast_1d(sample_weight).ndim > 1:
            raise ValueError("Sample weights must be 1D array or scalar")
        if solver not in ['sag', 'saga']:
            # SAG supports sample_weight directly. For other solvers,
            # we implement sample_weight via a simple rescaling.
            X, y = _rescale_data(X, y, sample_weight)
    # There should be either 1 or n_targets penalties
    alpha = np.asarray(alpha, dtype=X.dtype).ravel()
    if alpha.size not in [1, n_targets]:
        raise ValueError("Number of targets and number of penalties "
                         "do not correspond: %d != %d"
                         % (alpha.size, n_targets))
    if alpha.size == 1 and n_targets > 1:
        alpha = np.repeat(alpha, n_targets)
    if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga'):
        raise ValueError('Solver %s not understood' % solver)
    n_iter = None
    if solver == 'sparse_cg':
        coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
    elif solver == 'lsqr':
        coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
    elif solver == 'cholesky':
        if n_features > n_samples:
            # underdetermined: solve the cheaper dual (kernel) problem
            K = safe_sparse_dot(X, X.T, dense_output=True)
            try:
                dual_coef = _solve_cholesky_kernel(K, y, alpha)
                coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'
        else:
            try:
                coef = _solve_cholesky(X, y, alpha)
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'
    elif solver in ['sag', 'saga']:
        # precompute max_squared_sum for all targets
        max_squared_sum = row_norms(X, squared=True).max()
        coef = np.empty((y.shape[1], n_features))
        n_iter = np.empty(y.shape[1], dtype=np.int32)
        intercept = np.zeros((y.shape[1], ))
        for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
            # extra coefficient slot holds the intercept when requested
            init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
            coef_, n_iter_, _ = sag_solver(
                X, target.ravel(), sample_weight, 'squared', alpha_i, 0,
                max_iter, tol, verbose, random_state, False, max_squared_sum,
                init,
                is_saga=solver == 'saga')
            if return_intercept:
                coef[i] = coef_[:-1]
                intercept[i] = coef_[-1]
            else:
                coef[i] = coef_
            n_iter[i] = n_iter_
        if intercept.shape[0] == 1:
            intercept = intercept[0]
        coef = np.asarray(coef)
    # 'svd' may have been selected above as the singular-matrix fallback
    if solver == 'svd':
        if sparse.issparse(X):
            raise TypeError('SVD solver does not support sparse'
                            ' inputs currently')
        coef = _solve_svd(X, y, alpha)
    if ravel:
        # When y was passed as a 1d-array, we flatten the coefficients.
        coef = coef.ravel()
    if return_n_iter and return_intercept:
        return coef, n_iter, intercept
    elif return_intercept:
        return coef, intercept
    elif return_n_iter:
        return coef, n_iter
    else:
        return coef
class _BaseRidge(LinearModel, metaclass=ABCMeta):
    """Shared fitting machinery for Ridge and RidgeClassifier (non-public)."""
    @abstractmethod
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.solver = solver
        self.random_state = random_state
    def fit(self, X, y, sample_weight=None):
        """Validate and preprocess the data, then delegate to ridge_regression."""
        if self.solver in ('sag', 'saga'):
            _dtype = np.float64
        else:
            # all other solvers work at both float precision levels
            _dtype = [np.float64, np.float32]
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=_dtype,
                         multi_output=True, y_numeric=True)
        if ((sample_weight is not None) and
                np.atleast_1d(sample_weight).ndim > 1):
            raise ValueError("Sample weights must be 1D array or scalar")
        # centering / normalization happens here; the offsets are needed
        # afterwards to recover the intercept
        X, y, X_offset, y_offset, X_scale = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)
        # temporary fix for fitting the intercept with sparse data using 'sag'
        if sparse.issparse(X) and self.fit_intercept:
            self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
                X, y, alpha=self.alpha, sample_weight=sample_weight,
                max_iter=self.max_iter, tol=self.tol, solver=self.solver,
                random_state=self.random_state, return_n_iter=True,
                return_intercept=True)
            self.intercept_ += y_offset
        else:
            self.coef_, self.n_iter_ = ridge_regression(
                X, y, alpha=self.alpha, sample_weight=sample_weight,
                max_iter=self.max_iter, tol=self.tol, solver=self.solver,
                random_state=self.random_state, return_n_iter=True,
                return_intercept=False)
            self._set_intercept(X_offset, y_offset, X_scale)
        return self
class Ridge(_BaseRidge, RegressorMixin):
    """Linear least squares with l2 regularization.

    Public estimator; all fitting logic lives in ``_BaseRidge.fit``.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        super().__init__(
            alpha=alpha, fit_intercept=fit_intercept,
            normalize=normalize, copy_X=copy_X,
            max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)
    def fit(self, X, y, sample_weight=None):
        """Fit the ridge regression model; returns self."""
        return super().fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
    """Classifier using ridge regression on {-1, +1}-binarized labels."""
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
                 solver="auto", random_state=None):
        super().__init__(
            alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
            copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)
        self.class_weight = class_weight
    def fit(self, X, y, sample_weight=None):
        """Binarize labels to +/-1 and fit a ridge regressor per class."""
        # validation only here; the return value is intentionally unused
        # (the arrays are validated again inside _BaseRidge.fit)
        check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                  multi_output=True)
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)
        else:
            # we don't (yet) support multi-label classification in Ridge
            raise ValueError(
                "%s doesn't support multi-label classification" % (
                    self.__class__.__name__))
        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))
        super().fit(X, Y, sample_weight=sample_weight)
        return self
    @property
    def classes_(self):
        # classes as learned by the label binarizer during fit
        return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
    """Ridge regression with efficient leave-one-out cross-validation.

    One eigendecomposition of ``X X^T`` ('eigen' mode) or one SVD of X
    ('svd' mode) yields the LOO predictions/errors for every candidate
    alpha without refitting, via the diagonal of the hat matrix.
    Non-public; used by RidgeCV when ``cv is None``.
    """
    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False,
                 scoring=None, copy_X=True,
                 gcv_mode=None, store_cv_values=False):
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.copy_X = copy_X
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values
    def _pre_compute(self, X, y, centered_kernel=True):
        """Eigendecompose the (possibly intercept-augmented) kernel X X^T."""
        # even if X is very sparse, K is usually very dense
        K = safe_sparse_dot(X, X.T, dense_output=True)
        # the following emulates an additional constant regressor
        # corresponding to fit_intercept=True
        # but this is done only when the features have been centered
        if centered_kernel:
            K += np.ones_like(K)
        v, Q = linalg.eigh(K)
        QT_y = np.dot(Q.T, y)
        return v, Q, QT_y
    def _decomp_diag(self, v_prime, Q):
        # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
        return (v_prime * Q ** 2).sum(axis=-1)
    def _diag_dot(self, D, B):
        # compute dot(diag(D), B)
        if len(B.shape) > 1:
            # handle case where B is > 1-d
            D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
        return D * B
    def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
        """Shared eigen-mode core: hat-matrix diagonal and dual coefs."""
        w = 1. / (v + alpha)
        constant_column = np.var(Q, 0) < 1.e-12
        # detect constant columns
        w[constant_column] = 0  # cancel the regularization for the intercept
        c = np.dot(Q, self._diag_dot(w, QT_y))
        G_diag = self._decomp_diag(w, Q)
        # handle case where y is 2-d
        if len(y.shape) != 1:
            G_diag = G_diag[:, np.newaxis]
        return G_diag, c
    def _errors(self, alpha, y, v, Q, QT_y):
        """Squared LOO errors for one alpha (eigen mode)."""
        G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
        return (c / G_diag) ** 2, c
    def _values(self, alpha, y, v, Q, QT_y):
        """LOO predictions for one alpha (eigen mode)."""
        G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
        return y - (c / G_diag), c
    def _pre_compute_svd(self, X, y, centered_kernel=True):
        """SVD of (possibly intercept-augmented) X for dense svd mode."""
        if sparse.issparse(X):
            raise TypeError("SVD not supported for sparse matrices")
        if centered_kernel:
            X = np.hstack((X, np.ones((X.shape[0], 1))))
        # to emulate fit_intercept=True situation, add a column on ones
        # Note that by centering, the other columns are orthogonal to that one
        U, s, _ = linalg.svd(X, full_matrices=0)
        v = s ** 2
        UT_y = np.dot(U.T, y)
        return v, U, UT_y
    def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
        """Shared svd-mode core: hat-matrix diagonal and dual coefs."""
        constant_column = np.var(U, 0) < 1.e-12
        # detect columns colinear to ones
        w = ((v + alpha) ** -1) - (alpha ** -1)
        w[constant_column] = - (alpha ** -1)
        # cancel the regularization for the intercept
        c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
        G_diag = self._decomp_diag(w, U) + (alpha ** -1)
        if len(y.shape) != 1:
            # handle case where y is 2-d
            G_diag = G_diag[:, np.newaxis]
        return G_diag, c
    def _errors_svd(self, alpha, y, v, U, UT_y):
        """Squared LOO errors for one alpha (svd mode)."""
        G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
        return (c / G_diag) ** 2, c
    def _values_svd(self, alpha, y, v, U, UT_y):
        """LOO predictions for one alpha (svd mode)."""
        G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
        return y - (c / G_diag), c
    def fit(self, X, y, sample_weight=None):
        """Fit ridge for every alpha via LOO-CV and keep the best one."""
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
                         multi_output=True, y_numeric=True)
        if sample_weight is not None and not isinstance(sample_weight, float):
            sample_weight = check_array(sample_weight, ensure_2d=False)
        n_samples, n_features = X.shape
        X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)
        gcv_mode = self.gcv_mode
        with_sw = len(np.shape(sample_weight))
        # pick the decomposition: 'eigen' for sparse/underdetermined or
        # weighted problems, 'svd' otherwise
        if gcv_mode is None or gcv_mode == 'auto':
            if sparse.issparse(X) or n_features > n_samples or with_sw:
                gcv_mode = 'eigen'
            else:
                gcv_mode = 'svd'
        elif gcv_mode == "svd" and with_sw:
            # FIXME non-uniform sample weights not yet supported
            warnings.warn("non-uniform sample weights unsupported for svd, "
                          "forcing usage of eigen")
            gcv_mode = 'eigen'
        if gcv_mode == 'eigen':
            _pre_compute = self._pre_compute
            _errors = self._errors
            _values = self._values
        elif gcv_mode == 'svd':
            # assert n_samples >= n_features
            _pre_compute = self._pre_compute_svd
            _errors = self._errors_svd
            _values = self._values_svd
        else:
            raise ValueError('bad gcv_mode "%s"' % gcv_mode)
        if sample_weight is not None:
            X, y = _rescale_data(X, y, sample_weight)
        centered_kernel = not sparse.issparse(X) and self.fit_intercept
        v, Q, QT_y = _pre_compute(X, y, centered_kernel)
        n_y = 1 if len(y.shape) == 1 else y.shape[1]
        cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
        C = []
        scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
        # without a scorer, model selection minimizes squared LOO error
        error = scorer is None
        if np.any(self.alphas < 0):
            raise ValueError("alphas cannot be negative. "
                             "Got {} containing some "
                             "negative value instead.".format(self.alphas))
        for i, alpha in enumerate(self.alphas):
            if error:
                out, c = _errors(float(alpha), y, v, Q, QT_y)
            else:
                out, c = _values(float(alpha), y, v, Q, QT_y)
            cv_values[:, i] = out.ravel()
            C.append(c)
        if error:
            best = cv_values.mean(axis=0).argmin()
        else:
            # The scorer want an object that will make the predictions but
            # they are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them
            def identity_estimator():
                pass
            identity_estimator.decision_function = lambda y_predict: y_predict
            identity_estimator.predict = lambda y_predict: y_predict
            out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
                   for i in range(len(self.alphas))]
            best = np.argmax(out)
        self.alpha_ = self.alphas[best]
        self.dual_coef_ = C[best]
        # map dual coefficients back to primal weights
        self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
        self._set_intercept(X_offset, y_offset, X_scale)
        if self.store_cv_values:
            if len(y.shape) == 1:
                cv_values_shape = n_samples, len(self.alphas)
            else:
                cv_values_shape = n_samples, n_y, len(self.alphas)
            self.cv_values_ = cv_values.reshape(cv_values_shape)
        return self
class _BaseRidgeCV(LinearModel):
    """Shared alpha-selection logic for RidgeCV / RidgeClassifierCV.

    With ``cv=None`` the efficient LOO-CV of ``_RidgeGCV`` is used;
    otherwise alpha is chosen by a GridSearchCV over Ridge estimators.
    """
    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False, scoring=None,
                 cv=None, gcv_mode=None,
                 store_cv_values=False):
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.cv = cv
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values
    def fit(self, X, y, sample_weight=None):
        """Select the best alpha by cross-validation and fit with it."""
        if self.cv is None:
            # fast path: closed-form leave-one-out CV
            estimator = _RidgeGCV(self.alphas,
                                  fit_intercept=self.fit_intercept,
                                  normalize=self.normalize,
                                  scoring=self.scoring,
                                  gcv_mode=self.gcv_mode,
                                  store_cv_values=self.store_cv_values)
            estimator.fit(X, y, sample_weight=sample_weight)
            self.alpha_ = estimator.alpha_
            if self.store_cv_values:
                self.cv_values_ = estimator.cv_values_
        else:
            # cv_values_ are only produced by the LOO-CV path
            if self.store_cv_values:
                raise ValueError("cv!=None and store_cv_values=True "
                                 " are incompatible")
            parameters = {'alpha': self.alphas}
            gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept,
                                    normalize=self.normalize),
                              parameters, cv=self.cv, scoring=self.scoring)
            gs.fit(X, y, sample_weight=sample_weight)
            estimator = gs.best_estimator_
            self.alpha_ = gs.best_estimator_.alpha
        self.coef_ = estimator.coef_
        self.intercept_ = estimator.intercept_
        return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
    """Ridge regression with built-in cross-validation of the alpha grid."""
    pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
    """Ridge classifier with built-in cross-validation of the alpha grid."""
    def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
                 normalize=False, scoring=None, cv=None, class_weight=None,
                 store_cv_values=False):
        super().__init__(
            alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
            scoring=scoring, cv=cv, store_cv_values=store_cv_values)
        self.class_weight = class_weight
    def fit(self, X, y, sample_weight=None):
        """Binarize labels to +/-1, then run the cross-validated ridge fit."""
        # validation only; the return value is intentionally unused
        check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                  multi_output=True)
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)
        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))
        _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
        return self
    @property
    def classes_(self):
        # classes as learned by the label binarizer during fit
        return self._label_binarizer.classes_
| true | true |
f71bec5753bc8e9f3a6c6cc2541b07d58772b075 | 1,201 | py | Python | tests/tests/test_design/test_randomcodons.py | klavinslab/coral | 17f59591211562a59a051f474cd6cecba4829df9 | [
"MIT"
] | 34 | 2015-12-26T22:13:51.000Z | 2021-11-17T11:46:37.000Z | tests/tests/test_design/test_randomcodons.py | klavinslab/coral | 17f59591211562a59a051f474cd6cecba4829df9 | [
"MIT"
] | 13 | 2015-09-11T23:27:51.000Z | 2018-06-25T20:44:28.000Z | tests/tests/test_design/test_randomcodons.py | klavinslab/coral | 17f59591211562a59a051f474cd6cecba4829df9 | [
"MIT"
] | 14 | 2015-10-08T17:08:48.000Z | 2022-02-22T04:25:54.000Z | '''
Tests for RandomCodons class of analysis module.
'''
from nose.tools import assert_equal, assert_not_equal, assert_raises
from coral import design, reaction, RNA
def test_randomcodons():
    '''
    Check that random_codons generates a new coding sequence of the right
    length that still translates to the reference peptide, and that its
    options (frequency_cutoff, weighted) behave as documented.
    '''
    reference_seq = RNA('AUGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAG')
    reference_peptide = reaction.translate(reference_seq)
    output = design.random_codons(reference_peptide)
    # Bug fix: translate the *generated* sequence. The old code
    # re-translated reference_seq, making the equality check vacuous.
    # (Assumes reaction.translate handles a stop-codon-less sequence —
    # TODO confirm against coral's translate implementation.)
    output_peptide = reaction.translate(output)
    assert_equal(len(output), len(reference_seq) - 3)
    assert_equal(reference_peptide, output_peptide)
    assert_not_equal(reference_seq, output)
    # Setting too high a threshold should raise ValueError
    assert_raises(ValueError, design.random_codons, reference_peptide,
                  frequency_cutoff=1.5)
    # Weighted should work
    w_output = design.random_codons(reference_peptide, weighted=True)
    # Same fix as above for the weighted output.
    w_output_peptide = reaction.translate(w_output)
    assert_equal(len(w_output), len(reference_seq) - 3)
    assert_equal(reference_peptide, w_output_peptide)
    assert_not_equal(reference_seq, w_output)
| 32.459459 | 70 | 0.757702 |
from nose.tools import assert_equal, assert_not_equal, assert_raises
from coral import design, reaction, RNA
def test_randomcodons():
    '''Check random_codons output length, translation fidelity and options.'''
    reference_seq = RNA('AUGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAG')
    reference_peptide = reaction.translate(reference_seq)
    output = design.random_codons(reference_peptide)
    # Bug fix: translate the generated sequence (previously re-translated
    # reference_seq, which made the peptide-equality assertion vacuous).
    output_peptide = reaction.translate(output)
    assert_equal(len(output), len(reference_seq) - 3)
    assert_equal(reference_peptide, output_peptide)
    assert_not_equal(reference_seq, output)
    assert_raises(ValueError, design.random_codons, reference_peptide,
                  frequency_cutoff=1.5)
    w_output = design.random_codons(reference_peptide, weighted=True)
    w_output_peptide = reaction.translate(w_output)
    assert_equal(len(w_output), len(reference_seq) - 3)
    assert_equal(reference_peptide, w_output_peptide)
    assert_not_equal(reference_seq, w_output)
| true | true |
f71bec9bd1ce0214c2dc30775d89387898d9b1fb | 328 | py | Python | truck_app/migrations/0010_remove_truck_likes.py | Svetloni89/truck_project | e365d2a84f32e62f8dbc5c371a3355dbbe93557d | [
"MIT"
] | null | null | null | truck_app/migrations/0010_remove_truck_likes.py | Svetloni89/truck_project | e365d2a84f32e62f8dbc5c371a3355dbbe93557d | [
"MIT"
] | null | null | null | truck_app/migrations/0010_remove_truck_likes.py | Svetloni89/truck_project | e365d2a84f32e62f8dbc5c371a3355dbbe93557d | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2020-11-14 08:22
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``likes`` field from the ``truck_app.Truck`` model."""
    dependencies = [
        ('truck_app', '0009_auto_20201112_1114'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='truck',
            name='likes',
        ),
    ]
| 18.222222 | 49 | 0.591463 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('truck_app', '0009_auto_20201112_1114'),
]
operations = [
migrations.RemoveField(
model_name='truck',
name='likes',
),
]
| true | true |
f71bedbe0e1dd8d7b00f8bfa2461591aca993afa | 2,299 | py | Python | example/exampleapp/models.py | jphilip/django-treewidget | e2eff61f98ea7a520f29a4ed7395ec75d134246b | [
"MIT"
] | null | null | null | example/exampleapp/models.py | jphilip/django-treewidget | e2eff61f98ea7a520f29a4ed7395ec75d134246b | [
"MIT"
] | null | null | null | example/exampleapp/models.py | jphilip/django-treewidget | e2eff61f98ea7a520f29a4ed7395ec75d134246b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from mptt.models import MPTTModel
from treebeard.mp_tree import MP_Node
from treebeard.al_tree import AL_Node
from treebeard.ns_tree import NS_Node
from treewidget.fields import TreeForeignKey, TreeManyToManyField
from django.utils.encoding import python_2_unicode_compatible
# django-mptt
@python_2_unicode_compatible
class Mptt(MPTTModel):
name = models.CharField(max_length=32)
parent = TreeForeignKey(
'self', blank=True, null=True, on_delete=models.CASCADE, settings={'filtered': True})
def __str__(self):
return self.name
# django-treebeard
@python_2_unicode_compatible
class Treebeardmp(MP_Node):
name = models.CharField(max_length=32)
def __str__(self):
return '%s' % self.name
@python_2_unicode_compatible
class Treebeardal(AL_Node):
name = models.CharField(max_length=32)
parent = models.ForeignKey('self', related_name='children_set', null=True,
db_index=True, on_delete=models.CASCADE)
sib_order = models.PositiveIntegerField()
def __str__(self):
return '%s' % self.name
@python_2_unicode_compatible
class Treebeardns(NS_Node):
name = models.CharField(max_length=32)
def __str__(self):
return '%s' % self.name
class Example(models.Model):
mptt = TreeForeignKey(Mptt, on_delete=models.CASCADE)
treebeardmp = TreeForeignKey(Treebeardmp, on_delete=models.CASCADE,
settings={'show_buttons': True, 'filtered': True})
treebeardal = TreeForeignKey(Treebeardal, on_delete=models.CASCADE,
settings={'search': True, 'dnd': True, 'sort': True})
treebeardns = TreeForeignKey(Treebeardns, on_delete=models.CASCADE,
settings={'dnd': True})
mptt_many = TreeManyToManyField(Mptt, related_name='example_many',
settings={'show_buttons': True, 'search': True, 'dnd': True})
treebeardmp_many = TreeManyToManyField(Treebeardmp, related_name='example_many')
treebeardal_many = TreeManyToManyField(Treebeardal, related_name='example_many')
treebeardns_many = TreeManyToManyField(Treebeardns, related_name='example_many')
| 35.921875 | 97 | 0.702044 |
from __future__ import unicode_literals
from django.db import models
from mptt.models import MPTTModel
from treebeard.mp_tree import MP_Node
from treebeard.al_tree import AL_Node
from treebeard.ns_tree import NS_Node
from treewidget.fields import TreeForeignKey, TreeManyToManyField
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Mptt(MPTTModel):
name = models.CharField(max_length=32)
parent = TreeForeignKey(
'self', blank=True, null=True, on_delete=models.CASCADE, settings={'filtered': True})
def __str__(self):
return self.name
@python_2_unicode_compatible
class Treebeardmp(MP_Node):
name = models.CharField(max_length=32)
def __str__(self):
return '%s' % self.name
@python_2_unicode_compatible
class Treebeardal(AL_Node):
name = models.CharField(max_length=32)
parent = models.ForeignKey('self', related_name='children_set', null=True,
db_index=True, on_delete=models.CASCADE)
sib_order = models.PositiveIntegerField()
def __str__(self):
return '%s' % self.name
@python_2_unicode_compatible
class Treebeardns(NS_Node):
name = models.CharField(max_length=32)
def __str__(self):
return '%s' % self.name
class Example(models.Model):
mptt = TreeForeignKey(Mptt, on_delete=models.CASCADE)
treebeardmp = TreeForeignKey(Treebeardmp, on_delete=models.CASCADE,
settings={'show_buttons': True, 'filtered': True})
treebeardal = TreeForeignKey(Treebeardal, on_delete=models.CASCADE,
settings={'search': True, 'dnd': True, 'sort': True})
treebeardns = TreeForeignKey(Treebeardns, on_delete=models.CASCADE,
settings={'dnd': True})
mptt_many = TreeManyToManyField(Mptt, related_name='example_many',
settings={'show_buttons': True, 'search': True, 'dnd': True})
treebeardmp_many = TreeManyToManyField(Treebeardmp, related_name='example_many')
treebeardal_many = TreeManyToManyField(Treebeardal, related_name='example_many')
treebeardns_many = TreeManyToManyField(Treebeardns, related_name='example_many')
| true | true |
f71bf0329ac143d3243fa730424a4eebdde5ed63 | 2,519 | py | Python | letsencrypt/constants.py | felixonmars/certbot | 324ebf468db402dbe44ba780ed2df682ab99af86 | [
"Apache-2.0"
] | null | null | null | letsencrypt/constants.py | felixonmars/certbot | 324ebf468db402dbe44ba780ed2df682ab99af86 | [
"Apache-2.0"
] | null | null | null | letsencrypt/constants.py | felixonmars/certbot | 324ebf468db402dbe44ba780ed2df682ab99af86 | [
"Apache-2.0"
] | null | null | null | """Let's Encrypt constants."""
import os
import logging
from acme import challenges
SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
"""Setuptools entry point group name for plugins."""
CLI_DEFAULTS = dict(
config_files=[
"/etc/letsencrypt/cli.ini",
# http://freedesktop.org/wiki/Software/xdg-user-dirs/
os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"),
"letsencrypt", "cli.ini"),
],
verbose_count=-(logging.WARNING / 10),
server="https://acme-staging.api.letsencrypt.org/directory",
rsa_key_size=2048,
rollback_checkpoints=1,
config_dir="/etc/letsencrypt",
work_dir="/var/lib/letsencrypt",
logs_dir="/var/log/letsencrypt",
no_verify_ssl=False,
tls_sni_01_port=challenges.TLSSNI01Response.PORT,
auth_cert_path="./cert.pem",
auth_chain_path="./chain.pem",
strict_permissions=False,
)
"""Defaults for CLI flags and `.IConfig` attributes."""
RENEWER_DEFAULTS = dict(
renewer_enabled="yes",
renew_before_expiry="30 days",
deploy_before_expiry="20 days",
)
"""Defaults for renewer script."""
EXCLUSIVE_CHALLENGES = frozenset([frozenset([
challenges.TLSSNI01, challenges.HTTP01])])
"""Mutually exclusive challenges."""
ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"]
"""List of possible :class:`letsencrypt.interfaces.IInstaller`
enhancements.
List of expected options parameters:
- redirect: None
- http-header: TODO
- ocsp-stapling: TODO
- spdy: TODO
"""
ARCHIVE_DIR = "archive"
"""Archive directory, relative to `IConfig.config_dir`."""
CONFIG_DIRS_MODE = 0o755
"""Directory mode for ``.IConfig.config_dir`` et al."""
ACCOUNTS_DIR = "accounts"
"""Directory where all accounts are saved."""
BACKUP_DIR = "backups"
"""Directory (relative to `IConfig.work_dir`) where backups are kept."""
CSR_DIR = "csr"
"""See `.IConfig.csr_dir`."""
IN_PROGRESS_DIR = "IN_PROGRESS"
"""Directory used before a permanent checkpoint is finalized (relative to
`IConfig.work_dir`)."""
KEY_DIR = "keys"
"""Directory (relative to `IConfig.config_dir`) where keys are saved."""
LIVE_DIR = "live"
"""Live directory, relative to `IConfig.config_dir`."""
TEMP_CHECKPOINT_DIR = "temp_checkpoint"
"""Temporary checkpoint directory (relative to `IConfig.work_dir`)."""
RENEWAL_CONFIGS_DIR = "renewal"
"""Renewal configs directory, relative to `IConfig.config_dir`."""
RENEWER_CONFIG_FILENAME = "renewer.conf"
"""Renewer config file name (relative to `IConfig.config_dir`)."""
| 27.086022 | 73 | 0.710599 | import os
import logging
from acme import challenges
SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
CLI_DEFAULTS = dict(
config_files=[
"/etc/letsencrypt/cli.ini",
os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"),
"letsencrypt", "cli.ini"),
],
verbose_count=-(logging.WARNING / 10),
server="https://acme-staging.api.letsencrypt.org/directory",
rsa_key_size=2048,
rollback_checkpoints=1,
config_dir="/etc/letsencrypt",
work_dir="/var/lib/letsencrypt",
logs_dir="/var/log/letsencrypt",
no_verify_ssl=False,
tls_sni_01_port=challenges.TLSSNI01Response.PORT,
auth_cert_path="./cert.pem",
auth_chain_path="./chain.pem",
strict_permissions=False,
)
RENEWER_DEFAULTS = dict(
renewer_enabled="yes",
renew_before_expiry="30 days",
deploy_before_expiry="20 days",
)
EXCLUSIVE_CHALLENGES = frozenset([frozenset([
challenges.TLSSNI01, challenges.HTTP01])])
ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"]
ARCHIVE_DIR = "archive"
CONFIG_DIRS_MODE = 0o755
ACCOUNTS_DIR = "accounts"
BACKUP_DIR = "backups"
CSR_DIR = "csr"
IN_PROGRESS_DIR = "IN_PROGRESS"
KEY_DIR = "keys"
LIVE_DIR = "live"
TEMP_CHECKPOINT_DIR = "temp_checkpoint"
RENEWAL_CONFIGS_DIR = "renewal"
RENEWER_CONFIG_FILENAME = "renewer.conf"
| true | true |
f71bf03d595623a0ca93dde19a49a3566fbf4da1 | 2,082 | py | Python | scraper.py | Luisf718/web_scraping | df599a1ea77a1475a05d97884f57a4c3761adec5 | [
"MIT"
] | null | null | null | scraper.py | Luisf718/web_scraping | df599a1ea77a1475a05d97884f57a4c3761adec5 | [
"MIT"
] | null | null | null | scraper.py | Luisf718/web_scraping | df599a1ea77a1475a05d97884f57a4c3761adec5 | [
"MIT"
] | null | null | null | import requests
import lxml.html as html
import os
import datetime
# The heading is rendered as a <text-fill> element (the library does not
# recognize it as an <h2>), so the XPaths below target text-fill instead.
# Front page of the newspaper being scraped.
HOME_URL = 'https://www.larepublica.co/'
# XPath for the front page's article links, and for each article's parts.
XPATH_LINK_TO_ARTICLE = '//text-fill/a/@href'
XPATH_TITLE = '//div[@class="mb-auto"]/text-fill/span//text()'
XPATH_SUMMARY = '//div[@class="lead"]/p//text()'
XPATH_BODY = '//div[@class="html-content"]/p//text()'
def parse_notice(link, today):
    """Download one article and save its title, summary and body.

    The article is written to ``<today>/<title>.txt``.  Pages without the
    expected structure are skipped; HTTP errors are printed, so a single
    broken article does not abort the whole scrape.
    """
    try:
        response = requests.get(link)
        if response.status_code == 200:
            notice = response.content.decode('utf-8')
            parsed = html.fromstring(notice)
            try:
                title = parsed.xpath(XPATH_TITLE)[0]
                summary = parsed.xpath(XPATH_SUMMARY)[0]
                body = parsed.xpath(XPATH_BODY)
            except IndexError:
                # article page without the expected structure: skip it
                return
            # Bug fix: strip characters that are invalid in file names.
            # The old code only removed double quotes, so titles containing
            # '/' (or ':', '?', ... on Windows) crashed open().
            safe_title = ''.join(
                c for c in title if c not in '\\/:*?"<>|').strip()
            with open(os.path.join(today, f'{safe_title}.txt'), 'w',
                      encoding='utf-8') as f:
                f.write(str(title))
                f.write('\n\n')
                f.write(str(summary))
                f.write('\n\n')
                for p in body:
                    f.write(str(p))
                    f.write('\n')
        else:
            raise ValueError(f'Error: {response.status_code}')
    except ValueError as ve:
        print(ve)
def parse_home():
    """Fetch the front page, collect article links and scrape each one
    into a directory named after today's date (dd-mm-YYYY)."""
    try:
        response = requests.get(HOME_URL)
        if response.status_code != 200:
            raise ValueError(f'Error: {response.status_code}')
        parsed = html.fromstring(response.content.decode('utf-8'))
        article_links = parsed.xpath(XPATH_LINK_TO_ARTICLE)
        today = datetime.date.today().strftime('%d-%m-%Y')
        if not os.path.isdir(today):
            os.mkdir(today)
        for article_link in article_links:
            parse_notice(article_link, today)
    except ValueError as ve:
        print(ve)
def run():
    """Entry point: scrape today's La Republica front-page articles."""
    parse_home()
if __name__ == '__main__':
run() | 30.617647 | 88 | 0.548511 | import requests
import lxml.html as html
import os
import datetime
HOME_URL = 'https://www.larepublica.co/'
XPATH_LINK_TO_ARTICLE = '//text-fill/a/@href'
XPATH_TITLE = '//div[@class="mb-auto"]/text-fill/span//text()'
XPATH_SUMMARY = '//div[@class="lead"]/p//text()'
XPATH_BODY = '//div[@class="html-content"]/p//text()'
def parse_notice(link, today):
try:
response = requests.get(link)
if response.status_code == 200:
notice = response.content.decode('utf-8')
parsed = html.fromstring(notice)
try:
title = parsed.xpath(XPATH_TITLE)[0]
title = title.replace('\"','')
summary = parsed.xpath(XPATH_SUMMARY)[0]
body = parsed.xpath(XPATH_BODY)
except IndexError:
return
with open(f'{today}/{title}.txt', 'w', encoding='utf-8') as f:
f.write(str(title))
f.write('\n\n')
f.write(str(summary))
f.write('\n\n')
for p in body:
f.write(str(p))
f.write('\n')
else:
raise ValueError(f'Error: {response.status_code}')
except ValueError as ve:
print(ve)
def parse_home():
try:
response = requests.get(HOME_URL)
if response.status_code == 200:
home = response.content.decode('utf-8')
parsed = html.fromstring(home)
links_to_notices = parsed.xpath(XPATH_LINK_TO_ARTICLE)
# print(links_to_notices)
today = datetime.date.today().strftime('%d-%m-%Y')
if not os.path.isdir(today):
os.mkdir(today)
for link in links_to_notices:
parse_notice(link, today)
else:
raise ValueError(f'Error: {response.status_code}')
except ValueError as ve:
print(ve)
def run():
parse_home()
if __name__ == '__main__':
run() | true | true |
f71bf0555abfc8db7bc79d24972ecc0523d5c881 | 5,928 | py | Python | scaffold_generator/defaults.py | iamswaroopp/django-scaffold-generator | c9aa7269c3a3d9a618dbf41aac8e23649be64d48 | [
"MIT"
] | 6 | 2021-05-20T14:46:23.000Z | 2022-01-24T07:07:55.000Z | scaffold_generator/defaults.py | iamswaroopp/django-scaffold-generator | c9aa7269c3a3d9a618dbf41aac8e23649be64d48 | [
"MIT"
] | null | null | null | scaffold_generator/defaults.py | iamswaroopp/django-scaffold-generator | c9aa7269c3a3d9a618dbf41aac8e23649be64d48 | [
"MIT"
] | null | null | null | DEFAULT_SETTINGS = {
'CREATE_HTML_VIEW_RESOURCES': True,
'CREATE_REST_VIEW_RESOURCES': True,
'DEFAULT_MODEL_IMPORTS': [],
'DEFAULT_FORM_IMPORTS': [
'django.forms',
],
'FIELDS': {},
'MODEL_EXTRA_IMPORT_CLASSES': [
'django.db.models',
],
'MODEL_PARENT_CLASSES': ['django.db.models.Model'],
'FORM_EXTRA_IMPORT_CLASSES': ['django.forms'],
'FORM_PARENT_CLASSES': ['django.forms.ModelForm'],
'VIEW_EXTRA_IMPORT_CLASSES': [],
'VIEW_PERMISSION_CLASSES': [
'django.contrib.auth.mixins.PermissionRequiredMixin',
],
'VIEW_PERMISSION_CODES': [
'view',
],
'LIST_VIEW_PARENT_CLASSES': ['django.views.generic.list.ListView'],
'DETAIL_VIEW_PARENT_CLASSES': ['django.views.generic.detail.DetailView'],
'ADD_PERMISSION_CLASSES': [
'django.contrib.auth.mixins.PermissionRequiredMixin',
],
'ADD_PERMISSION_CODES': ['add', 'create'],
'CREATE_VIEW_PARENT_CLASSES': ['django.views.generic.edit.CreateView'],
'CREATE_URL_PATH': 'create',
'CHANGE_PERMISSION_CLASSES': [
'django.contrib.auth.mixins.PermissionRequiredMixin',
],
'CHANGE_PERMISSION_CODES': [
'change',
],
'UPDATE_VIEW_PARENT_CLASSES': ['django.views.generic.edit.UpdateView'],
'UPDATE_URL_PATH': 'update',
'DELETE_PERMISSION_CLASSES': [
'django.contrib.auth.mixins.PermissionRequiredMixin',
],
'DELETE_PERMISSION_CODES': [
'delete',
],
'DELETE_VIEW_PARENT_CLASSES': ['django.views.generic.edit.DeleteView'],
'DELETE_URL_PATH': 'delete',
'ADMIN_EXTRA_IMPORT_CLASSES': ['django.contrib.admin'],
'ADMIN_PARENT_CLASSES': ['django.contrib.admin.ModelAdmin'],
'URL_EXTRA_IMPORT_CLASSES': ['django.urls.path'],
'SCAFFOLD_REST_FRAMEWORK': True,
'REST_FRAMEWORK_SERIALIZER_EXTRA_IMPORT_CLASSES': ['rest_framework.serializers'],
'REST_FRAMEWORK_SERIALIZER_PARENT_CLASSES': ['rest_framework.serializers.ModelSerializer'],
'REST_FRAMEWORK_VIEWSET_EXTRA_IMPORT_CLASSES': ['rest_framework.viewsets'],
'REST_FRAMEWORK_VIEWSET_PARENT_CLASSES': ['rest_framework.viewsets.ModelViewSet'],
'REST_FRAMEWORK_VIEWSET_PERMISSION_CLASSES': ['rest_framework.permissions.DjangoModelPermissions'],
'REST_FRAMEWORK_DEFAULT_ROUTER': 'rest_framework.routers.DefaultRouter',
'SCAFFOLD_TEMPLATES': False,
'FORM_EXTRA': '',
'TEMPLATE_VIEW_LIST': 'scaffold_generator/views/model_list.html.template',
'TEMPLATE_VIEW_DETAIL': 'scaffold_generator/views/model_detail.html.template',
'TEMPLATE_VIEW_FORM': 'scaffold_generator/views/model_form.html.template',
'TEMPLATE_VIEW_DELETE': 'scaffold_generator/views/model_delete.html.template',
'ADD_LIST_VIEW_TO_NAVBAR_TEMPLATE': '',
'NAVBAR_ITEM_TEMPLATE': 'scaffold_generator/navbar_item.html.template',
}
def _field_spec(class_name, *, default_kwargs=None, nullable=None):
    """Build one model-field spec entry for DEFAULT_FIELDS.

    :param class_name: dotted name emitted in generated code
        (e.g. ``'models.CharField'``).
    :param default_kwargs: keyword arguments always emitted for the field,
        mapping argument name to a source-code snippet; key omitted when None.
    :param nullable: whether the generator may mark the field nullable;
        key omitted when None (field types where the flag is irrelevant).
    """
    spec = {'class_name': class_name}
    if default_kwargs is not None:
        spec['default_kwargs'] = default_kwargs
    if nullable is not None:
        spec['nullable'] = nullable
    return spec


# Django field type name -> spec consumed by the scaffold generator.
# Note: CommaSeparatedIntegerField, IPAddressField and NullBooleanField are
# deprecated/removed in recent Django versions; kept here for compatibility.
DEFAULT_FIELDS = {
    'AutoField': _field_spec('models.AutoField'),
    'BigAutoField': _field_spec('models.BigAutoField'),
    'BigIntegerField': _field_spec('models.BigIntegerField'),
    'BinaryField': _field_spec('models.BinaryField'),
    'BooleanField': _field_spec('models.BooleanField'),
    'CharField': _field_spec('models.CharField',
                             default_kwargs={'max_length': '128'},
                             nullable=False),
    'CommaSeparatedIntegerField': _field_spec('models.CommaSeparatedIntegerField',
                                              nullable=False),
    'DateField': _field_spec('models.DateField'),
    'DateTimeField': _field_spec('models.DateTimeField'),
    'DecimalField': _field_spec('models.DecimalField'),
    'DurationField': _field_spec('models.DurationField'),
    'EmailField': _field_spec('models.EmailField', nullable=False),
    'FileField': _field_spec('models.FileField', nullable=False),
    'FilePathField': _field_spec('models.FilePathField'),
    'FloatField': _field_spec('models.FloatField'),
    'ForeignKey': _field_spec('models.ForeignKey',
                              default_kwargs={'on_delete': 'models.CASCADE'}),
    'GenericIPAddressField': _field_spec('models.GenericIPAddressField'),
    'IPAddressField': _field_spec('models.IPAddressField'),
    'ImageField': _field_spec('models.ImageField', nullable=False),
    'IntegerField': _field_spec('models.IntegerField'),
    'JSONField': _field_spec('models.JSONField'),
    'ManyToManyField': _field_spec('models.ManyToManyField', nullable=False),
    'NullBooleanField': _field_spec('models.NullBooleanField'),
    'OneToOneField': _field_spec('models.OneToOneField',
                                 default_kwargs={'on_delete': 'models.CASCADE'}),
    'PositiveBigIntegerField': _field_spec('models.PositiveBigIntegerField'),
    'PositiveIntegerField': _field_spec('models.PositiveIntegerField'),
    'PositiveSmallIntegerField': _field_spec('models.PositiveSmallIntegerField'),
    'SlugField': _field_spec('models.SlugField', nullable=False),
    'SmallAutoField': _field_spec('models.SmallAutoField'),
    'SmallIntegerField': _field_spec('models.SmallIntegerField'),
    'TextField': _field_spec('models.TextField', nullable=False),
    'TimeField': _field_spec('models.TimeField'),
    'URLField': _field_spec('models.URLField', nullable=False),
    'UUIDField': _field_spec('models.UUIDField'),
}
| 31.531915 | 103 | 0.624494 | DEFAULT_SETTINGS = {
'CREATE_HTML_VIEW_RESOURCES': True,
'CREATE_REST_VIEW_RESOURCES': True,
'DEFAULT_MODEL_IMPORTS': [],
'DEFAULT_FORM_IMPORTS': [
'django.forms',
],
'FIELDS': {},
'MODEL_EXTRA_IMPORT_CLASSES': [
'django.db.models',
],
'MODEL_PARENT_CLASSES': ['django.db.models.Model'],
'FORM_EXTRA_IMPORT_CLASSES': ['django.forms'],
'FORM_PARENT_CLASSES': ['django.forms.ModelForm'],
'VIEW_EXTRA_IMPORT_CLASSES': [],
'VIEW_PERMISSION_CLASSES': [
'django.contrib.auth.mixins.PermissionRequiredMixin',
],
'VIEW_PERMISSION_CODES': [
'view',
],
'LIST_VIEW_PARENT_CLASSES': ['django.views.generic.list.ListView'],
'DETAIL_VIEW_PARENT_CLASSES': ['django.views.generic.detail.DetailView'],
'ADD_PERMISSION_CLASSES': [
'django.contrib.auth.mixins.PermissionRequiredMixin',
],
'ADD_PERMISSION_CODES': ['add', 'create'],
'CREATE_VIEW_PARENT_CLASSES': ['django.views.generic.edit.CreateView'],
'CREATE_URL_PATH': 'create',
'CHANGE_PERMISSION_CLASSES': [
'django.contrib.auth.mixins.PermissionRequiredMixin',
],
'CHANGE_PERMISSION_CODES': [
'change',
],
'UPDATE_VIEW_PARENT_CLASSES': ['django.views.generic.edit.UpdateView'],
'UPDATE_URL_PATH': 'update',
'DELETE_PERMISSION_CLASSES': [
'django.contrib.auth.mixins.PermissionRequiredMixin',
],
'DELETE_PERMISSION_CODES': [
'delete',
],
'DELETE_VIEW_PARENT_CLASSES': ['django.views.generic.edit.DeleteView'],
'DELETE_URL_PATH': 'delete',
'ADMIN_EXTRA_IMPORT_CLASSES': ['django.contrib.admin'],
'ADMIN_PARENT_CLASSES': ['django.contrib.admin.ModelAdmin'],
'URL_EXTRA_IMPORT_CLASSES': ['django.urls.path'],
'SCAFFOLD_REST_FRAMEWORK': True,
'REST_FRAMEWORK_SERIALIZER_EXTRA_IMPORT_CLASSES': ['rest_framework.serializers'],
'REST_FRAMEWORK_SERIALIZER_PARENT_CLASSES': ['rest_framework.serializers.ModelSerializer'],
'REST_FRAMEWORK_VIEWSET_EXTRA_IMPORT_CLASSES': ['rest_framework.viewsets'],
'REST_FRAMEWORK_VIEWSET_PARENT_CLASSES': ['rest_framework.viewsets.ModelViewSet'],
'REST_FRAMEWORK_VIEWSET_PERMISSION_CLASSES': ['rest_framework.permissions.DjangoModelPermissions'],
'REST_FRAMEWORK_DEFAULT_ROUTER': 'rest_framework.routers.DefaultRouter',
'SCAFFOLD_TEMPLATES': False,
'FORM_EXTRA': '',
'TEMPLATE_VIEW_LIST': 'scaffold_generator/views/model_list.html.template',
'TEMPLATE_VIEW_DETAIL': 'scaffold_generator/views/model_detail.html.template',
'TEMPLATE_VIEW_FORM': 'scaffold_generator/views/model_form.html.template',
'TEMPLATE_VIEW_DELETE': 'scaffold_generator/views/model_delete.html.template',
'ADD_LIST_VIEW_TO_NAVBAR_TEMPLATE': '',
'NAVBAR_ITEM_TEMPLATE': 'scaffold_generator/navbar_item.html.template',
}
DEFAULT_FIELDS = {
'AutoField': {
'class_name': 'models.AutoField',
},
'BigAutoField': {
'class_name': 'models.BigAutoField',
},
'BigIntegerField': {
'class_name': 'models.BigIntegerField',
},
'BinaryField': {
'class_name': 'models.BinaryField',
},
'BooleanField': {
'class_name': 'models.BooleanField',
},
'CharField': {
'class_name': 'models.CharField',
'default_kwargs': {
'max_length': '128',
},
'nullable': False,
},
'CommaSeparatedIntegerField': {
'class_name': 'models.CommaSeparatedIntegerField',
'nullable': False,
},
'DateField': {
'class_name': 'models.DateField',
},
'DateTimeField': {
'class_name': 'models.DateTimeField',
},
'DecimalField': {
'class_name': 'models.DecimalField',
},
'DurationField': {
'class_name': 'models.DurationField',
},
'EmailField': {
'class_name': 'models.EmailField',
'nullable': False,
},
'FileField': {
'class_name': 'models.FileField',
'nullable': False,
},
'FilePathField': {
'class_name': 'models.FilePathField',
},
'FloatField': {
'class_name': 'models.FloatField',
},
'ForeignKey': {
'class_name': 'models.ForeignKey',
'default_kwargs': {
'on_delete': 'models.CASCADE',
},
},
'GenericIPAddressField': {
'class_name': 'models.GenericIPAddressField',
},
'IPAddressField': {
'class_name': 'models.IPAddressField',
},
'ImageField': {
'class_name': 'models.ImageField',
'nullable': False,
},
'IntegerField': {
'class_name': 'models.IntegerField',
},
'JSONField': {
'class_name': 'models.JSONField',
},
'ManyToManyField': {
'class_name': 'models.ManyToManyField',
'nullable': False,
},
'NullBooleanField': {
'class_name': 'models.NullBooleanField',
},
'OneToOneField': {
'class_name': 'models.OneToOneField',
'default_kwargs': {
'on_delete': 'models.CASCADE',
},
},
'PositiveBigIntegerField': {
'class_name': 'models.PositiveBigIntegerField',
},
'PositiveIntegerField': {
'class_name': 'models.PositiveIntegerField',
},
'PositiveSmallIntegerField': {
'class_name': 'models.PositiveSmallIntegerField',
},
'SlugField': {
'class_name': 'models.SlugField',
'nullable': False,
},
'SmallAutoField': {
'class_name': 'models.SmallAutoField',
},
'SmallIntegerField': {
'class_name': 'models.SmallIntegerField',
},
'TextField': {
'class_name': 'models.TextField',
'nullable': False,
},
'TimeField': {
'class_name': 'models.TimeField',
},
'URLField': {
'class_name': 'models.URLField',
'nullable': False,
},
'UUIDField': {
'class_name': 'models.UUIDField',
},
}
| true | true |
f71bf0a388d2199024ab2f51328c979f5c521f72 | 9,018 | py | Python | test/functional/wallet_abandonconflict.py | SbercoinCom/sbercoin.com | 8fb386e59e4db8a6abb3a2c638a2ecc918f6b9dd | [
"MIT"
] | 1 | 2021-05-17T06:06:57.000Z | 2021-05-17T06:06:57.000Z | test/functional/wallet_abandonconflict.py | SbercoinCom/sbercoin.com | 8fb386e59e4db8a6abb3a2c638a2ecc918f6b9dd | [
"MIT"
] | null | null | null | test/functional/wallet_abandonconflict.py | SbercoinCom/sbercoin.com | 8fb386e59e4db8a6abb3a2c638a2ecc918f6b9dd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already abandoned.
"""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
disconnect_nodes,
wait_until,
)
from test_framework.sbercoinconfig import *
class AbandonConflictTest(BitcoinTestFramework):
    """Exercise the abandontransaction RPC and its effect on wallet balances."""
    def set_test_params(self):
        # Two nodes; node0 runs with a very low min relay fee so its txs
        # can later be evicted by restarting with a higher fee.
        self.num_nodes = 2
        self.extra_args = [["-minrelaytxfee=0.00001"], []]
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        """Build a chain of dependent wallet txs, evict them via restart,
        abandon the parent, and verify node0's balance after every step."""
        # Mine enough blocks for coinbase maturity before spending.
        self.nodes[1].generate(COINBASE_MATURITY)
        self.sync_blocks()
        balance = self.nodes[0].getbalance()
        txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        self.sync_mempools()
        self.nodes[1].generate(1)
        # Can not abandon non-wallet transaction
        assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', lambda: self.nodes[0].abandontransaction(txid='ff' * 32))
        # Can not abandon confirmed transaction
        assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: self.nodes[0].abandontransaction(txid=txA))
        self.sync_blocks()
        newbalance = self.nodes[0].getbalance()
        assert balance - newbalance < Decimal("0.01") #no more than fees lost
        balance = newbalance
        # Disconnect nodes so node0's transactions don't get into node1's mempool
        disconnect_nodes(self.nodes[0], 1)
        # Identify the 10btc outputs
        nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txA)["details"] if tx_out["amount"] == Decimal("10"))
        nB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txB)["details"] if tx_out["amount"] == Decimal("10"))
        nC = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txC)["details"] if tx_out["amount"] == Decimal("10"))
        inputs = []
        # spend 10btc outputs from txA and txB
        inputs.append({"txid": txA, "vout": nA})
        inputs.append({"txid": txB, "vout": nB})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
        outputs[self.nodes[1].getnewaddress()] = Decimal("5")
        signed = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
        # Identify the 14.99998btc output
        nAB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txAB1)["details"] if tx_out["amount"] == Decimal("14.99998"))
        #Create a child tx spending AB1 and C
        inputs = []
        inputs.append({"txid": txAB1, "vout": nAB})
        inputs.append({"txid": txC, "vout": nC})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
        signed2 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
        # Create a child tx spending ABC2
        signed3_change = Decimal("24.999")
        inputs = [{"txid": txABC2, "vout": 0}]
        outputs = {self.nodes[0].getnewaddress(): signed3_change}
        signed3 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        # note tx is never directly referenced, only abandoned as a child of the above
        self.nodes[0].sendrawtransaction(signed3["hex"])
        # In mempool txs from self should increase balance from change
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("30") + signed3_change)
        balance = newbalance
        # Restart the node with a higher min relay fee so the parent tx is no longer in mempool
        # TODO: redo with eviction
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
        wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
        # Verify txs no longer in either node's mempool
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        # Not in mempool txs from self should only reduce balance
        # inputs are still spent, but change not received
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - signed3_change)
        # Unconfirmed received funds that are not in mempool, also shouldn't show
        # up in unconfirmed balance
        unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
        assert_equal(unconfbalance, newbalance)
        # Also shouldn't show up in listunspent
        assert not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]
        balance = newbalance
        # Abandon original transaction and verify inputs are available again
        # including that the child tx was also abandoned
        self.nodes[0].abandontransaction(txAB1)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("30"))
        balance = newbalance
        # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
        wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(self.nodes[0].getbalance(), balance)
        # But if it is received again then it is unabandoned
        # And since now in mempool, the change is available
        # But its child tx remains abandoned
        self.nodes[0].sendrawtransaction(signed["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
        balance = newbalance
        # Send child tx again so it is unabandoned
        self.nodes[0].sendrawtransaction(signed2["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
        balance = newbalance
        # Remove using high relay fee again
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
        wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.9996"))
        balance = newbalance
        # Create a double spend of AB1 by spending again from only A's 10 output
        # Mine double spend from node 1
        inputs = []
        inputs.append({"txid": txA, "vout": nA})
        outputs = {}
        outputs[self.nodes[1].getnewaddress()] = Decimal("9.99")
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        signed = self.nodes[0].signrawtransactionwithwallet(tx)
        self.nodes[1].sendrawtransaction(signed["hex"])
        self.nodes[1].generate(1)
        connect_nodes(self.nodes[0], 1)
        self.sync_blocks()
        # Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("20"))
        balance = newbalance
        # There is currently a minor bug around this and so this test doesn't work. See Issue #7315
        # Invalidate the block with the double spend and B's 10 BTC output should no longer be available
        # Don't think C's should either
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        newbalance = self.nodes[0].getbalance()
        #assert_equal(newbalance, balance - Decimal("10"))
        self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
        self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
        self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
# Test-framework entry point: run the functional test when invoked directly.
if __name__ == '__main__':
    AbandonConflictTest().main()
| 47.714286 | 138 | 0.66833 |
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
disconnect_nodes,
wait_until,
)
from test_framework.sbercoinconfig import *
class AbandonConflictTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[1].generate(COINBASE_MATURITY)
self.sync_blocks()
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
self.sync_mempools()
self.nodes[1].generate(1)
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', lambda: self.nodes[0].abandontransaction(txid='ff' * 32))
assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: self.nodes[0].abandontransaction(txid=txA))
self.sync_blocks()
newbalance = self.nodes[0].getbalance()
assert balance - newbalance < Decimal("0.01")
balance = newbalance
disconnect_nodes(self.nodes[0], 1)
# Identify the 10btc outputs
nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txA)["details"] if tx_out["amount"] == Decimal("10"))
nB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txB)["details"] if tx_out["amount"] == Decimal("10"))
nC = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txC)["details"] if tx_out["amount"] == Decimal("10"))
inputs = []
# spend 10btc outputs from txA and txB
inputs.append({"txid": txA, "vout": nA})
inputs.append({"txid": txB, "vout": nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998btc output
nAB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txAB1)["details"] if tx_out["amount"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid": txAB1, "vout": nAB})
inputs.append({"txid": txC, "vout": nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# Create a child tx spending ABC2
signed3_change = Decimal("24.999")
inputs = [{"txid": txABC2, "vout": 0}]
outputs = {self.nodes[0].getnewaddress(): signed3_change}
signed3 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
# note tx is never directly referenced, only abandoned as a child of the above
self.nodes[0].sendrawtransaction(signed3["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + signed3_change)
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
# Verify txs no longer in either node's mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - signed3_change)
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]
balance = newbalance
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Mine double spend from node 1
inputs = []
inputs.append({"txid": txA, "vout": nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.99")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransactionwithwallet(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
self.sync_blocks()
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# Invalidate the block with the double spend and B's 10 BTC output should no longer be available
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
| true | true |
f71bf12d2f88d3050797e3faff333cb5b247f6af | 4,243 | py | Python | BPR-example/tSNE_matlab/tsne_p.py | jsirait/selection-bias-code | 5b89b2d9bc3c996e27dde4773be105698fd72db9 | [
"BSD-3-Clause"
] | null | null | null | BPR-example/tSNE_matlab/tsne_p.py | jsirait/selection-bias-code | 5b89b2d9bc3c996e27dde4773be105698fd72db9 | [
"BSD-3-Clause"
] | null | null | null | BPR-example/tSNE_matlab/tsne_p.py | jsirait/selection-bias-code | 5b89b2d9bc3c996e27dde4773be105698fd72db9 | [
"BSD-3-Clause"
] | null | null | null | # Generated with SMOP 0.41
from libsmop import *
# tsne_p.m
# NOTE(review): this function was machine-translated from MATLAB (tsne_p.m)
# by SMOP; all helpers (arange, dot, bsxfun, copy, end, ...) come from
# libsmop and emulate MATLAB semantics (1-based, column-major indexing).
# Prefer editing tsne_p.m and regenerating over hand-editing this file.
@function
def tsne_p(P=None,labels=None,no_dims=None,*args,**kwargs):
    varargin = tsne_p.varargin
    nargin = tsne_p.nargin

    #TSNE_P Performs symmetric t-SNE on affinity matrix P
    #
    #   mappedX = tsne_p(P, labels, no_dims)
    #
    # The function performs symmetric t-SNE on pairwise similarity matrix P
    # to create a low-dimensional map of no_dims dimensions (default = 2).
    # The matrix P is assumed to be symmetric, sum up to 1, and have zeros
    # on the diagonal.
    # The labels of the data are not used by t-SNE itself, however, they
    # are used to color intermediate plots. Please provide an empty labels
    # matrix [] if you don't want to plot results during the optimization.
    # The low-dimensional data representation is returned in mappedX.

    # (C) Laurens van der Maaten, 2010
    # University of California, San Diego

    # Default missing arguments (MATLAB-style optional parameters).
    if logical_not(exist('labels','var')):
        labels=[]
# tsne_p.m:21
    if logical_not(exist('no_dims','var')) or isempty(no_dims):
        no_dims=2
# tsne_p.m:24
    # First check whether we already have an initial solution
    # (no_dims may be a matrix holding an initial embedding instead of a scalar).
    if numel(no_dims) > 1:
        initial_solution=copy(true)
# tsne_p.m:29
        ydata=copy(no_dims)
# tsne_p.m:30
        no_dims=size(ydata,2)
# tsne_p.m:31
    else:
        initial_solution=copy(false)
# tsne_p.m:33
    # Initialize some variables (optimizer hyperparameters from the paper).
    n=size(P,1)
# tsne_p.m:37
    momentum=0.5
# tsne_p.m:38
    final_momentum=0.8
# tsne_p.m:39
    mom_switch_iter=250
# tsne_p.m:40
    stop_lying_iter=100
# tsne_p.m:41
    max_iter=1000
# tsne_p.m:42
    epsilon=500
# tsne_p.m:43
    min_gain=0.01
# tsne_p.m:44
    # Make sure P-vals are set properly: zero diagonal, symmetrize, normalize.
    P[arange(1,end(),n + 1)]=0
# tsne_p.m:47
    P=dot(0.5,(P + P.T))
# tsne_p.m:48
    P=max(P / sum(ravel(P)),realmin)
# tsne_p.m:49
    const=sum(multiply(ravel(P),log(ravel(P))))
# tsne_p.m:50
    if logical_not(initial_solution):
        # "Early exaggeration": lie about P for the first iterations.
        P=dot(P,4)
# tsne_p.m:52
    # Initialize the solution with small random coordinates.
    if logical_not(initial_solution):
        ydata=dot(0.0001,randn(n,no_dims))
# tsne_p.m:57
    y_incs=zeros(size(ydata))
# tsne_p.m:59
    gains=ones(size(ydata))
# tsne_p.m:60
    # Gradient-descent loop over the embedding coordinates.
    for iter in arange(1,max_iter).reshape(-1):
        # Compute joint probability that point i and j are neighbors
        sum_ydata=sum(ydata ** 2,2)
# tsne_p.m:66
        num=1 / (1 + bsxfun(plus,sum_ydata,bsxfun(plus,sum_ydata.T,dot(- 2,(dot(ydata,ydata.T))))))
# tsne_p.m:67
        num[arange(1,end(),n + 1)]=0
# tsne_p.m:68
        Q=max(num / sum(ravel(num)),realmin)
# tsne_p.m:69
        # Compute the gradients (faster implementation)
        L=multiply((P - Q),num)
# tsne_p.m:72
        y_grads=dot(dot(4,(diag(sum(L,1)) - L)),ydata)
# tsne_p.m:73
        # Update the solution: adaptive per-coordinate gains plus momentum.
        gains=multiply((gains + 0.2),(sign(y_grads) != sign(y_incs))) + multiply((dot(gains,0.8)),(sign(y_grads) == sign(y_incs)))
# tsne_p.m:76
        gains[gains < min_gain]=min_gain
# tsne_p.m:78
        y_incs=dot(momentum,y_incs) - dot(epsilon,(multiply(gains,y_grads)))
# tsne_p.m:79
        ydata=ydata + y_incs
# tsne_p.m:80
        ydata=bsxfun(minus,ydata,mean(ydata,1))
# tsne_p.m:81
        # Update the momentum if necessary
        if iter == mom_switch_iter:
            momentum=copy(final_momentum)
# tsne_p.m:85
        if iter == stop_lying_iter and logical_not(initial_solution):
            # End of early exaggeration: undo the P *= 4 above.
            P=P / 4
# tsne_p.m:88
        # Print out progress (KL divergence) every 10 iterations.
        if logical_not(rem(iter,10)):
            cost=const - sum(multiply(ravel(P),log(ravel(Q))))
# tsne_p.m:93
            disp(concat(['Iteration ',num2str(iter),': error is ',num2str(cost)]))
        # Display scatter plot (maximally first three dimensions)
        if logical_not(rem(iter,10)) and logical_not(isempty(labels)):
            if no_dims == 1:
                scatter(ydata,ydata,9,labels,'filled')
            else:
                if no_dims == 2:
                    scatter(ydata(arange(),1),ydata(arange(),2),9,labels,'filled')
                else:
                    scatter3(ydata(arange(),1),ydata(arange(),2),ydata(arange(),3),40,labels,'filled')
            axis('tight')
            axis('off')
            drawnow
| 27.732026 | 130 | 0.607825 |
from libsmop import *
@function
def tsne_p(P=None,labels=None,no_dims=None,*args,**kwargs):
varargin = tsne_p.varargin
nargin = tsne_p.nargin
# The low-dimensional data representation is returned in mappedX.
# (C) Laurens van der Maaten, 2010
# University of California, San Diego
if logical_not(exist('labels','var')):
labels=[]
# tsne_p.m:21
if logical_not(exist('no_dims','var')) or isempty(no_dims):
no_dims=2
# tsne_p.m:24
# First check whether we already have an initial solution
if numel(no_dims) > 1:
initial_solution=copy(true)
# tsne_p.m:29
ydata=copy(no_dims)
# tsne_p.m:30
no_dims=size(ydata,2)
# tsne_p.m:31
else:
initial_solution=copy(false)
# tsne_p.m:33
# Initialize some variables
n=size(P,1)
# tsne_p.m:37
momentum=0.5
# tsne_p.m:38
final_momentum=0.8
# tsne_p.m:39
mom_switch_iter=250
# tsne_p.m:40
stop_lying_iter=100
# tsne_p.m:41
max_iter=1000
# tsne_p.m:42
epsilon=500
# tsne_p.m:43
min_gain=0.01
# tsne_p.m:44
# Make sure P-vals are set properly
P[arange(1,end(),n + 1)]=0
# tsne_p.m:47
P=dot(0.5,(P + P.T))
# tsne_p.m:48
P=max(P / sum(ravel(P)),realmin)
# tsne_p.m:49
const=sum(multiply(ravel(P),log(ravel(P))))
# tsne_p.m:50
if logical_not(initial_solution):
P=dot(P,4)
# tsne_p.m:52
# Initialize the solution
if logical_not(initial_solution):
ydata=dot(0.0001,randn(n,no_dims))
# tsne_p.m:57
y_incs=zeros(size(ydata))
# tsne_p.m:59
gains=ones(size(ydata))
# tsne_p.m:60
for iter in arange(1,max_iter).reshape(-1):
# Compute joint probability that point i and j are neighbors
sum_ydata=sum(ydata ** 2,2)
# tsne_p.m:66
num=1 / (1 + bsxfun(plus,sum_ydata,bsxfun(plus,sum_ydata.T,dot(- 2,(dot(ydata,ydata.T))))))
# tsne_p.m:67
num[arange(1,end(),n + 1)]=0
# tsne_p.m:68
Q=max(num / sum(ravel(num)),realmin)
# tsne_p.m:69
# Compute the gradients (faster implementation)
L=multiply((P - Q),num)
# tsne_p.m:72
y_grads=dot(dot(4,(diag(sum(L,1)) - L)),ydata)
# tsne_p.m:73
gains=multiply((gains + 0.2),(sign(y_grads) != sign(y_incs))) + multiply((dot(gains,0.8)),(sign(y_grads) == sign(y_incs)))
# tsne_p.m:76
gains[gains < min_gain]=min_gain
# tsne_p.m:78
y_incs=dot(momentum,y_incs) - dot(epsilon,(multiply(gains,y_grads)))
# tsne_p.m:79
ydata=ydata + y_incs
# tsne_p.m:80
ydata=bsxfun(minus,ydata,mean(ydata,1))
# tsne_p.m:81
if iter == mom_switch_iter:
momentum=copy(final_momentum)
# tsne_p.m:85
if iter == stop_lying_iter and logical_not(initial_solution):
P=P / 4
# tsne_p.m:88
# Print out progress
if logical_not(rem(iter,10)):
cost=const - sum(multiply(ravel(P),log(ravel(Q))))
# tsne_p.m:93
disp(concat(['Iteration ',num2str(iter),': error is ',num2str(cost)]))
# Display scatter plot (maximally first three dimensions)
if logical_not(rem(iter,10)) and logical_not(isempty(labels)):
if no_dims == 1:
scatter(ydata,ydata,9,labels,'filled')
else:
if no_dims == 2:
scatter(ydata(arange(),1),ydata(arange(),2),9,labels,'filled')
else:
scatter3(ydata(arange(),1),ydata(arange(),2),ydata(arange(),3),40,labels,'filled')
axis('tight')
axis('off')
drawnow
| true | true |
f71bf176600708ef5ff1a1e657e714c9aea3da7b | 664 | py | Python | dags/externaltasksensor_dag.py | vannguyen3007/Apache-Airflow | 1c8a6f3c9c0022807cbae4c4f83de33b8454ae24 | [
"Apache-2.0"
] | null | null | null | dags/externaltasksensor_dag.py | vannguyen3007/Apache-Airflow | 1c8a6f3c9c0022807cbae4c4f83de33b8454ae24 | [
"Apache-2.0"
] | null | null | null | dags/externaltasksensor_dag.py | vannguyen3007/Apache-Airflow | 1c8a6f3c9c0022807cbae4c4f83de33b8454ae24 | [
"Apache-2.0"
] | null | null | null | import pprint as pp
import airflow.utils.dates
from airflow import DAG
from airflow.sensors.external_task_sensor import ExternalTaskSensor
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime, timedelta
default_args = {
"owner": "airflow",
"start_date": airflow.utils.dates.days_ago(1)
}
with DAG(dag_id="externaltasksensor_dag", default_args=default_args, schedule_interval="@daily") as dag:
sensor = ExternalTaskSensor(
task_id='sensor',
external_dag_id='sleep_dag',
external_task_id='t2'
)
last_task = DummyOperator(task_id="last_task")
sensor >> last_task | 30.181818 | 104 | 0.733434 | import pprint as pp
import airflow.utils.dates
from airflow import DAG
from airflow.sensors.external_task_sensor import ExternalTaskSensor
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime, timedelta
default_args = {
"owner": "airflow",
"start_date": airflow.utils.dates.days_ago(1)
}
with DAG(dag_id="externaltasksensor_dag", default_args=default_args, schedule_interval="@daily") as dag:
sensor = ExternalTaskSensor(
task_id='sensor',
external_dag_id='sleep_dag',
external_task_id='t2'
)
last_task = DummyOperator(task_id="last_task")
sensor >> last_task | true | true |
f71bf1d55740e24cffa947f3eb30e8b50f0271f3 | 198 | py | Python | store_debug/store_debug/main.py | ruber0id/band-services-set | 471bb4bf561ca5ec9e0e3a7bda75574b0c718fde | [
"Apache-2.0"
] | 4 | 2018-08-27T05:36:37.000Z | 2018-08-29T09:41:50.000Z | store_debug/store_debug/main.py | ruber0id/band-services-set | 471bb4bf561ca5ec9e0e3a7bda75574b0c718fde | [
"Apache-2.0"
] | null | null | null | store_debug/store_debug/main.py | ruber0id/band-services-set | 471bb4bf561ca5ec9e0e3a7bda75574b0c718fde | [
"Apache-2.0"
] | 2 | 2020-05-20T14:50:59.000Z | 2020-08-31T14:44:57.000Z | import asyncio
import ujson
from band import logger, expose
"""
Listen events and write to output
"""
@expose.listener()
async def broadcast(**params):
logger.info('Broadcast', params=params)
| 16.5 | 43 | 0.737374 | import asyncio
import ujson
from band import logger, expose
@expose.listener()
async def broadcast(**params):
logger.info('Broadcast', params=params)
| true | true |
f71bf5092e429695b0035e78d005436c626887d2 | 26,148 | py | Python | python/mtap/deployment.py | benknoll-umn/mtap | 67d506aa4ffc960acca1988ec12c5391c15ad736 | [
"Apache-2.0"
] | 3 | 2020-03-06T21:24:24.000Z | 2021-03-21T06:38:00.000Z | python/mtap/deployment.py | benknoll-umn/mtap | 67d506aa4ffc960acca1988ec12c5391c15ad736 | [
"Apache-2.0"
] | 40 | 2019-10-14T17:02:54.000Z | 2022-03-09T13:35:54.000Z | python/mtap/deployment.py | benknoll-umn/mtap | 67d506aa4ffc960acca1988ec12c5391c15ad736 | [
"Apache-2.0"
] | 2 | 2019-10-14T15:42:46.000Z | 2020-03-05T23:29:01.000Z | # Copyright 2020 Regents of the University of Minnesota.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for deploying a set of processing services and the events server all at once.
Examples:
An example configuration
>>> deploy = Deployment(
>>> GlobalSettings(host='0.0.0.0'),
>>> EventsDeployment(port=10100, workers=8),
>>> SharedProcessorConfig(workers=8, jvm_args=['-Xms32m', '-Xmx8g'], classpath='blah.jar'),
>>> ProcessorDeployment(implementation='python',
>>> entry_point='mtap.examples.example_processor',
>>> instances=4,
>>> port=10101,
>>> workers=4),
>>> ProcessorDeployment(implementation='java',
>>> entry_point='edu.umn.nlpie.mtap.WordOccurrencesExampleProcessor',
>>> port=10105)
>>> )
>>> deploy.run_servers()
"""
import argparse
import logging
import os
import pathlib
import shutil
import subprocess
import sys
import threading
import time
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import grpc
from mtap import utilities, _config
__all__ = [
'Deployment', 'GlobalSettings', 'SharedProcessorConfig', 'EventsDeployment',
'ProcessorDeployment', 'main', 'deployment_parser', 'ServiceDeploymentException',
]
logger = logging.getLogger(__name__)
PYTHON_EXE = sys.executable
def _get_java() -> str:
    """Locate the ``java`` executable, preferring ``$JAVA_HOME/bin/java``.

    Falls back to the bare command name ``'java'`` (resolved through PATH by
    the OS) when the JAVA_HOME environment variable is not set.
    """
    java_home = os.environ.get('JAVA_HOME')
    if java_home is None:
        return 'java'
    return str(pathlib.Path(java_home) / 'bin' / 'java')
JAVA_EXE = _get_java()
def _listen(process: subprocess.Popen) -> int:
    """Forward a child process's stdout to our stdout and return its exit code.

    Blocks until the child's stdout is exhausted (i.e. the pipe closes), then
    waits for the process to terminate.
    """
    stream = process.stdout
    while True:
        raw = stream.readline()
        if not raw:
            break
        print(raw.decode(), end='', flush=True)
    return process.wait()
class ServiceDeploymentException(Exception):
    """Raised when one of the configured services fails to launch."""
    # Exception's default constructor already accepts arbitrary positional
    # message arguments, so no __init__ override is needed.
    pass
class GlobalSettings:
    """Settings shared by the events service and every processor service.

    Keyword Args:
        host (Optional[str]): The global host; services default to "127.0.0.1"
            when unset.
        mtap_config (Optional[str]): Path to an MTAP config file loaded by all
            services.
        log_level (Optional[str]): A python logging level passed to all
            services.
        register (Optional[bool]): Whether services should register with
            service discovery.

    Attributes:
        host (Optional[str]): The global host.
        mtap_config (Optional[str]): Path to an MTAP config file.
        log_level (Optional[str]): A python logging level.
        register (Optional[bool]): Whether to register with service discovery.
    """
    def __init__(self, *,
                 host: Optional[str] = None,
                 mtap_config: Optional[str] = None,
                 log_level: Optional[str] = None,
                 register: Optional[bool] = None):
        self.host = host
        self.mtap_config = mtap_config
        self.log_level = log_level
        self.register = register

    @staticmethod
    def from_conf(conf: Optional[Dict]) -> 'GlobalSettings':
        """Creates a global settings object from a configuration dictionary.

        Args:
            conf (Optional[Dict]): The configuration dictionary; ``None`` or
                empty yields all-default settings.

        Returns:
            GlobalSettings: The global settings object.
        """
        settings = conf or {}
        return GlobalSettings(
            host=settings.get('host'),
            mtap_config=settings.get('mtap_config'),
            log_level=settings.get('log_level'),
            register=settings.get('register'),
        )
class SharedProcessorConfig:
    """Configuration values shared by every processor in a deployment.

    Keyword Args:
        events_addresses (Optional[List[str]]): Optional GRPC-compatible
            targets for the events service, used by all processors.
        workers (Optional[int]): The default number of worker threads which
            will perform processing.
        additional_args (Optional[List[str]]): Additional arguments appended
            to every processor call.
        jvm_args (Optional[List[str]]): JVM arguments for all java processors.
        java_classpath (Optional[str]): A classpath string passed to all java
            processors.
        startup_timeout (Optional[int]): The default startup timeout for
            processors; falls back to 30 seconds when unset.
        mp_spawn_method (Optional[str]): A :meth:`multiprocessing.get_context`
            argument used to create the multiprocessing context.

    Attributes mirror the keyword arguments above.
    """
    def __init__(self,
                 events_addresses: Optional[List[str]] = None,
                 workers: Optional[int] = None,
                 additional_args: Optional[List[str]] = None,
                 jvm_args: Optional[List[str]] = None,
                 java_classpath: Optional[str] = None,
                 startup_timeout: Optional[int] = None,
                 mp_spawn_method: Optional[str] = None):
        self.events_addresses = events_addresses
        self.workers = workers
        self.additional_args = additional_args
        self.jvm_args = jvm_args
        self.java_classpath = java_classpath
        # Any falsy timeout (None or 0) collapses to the 30 second default.
        self.startup_timeout = startup_timeout or 30
        self.mp_spawn_method = mp_spawn_method

    @staticmethod
    def from_conf(conf: Optional[Dict]) -> 'SharedProcessorConfig':
        """Builds a configuration from a dictionary representation.

        Args:
            conf (Optional[Dict]): Keyword arguments for the constructor, or
                ``None``/empty for all defaults.

        Returns:
            SharedProcessorConfig: The constructed configuration object.
        """
        if not conf:
            return SharedProcessorConfig()
        return SharedProcessorConfig(**conf)
class _ServiceDeployment:
    """Per-service overrides for the common MTAP service CLI flags.

    Renders the overrides, merged with deployment-wide defaults, into an
    argument list via :meth:`service_args`.
    """
    def __init__(self,
                 workers: Optional[int],
                 register: Optional[bool],
                 mtap_config: Optional[str],
                 log_level: Optional[str]):
        self.workers = workers
        self.register = register
        self.mtap_config = mtap_config
        self.log_level = log_level

    def service_args(self,
                     host: Optional[str] = None,
                     port: Optional[int] = None,
                     register_default: Optional[bool] = None,
                     host_default: Optional[str] = None,
                     workers_default: Optional[int] = None,
                     mtap_config_default: Optional[str] = None,
                     log_level_default: Optional[str] = None):
        """Builds the shared command-line arguments for launching one service.

        Values stored on this object win over the ``*_default`` values
        supplied by the caller. Always ends with ``--write-address`` so the
        launched service writes out its bound address (later read back via
        ``utilities.read_address`` during connectivity checks).
        """
        args = []
        effective_host = host or host_default
        if effective_host is not None:
            args += ['--host', str(effective_host)]
        if port is not None:
            args += ['--port', str(port)]
        # Either the service-specific or the global registration flag enables it.
        if self.register or register_default:
            args += ['--register']
        effective_workers = self.workers or workers_default
        if effective_workers is not None:
            args += ['--workers', str(effective_workers)]
        effective_config = self.mtap_config or mtap_config_default
        if effective_config is not None:
            args += ['--mtap-config', effective_config]
        effective_level = self.log_level or log_level_default
        if effective_level is not None:
            args += ['--log-level', effective_level]
        args += ['--write-address']
        return args
class EventsDeployment:
    """Deployment configuration for the events service.

    Keyword Args:
        enabled (bool): Whether an events service should be created.
        addresses (~typing.Optional[~typing.Sequence[str]]): Listen addresses for the events
            service instances, each "host", "host:port", or ":port".
        workers (~typing.Optional[int]): The number of worker threads the events service should use.
        register (~typing.Optional[bool]): Whether to register the events service with discovery.
        mtap_config (~typing.Optional[str]): Path to an mtap configuration file.
        log_level (~typing.Optional[str]): The log level for the events service.
    """
    def __init__(self, *,
                 enabled: bool = True,
                 addresses: Optional[Sequence[str]] = None,
                 workers: Optional[int] = None,
                 register: Optional[bool] = None,
                 mtap_config: Optional[str] = None,
                 log_level: Optional[str] = None):
        self.enabled = enabled
        self.addresses = addresses
        self.service_deployment = _ServiceDeployment(workers, register, mtap_config, log_level)

    def create_calls(self, global_settings: GlobalSettings) -> Iterable[List[str]]:
        """Generates the subprocess argument list for each configured events address.

        Args:
            global_settings (GlobalSettings): Deployment-wide defaults merged into each call.

        Yields:
            List[str]: The ``python -m mtap events ...`` invocation for one instance.
        """
        for address in self.addresses:
            host = None
            port = None
            if address:
                # Split "host:port"; a bare "host" or ":port" leaves the other unset.
                # NOTE(review): addresses containing more than one ':' (e.g. IPv6)
                # fall through to the bare-host branch — confirm intended.
                splits = address.split(':')
                if len(splits) == 2:
                    host, port = splits
                    if host == '':
                        host = None
                else:
                    host = splits[0]
            call = [PYTHON_EXE, '-m', 'mtap', 'events']
            service_args = self.service_deployment.service_args(
                host=host,
                port=port,
                register_default=global_settings.register,
                host_default=global_settings.host,
                mtap_config_default=global_settings.mtap_config,
                log_level_default=global_settings.log_level
            )
            call.extend(service_args)
            yield call

    @staticmethod
    def from_conf(conf: Optional[Dict]) -> 'EventsDeployment':
        """Creates the EventsDeployment configuration option from a configuration dictionary.

        Args:
            conf (Optional[Dict]): The configuration dictionary.

        Returns:
            EventsDeployment: Created from the configuration dictionary.

        Raises:
            ValueError: If the configured address is neither a string nor an iterable.
        """
        conf = conf or {}
        enabled = conf.get('enabled')
        if enabled is None:
            # Unlike the constructor (which defaults to True), a missing key in
            # a config file means the events service is not launched.
            enabled = False
        address = conf.get('address', None) or conf.get('addresses', None)
        if address is None:
            addresses = []
        elif isinstance(address, str):
            # A single address; this str check must precede the Iterable check
            # because strings are themselves iterable.
            addresses = [address]
        elif isinstance(address, Iterable):
            addresses = list(address)
        else:
            # BUG FIX: concatenating a str with a type object raised TypeError,
            # masking the intended ValueError; format the type into the message.
            raise ValueError('Unrecognized type of address: {}'.format(type(address)))
        # NOTE(review): 'log_level' from the config is not forwarded here —
        # confirm whether that omission is intentional.
        return EventsDeployment(enabled=enabled, addresses=addresses,
                                workers=conf.get('workers'), register=conf.get('register'),
                                mtap_config=conf.get('mtap_config'))
class ProcessorDeployment:
    """Deployment configuration for an MTAP processor.

    Used to construct the command for launching the processor. The processor should be a Java Class
    with a main method or a Python module with a main block. It should accept the standard MTAP
    processor deployment arguments and launch an MTAP processor using :func:`mtap.run_processor` or
    the equivalent Java method.

    Args:
        implementation (str): Either "java" or "python".
        entry_point (str): Either the java main class, or the python main module.
        enabled (~typing.Optional[bool]): Whether the processor should be launched as part of
            deployment. Default is `True` if `None`.
        instances (~typing.Optional[int]): The number of instances of the processor to launch.
            Default is `1` if `None`.
        host (~typing.Optional[str]): The listening host for the processor service.
        port (~typing.Optional[int]): The listening port for the processor service.
        workers (~typing.Optional[int]): The number of worker threads per instance.
        register (~typing.Optional[bool]):
            Whether the processor should register with the discovery service specified in the MTAP
            configuration
        mtap_config (~typing.Optional[str]): Path to the MTAP configuration file.
        log_level (~typing.Optional[str]): The log level for the processor.
        identifier (~typing.Optional[str]): An optional identifier override to use for registration.
        pre_args (~typing.Optional[~typing.List[str]]):
            Arguments that occur prior to the MTAP service arguments (like host, port, etc).
        additional_args (~typing.Optional[~typing.List[str]]):
            Arguments that occur after the MTAP service arguments.
        startup_timeout (~typing.Optional[int]): Optional override startup timeout.
        mp_spawn_method (~typing.Optional[str]): A :meth:`multiprocessing.get_context` argument to create
            the multiprocessing context.
    """
    def __init__(self,
                 implementation: str,
                 entry_point: str,
                 *, enabled: Optional[bool] = None,
                 instances: Optional[int] = None,
                 host: Optional[str] = None,
                 port: Optional[int] = None,
                 workers: Optional[int] = None,
                 register: Optional[bool] = None,
                 mtap_config: Optional[str] = None,
                 log_level: Optional[str] = None,
                 identifier: Optional[str] = None,
                 pre_args: Optional[List[str]] = None,
                 additional_args: Optional[List[str]] = None,
                 startup_timeout: Optional[int] = None,
                 mp_spawn_method: Optional[str] = None):
        # Enabled defaults to True when unspecified.
        self.enabled = enabled if enabled is not None else True
        self.implementation = implementation
        self.entry_point = entry_point
        # Falsy values (None or 0) collapse to a single instance.
        self.instances = instances or 1
        if not isinstance(self.instances, int) or self.instances < 1:
            raise ValueError("Instances must be strictly positive integer.")
        self.identifier = identifier
        self.pre_args = pre_args
        self.additional_args = additional_args
        self.host = host
        self.port = port
        self.service_deployment = _ServiceDeployment(workers, register, mtap_config, log_level)
        self.startup_timeout = startup_timeout
        self.mp_spawn_method = mp_spawn_method
    @staticmethod
    def from_conf(conf: Dict) -> 'ProcessorDeployment':
        """Creates an MTAP processor deployment configuration from a configuration dictionary.

        Args:
            conf (Dict): The configuration dictionary (keyword arguments for the constructor).

        Returns:
            ProcessorDeployment object that can be used to construct the call for the processor.
        """
        return ProcessorDeployment(**conf)
    def create_calls(self,
                     global_settings: GlobalSettings,
                     shared_config: SharedProcessorConfig) -> Iterable[List[str]]:
        # Resolve one port per instance: an explicit list is used as-is, None
        # lets each instance pick its own port, and a single int becomes a
        # contiguous range of `instances` ports.
        # NOTE(review): when `port` is a list, `instances` is ignored and the
        # list length determines the instance count — confirm intended.
        if isinstance(self.port, list):
            ports = self.port
        elif self.port is None:
            ports = [None] * self.instances
        else:
            ports = list(range(self.port, self.port + self.instances))
        for port in ports:
            if self.implementation == 'python':
                call = [PYTHON_EXE, '-m', self.entry_point]
                # The per-processor spawn method overrides the shared default.
                mp_spawn_method = shared_config.mp_spawn_method
                if self.mp_spawn_method is not None:
                    mp_spawn_method = self.mp_spawn_method
                if mp_spawn_method is not None:
                    call.extend(['--mp-spawn-method', mp_spawn_method])
            elif self.implementation == 'java':
                # JVM options must precede -cp and the main class name.
                call = [str(JAVA_EXE)]
                if shared_config.jvm_args is not None:
                    call.extend(shared_config.jvm_args)
                if shared_config.java_classpath is not None:
                    call.extend(['-cp', shared_config.java_classpath])
                call.append(self.entry_point)
            else:
                raise ValueError('Unrecognized implementation: ' + self.implementation)
            # Argument order: pre_args, MTAP service args, identifier, events
            # addresses, per-processor extras, then shared extras.
            if self.pre_args is not None:
                call.extend(self.pre_args)
            service_args = self.service_deployment.service_args(
                host=self.host,
                port=port,
                register_default=global_settings.register,
                host_default=global_settings.host,
                mtap_config_default=global_settings.mtap_config,
                log_level_default=global_settings.log_level,
                workers_default=shared_config.workers
            )
            call.extend(service_args)
            if self.identifier is not None:
                call.extend(['--identifier', self.identifier])
            events_addresses = shared_config.events_addresses
            if events_addresses is not None:
                call.extend(['--events', ','.join(events_addresses)])
            if self.additional_args is not None:
                call.extend(self.additional_args)
            if shared_config.additional_args is not None:
                call.extend(shared_config.additional_args)
            yield call
class Deployment:
    """An automatic deployment configuration which launches a configurable set of MTAP services.

    Args:
        global_settings (~typing.Optional[GlobalSettings]): Settings shared among all services.
        events_deployment (~typing.Optional[EventsDeployment]):
            Deployment settings for the events service.
        shared_processor_config (~typing.Optional[SharedProcessorConfig]):
            Shared configuration settings for all processors.
        processors (vararg ProcessorDeployment): Configurations for individual processors.
    """
    def __init__(self,
                 global_settings: Optional[GlobalSettings] = None,
                 events_deployment: Optional[EventsDeployment] = None,
                 shared_processor_config: Optional[SharedProcessorConfig] = None,
                 *processors: ProcessorDeployment):
        # NOTE(review): run_servers dereferences all three of these without
        # None checks, so despite the Optional defaults they must be supplied
        # for a runnable deployment — confirm intended.
        self.global_settings = global_settings
        self.events_deployment = events_deployment
        self.shared_processor_config = shared_processor_config
        self.processors = processors
    @staticmethod
    def load_configuration(conf: Dict) -> 'Deployment':
        """Creates a deployment object from a configuration dictionary.

        Args:
            conf (Dict): The configuration dictionary, with optional keys
                'global', 'events_service', 'shared_processor_config', and
                'processors'.

        Returns:
            Deployment: Deployment object created.
        """
        global_settings = GlobalSettings.from_conf(conf.get('global'))
        events = EventsDeployment.from_conf(conf.get('events_service'))
        shared_processor_config = SharedProcessorConfig.from_conf(conf.get('shared_processor_config'))
        processors_list = conf.get('processors', [])
        processors = [ProcessorDeployment.from_conf(c) for c in processors_list]
        return Deployment(global_settings, events, shared_processor_config, *processors)
    @staticmethod
    def from_yaml_file(conf_path: Union[pathlib.Path, str]) -> 'Deployment':
        """Loads a deployment configuration from a yaml file.

        Args:
            conf_path (str or pathlib.Path): The path to the yaml configuration file.

        Returns:
            Deployment: Deployment object created from the configuration.
        """
        conf_path = pathlib.Path(conf_path)
        from yaml import load
        # Prefer the C-accelerated libyaml loader when it is installed.
        try:
            from yaml import CLoader as Loader
        except ImportError:
            from yaml import Loader
        with conf_path.open('rb') as f:
            conf = load(f, Loader=Loader)
        return Deployment.load_configuration(conf)
    def run_servers(self):
        """Starts all of the configured services and blocks until interrupted.

        Launches the events service(s) first, then every enabled processor,
        waiting for each to become reachable before continuing. Blocks until
        KeyboardInterrupt, at which point all launched listeners are shut down.

        Raises:
            ServiceDeploymentException: If one of the events services fails to launch.
        """
        # Only the proxy flag is read from the MTAP config; the config context
        # is closed immediately afterwards.
        with _config.Config() as c:
            enable_proxy = c.get('grpc.enable_proxy', False)
        processes_listeners = []
        events_addresses = []
        # Joins each stdout-forwarding thread; with kill=True each child
        # process is killed first. NOTE(review): with kill=False the child
        # processes are never explicitly terminated — confirm intended.
        def shutdown(kill=False):
            print("Shutting down all processors")
            for p, listener in processes_listeners:
                if kill:
                    p.kill()
                listener.join(timeout=1)
        if self.events_deployment.enabled:
            # Launch one events server per configured address and record where
            # each one actually ended up listening.
            for call in self.events_deployment.create_calls(self.global_settings):
                p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                listener, events_address = _listen_test_connectivity(p, "events", 30, enable_proxy)
                events_addresses.append(events_address)
                processes_listeners.append((p, listener))
        # Without service discovery registration or explicitly configured
        # events addresses, point all processors at the servers launched above.
        if (not self.global_settings.register
                and not self.events_deployment.service_deployment.register
                and self.shared_processor_config.events_addresses is None):
            self.shared_processor_config.events_addresses = events_addresses
        for processor_deployment in self.processors:
            if processor_deployment.enabled:
                for call in processor_deployment.create_calls(self.global_settings,
                                                              self.shared_processor_config):
                    logger.debug('Launching processor with call: %s', call)
                    p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                    # Per-processor timeout wins over the shared default.
                    startup_timeout = (processor_deployment.startup_timeout
                                       or self.shared_processor_config.startup_timeout)
                    try:
                        listener, _ = _listen_test_connectivity(p, call, startup_timeout,
                                                                enable_proxy)
                    except ServiceDeploymentException as e:
                        # One processor failing to come up aborts the whole
                        # deployment and kills everything launched so far.
                        logger.error(str(e))
                        return shutdown(kill=True)
                    processes_listeners.append((p, listener))
        print('Done deploying all servers.', flush=True)
        # Park the main thread (in day-long sleeps) until Ctrl-C.
        try:
            while True:
                time.sleep(60 * 60 * 24)
        except KeyboardInterrupt:
            return shutdown()
def _listen_test_connectivity(p: subprocess.Popen,
                              name: Any,
                              startup_timeout: int,
                              enable_proxy: bool = False) -> Tuple[threading.Thread, str]:
    """Starts a stdout-forwarding thread for a launched service and waits for it to come up.

    Polls for the address file written by the child (via ``--write-address``),
    then waits for its gRPC channel to become ready.

    Returns:
        Tuple of the stdout-forwarding thread and the service's address.

    Raises:
        ServiceDeploymentException: If the address never appears or the
            channel never becomes ready within ``startup_timeout`` seconds.
    """
    forwarder = threading.Thread(target=_listen, args=(p,))
    forwarder.start()
    address = None
    attempts = 0
    # Poll once per second, up to startup_timeout times, for the address file.
    while attempts < startup_timeout:
        try:
            address = utilities.read_address(str(p.pid))
        except FileNotFoundError:
            attempts += 1
            time.sleep(1)
        else:
            break
    if address is None:
        raise ServiceDeploymentException('Timed out waiting for {} to launch'.format(name))
    channel_options = [('grpc.enable_http_proxy', enable_proxy)]
    with grpc.insecure_channel(address, options=channel_options) as channel:
        ready = grpc.channel_ready_future(channel)
        try:
            ready.result(timeout=startup_timeout)
        except grpc.FutureTimeoutError:
            raise ServiceDeploymentException('Failed to launch: {}'.format(name))
    return forwarder, address
def main(args: Optional[Sequence[str]] = None,
         conf: Optional[argparse.Namespace] = None):
    """Command-line entry point for the deployment tool.

    Args:
        args: Raw command-line arguments to parse; defaults to ``sys.argv``.
        conf: A pre-parsed namespace; when given, ``args`` is ignored.
    """
    namespace = conf
    if namespace is None:
        namespace = deployment_parser().parse_args(args)
    if namespace.log_level is not None:
        logging.basicConfig(level=getattr(logging, namespace.log_level))
    mode = namespace.mode
    if mode == 'run_servers':
        Deployment.from_yaml_file(namespace.deploy_config).run_servers()
    elif mode == 'write_example':
        # Copy the packaged example configuration into the working directory.
        source = pathlib.Path(__file__).parent / "examples" / "exampleDeploymentConfiguration.yml"
        shutil.copyfile(str(source), "exampleDeploymentConfiguration.yml")
        print('Writing "exampleDeploymentConfiguration.yml" to ' + str(pathlib.Path.cwd()))
def deployment_parser() -> argparse.ArgumentParser:
    """Creates a parser for configuration that can be passed to the deployment main method.

    Returns:
        ~argparse.ArgumentParser: The argument parser object that will create a namespace that can
        be passed to :func:`main`.
    """
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument(
        '--log-level',
        metavar='LEVEL',
        help="The log level to use for the deployment script."
    )
    modes = parser.add_subparsers(title='mode')
    # 'run_servers' launches the services described by a configuration file.
    run_servers_cmd = modes.add_parser('run_servers')
    run_servers_cmd.set_defaults(mode='run_servers')
    run_servers_cmd.add_argument(
        'deploy_config',
        metavar='CONFIG_FILE',
        type=pathlib.Path,
        help="A path to the deployment configuration to deploy."
    )
    # 'write_example' copies a sample configuration into the current directory.
    write_example_cmd = modes.add_parser('write_example')
    write_example_cmd.set_defaults(mode='write_example')
    return parser
# Entry point for direct execution (`python deployment.py ...`).
if __name__ == '__main__':
    main()
| 40.728972 | 105 | 0.618594 |
import argparse
import logging
import os
import pathlib
import shutil
import subprocess
import sys
import threading
import time
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import grpc
from mtap import utilities, _config
__all__ = [
'Deployment', 'GlobalSettings', 'SharedProcessorConfig', 'EventsDeployment',
'ProcessorDeployment', 'main', 'deployment_parser', 'ServiceDeploymentException',
]
logger = logging.getLogger(__name__)
PYTHON_EXE = sys.executable
def _get_java() -> str:
try:
return str(pathlib.Path(os.environ['JAVA_HOME']) / 'bin' / 'java')
except KeyError:
return 'java'
JAVA_EXE = _get_java()
def _listen(process: subprocess.Popen) -> int:
for line in process.stdout:
print(line.decode(), end='', flush=True)
return process.wait()
class ServiceDeploymentException(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class GlobalSettings:
def __init__(self, *,
host: Optional[str] = None,
mtap_config: Optional[str] = None,
log_level: Optional[str] = None,
register: Optional[bool] = None):
self.host = host
self.mtap_config = mtap_config
self.log_level = log_level
self.register = register
@staticmethod
def from_conf(conf: Optional[Dict]) -> 'GlobalSettings':
conf = conf or {}
return GlobalSettings(host=conf.get('host'), mtap_config=conf.get('mtap_config'),
log_level=conf.get('log_level'), register=conf.get('register'))
class SharedProcessorConfig:
def __init__(self,
events_addresses: Optional[List[str]] = None,
workers: Optional[int] = None,
additional_args: Optional[List[str]] = None,
jvm_args: Optional[List[str]] = None,
java_classpath: Optional[str] = None,
startup_timeout: Optional[int] = None,
mp_spawn_method: Optional[str] = None):
self.events_addresses = events_addresses
self.workers = workers
self.additional_args = additional_args
self.jvm_args = jvm_args
self.java_classpath = java_classpath
self.startup_timeout = startup_timeout or 30
self.mp_spawn_method = mp_spawn_method
@staticmethod
def from_conf(conf: Optional[Dict]) -> 'SharedProcessorConfig':
conf = conf or {}
return SharedProcessorConfig(**conf)
class _ServiceDeployment:
def __init__(self,
workers: Optional[int],
register: Optional[bool],
mtap_config: Optional[str],
log_level: Optional[str]):
self.workers = workers
self.register = register
self.mtap_config = mtap_config
self.log_level = log_level
def service_args(self,
host: Optional[str] = None,
port: Optional[int] = None,
register_default: Optional[bool] = None,
host_default: Optional[str] = None,
workers_default: Optional[int] = None,
mtap_config_default: Optional[str] = None,
log_level_default: Optional[str] = None):
call = []
host = host or host_default
if host is not None:
call.extend(['--host', str(host)])
if port is not None:
call.extend(['--port', str(port)])
if self.register or register_default:
call.append('--register')
workers = self.workers or workers_default
if workers is not None:
call.extend(['--workers', str(workers)])
mtap_config = self.mtap_config or mtap_config_default
if mtap_config is not None:
call.extend(['--mtap-config', mtap_config])
log_level = self.log_level or log_level_default
if log_level is not None:
call.extend(['--log-level', log_level])
call.append('--write-address')
return call
class EventsDeployment:
def __init__(self, *,
enabled: bool = True,
addresses: Optional[Sequence[str]] = None,
workers: Optional[int] = None,
register: Optional[bool] = None,
mtap_config: Optional[str] = None,
log_level: Optional[str] = None):
self.enabled = enabled
self.addresses = addresses
self.service_deployment = _ServiceDeployment(workers, register, mtap_config, log_level)
def create_calls(self, global_settings: GlobalSettings) -> Iterable[List[str]]:
for address in self.addresses:
host = None
port = None
if address:
splits = address.split(':')
if len(splits) == 2:
host, port = splits
if host == '':
host = None
else:
host = splits[0]
call = [PYTHON_EXE, '-m', 'mtap', 'events']
service_args = self.service_deployment.service_args(
host=host,
port=port,
register_default=global_settings.register,
host_default=global_settings.host,
mtap_config_default=global_settings.mtap_config,
log_level_default=global_settings.log_level
)
call.extend(service_args)
yield call
@staticmethod
def from_conf(conf: Optional[Dict]) -> 'EventsDeployment':
conf = conf or {}
enabled = conf.get('enabled')
if enabled is None:
enabled = False
address = conf.get('address', None) or conf.get('addresses', None)
if address is None:
addresses = []
elif isinstance(address, str):
addresses = [address]
elif isinstance(address, Iterable):
addresses = list(address)
else:
raise ValueError('Unrecognized type of address: ' + type(address))
return EventsDeployment(enabled=enabled, addresses=addresses,
workers=conf.get('workers'), register=conf.get('register'),
mtap_config=conf.get('mtap_config'))
class ProcessorDeployment:
def __init__(self,
implementation: str,
entry_point: str,
*, enabled: Optional[bool] = None,
instances: Optional[int] = None,
host: Optional[str] = None,
port: Optional[int] = None,
workers: Optional[int] = None,
register: Optional[bool] = None,
mtap_config: Optional[str] = None,
log_level: Optional[str] = None,
identifier: Optional[str] = None,
pre_args: Optional[List[str]] = None,
additional_args: Optional[List[str]] = None,
startup_timeout: Optional[int] = None,
mp_spawn_method: Optional[str] = None):
self.enabled = enabled if enabled is not None else True
self.implementation = implementation
self.entry_point = entry_point
self.instances = instances or 1
if not isinstance(self.instances, int) or self.instances < 1:
raise ValueError("Instances must be strictly positive integer.")
self.identifier = identifier
self.pre_args = pre_args
self.additional_args = additional_args
self.host = host
self.port = port
self.service_deployment = _ServiceDeployment(workers, register, mtap_config, log_level)
self.startup_timeout = startup_timeout
self.mp_spawn_method = mp_spawn_method
@staticmethod
def from_conf(conf: Dict) -> 'ProcessorDeployment':
return ProcessorDeployment(**conf)
def create_calls(self,
global_settings: GlobalSettings,
shared_config: SharedProcessorConfig) -> Iterable[List[str]]:
if isinstance(self.port, list):
ports = self.port
elif self.port is None:
ports = [None] * self.instances
else:
ports = list(range(self.port, self.port + self.instances))
for port in ports:
if self.implementation == 'python':
call = [PYTHON_EXE, '-m', self.entry_point]
mp_spawn_method = shared_config.mp_spawn_method
if self.mp_spawn_method is not None:
mp_spawn_method = self.mp_spawn_method
if mp_spawn_method is not None:
call.extend(['--mp-spawn-method', mp_spawn_method])
elif self.implementation == 'java':
call = [str(JAVA_EXE)]
if shared_config.jvm_args is not None:
call.extend(shared_config.jvm_args)
if shared_config.java_classpath is not None:
call.extend(['-cp', shared_config.java_classpath])
call.append(self.entry_point)
else:
raise ValueError('Unrecognized implementation: ' + self.implementation)
if self.pre_args is not None:
call.extend(self.pre_args)
service_args = self.service_deployment.service_args(
host=self.host,
port=port,
register_default=global_settings.register,
host_default=global_settings.host,
mtap_config_default=global_settings.mtap_config,
log_level_default=global_settings.log_level,
workers_default=shared_config.workers
)
call.extend(service_args)
if self.identifier is not None:
call.extend(['--identifier', self.identifier])
events_addresses = shared_config.events_addresses
if events_addresses is not None:
call.extend(['--events', ','.join(events_addresses)])
if self.additional_args is not None:
call.extend(self.additional_args)
if shared_config.additional_args is not None:
call.extend(shared_config.additional_args)
yield call
class Deployment:
    """A full deployment description: global settings, the events service,
    shared processor configuration, and the processors to launch.
    """
    def __init__(self,
                 global_settings: Optional[GlobalSettings] = None,
                 events_deployment: Optional[EventsDeployment] = None,
                 shared_processor_config: Optional[SharedProcessorConfig] = None,
                 *processors: ProcessorDeployment):
        self.global_settings = global_settings
        self.events_deployment = events_deployment
        self.shared_processor_config = shared_processor_config
        self.processors = processors
    @staticmethod
    def load_configuration(conf: Dict) -> 'Deployment':
        """Build a Deployment from an already-parsed configuration mapping."""
        global_settings = GlobalSettings.from_conf(conf.get('global'))
        events = EventsDeployment.from_conf(conf.get('events_service'))
        shared_processor_config = SharedProcessorConfig.from_conf(conf.get('shared_processor_config'))
        processors_list = conf.get('processors', [])
        processors = [ProcessorDeployment.from_conf(c) for c in processors_list]
        return Deployment(global_settings, events, shared_processor_config, *processors)
    @staticmethod
    def from_yaml_file(conf_path: Union[pathlib.Path, str]) -> 'Deployment':
        """Load a Deployment from a YAML configuration file at ``conf_path``."""
        conf_path = pathlib.Path(conf_path)
        from yaml import load
        try:
            # Prefer the C-accelerated loader when PyYAML was built with it.
            from yaml import CLoader as Loader
        except ImportError:
            from yaml import Loader
        with conf_path.open('rb') as f:
            conf = load(f, Loader=Loader)
        return Deployment.load_configuration(conf)
    def run_servers(self):
        """Launch the events service (when enabled) and every enabled
        processor as child processes, then block until KeyboardInterrupt,
        at which point all children are shut down.
        """
        # The proxy flag is read once from the mtap configuration.
        with _config.Config() as c:
            enable_proxy = c.get('grpc.enable_proxy', False)
        processes_listeners = []
        events_addresses = []
        def shutdown(kill=False):
            # Closure over processes_listeners: stops every launched child.
            print("Shutting down all processors")
            for p, listener in processes_listeners:
                if kill:
                    p.kill()
                listener.join(timeout=1)
        if self.events_deployment.enabled:
            for call in self.events_deployment.create_calls(self.global_settings):
                p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                listener, events_address = _listen_test_connectivity(p, "events", 30, enable_proxy)
                events_addresses.append(events_address)
                processes_listeners.append((p, listener))
        # When nothing registers with service discovery and no explicit events
        # addresses were configured, point processors at the ones just launched.
        if (not self.global_settings.register
                and not self.events_deployment.service_deployment.register
                and self.shared_processor_config.events_addresses is None):
            self.shared_processor_config.events_addresses = events_addresses
        for processor_deployment in self.processors:
            if processor_deployment.enabled:
                for call in processor_deployment.create_calls(self.global_settings,
                                                              self.shared_processor_config):
                    logger.debug('Launching processor with call: %s', call)
                    p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                    # Per-processor timeout overrides the shared default.
                    startup_timeout = (processor_deployment.startup_timeout
                                       or self.shared_processor_config.startup_timeout)
                    try:
                        listener, _ = _listen_test_connectivity(p, call, startup_timeout,
                                                                enable_proxy)
                    except ServiceDeploymentException as e:
                        # One failed launch aborts the whole deployment.
                        logger.error(str(e))
                        return shutdown(kill=True)
                    processes_listeners.append((p, listener))
        print('Done deploying all servers.', flush=True)
        try:
            # Sleep in day-long increments until the user interrupts.
            while True:
                time.sleep(60 * 60 * 24)
        except KeyboardInterrupt:
            return shutdown()
def _listen_test_connectivity(p: subprocess.Popen,
                              name: Any,
                              startup_timeout: int,
                              enable_proxy: bool = False) -> Tuple[threading.Thread, str]:
    """Start a thread relaying the child's output, then wait for the service
    to publish its address and for its gRPC channel to become ready.

    Raises ServiceDeploymentException when either step times out.
    """
    output_thread = threading.Thread(target=_listen, args=(p,))
    output_thread.start()
    # Poll once per second for the address file the service writes on startup.
    address = None
    attempts = 0
    while attempts < startup_timeout:
        try:
            address = utilities.read_address(str(p.pid))
        except FileNotFoundError:
            attempts += 1
            time.sleep(1)
        else:
            break
    if address is None:
        raise ServiceDeploymentException('Timed out waiting for {} to launch'.format(name))
    channel_options = [('grpc.enable_http_proxy', enable_proxy)]
    with grpc.insecure_channel(address, options=channel_options) as channel:
        try:
            grpc.channel_ready_future(channel).result(timeout=startup_timeout)
        except grpc.FutureTimeoutError:
            raise ServiceDeploymentException('Failed to launch: {}'.format(name))
    return output_thread, address
def main(args: Optional[Sequence[str]] = None,
         conf: Optional[argparse.Namespace] = None):
    """Entry point for the deployment command-line interface.

    :param args: Command-line arguments to parse; ignored when ``conf`` given.
    :param conf: An already-parsed argument namespace.
    """
    if conf is None:
        conf = deployment_parser().parse_args(args)
    if conf.log_level is not None:
        logging.basicConfig(level=getattr(logging, conf.log_level))
    # ``mode`` is only present when a sub-command was selected; the original
    # ``conf.mode`` access raised AttributeError when none was given.
    mode = getattr(conf, 'mode', None)
    if mode == 'run_servers':
        deployment = Deployment.from_yaml_file(conf.deploy_config)
        deployment.run_servers()
    elif mode == 'write_example':
        example = pathlib.Path(__file__).parent / "examples" / "exampleDeploymentConfiguration.yml"
        shutil.copyfile(str(example), "exampleDeploymentConfiguration.yml")
        print('Writing "exampleDeploymentConfiguration.yml" to ' + str(pathlib.Path.cwd()))
def deployment_parser() -> argparse.ArgumentParser:
    """Create the argument parser for the deployment command-line interface."""
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--log-level', metavar='LEVEL',
                        help="The log level to use for the deployment script.")
    subparsers = parser.add_subparsers(title='mode')
    run_servers_parser = subparsers.add_parser('run_servers')
    run_servers_parser.add_argument('deploy_config', metavar='CONFIG_FILE', type=pathlib.Path,
                                    help="A path to the deployment configuration to deploy.")
    run_servers_parser.set_defaults(mode='run_servers')
    # The write_example sub-command takes no arguments of its own.
    subparsers.add_parser('write_example').set_defaults(mode='write_example')
    return parser
if __name__ == '__main__':
    # Allow running this module directly as a script.
    main()
| true | true |
f71bf509bb30658310cb6206ecdb34d8c4c8f548 | 6,084 | py | Python | sphinx/testing/path.py | Symaxion/sphinx | f4f7936b5c3671153c2646387c8258b1c4e25e3c | [
"BSD-2-Clause"
] | 8 | 2019-04-27T01:19:45.000Z | 2020-09-21T03:31:01.000Z | sphinx/testing/path.py | JoeyCluett/sphinx | ff5031c96e90027510ad2d0251972e12da46402c | [
"BSD-2-Clause"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | sphinx/testing/path.py | JoeyCluett/sphinx | ff5031c96e90027510ad2d0251972e12da46402c | [
"BSD-2-Clause"
] | 5 | 2019-04-27T01:19:47.000Z | 2020-09-20T15:15:19.000Z | """
sphinx.testing.path
~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import shutil
import sys
if False:
# For type annotation
import builtins # NOQA
from typing import Any, Callable, IO, List # NOQA
FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()
class path(str):
    """A filesystem path that behaves like (and is) a string."""
    @property
    def parent(self):
        # type: () -> path
        """The directory containing this file or directory."""
        return self.__class__(os.path.dirname(self))
    def basename(self):
        # type: () -> str
        """The final component of the path."""
        return os.path.basename(self)
    def abspath(self):
        # type: () -> path
        """The absolute form of the path."""
        return self.__class__(os.path.abspath(self))
    def isabs(self):
        # type: () -> bool
        """Whether the path is absolute."""
        return os.path.isabs(self)
    def isdir(self):
        # type: () -> bool
        """Whether the path names an existing directory."""
        return os.path.isdir(self)
    def isfile(self):
        # type: () -> bool
        """Whether the path names an existing regular file."""
        return os.path.isfile(self)
    def islink(self):
        # type: () -> bool
        """Whether the path names a symbolic link."""
        return os.path.islink(self)
    def ismount(self):
        # type: () -> bool
        """Whether the path names a mount point."""
        return os.path.ismount(self)
    def rmtree(self, ignore_errors=False, onerror=None):
        # type: (bool, Callable) -> None
        """Recursively delete the directory tree rooted at this path.

        :param ignore_errors: silently ignore errors when ``True``.
        :param onerror: callback ``(func, path, exc_info)`` invoked on failure.
        """
        shutil.rmtree(self, ignore_errors=ignore_errors, onerror=onerror)
    def copytree(self, destination, symlinks=False):
        # type: (str, bool) -> None
        """Recursively copy this directory to `destination` (created if
        missing); `symlinks` controls whether links are preserved or followed.
        """
        shutil.copytree(self, destination, symlinks=symlinks)
    def movetree(self, destination):
        # type: (str) -> None
        """Recursively move this path to `destination`, like Unix ``mv``."""
        shutil.move(self, destination)
    move = movetree
    def unlink(self):
        # type: () -> None
        """Delete the file at this path."""
        os.unlink(self)
    def stat(self):
        # type: () -> Any
        """Return the result of :func:`os.stat` for this path."""
        return os.stat(self)
    def utime(self, arg):
        # type: (Any) -> None
        """Set the access and modification times of this path."""
        os.utime(self, arg)
    def open(self, mode='r', **kwargs):
        # type: (str, Any) -> IO
        """Open the file at this path and return the file object."""
        return open(self, mode, **kwargs)
    def write_text(self, text, encoding='utf-8', **kwargs):
        # type: (str, str, Any) -> None
        """Write `text` to the file, replacing any existing contents."""
        with open(self, 'w', encoding=encoding, **kwargs) as stream:
            stream.write(text)
    def text(self, encoding='utf-8', **kwargs):
        # type: (str, Any) -> str
        """Return the file's contents decoded as text."""
        with open(self, encoding=encoding, **kwargs) as stream:
            return stream.read()
    def bytes(self):
        # type: () -> builtins.bytes
        """Return the file's raw contents."""
        with open(self, mode='rb') as stream:
            return stream.read()
    def write_bytes(self, bytes, append=False):
        # type: (builtins.bytes, bool) -> None
        """Write `bytes` to the file; append instead of truncating when
        `append` is ``True``.
        """
        mode = 'ab' if append else 'wb'
        with open(self, mode=mode) as stream:
            stream.write(bytes)
    def exists(self):
        # type: () -> bool
        """Whether something exists at this path."""
        return os.path.exists(self)
    def lexists(self):
        # type: () -> bool
        """Like :meth:`exists`, but ``True`` even for a broken symlink."""
        return os.path.lexists(self)
    def makedirs(self, mode=0o777, exist_ok=False):
        # type: (int, bool) -> None
        """Create this directory, including any missing parents."""
        os.makedirs(self, mode, exist_ok=exist_ok)
    def joinpath(self, *args):
        # type: (Any) -> path
        """Join this path with each of `args` and return the result."""
        components = [self.__class__(arg) for arg in args]
        return self.__class__(os.path.join(self, *components))
    def listdir(self):
        # type: () -> List[str]
        """Return the names of the entries in this directory."""
        return os.listdir(self)
    __div__ = __truediv__ = joinpath
    def __repr__(self):
        # type: () -> str
        return '%s(%s)' % (self.__class__.__name__, super().__repr__())
| 27.160714 | 78 | 0.534845 | import os
import shutil
import sys
if False:
import builtins
from typing import Any, Callable, IO, List
FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()
class path(str):
    """A string subclass representing a filesystem path."""
    @property
    def parent(self):
        """The directory containing this file or directory."""
        return self.__class__(os.path.dirname(self))
    def basename(self):
        """Return the final path component."""
        return os.path.basename(self)
    def abspath(self):
        """Return the absolute form of the path."""
        return self.__class__(os.path.abspath(self))
    def isabs(self):
        """Return True if the path is absolute."""
        return os.path.isabs(self)
    def isdir(self):
        """Return True if the path is an existing directory."""
        return os.path.isdir(self)
    def isfile(self):
        """Return True if the path is an existing regular file."""
        return os.path.isfile(self)
    def islink(self):
        """Return True if the path is a symbolic link."""
        return os.path.islink(self)
    def ismount(self):
        """Return True if the path is a mount point."""
        return os.path.ismount(self)
    def rmtree(self, ignore_errors=False, onerror=None):
        """Recursively delete the tree rooted at this path."""
        shutil.rmtree(self, ignore_errors=ignore_errors, onerror=onerror)
    def copytree(self, destination, symlinks=False):
        """Recursively copy this directory to *destination*."""
        shutil.copytree(self, destination, symlinks=symlinks)
    def movetree(self, destination):
        """Recursively move this path to *destination* (like Unix mv)."""
        shutil.move(self, destination)
    move = movetree
    def unlink(self):
        """Delete the file at this path."""
        os.unlink(self)
    def stat(self):
        """Return os.stat() for this path."""
        return os.stat(self)
    def utime(self, arg):
        """Set access and modification times of this path."""
        os.utime(self, arg)
    def open(self, mode='r', **kwargs):
        """Open the file at this path."""
        return open(self, mode, **kwargs)
    def write_text(self, text, encoding='utf-8', **kwargs):
        """Write *text* to the file, replacing any contents."""
        with open(self, 'w', encoding=encoding, **kwargs) as f:
            f.write(text)
    def text(self, encoding='utf-8', **kwargs):
        """Return the file's contents decoded as text."""
        with open(self, encoding=encoding, **kwargs) as f:
            return f.read()
    def bytes(self):
        """Return the file's raw contents."""
        with open(self, mode='rb') as f:
            return f.read()
    def write_bytes(self, bytes, append=False):
        """Write *bytes* to the file; append when *append* is True."""
        if append:
            mode = 'ab'
        else:
            mode = 'wb'
        with open(self, mode=mode) as f:
            f.write(bytes)
    def exists(self):
        """Return True if something exists at this path."""
        return os.path.exists(self)
    def lexists(self):
        """Like exists(), but True even for a broken symbolic link."""
        return os.path.lexists(self)
    def makedirs(self, mode=0o777, exist_ok=False):
        """Create this directory, including missing parents."""
        os.makedirs(self, mode, exist_ok=exist_ok)
    def joinpath(self, *args):
        """Join this path with each of *args* and return the result."""
        return self.__class__(os.path.join(self, *map(self.__class__, args)))
    def listdir(self):
        """Return the names of the entries in this directory."""
        return os.listdir(self)
    __div__ = __truediv__ = joinpath
    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, super().__repr__())
| true | true |
f71bf51348a8a1504b096c6228f4179dd5875c54 | 1,538 | py | Python | consort/tools/ClefSpannerExpression.py | josiah-wolf-oberholtzer/consort | 6c7d511835d5ad883ad1ad52ae9cd48c4a7b5571 | [
"MIT"
] | 9 | 2015-02-11T09:35:40.000Z | 2019-04-29T23:57:49.000Z | consort/tools/ClefSpannerExpression.py | josiah-wolf-oberholtzer/consort | 6c7d511835d5ad883ad1ad52ae9cd48c4a7b5571 | [
"MIT"
] | 2 | 2016-02-07T18:54:47.000Z | 2017-08-10T01:38:01.000Z | consort/tools/ClefSpannerExpression.py | josiah-wolf-oberholtzer/consort | 6c7d511835d5ad883ad1ad52ae9cd48c4a7b5571 | [
"MIT"
] | 1 | 2019-05-13T12:37:15.000Z | 2019-05-13T12:37:15.000Z | from abjad import attach
from abjad import inspect
from abjad import iterate
from abjad.tools import abctools
from abjad.tools import scoretools
class ClefSpannerExpression(abctools.AbjadValueObject):
    r'''A clef spanner expression.

    When called on ``music``, computes the duration-weighted average pitch of
    its notes and chords and attaches a bass or treble clef spanner.
    '''

    ### CLASS VARIABLES ###

    __slots__ = ()

    ### INITIALIZER ###

    def __init__(self):
        pass

    ### SPECIAL METHODS ###

    def __call__(self, music, name=None):
        import consort
        leaves = list(iterate(music).by_leaf())
        weights = []
        weighted_pitches = []
        for leaf in leaves:
            weight = float(inspect(leaf).get_duration())
            # FIX: the original tested ``abjad.Note``/``abjad.Chord`` but the
            # ``abjad`` name is never imported here (NameError at runtime);
            # use the imported ``scoretools`` module instead.
            if isinstance(leaf, scoretools.Note):
                pitch = float(leaf.written_pitch)
                weighted_pitch = pitch * weight
                weights.append(weight)
                weighted_pitches.append(weighted_pitch)
            elif isinstance(leaf, scoretools.Chord):
                for pitch in leaf.written_pitches:
                    pitch = float(pitch)
                    weighted_pitch = pitch * weight
                    weighted_pitches.append(weighted_pitch)
                    weights.append(weight)
        sum_of_weights = sum(weights)
        if not sum_of_weights:
            # No pitched leaves: default to treble instead of dividing by zero.
            weighted_average = 0.0
        else:
            weighted_average = sum(weighted_pitches) / sum_of_weights
        if weighted_average < 0:
            clef_spanner = consort.ClefSpanner('bass')
        else:
            clef_spanner = consort.ClefSpanner('treble')
        attach(clef_spanner, music, name=name)
| 31.387755 | 67 | 0.605982 | from abjad import attach
from abjad import inspect
from abjad import iterate
from abjad.tools import abctools
from abjad.tools import scoretools
class ClefSpannerExpression(abctools.AbjadValueObject):
    r'''A clef spanner expression: attaches a bass or treble clef spanner to
    ``music`` based on the duration-weighted average pitch of its notes and
    chords.
    '''

    __slots__ = ()

    def __init__(self):
        pass

    def __call__(self, music, name=None):
        # FIX: this copy had lost the ``def __call__`` header and the
        # ``import consort`` statement, leaving the body at class level
        # (a SyntaxError/NameError); both are restored here.
        import consort
        leaves = list(iterate(music).by_leaf())
        weights = []
        weighted_pitches = []
        for leaf in leaves:
            weight = float(inspect(leaf).get_duration())
            # FIX: ``abjad`` is not imported; use the imported ``scoretools``.
            if isinstance(leaf, scoretools.Note):
                pitch = float(leaf.written_pitch)
                weighted_pitch = pitch * weight
                weights.append(weight)
                weighted_pitches.append(weighted_pitch)
            elif isinstance(leaf, scoretools.Chord):
                for pitch in leaf.written_pitches:
                    pitch = float(pitch)
                    weighted_pitch = pitch * weight
                    weighted_pitches.append(weighted_pitch)
                    weights.append(weight)
        sum_of_weights = sum(weights)
        if not sum_of_weights:
            # No pitched leaves: default to treble instead of dividing by zero.
            weighted_average = 0.0
        else:
            weighted_average = sum(weighted_pitches) / sum_of_weights
        if weighted_average < 0:
            clef_spanner = consort.ClefSpanner('bass')
        else:
            clef_spanner = consort.ClefSpanner('treble')
        attach(clef_spanner, music, name=name)
| true | true |
f71bf52008d28c422bd88b5ecd34e9e1fab1fa11 | 2,202 | py | Python | models/ModelUtil/util.py | Easonyesheng/StereoCameraToolk | 660e43019d0687e96e6da3aca48c1c423ae5abff | [
"MIT"
] | 27 | 2020-10-16T07:21:35.000Z | 2022-03-11T02:56:13.000Z | models/ModelUtil/util.py | Easonyesheng/StereoCamera | 9319b7f4e5ce36833de722a15e1074e82b8b4f84 | [
"MIT"
] | null | null | null | models/ModelUtil/util.py | Easonyesheng/StereoCamera | 9319b7f4e5ce36833de722a15e1074e82b8b4f84 | [
"MIT"
] | 6 | 2021-02-01T09:54:40.000Z | 2022-03-11T03:16:39.000Z | """Utility """
import numpy as np
import cv2
import os
import logging
def check_string_is_empty(string):
    """Return True when *string* is the empty string, otherwise False."""
    return string == ''
def check_numpy_array(array):
    """Return True when *array* behaves like a numpy array (its ``.all()``
    can be called), False when the attribute is missing.
    """
    try:
        array.all()
    except AttributeError:
        return False
    else:
        return True
def after_cv_imshow():
    """Block until a key is pressed; Esc (keycode 27) closes all OpenCV
    windows. Call this after ``cv2.imshow``.
    """
    key = cv2.waitKey(0)
    if key == 27:
        cv2.destroyAllWindows()
def save_img_with_prefix(img, path, name):
    """Save *img* as ``<path>/<name>.jpg``."""
    target = os.path.join(path, name + '.jpg')
    cv2.imwrite(target, img)
def img_show(img, name):
    """Display *img*, scaled by its maximum value, in a window titled *name*."""
    cv2.startWindowThread()
    scaled = img / np.max(img)
    cv2.imshow(name, scaled)
    after_cv_imshow()
def test_dir_if_not_create(path):
"""name
save as 'path/name.jpg'
Args:
Returns:
"""
if os.path.isdir(path):
return True
else:
print('Create New Folder:', path)
os.makedirs(path)
return True
def log_init(logfilename):
    """Configure the root logger to emit DEBUG-level messages to both
    *logfilename* (truncated) and the console.
    """
    root_logger = logging.getLogger()  # no name: configure the root logger
    root_logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s: - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    # File output.
    file_handler = logging.FileHandler(logfilename, 'w')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    # Console output.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(formatter)
    # Register both handlers (console first, matching the original order).
    root_logger.addHandler(console_handler)
    root_logger.addHandler(file_handler)
import numpy as np
import cv2
import os
import logging
def check_string_is_empty(string):
    """Return True when *string* is the empty string, otherwise False."""
    if string == '':
        return True
    return False
def check_numpy_array(array):
    """Return True when *array* supports ``.all()`` (array-like), else False."""
    try:
        array.all()
    except AttributeError:
        return False
    return True
def after_cv_imshow():
    """Wait for a key press; Esc (keycode 27) closes all OpenCV windows."""
    k = cv2.waitKey(0)
    if k == 27:
        cv2.destroyAllWindows()
def save_img_with_prefix(img, path, name):
    """Save *img* as ``<path>/<name>.jpg``."""
    cv2.imwrite(os.path.join(path,name+'.jpg'), img)
def img_show(img, name):
    """Display *img*, scaled by its maximum value, in a window titled *name*."""
    cv2.startWindowThread()
    img = img / np.max(img)
    cv2.imshow(name, img)
    after_cv_imshow()
def test_dir_if_not_create(path):
    """Ensure *path* exists as a directory, creating it when missing.

    Always returns True.
    """
    if os.path.isdir(path):
        return True
    else:
        print('Create New Folder:', path)
        os.makedirs(path)
        return True
def log_init(logfilename):
    """Configure the root logger to log DEBUG messages to *logfilename*
    (truncated) and to the console.
    """
    logger = logging.getLogger()  # no name: the root logger
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s: - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    # File handler ('w' truncates any previous log).
    fh = logging.FileHandler(logfilename, 'w')
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    # Console handler.
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    # Register both handlers on the root logger.
    logger.addHandler(ch)
    logger.addHandler(fh)
f71bf5a6846601e2c447085faeecf596137b1330 | 4,935 | py | Python | airflow/api/common/experimental/trigger_dag.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 2 | 2020-03-24T14:47:18.000Z | 2020-03-24T14:48:17.000Z | airflow/api/common/experimental/trigger_dag.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 1 | 2021-09-29T17:37:13.000Z | 2021-09-29T17:37:13.000Z | airflow/api/common/experimental/trigger_dag.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Triggering DAG runs APIs."""
import json
from datetime import datetime
from typing import List, Optional, Union
from airflow.exceptions import DagNotFound, DagRunAlreadyExists
from airflow.models import DagBag, DagModel, DagRun
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.types import DagRunType
def _trigger_dag(
    dag_id: str,
    dag_bag: DagBag,
    dag_run: DagRun,
    run_id: Optional[str],
    conf: Optional[Union[dict, str]],
    execution_date: Optional[datetime],
    replace_microseconds: bool,
) -> List[DagRun]:  # pylint: disable=too-many-arguments
    """Triggers DAG run.

    :param dag_id: DAG ID
    :param dag_bag: DAG Bag model
    :param dag_run: DAG Run model (annotation corrected from ``DagModel``:
        ``trigger_dag`` passes a :class:`DagRun` instance)
    :param run_id: ID of the dag_run
    :param conf: configuration
    :param execution_date: date of execution
    :param replace_microseconds: whether microseconds should be zeroed
    :return: list of triggered dags
    """
    dag = dag_bag.get_dag(dag_id)  # prefetch dag if it is stored serialized
    if dag_id not in dag_bag.dags:
        raise DagNotFound("Dag id {} not found".format(dag_id))
    execution_date = execution_date if execution_date else timezone.utcnow()
    if not timezone.is_localized(execution_date):
        raise ValueError("The execution_date should be localized")
    if replace_microseconds:
        execution_date = execution_date.replace(microsecond=0)
    if dag.default_args and 'start_date' in dag.default_args:
        min_dag_start_date = dag.default_args["start_date"]
        if min_dag_start_date and execution_date < min_dag_start_date:
            raise ValueError(
                "The execution_date [{0}] should be >= start_date [{1}] from DAG's default_args".format(
                    execution_date.isoformat(),
                    min_dag_start_date.isoformat()))
    # Manual runs get a run_id derived from the execution date.
    if not run_id:
        run_id = "{}{}".format(DagRunType.MANUAL.value, execution_date.isoformat())
    dag_run_id = dag_run.find(dag_id=dag_id, run_id=run_id)
    if dag_run_id:
        raise DagRunAlreadyExists("Run id {} already exists for dag id {}".format(
            run_id,
            dag_id
        ))
    run_conf = None
    if conf:
        if isinstance(conf, dict):
            run_conf = conf
        else:
            # A string conf is treated as JSON.
            run_conf = json.loads(conf)
    triggers = []
    dags_to_trigger = [dag]
    # Trigger the DAG and, recursively, every one of its subdags.
    while dags_to_trigger:
        dag = dags_to_trigger.pop()
        trigger = dag.create_dagrun(
            run_id=run_id,
            execution_date=execution_date,
            state=State.RUNNING,
            conf=run_conf,
            external_trigger=True,
        )
        triggers.append(trigger)
        if dag.subdags:
            dags_to_trigger.extend(dag.subdags)
    return triggers
def trigger_dag(
    dag_id: str,
    run_id: Optional[str] = None,
    conf: Optional[Union[dict, str]] = None,
    execution_date: Optional[datetime] = None,
    replace_microseconds: bool = True,
) -> Optional[DagRun]:
    """Trigger a new run of the DAG identified by *dag_id*.

    :param dag_id: DAG ID
    :param run_id: ID of the dag_run
    :param conf: configuration (dict, or a JSON string)
    :param execution_date: date of execution
    :param replace_microseconds: whether microseconds should be zeroed
    :return: the first DagRun triggered (subdag runs may also be created),
        or None when nothing was triggered
    """
    dag_model = DagModel.get_current(dag_id)
    if dag_model is None:
        raise DagNotFound("Dag id {} not found in DagModel".format(dag_id))

    def read_store_serialized_dags():
        # Imported lazily; this local ``conf`` is the airflow configuration
        # module and intentionally shadows the ``conf`` parameter.
        from airflow.configuration import conf
        return conf.getboolean('core', 'store_serialized_dags')

    dagbag = DagBag(
        dag_folder=dag_model.fileloc,
        store_serialized_dags=read_store_serialized_dags(),
    )
    triggers = _trigger_dag(
        dag_id=dag_id,
        dag_run=DagRun(),
        dag_bag=dagbag,
        run_id=run_id,
        conf=conf,
        execution_date=execution_date,
        replace_microseconds=replace_microseconds,
    )
    if not triggers:
        return None
    return triggers[0]
| 34.270833 | 104 | 0.680446 |
import json
from datetime import datetime
from typing import List, Optional, Union
from airflow.exceptions import DagNotFound, DagRunAlreadyExists
from airflow.models import DagBag, DagModel, DagRun
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.types import DagRunType
def _trigger_dag(
    dag_id: str,
    dag_bag: DagBag,
    dag_run: DagRun,
    run_id: Optional[str],
    conf: Optional[Union[dict, str]],
    execution_date: Optional[datetime],
    replace_microseconds: bool,
) -> List[DagRun]:
    """Trigger a DAG run and return the list of created runs.

    ``dag_run`` annotation corrected from ``DagModel``: the caller passes a
    :class:`DagRun` instance whose ``find`` method is used below.
    """
    dag = dag_bag.get_dag(dag_id)  # prefetch dag if it is stored serialized
    if dag_id not in dag_bag.dags:
        raise DagNotFound("Dag id {} not found".format(dag_id))
    execution_date = execution_date if execution_date else timezone.utcnow()
    if not timezone.is_localized(execution_date):
        raise ValueError("The execution_date should be localized")
    if replace_microseconds:
        execution_date = execution_date.replace(microsecond=0)
    if dag.default_args and 'start_date' in dag.default_args:
        min_dag_start_date = dag.default_args["start_date"]
        if min_dag_start_date and execution_date < min_dag_start_date:
            raise ValueError(
                "The execution_date [{0}] should be >= start_date [{1}] from DAG's default_args".format(
                    execution_date.isoformat(),
                    min_dag_start_date.isoformat()))
    # Manual runs get a run_id derived from the execution date.
    if not run_id:
        run_id = "{}{}".format(DagRunType.MANUAL.value, execution_date.isoformat())
    dag_run_id = dag_run.find(dag_id=dag_id, run_id=run_id)
    if dag_run_id:
        raise DagRunAlreadyExists("Run id {} already exists for dag id {}".format(
            run_id,
            dag_id
        ))
    run_conf = None
    if conf:
        if isinstance(conf, dict):
            run_conf = conf
        else:
            # A string conf is treated as JSON.
            run_conf = json.loads(conf)
    triggers = []
    dags_to_trigger = [dag]
    # Trigger the DAG and, recursively, every one of its subdags.
    while dags_to_trigger:
        dag = dags_to_trigger.pop()
        trigger = dag.create_dagrun(
            run_id=run_id,
            execution_date=execution_date,
            state=State.RUNNING,
            conf=run_conf,
            external_trigger=True,
        )
        triggers.append(trigger)
        if dag.subdags:
            dags_to_trigger.extend(dag.subdags)
    return triggers
def trigger_dag(
    dag_id: str,
    run_id: Optional[str] = None,
    conf: Optional[Union[dict, str]] = None,
    execution_date: Optional[datetime] = None,
    replace_microseconds: bool = True,
) -> Optional[DagRun]:
    """Trigger execution of the DAG identified by *dag_id*.

    Returns the first DagRun triggered (subdag runs may also be created),
    or None when nothing was triggered.
    """
    dag_model = DagModel.get_current(dag_id)
    if dag_model is None:
        raise DagNotFound("Dag id {} not found in DagModel".format(dag_id))
    def read_store_serialized_dags():
        # Lazily imported airflow configuration; this local ``conf``
        # intentionally shadows the ``conf`` parameter.
        from airflow.configuration import conf
        return conf.getboolean('core', 'store_serialized_dags')
    dagbag = DagBag(
        dag_folder=dag_model.fileloc,
        store_serialized_dags=read_store_serialized_dags()
    )
    dag_run = DagRun()
    triggers = _trigger_dag(
        dag_id=dag_id,
        dag_run=dag_run,
        dag_bag=dagbag,
        run_id=run_id,
        conf=conf,
        execution_date=execution_date,
        replace_microseconds=replace_microseconds,
    )
    return triggers[0] if triggers else None
| true | true |
f71bf6d7e4ee3f8699dd771f06d36619668ab2db | 8,225 | py | Python | authors/apps/notify/tests/test_mail_list_model.py | andela/Ah-backend-valkyrie | f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/notify/tests/test_mail_list_model.py | andela/Ah-backend-valkyrie | f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79 | [
"BSD-3-Clause"
] | 46 | 2019-01-08T13:16:41.000Z | 2021-04-30T20:47:08.000Z | authors/apps/notify/tests/test_mail_list_model.py | andela/Ah-backend-valkyrie | f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79 | [
"BSD-3-Clause"
] | 3 | 2019-01-07T08:21:59.000Z | 2019-09-20T06:43:18.000Z | import json
from authors.apps.authentication.tests.base import BaseTestMethods
from authors.apps.authentication.models import User
from rest_framework.reverse import reverse
from rest_framework import status
class NotificationTests(BaseTestMethods):
    """Tests for the mail-list and notification endpoints.

    The register-two-users-and-follow setup that was copy-pasted across seven
    tests is factored into ``_register_and_follow``; following generates
    exactly one notification for the followed user.
    """

    def _auth(self, token):
        """Build an Authorization header value for *token*."""
        return f'Bearer {token}'

    def _get(self, url_name, token):
        """GET the named URL authenticated with *token*."""
        return self.client.get(
            reverse(url_name), format='json',
            HTTP_AUTHORIZATION=self._auth(token))

    def _register_and_follow(self):
        """Register two users and have user 1 follow user 2.

        :returns: the pair ``(user_1_token, user_2_token)``.
        """
        user_1 = self.register_and_loginUser()
        user_1_token = user_1.data['token']
        user_2 = self.register_and_login_user2()
        user_2_token = user_2.data['token']
        followed = User.objects.get(email=user_2.data['email'])
        response = self.client.post(
            '/api/v1/users/{}/profile/follow'.format(followed.username),
            HTTP_AUTHORIZATION=self._auth(user_1_token))
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(
            json.loads(response.content)['profile']['following'], True)
        return user_1_token, user_2_token

    def test_create_mail_list(self):
        user = self.register_and_loginUser()
        response = self.client.get(
            reverse('mail-list-status'),
            HTTP_AUTHORIZATION=self._auth(user.data['token']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIsInstance(response.data, dict)
        self.assertEqual(
            response.data['user']['email'], 'testuser@andela.com')

    def test_create_update_notification_status(self):
        user = self.register_and_loginUser()
        data = {'recieve_email_notifications': 'false'}
        response = self.client.put(
            reverse('mail-list-status'), data=data,
            HTTP_AUTHORIZATION=self._auth(user.data['token']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['recieve_email_notifications'], False)

    def test_fetch_all_user_notifications(self):
        _, user_2_token = self._register_and_follow()
        notifications = self._get('all-notifications', user_2_token)
        self.assertEqual(notifications.status_code, status.HTTP_200_OK)
        self.assertEqual(notifications.data['count'], 1)

    def test_cant_fetch_notifications_for_different_user(self):
        # The follower has no notifications of their own.
        user_1_token, _ = self._register_and_follow()
        notifications = self._get('all-notifications', user_1_token)
        self.assertEqual(
            notifications.data['message'],
            'You currently dont have any notifications')

    def test_fetch_all_user_unread_notifications(self):
        _, user_2_token = self._register_and_follow()
        notifications = self._get('unread-notifications', user_2_token)
        self.assertEqual(notifications.status_code, status.HTTP_200_OK)
        self.assertEqual(notifications.data['count'], 1)

    def test_failed_fetch_all_user_unread_notifications(self):
        # No follow happens, so user 2 has no notifications at all.
        self.register_and_loginUser()
        user_2 = self.register_and_login_user2()
        notifications = self._get('unread-notifications', user_2.data['token'])
        self.assertEqual(
            notifications.data['message'],
            'You currently dont have any unread notifications')

    def test_fetch_all_user_read_notifications(self):
        # (method name typo "notofications" fixed)
        _, user_2_token = self._register_and_follow()
        # Mark everything read first so the read listing has one entry.
        self._get('mark-all-as-read', user_2_token)
        notifications = self._get('read-notifications', user_2_token)
        self.assertEqual(notifications.data['count'], 1)

    def test_failed_fetch_all_user_read_notifications(self):
        _, user_2_token = self._register_and_follow()
        notifications = self._get('read-notifications', user_2_token)
        self.assertEqual(
            notifications.data['message'],
            'You currently dont have any read notifications')

    def test_mark_all_notifications_as_read(self):
        # (method name typo "notofications" fixed)
        _, user_2_token = self._register_and_follow()
        notifications = self._get('mark-all-as-read', user_2_token)
        self.assertEqual(
            notifications.data['message'], 'All notifications marked as read')
| 39.927184 | 92 | 0.646565 | import json
from authors.apps.authentication.tests.base import BaseTestMethods
from authors.apps.authentication.models import User
from rest_framework.reverse import reverse
from rest_framework import status
class NotificationTests(BaseTestMethods):
def test_create_mail_list(self):
user = self.register_and_loginUser()
token = user.data['token']
auth = f'Bearer {token}'
url = reverse('mail-list-status')
response = self.client.get(url, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIsInstance(response.data, dict)
self.assertEqual(
response.data['user']['email'], 'testuser@andela.com')
def test_create_update_notification_status(self):
user = self.register_and_loginUser()
token = user.data['token']
auth = f'Bearer {token}'
url = reverse('mail-list-status')
data = {'recieve_email_notifications': 'false'}
response = self.client.put(url, data=data, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data['recieve_email_notifications'], False)
def test_fetch_all_user_notifications(self):
user_1 = self.register_and_loginUser()
user_1_token = user_1.data['token']
user_2 = self.register_and_login_user2()
user_2_token = user_2.data['token']
this_user_2 = User.objects.get(email=user_2.data['email'])
response = self.client.post(
'/api/v1/users/{}/profile/follow'.format(this_user_2.username),
HTTP_AUTHORIZATION=f'Bearer {user_1_token}')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
json.loads(response.content)[
'profile']['following'], True
)
url = reverse('all-notifications')
notifications = self.client.get(
url, format='json', HTTP_AUTHORIZATION=f'Bearer {user_2_token}')
self.assertEqual(notifications.status_code, status.HTTP_200_OK)
self.assertEqual(
notifications.data['count'], 1)
def test_cant_fetch_notifications_for_different_user(self):
user_1 = self.register_and_loginUser()
user_1_token = user_1.data['token']
user_2 = self.register_and_login_user2()
this_user_2 = User.objects.get(email=user_2.data['email'])
response = self.client.post(
'/api/v1/users/{}/profile/follow'.format(this_user_2.username),
HTTP_AUTHORIZATION=f'Bearer {user_1_token}')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
json.loads(response.content)[
'profile']['following'], True
)
url = reverse('all-notifications')
notifications = self.client.get(
url, format='json', HTTP_AUTHORIZATION=f'Bearer {user_1_token}')
self.assertEqual(
notifications.data['message'],
'You currently dont have any notifications')
def test_fetch_all_user_unread_notifications(self):
user_1 = self.register_and_loginUser()
user_1_token = user_1.data['token']
user_2 = self.register_and_login_user2()
user_2_token = user_2.data['token']
this_user_2 = User.objects.get(email=user_2.data['email'])
response = self.client.post(
'/api/v1/users/{}/profile/follow'.format(this_user_2.username),
HTTP_AUTHORIZATION=f'Bearer {user_1_token}')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
json.loads(response.content)[
'profile']['following'], True
)
url = reverse('unread-notifications')
notifications = self.client.get(
url, format='json', HTTP_AUTHORIZATION=f'Bearer {user_2_token}')
self.assertEqual(notifications.status_code, status.HTTP_200_OK)
self.assertEqual(
notifications.data['count'], 1)
def test_failed_fetch_all_user_unread_notifications(self):
self.register_and_loginUser()
user_2 = self.register_and_login_user2()
user_2_token = user_2.data['token']
url = reverse('unread-notifications')
notifications = self.client.get(
url, format='json', HTTP_AUTHORIZATION=f'Bearer {user_2_token}')
self.assertEqual(
notifications.data['message'],
'You currently dont have any unread notifications')
def test_fetch_all_user_read_notofications(self):
user_1 = self.register_and_loginUser()
user_1_token = user_1.data['token']
user_2 = self.register_and_login_user2()
user_2_token = user_2.data['token']
this_user_2 = User.objects.get(email=user_2.data['email'])
response = self.client.post(
'/api/v1/users/{}/profile/follow'.format(this_user_2.username),
HTTP_AUTHORIZATION=f'Bearer {user_1_token}')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
json.loads(response.content)[
'profile']['following'], True
)
url = reverse('mark-all-as-read')
self.client.get(
url, format='json', HTTP_AUTHORIZATION=f'Bearer {user_2_token}')
url = reverse('read-notifications')
notifications = self.client.get(
url, format='json', HTTP_AUTHORIZATION=f'Bearer {user_2_token}')
self.assertEqual(
notifications.data['count'], 1)
def test_failed_fetch_all_user_read_notifications(self):
user_1 = self.register_and_loginUser()
user_1_token = user_1.data['token']
user_2 = self.register_and_login_user2()
user_2_token = user_2.data['token']
this_user_2 = User.objects.get(email=user_2.data['email'])
response = self.client.post(
'/api/v1/users/{}/profile/follow'.format(this_user_2.username),
HTTP_AUTHORIZATION=f'Bearer {user_1_token}')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
json.loads(response.content)[
'profile']['following'], True
)
url = reverse('read-notifications')
notifications = self.client.get(
url, format='json', HTTP_AUTHORIZATION=f'Bearer {user_2_token}')
self.assertEqual(
notifications.data['message'], 'You currently dont have any read notifications')
def test_mark_all_notofications_as_read(self):
user_1 = self.register_and_loginUser()
user_1_token = user_1.data['token']
user_2 = self.register_and_login_user2()
user_2_token = user_2.data['token']
this_user_2 = User.objects.get(email=user_2.data['email'])
response = self.client.post(
'/api/v1/users/{}/profile/follow'.format(this_user_2.username),
HTTP_AUTHORIZATION=f'Bearer {user_1_token}')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
json.loads(response.content)[
'profile']['following'], True
)
url = reverse('mark-all-as-read')
notifications = self.client.get(
url, format='json', HTTP_AUTHORIZATION=f'Bearer {user_2_token}')
self.assertEqual(
notifications.data['message'], 'All notifications marked as read')
| true | true |
f71bf861822822818626a562f2930b021abda1d2 | 596 | py | Python | x2py/__init__.py | jaykang920/x2py | b8bd473f94ff4b9576e984cc384f4159ab71278d | [
"MIT"
] | null | null | null | x2py/__init__.py | jaykang920/x2py | b8bd473f94ff4b9576e984cc384f4159ab71278d | [
"MIT"
] | 1 | 2019-06-05T09:35:09.000Z | 2020-07-02T09:46:46.000Z | x2py/__init__.py | jaykang920/x2py | b8bd473f94ff4b9576e984cc384f4159ab71278d | [
"MIT"
] | null | null | null | # Copyright (c) 2017, 2018 Jae-jun Kang
# See the file LICENSE for details.
"""Import core names of x2py."""
__version__ = '0.4.3'
from x2py.buffer_transform import BufferTransform
from x2py.builtin_events import *
from x2py.case import Case
from x2py.config import Config
from x2py.coroutine import Coroutine, CoroutineHandler
from x2py.event import Event
from x2py.event_factory import EventFactory
from x2py.event_sink import EventSink
from x2py.flow import Flow
from x2py.hub import Hub
from x2py.link import Link
from x2py.flows import *
from x2py.util import *
from x2py.yields import *
| 27.090909 | 54 | 0.798658 |
__version__ = '0.4.3'
from x2py.buffer_transform import BufferTransform
from x2py.builtin_events import *
from x2py.case import Case
from x2py.config import Config
from x2py.coroutine import Coroutine, CoroutineHandler
from x2py.event import Event
from x2py.event_factory import EventFactory
from x2py.event_sink import EventSink
from x2py.flow import Flow
from x2py.hub import Hub
from x2py.link import Link
from x2py.flows import *
from x2py.util import *
from x2py.yields import *
| true | true |
f71bf97329b821d2dc2e07b818ab784fb1ed22a7 | 2,815 | py | Python | test/lint/check-doc.py | BakedInside/beanscore | daa9b2ddbfd3305881749bda7f32146738154260 | [
"MIT"
] | null | null | null | test/lint/check-doc.py | BakedInside/beanscore | daa9b2ddbfd3305881749bda7f32146738154260 | [
"MIT"
] | null | null | null | test/lint/check-doc.py | BakedInside/beanscore | daa9b2ddbfd3305881749bda7f32146738154260 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Beans Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
REGEX_ARG = r'(?:ForceSet|SoftSet|Get|Is)(?:Bool)?Args?(?:Set)?\("(-[^"]+)"'
REGEX_DOC = r'AddArg\("(-[^"=]+?)(?:=|")'
CMD_ROOT_DIR = '$(git rev-parse --show-toplevel)/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"git grep --perl-regexp '{}' -- {} ':(exclude){}'".format(REGEX_ARG, CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_WALLET_ARGS = r"git grep --function-context 'void WalletInit::AddWalletOptions' -- {} | grep AddArg".format(CMD_ROOT_DIR)
CMD_GREP_WALLET_HIDDEN_ARGS = r"git grep --function-context 'void DummyWalletInit::AddWalletOptions' -- {}".format(CMD_ROOT_DIR)
CMD_GREP_DOCS = r"git grep --perl-regexp '{}' {}".format(REGEX_DOC, CMD_ROOT_DIR)
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-h', '-help', '-dbcrashratio', '-forcecompactdb', '-zapwallettxes'])
def lint_missing_argument_documentation():
used = check_output(CMD_GREP_ARGS, shell=True).decode('utf8').strip()
docd = check_output(CMD_GREP_DOCS, shell=True).decode('utf8').strip()
args_used = set(re.findall(re.compile(REGEX_ARG), used))
args_docd = set(re.findall(re.compile(REGEX_DOC), docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print("Args used : {}".format(len(args_used)))
print("Args documented : {}".format(len(args_docd)))
print("Args undocumented: {}".format(len(args_need_doc)))
print(args_need_doc)
print("Args unknown : {}".format(len(args_unknown)))
print(args_unknown)
assert 0 == len(args_need_doc), "Please document the following arguments: {}".format(args_need_doc)
def lint_missing_hidden_wallet_args():
wallet_args = check_output(CMD_GREP_WALLET_ARGS, shell=True).decode('utf8').strip()
wallet_hidden_args = check_output(CMD_GREP_WALLET_HIDDEN_ARGS, shell=True).decode('utf8').strip()
wallet_args = set(re.findall(re.compile(REGEX_DOC), wallet_args))
wallet_hidden_args = set(re.findall(re.compile(r' "([^"=]+)'), wallet_hidden_args))
hidden_missing = wallet_args.difference(wallet_hidden_args)
if hidden_missing:
assert 0, "Please add {} to the hidden args in DummyWalletInit::AddWalletOptions".format(hidden_missing)
def main():
lint_missing_argument_documentation()
lint_missing_hidden_wallet_args()
if __name__ == "__main__":
main()
| 42.014925 | 130 | 0.722913 |
from subprocess import check_output
import re
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
REGEX_ARG = r'(?:ForceSet|SoftSet|Get|Is)(?:Bool)?Args?(?:Set)?\("(-[^"]+)"'
REGEX_DOC = r'AddArg\("(-[^"=]+?)(?:=|")'
CMD_ROOT_DIR = '$(git rev-parse --show-toplevel)/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"git grep --perl-regexp '{}' -- {} ':(exclude){}'".format(REGEX_ARG, CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_WALLET_ARGS = r"git grep --function-context 'void WalletInit::AddWalletOptions' -- {} | grep AddArg".format(CMD_ROOT_DIR)
CMD_GREP_WALLET_HIDDEN_ARGS = r"git grep --function-context 'void DummyWalletInit::AddWalletOptions' -- {}".format(CMD_ROOT_DIR)
CMD_GREP_DOCS = r"git grep --perl-regexp '{}' {}".format(REGEX_DOC, CMD_ROOT_DIR)
SET_DOC_OPTIONAL = set(['-h', '-help', '-dbcrashratio', '-forcecompactdb', '-zapwallettxes'])
def lint_missing_argument_documentation():
used = check_output(CMD_GREP_ARGS, shell=True).decode('utf8').strip()
docd = check_output(CMD_GREP_DOCS, shell=True).decode('utf8').strip()
args_used = set(re.findall(re.compile(REGEX_ARG), used))
args_docd = set(re.findall(re.compile(REGEX_DOC), docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print("Args used : {}".format(len(args_used)))
print("Args documented : {}".format(len(args_docd)))
print("Args undocumented: {}".format(len(args_need_doc)))
print(args_need_doc)
print("Args unknown : {}".format(len(args_unknown)))
print(args_unknown)
assert 0 == len(args_need_doc), "Please document the following arguments: {}".format(args_need_doc)
def lint_missing_hidden_wallet_args():
wallet_args = check_output(CMD_GREP_WALLET_ARGS, shell=True).decode('utf8').strip()
wallet_hidden_args = check_output(CMD_GREP_WALLET_HIDDEN_ARGS, shell=True).decode('utf8').strip()
wallet_args = set(re.findall(re.compile(REGEX_DOC), wallet_args))
wallet_hidden_args = set(re.findall(re.compile(r' "([^"=]+)'), wallet_hidden_args))
hidden_missing = wallet_args.difference(wallet_hidden_args)
if hidden_missing:
assert 0, "Please add {} to the hidden args in DummyWalletInit::AddWalletOptions".format(hidden_missing)
def main():
lint_missing_argument_documentation()
lint_missing_hidden_wallet_args()
if __name__ == "__main__":
main()
| true | true |
f71bf97c41d24bf4383061ccf726c43fc54c6be9 | 4,880 | py | Python | py3status/modules/file_status.py | JackDoan/py3status | e7f56fc0bec8c1a625328c3549b88f66ee8b41ab | [
"BSD-3-Clause"
] | null | null | null | py3status/modules/file_status.py | JackDoan/py3status | e7f56fc0bec8c1a625328c3549b88f66ee8b41ab | [
"BSD-3-Clause"
] | null | null | null | py3status/modules/file_status.py | JackDoan/py3status | e7f56fc0bec8c1a625328c3549b88f66ee8b41ab | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Display if files or directories exists.
Configuration parameters:
cache_timeout: refresh interval for this module (default 10)
format: display format for this module
(default '\?color=path [\?if=path ●|■]')
format_path: format for paths (default '{basename}')
format_path_separator: show separator if more than one (default ' ')
paths: specify a string or a list of paths to check (default None)
thresholds: specify color thresholds to use
(default [(0, 'bad'), (1, 'good')])
Format placeholders:
{format_path} format for paths
{path} number of paths, eg 1, 2, 3
format_path placeholders:
{basename} basename of pathname
{pathname} pathname
Color options:
color_bad: files or directories does not exist
color_good: files or directories exists
Color thresholds:
format:
path: print a color based on the number of paths
Examples:
# add multiple paths with wildcard or with pathnames
```
file_status {
paths = ['/tmp/test*', '~user/test1', '~/Videos/*.mp4']
}
# colorize basenames
file_status {
paths = ['~/.config/i3/modules/*.py']
format = '{format_path}'
format_path = '\?color=good {basename}'
format_path_separator = ', '
}
```
@author obb, Moritz Lüdecke, Cyril Levis (@cyrinux)
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': u'\u25cf'}
missing
{'color': '#FF0000', 'full_text': u'\u25a0'}
"""
from glob import glob
from os.path import basename, expanduser
STRING_NO_PATHS = "missing paths"
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 10
format = u"\?color=path [\?if=path \u25cf|\u25a0]"
format_path = u"{basename}"
format_path_separator = u" "
paths = None
thresholds = [(0, "bad"), (1, "good")]
class Meta:
deprecated = {
"rename": [
{
"param": "format_available",
"new": "icon_available",
"msg": "obsolete parameter use `icon_available`",
},
{
"param": "format_unavailable",
"new": "icon_unavailable",
"msg": "obsolete parameter use `icon_unavailable`",
},
{
"param": "path",
"new": "paths",
"msg": "obsolete parameter use `paths`",
},
],
"rename_placeholder": [
{"placeholder": "paths", "new": "path", "format_strings": ["format"]}
],
}
def post_config_hook(self):
if not self.paths:
raise Exception(STRING_NO_PATHS)
# icon deprecation
on = getattr(self, "icon_available", u"\u25cf")
off = getattr(self, "icon_unavailable", u"\u25a0")
new_icon = u"\?color=path [\?if=path {}|{}]".format(on, off)
self.format = self.format.replace("{icon}", new_icon)
# convert str to list + expand path
if not isinstance(self.paths, list):
self.paths = [self.paths]
self.paths = list(map(expanduser, self.paths))
self.init = {"format_path": []}
if self.py3.format_contains(self.format, "format_path"):
self.init["format_path"] = self.py3.get_placeholders_list(self.format_path)
def file_status(self):
# init datas
paths = sorted([files for path in self.paths for files in glob(path)])
count_path = len(paths)
format_path = None
# format paths
if self.init["format_path"]:
new_data = []
format_path_separator = self.py3.safe_format(self.format_path_separator)
for pathname in paths:
path = {}
for key in self.init["format_path"]:
if key == "basename":
value = basename(pathname)
elif key == "pathname":
value = pathname
else:
continue
path[key] = self.py3.safe_format(value)
new_data.append(self.py3.safe_format(self.format_path, path))
format_path = self.py3.composite_join(format_path_separator, new_data)
if self.thresholds:
self.py3.threshold_get_color(count_path, "path")
self.py3.threshold_get_color(count_path, "paths")
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(
self.format,
{"path": count_path, "paths": count_path, "format_path": format_path},
),
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| 30.310559 | 87 | 0.565369 |
from glob import glob
from os.path import basename, expanduser
STRING_NO_PATHS = "missing paths"
class Py3status:
cache_timeout = 10
format = u"\?color=path [\?if=path \u25cf|\u25a0]"
format_path = u"{basename}"
format_path_separator = u" "
paths = None
thresholds = [(0, "bad"), (1, "good")]
class Meta:
deprecated = {
"rename": [
{
"param": "format_available",
"new": "icon_available",
"msg": "obsolete parameter use `icon_available`",
},
{
"param": "format_unavailable",
"new": "icon_unavailable",
"msg": "obsolete parameter use `icon_unavailable`",
},
{
"param": "path",
"new": "paths",
"msg": "obsolete parameter use `paths`",
},
],
"rename_placeholder": [
{"placeholder": "paths", "new": "path", "format_strings": ["format"]}
],
}
def post_config_hook(self):
if not self.paths:
raise Exception(STRING_NO_PATHS)
on = getattr(self, "icon_available", u"\u25cf")
off = getattr(self, "icon_unavailable", u"\u25a0")
new_icon = u"\?color=path [\?if=path {}|{}]".format(on, off)
self.format = self.format.replace("{icon}", new_icon)
if not isinstance(self.paths, list):
self.paths = [self.paths]
self.paths = list(map(expanduser, self.paths))
self.init = {"format_path": []}
if self.py3.format_contains(self.format, "format_path"):
self.init["format_path"] = self.py3.get_placeholders_list(self.format_path)
def file_status(self):
paths = sorted([files for path in self.paths for files in glob(path)])
count_path = len(paths)
format_path = None
if self.init["format_path"]:
new_data = []
format_path_separator = self.py3.safe_format(self.format_path_separator)
for pathname in paths:
path = {}
for key in self.init["format_path"]:
if key == "basename":
value = basename(pathname)
elif key == "pathname":
value = pathname
else:
continue
path[key] = self.py3.safe_format(value)
new_data.append(self.py3.safe_format(self.format_path, path))
format_path = self.py3.composite_join(format_path_separator, new_data)
if self.thresholds:
self.py3.threshold_get_color(count_path, "path")
self.py3.threshold_get_color(count_path, "paths")
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(
self.format,
{"path": count_path, "paths": count_path, "format_path": format_path},
),
}
if __name__ == "__main__":
from py3status.module_test import module_test
module_test(Py3status)
| true | true |
f71bfb7e6138d20826c27d50c937e36b4fb9c103 | 454 | py | Python | lesson_4_transformation/lesson_4_affine_transformation.py | DewMaple/opencv-learning | 51991a5b9badf24cda740c1377f6be30dea91e1d | [
"MIT"
] | null | null | null | lesson_4_transformation/lesson_4_affine_transformation.py | DewMaple/opencv-learning | 51991a5b9badf24cda740c1377f6be30dea91e1d | [
"MIT"
] | null | null | null | lesson_4_transformation/lesson_4_affine_transformation.py | DewMaple/opencv-learning | 51991a5b9badf24cda740c1377f6be30dea91e1d | [
"MIT"
] | 1 | 2018-09-10T15:51:23.000Z | 2018-09-10T15:51:23.000Z | import cv2
import numpy as np
from utils import find_image
image_path = find_image('girls_01.jpg')
img = cv2.imread(image_path)
rows, cols, channel = img.shape
pts_src = np.float32([[50, 50], [200, 50], [50, 200]])
pts_dst = np.float32([[10, 100], [200, 80], [100, 650]])
M = cv2.getAffineTransform(pts_src, pts_dst)
res = cv2.warpAffine(img, M, (cols, rows))
cv2.imshow('transformation by three points', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 23.894737 | 56 | 0.707048 | import cv2
import numpy as np
from utils import find_image
image_path = find_image('girls_01.jpg')
img = cv2.imread(image_path)
rows, cols, channel = img.shape
pts_src = np.float32([[50, 50], [200, 50], [50, 200]])
pts_dst = np.float32([[10, 100], [200, 80], [100, 650]])
M = cv2.getAffineTransform(pts_src, pts_dst)
res = cv2.warpAffine(img, M, (cols, rows))
cv2.imshow('transformation by three points', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true | true |
f71bfbbb90ec970a284d4548547ab11e37b35557 | 21,393 | py | Python | androidtv/constants.py | deviant-aut/python-androidtv | 4bd5421e107949784b292a5f4a0397875a18e908 | [
"MIT"
] | 10 | 2022-01-17T14:46:04.000Z | 2022-03-19T16:19:06.000Z | androidtv/constants.py | deviant-aut/python-androidtv | 4bd5421e107949784b292a5f4a0397875a18e908 | [
"MIT"
] | 5 | 2022-01-18T20:33:18.000Z | 2022-03-30T15:57:24.000Z | androidtv/constants.py | deviant-aut/python-androidtv | 4bd5421e107949784b292a5f4a0397875a18e908 | [
"MIT"
] | 7 | 2022-01-25T01:26:47.000Z | 2022-03-13T05:54:53.000Z | """Constants used throughout the code.
**Links**
* `ADB key event codes <https://developer.android.com/reference/android/view/KeyEvent>`_
* `MediaSession PlaybackState property <https://developer.android.com/reference/android/media/session/PlaybackState.html>`_
"""
import re
import sys
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
from enum import IntEnum, unique
else: # pragma: no cover
IntEnum = object
def unique(cls):
"""A class decorator that does nothing."""
return cls
@unique
class DeviceEnum(IntEnum):
"""An enum for the various device types."""
BASETV = 0
ANDROIDTV = 1
FIRETV = 2
# Intents
INTENT_LAUNCH = "android.intent.category.LEANBACK_LAUNCHER"
INTENT_LAUNCH_FIRETV = "android.intent.category.LAUNCHER"
INTENT_HOME = "android.intent.category.HOME"
# Customizable commands
CUSTOM_AUDIO_STATE = "audio_state"
CUSTOM_CURRENT_APP = "current_app"
CUSTOM_CURRENT_APP_MEDIA_SESSION_STATE = "current_app_media_session_state"
CUSTOM_HDMI_INPUT = "hdmi_input"
CUSTOM_LAUNCH_APP = "launch_app"
CUSTOM_RUNNING_APPS = "running_apps"
CUSTOM_TURN_OFF = "turn_off"
CUSTOM_TURN_ON = "turn_on"
CUSTOMIZABLE_COMMANDS = {
CUSTOM_AUDIO_STATE,
CUSTOM_CURRENT_APP,
CUSTOM_CURRENT_APP_MEDIA_SESSION_STATE,
CUSTOM_HDMI_INPUT,
CUSTOM_LAUNCH_APP,
CUSTOM_RUNNING_APPS,
CUSTOM_TURN_OFF,
CUSTOM_TURN_ON,
}
#: The subset of `CUSTOMIZABLE_COMMANDS` that is potentially used in the ``update()`` method
HA_CUSTOMIZABLE_COMMANDS = (
CUSTOM_AUDIO_STATE,
CUSTOM_CURRENT_APP_MEDIA_SESSION_STATE,
CUSTOM_HDMI_INPUT,
CUSTOM_LAUNCH_APP,
CUSTOM_RUNNING_APPS,
CUSTOM_TURN_OFF,
CUSTOM_TURN_ON,
)
# echo '1' if the previous shell command was successful
CMD_SUCCESS1 = r" && echo -e '1\c'"
# echo '1' if the previous shell command was successful, echo '0' if it was not
CMD_SUCCESS1_FAILURE0 = r" && echo -e '1\c' || echo -e '0\c'"
#: Get the audio state
CMD_AUDIO_STATE = r"dumpsys audio | grep paused | grep -qv 'Buffer Queue' && echo -e '1\c' || (dumpsys audio | grep started | grep -qv 'Buffer Queue' && echo '2\c' || echo '0\c')"
#: Get the audio state for an Android 11 device
CMD_AUDIO_STATE11 = (
"CURRENT_AUDIO_STATE=$(dumpsys audio | sed -r -n '/[0-9]{2}-[0-9]{2}.*player piid:.*state:.*$/h; ${x;p;}') && "
+ r"echo $CURRENT_AUDIO_STATE | grep -q paused && echo -e '1\c' || { echo $CURRENT_AUDIO_STATE | grep -q started && echo '2\c' || echo '0\c' ; }"
)
#: Determine whether the device is awake
CMD_AWAKE = "dumpsys power | grep mWakefulness | grep -q Awake"
#: Parse current application identifier from dumpsys output and assign it to ``CURRENT_APP`` variable (assumes dumpsys output is momentarily set to ``CURRENT_APP`` variable)
CMD_PARSE_CURRENT_APP = "CURRENT_APP=${CURRENT_APP#*ActivityRecord{* * } && CURRENT_APP=${CURRENT_APP#*{* * } && CURRENT_APP=${CURRENT_APP%%/*} && CURRENT_APP=${CURRENT_APP%\\}*}"
#: Parse current application for an Android 11 device
CMD_PARSE_CURRENT_APP11 = "CURRENT_APP=${CURRENT_APP%%/*} && CURRENT_APP=${CURRENT_APP##* }"
#: Assign focused application identifier to ``CURRENT_APP`` variable
CMD_DEFINE_CURRENT_APP_VARIABLE = (
"CURRENT_APP=$(dumpsys window windows | grep -E 'mCurrentFocus|mFocusedApp') && " + CMD_PARSE_CURRENT_APP
)
#: Assign focused application identifier to ``CURRENT_APP`` variable for an Android 11 device
CMD_DEFINE_CURRENT_APP_VARIABLE11 = (
"CURRENT_APP=$(dumpsys window windows | grep 'Window #1') && " + CMD_PARSE_CURRENT_APP11
)
#: Output identifier for current/focused application
CMD_CURRENT_APP = CMD_DEFINE_CURRENT_APP_VARIABLE + " && echo $CURRENT_APP"
#: Output identifier for current/focused application for an Android 11 device
CMD_CURRENT_APP11 = CMD_DEFINE_CURRENT_APP_VARIABLE11 + " && echo $CURRENT_APP"
#: Assign focused application identifier to ``CURRENT_APP`` variable (for a Google TV device)
CMD_DEFINE_CURRENT_APP_VARIABLE_GOOGLE_TV = (
"CURRENT_APP=$(dumpsys activity a . | grep mResumedActivity) && " + CMD_PARSE_CURRENT_APP
)
#: Output identifier for current/focused application (for a Google TV device)
CMD_CURRENT_APP_GOOGLE_TV = CMD_DEFINE_CURRENT_APP_VARIABLE_GOOGLE_TV + " && echo $CURRENT_APP"
#: Get the HDMI input
CMD_HDMI_INPUT = (
"dumpsys activity starter | grep -E -o '(ExternalTv|HDMI)InputService/HW[0-9]' -m 1 | grep -o 'HW[0-9]'"
)
#: Get the HDMI input for an Android 11 device
CMD_HDMI_INPUT11 = (
"(HDMI=$(dumpsys tv_input | grep 'ResourceClientProfile {.*}' | grep -o -E '(hdmi_port=[0-9]|TV)') && { echo ${HDMI/hdmi_port=/HW} | cut -d' ' -f1 ; }) || "
+ CMD_HDMI_INPUT
)
#: Launch an app if it is not already the current app (assumes the variable ``CURRENT_APP`` has already been set)
CMD_LAUNCH_APP_CONDITION = (
"if [ $CURRENT_APP != '{0}' ]; then monkey -p {0} -c " + INTENT_LAUNCH + " --pct-syskeys 0 1; fi"
)
#: Launch an app if it is not already the current app (assumes the variable ``CURRENT_APP`` has already been set) on a Fire TV
CMD_LAUNCH_APP_CONDITION_FIRETV = (
"if [ $CURRENT_APP != '{0}' ]; then monkey -p {0} -c " + INTENT_LAUNCH_FIRETV + " --pct-syskeys 0 1; fi"
)
#: Launch an app if it is not already the current app
CMD_LAUNCH_APP = (
CMD_DEFINE_CURRENT_APP_VARIABLE.replace("{", "{{").replace("}", "}}") + " && " + CMD_LAUNCH_APP_CONDITION
)
#: Launch an app if it is not already the current app on an Android 11 device
CMD_LAUNCH_APP11 = (
CMD_DEFINE_CURRENT_APP_VARIABLE11.replace("{", "{{").replace("}", "}}") + " && " + CMD_LAUNCH_APP_CONDITION
)
#: Launch an app on a Fire TV device
CMD_LAUNCH_APP_FIRETV = (
CMD_DEFINE_CURRENT_APP_VARIABLE.replace("{", "{{").replace("}", "}}") + " && " + CMD_LAUNCH_APP_CONDITION_FIRETV
)
#: Launch an app on a Google TV device
CMD_LAUNCH_APP_GOOGLE_TV = (
CMD_DEFINE_CURRENT_APP_VARIABLE_GOOGLE_TV.replace("{", "{{").replace("}", "}}") + " && " + CMD_LAUNCH_APP_CONDITION
)
#: Get the state from ``dumpsys media_session``; this assumes that the variable ``CURRENT_APP`` has been defined
CMD_MEDIA_SESSION_STATE = "dumpsys media_session | grep -A 100 'Sessions Stack' | grep -A 100 $CURRENT_APP | grep -m 1 'state=PlaybackState {'"
#: Determine the current app and get the state from ``dumpsys media_session``
CMD_CURRENT_APP_MEDIA_SESSION_STATE = CMD_CURRENT_APP + " && " + CMD_MEDIA_SESSION_STATE
#: Determine the current app and get the state from ``dumpsys media_session`` for an Android 11 device
CMD_CURRENT_APP_MEDIA_SESSION_STATE11 = CMD_CURRENT_APP11 + " && " + CMD_MEDIA_SESSION_STATE
#: Determine the current app and get the state from ``dumpsys media_session`` for a Google TV device
CMD_CURRENT_APP_MEDIA_SESSION_STATE_GOOGLE_TV = CMD_CURRENT_APP_GOOGLE_TV + " && " + CMD_MEDIA_SESSION_STATE
#: Get the running apps for an Android TV device
CMD_RUNNING_APPS_ANDROIDTV = "ps -A | grep u0_a"
#: Get the running apps for a Fire TV device
CMD_RUNNING_APPS_FIRETV = "ps | grep u0_a"
#: Get installed apps
CMD_INSTALLED_APPS = "pm list packages"
#: Determine if the device is on
CMD_SCREEN_ON = (
"(dumpsys power | grep 'Display Power' | grep -q 'state=ON' || dumpsys power | grep -q 'mScreenOn=true')"
)
#: Get the "STREAM_MUSIC" block from ``dumpsys audio``
CMD_STREAM_MUSIC = r"dumpsys audio | grep '\- STREAM_MUSIC:' -A 11"
#: Turn off an Android TV device (note: `KEY_POWER = 26` is defined below)
CMD_TURN_OFF_ANDROIDTV = CMD_SCREEN_ON + " && input keyevent 26"
#: Turn off a Fire TV device (note: `KEY_SLEEP = 223` is defined below)
CMD_TURN_OFF_FIRETV = CMD_SCREEN_ON + " && input keyevent 223"
#: Turn on an Android TV device (note: `KEY_POWER = 26` is defined below)
CMD_TURN_ON_ANDROIDTV = CMD_SCREEN_ON + " || input keyevent 26"
#: Turn on a Fire TV device (note: `KEY_POWER = 26` and `KEY_HOME = 3` are defined below)
CMD_TURN_ON_FIRETV = CMD_SCREEN_ON + " || (input keyevent 26 && input keyevent 3)"
#: Get the wake lock size
CMD_WAKE_LOCK_SIZE = "dumpsys power | grep Locks | grep 'size='"
#: Determine if the device is on, the screen is on, and get the wake lock size
CMD_SCREEN_ON_AWAKE_WAKE_LOCK_SIZE = (
CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && " + CMD_WAKE_LOCK_SIZE
)
# `getprop` commands
CMD_MANUFACTURER = "getprop ro.product.manufacturer"
CMD_MODEL = "getprop ro.product.model"
CMD_SERIALNO = "getprop ro.serialno"
CMD_VERSION = "getprop ro.build.version.release"
# Commands for getting the MAC address
CMD_MAC_WLAN0 = "ip addr show wlan0 | grep -m 1 ether"
CMD_MAC_ETH0 = "ip addr show eth0 | grep -m 1 ether"
#: The command used for getting the device properties
CMD_DEVICE_PROPERTIES = CMD_MANUFACTURER + " && " + CMD_MODEL + " && " + CMD_SERIALNO + " && " + CMD_VERSION
# ADB key event codes
# https://developer.android.com/reference/android/view/KeyEvent
KEY_BACK = 4
KEY_BLUE = 186
KEY_CENTER = 23
KEY_COMPONENT1 = 249
KEY_COMPONENT2 = 250
KEY_COMPOSITE1 = 247
KEY_COMPOSITE2 = 248
KEY_DOWN = 20
KEY_END = 123
KEY_ENTER = 66
KEY_ESCAPE = 111
KEY_FAST_FORWARD = 90
KEY_GREEN = 184
KEY_HDMI1 = 243
KEY_HDMI2 = 244
KEY_HDMI3 = 245
KEY_HDMI4 = 246
KEY_HOME = 3
KEY_INPUT = 178
KEY_LEFT = 21
KEY_MENU = 82
KEY_MOVE_HOME = 122
KEY_MUTE = 164
KEY_NEXT = 87
KEY_PAIRING = 225
KEY_PAUSE = 127
KEY_PLAY = 126
KEY_PLAY_PAUSE = 85
KEY_POWER = 26
KEY_PREVIOUS = 88
KEY_RED = 183
KEY_RESUME = 224
KEY_REWIND = 89
KEY_RIGHT = 22
KEY_SAT = 237
KEY_SEARCH = 84
KEY_SETTINGS = 176
KEY_SLEEP = 223
KEY_SPACE = 62
KEY_STOP = 86
KEY_SUSPEND = 276
KEY_SYSDOWN = 281
KEY_SYSLEFT = 282
KEY_SYSRIGHT = 283
KEY_SYSUP = 280
KEY_TEXT = 233
KEY_TOP = 122
KEY_UP = 19
KEY_VGA = 251
KEY_VOLUME_DOWN = 25
KEY_VOLUME_UP = 24
KEY_WAKEUP = 224
KEY_YELLOW = 185
# Alphanumeric key event codes
KEY_0 = 7
KEY_1 = 8
KEY_2 = 9
KEY_3 = 10
KEY_4 = 11
KEY_5 = 12
KEY_6 = 13
KEY_7 = 14
KEY_8 = 15
KEY_9 = 16
KEY_A = 29
KEY_B = 30
KEY_C = 31
KEY_D = 32
KEY_E = 33
KEY_F = 34
KEY_G = 35
KEY_H = 36
KEY_I = 37
KEY_J = 38
KEY_K = 39
KEY_L = 40
KEY_M = 41
KEY_N = 42
KEY_O = 43
KEY_P = 44
KEY_Q = 45
KEY_R = 46
KEY_S = 47
KEY_T = 48
KEY_U = 49
KEY_V = 50
KEY_W = 51
KEY_X = 52
KEY_Y = 53
KEY_Z = 54
# Android TV keys
KEYS = {
"BACK": KEY_BACK,
"BLUE": KEY_BLUE,
"CENTER": KEY_CENTER,
"COMPONENT1": KEY_COMPONENT1,
"COMPONENT2": KEY_COMPONENT2,
"COMPOSITE1": KEY_COMPOSITE1,
"COMPOSITE2": KEY_COMPOSITE2,
"DOWN": KEY_DOWN,
"END": KEY_END,
"ENTER": KEY_ENTER,
"ESCAPE": KEY_ESCAPE,
"FAST_FORWARD": KEY_FAST_FORWARD,
"GREEN": KEY_GREEN,
"HDMI1": KEY_HDMI1,
"HDMI2": KEY_HDMI2,
"HDMI3": KEY_HDMI3,
"HDMI4": KEY_HDMI4,
"HOME": KEY_HOME,
"INPUT": KEY_INPUT,
"LEFT": KEY_LEFT,
"MENU": KEY_MENU,
"MOVE_HOME": KEY_MOVE_HOME,
"MUTE": KEY_MUTE,
"PAIRING": KEY_PAIRING,
"POWER": KEY_POWER,
"RED": KEY_RED,
"RESUME": KEY_RESUME,
"REWIND": KEY_REWIND,
"RIGHT": KEY_RIGHT,
"SAT": KEY_SAT,
"SEARCH": KEY_SEARCH,
"SETTINGS": KEY_SETTINGS,
"SLEEP": KEY_SLEEP,
"SUSPEND": KEY_SUSPEND,
"SYSDOWN": KEY_SYSDOWN,
"SYSLEFT": KEY_SYSLEFT,
"SYSRIGHT": KEY_SYSRIGHT,
"SYSUP": KEY_SYSUP,
"TEXT": KEY_TEXT,
"TOP": KEY_TOP,
"UP": KEY_UP,
"VGA": KEY_VGA,
"VOLUME_DOWN": KEY_VOLUME_DOWN,
"VOLUME_UP": KEY_VOLUME_UP,
"WAKEUP": KEY_WAKEUP,
"YELLOW": KEY_YELLOW,
}
# Android TV / Fire TV states
STATE_ON = "on"
STATE_IDLE = "idle"
STATE_OFF = "off"
STATE_PLAYING = "playing"
STATE_PAUSED = "paused"
STATE_STANDBY = "standby"
STATE_STOPPED = "stopped"
STATE_UNKNOWN = "unknown"
#: States that are valid (used by :func:`~androidtv.basetv.state_detection_rules_validator`)
VALID_STATES = (STATE_IDLE, STATE_OFF, STATE_PLAYING, STATE_PAUSED, STATE_STANDBY)
#: Properties that can be used to determine the current state (used by :func:`~androidtv.basetv.state_detection_rules_validator`)
VALID_STATE_PROPERTIES = ("audio_state", "media_session_state")
#: Properties that can be checked for custom state detection (used by :func:`~androidtv.basetv.state_detection_rules_validator`)
VALID_PROPERTIES = VALID_STATE_PROPERTIES + ("wake_lock_size",)
#: The required type for each entry in :py:const:`VALID_PROPERTIES` (used by :func:`~androidtv.basetv.state_detection_rules_validator`)
VALID_PROPERTIES_TYPES = {"audio_state": str, "media_session_state": int, "wake_lock_size": int}
# https://developer.android.com/reference/android/media/session/PlaybackState.html
#: States for the :attr:`~androidtv.basetv.basetv.BaseTV.media_session_state` property
MEDIA_SESSION_STATES = {0: None, 1: STATE_STOPPED, 2: STATE_PAUSED, 3: STATE_PLAYING}
# Apps
APP_AE_TV = "com.aetn.aetv.watch"
APP_AMAZON_PRIME_VIDEO = "com.amazon.avod.thirdpartyclient"
APP_AMAZON_VIDEO = "com.amazon.avod"
APP_APPLE_TV_PLUS = "com.apple.atve.android.appletv"
APP_APPLE_TV_PLUS_FIRETV = "com.apple.atve.amazon.appletv"
APP_APPLE_TV_PLUS_SONY = "com.apple.atve.sony.appletv"
APP_ATV_LAUNCHER = "com.google.android.tvlauncher"
APP_BELL_FIBE = "com.quickplay.android.bellmediaplayer"
APP_CBC_GEM = "ca.cbc.android.cbctv"
APP_COMEDY_CENTRAL = "com.vmn.android.comedycentral"
APP_CRAVE = "ca.bellmedia.cravetv"
APP_DAILYMOTION = "com.dailymotion.dailymotion"
APP_DEEZER = "deezer.android.tv"
APP_DISNEY_PLUS = "com.disney.disneyplus"
APP_DISNEY_PLUS_HOTSTAR = "in.startv.hotstar"
APP_DS_PHOTO = "com.synology.dsphoto"
APP_DS_VIDEO = "com.synology.dsvideo"
APP_ES_FILE_EXPLORER = "com.estrongs.android.pop"
APP_FACEBOOK = "com.facebook.katana"
APP_FAWESOME = "com.future.moviesByFawesomeAndroidTV"
APP_FIREFOX = "org.mozilla.tv.firefox"
APP_FIRETV_PACKAGE_LAUNCHER = "com.amazon.tv.launcher"
APP_FIRETV_PACKAGE_SETTINGS = "com.amazon.tv.settings"
APP_FIRETV_STORE = "com.amazon.venezia"
APP_FOOD_NETWORK_GO = "tv.accedo.foodnetwork"
APP_FRANCE_TV = "fr.francetv.pluzz"
APP_GLOBAL_TV = "com.shawmedia.smglobal"
APP_GOOGLE_CAST = "com.google.android.apps.mediashell"
APP_GOOGLE_TV_LAUNCHER = "com.google.android.apps.tv.launcherx"
APP_HAYSTACK_NEWS = "com.haystack.android"
APP_HBO_GO = "eu.hbogo.androidtv.production"
APP_HBO_GO_2 = "com.HBO"
APP_HOICHOI = "com.viewlift.hoichoi"
APP_HULU = "com.hulu.plus"
APP_HUNGAMA_PLAY = "com.hungama.movies.tv"
APP_IMDB_TV = "com.amazon.imdb.tv.android.app"
APP_IPTV = "ru.iptvremote.android.iptv"
APP_IPTV_SMARTERS_PRO = "com.nst.iptvsmarterstvbox"
APP_JELLYFIN_TV = "org.jellyfin.androidtv"
APP_JIO_CINEMA = "com.jio.media.stb.ondemand"
APP_KODI = "org.xbmc.kodi"
APP_LIVE_CHANNELS = "com.google.android.tv"
APP_MIJN_RADIO = "org.samsonsen.nederlandse.radio.holland.nl"
APP_MOLOTOV = "tv.molotov.app"
APP_MRMC = "tv.mrmc.mrmc"
APP_MRMC_LITE = "tv.mrmc.mrmc.lite"
APP_MX_PLAYER = "com.mxtech.videoplayer.ad"
APP_NETFLIX = "com.netflix.ninja"
APP_NLZIET = "nl.nlziet"
APP_NOS = "nl.nos.app"
APP_NPO = "nl.uitzendinggemist"
APP_OCS = "com.orange.ocsgo"
APP_PLAY_GAMES = "com.google.android.play.games"
APP_PLAY_MUSIC = "com.google.android.music"
APP_PLAY_STORE = "com.android.vending"
APP_PLAY_VIDEOS = "com.google.android.videos"
APP_PLEX = "com.plexapp.android"
APP_PRIME_VIDEO = "com.amazon.amazonvideo.livingroom"
APP_PRIME_VIDEO_FIRETV = "com.amazon.firebat"
APP_SETTINGS = "com.android.tv.settings"
APP_SMART_YOUTUBE_TV = "com.liskovsoft.videomanager"
APP_SONY_ACTION_MENU = "com.sony.dtv.scrums.action"
APP_SONY_ALBUM = "com.sony.dtv.osat.album"
APP_SONY_BRAVIA_SYNC_MENU = "com.sony.dtv.braviasyncmenu"
APP_SONY_BRAVIA_TUTORIALS = "com.sony.dtv.bravialifehack"
APP_SONY_DISCOVER = "com.sony.dtv.discovery"
APP_SONY_HELP = "com.sony.dtv.smarthelp"
APP_SONY_INTERNET_BROWSER = "com.vewd.core.integration.dia"
APP_SONY_LIV = "com.sonyliv"
APP_SONY_MUSIC = "com.sony.dtv.osat.music"
APP_SONY_SCREEN_MIRRORING = "com.sony.dtv.networkapp.wifidirect"
APP_SONY_SELECT = "com.sony.dtv.sonyselect"
APP_SONY_TIMERS = "com.sony.dtv.timers"
APP_SONY_TV = "com.sony.dtv.tvx"
APP_SONY_VIDEO = "com.sony.dtv.osat.video"
APP_SPORT1 = "de.sport1.firetv.video"
APP_SPOTIFY = "com.spotify.tv.android"
APP_STEAM_LINK = "com.valvesoftware.steamlink"
APP_SYFY = "com.amazon.webapps.nbc.syfy"
APP_T2 = "tv.perception.clients.tv.android"
APP_TED = "com.ted.android.tv"
APP_TUNEIN = "tunein.player"
APP_TVHEADEND = "de.cyberdream.dreamepg.tvh.tv.player"
APP_TWITCH = "tv.twitch.android.app"
APP_TWITCH_FIRETV = "tv.twitch.android.viewer"
APP_VEVO = "com.vevo.tv"
APP_VH1 = "com.mtvn.vh1android"
APP_VIMEO = "com.vimeo.android.videoapp"
APP_VLC = "org.videolan.vlc"
APP_VOYO = "com.phonegap.voyo"
APP_VRV = "com.ellation.vrv"
APP_WAIPU_TV = "de.exaring.waipu.firetv.live"
APP_WATCH_TNT = "com.turner.tnt.android.networkapp"
APP_YOUTUBE = "com.google.android.youtube.tv"
APP_YOUTUBE_FIRETV = "com.amazon.firetv.youtube"
APP_YOUTUBE_KIDS = "com.google.android.youtube.tvkids"
APP_YOUTUBE_KIDS_FIRETV = "com.amazon.firetv.youtube.kids"
APP_YOUTUBE_MUSIC = "com.google.android.youtube.tvmusic"
APP_YOUTUBE_TV = "com.google.android.youtube.tvunplugged"
APP_ZEE5 = "com.graymatrix.did"
APP_ZIGGO_GO_TV = "com.ziggo.tv"
APPS = {
APP_AE_TV: "A&E",
APP_AMAZON_PRIME_VIDEO: "Amazon Prime Video",
APP_AMAZON_VIDEO: "Amazon Video",
APP_APPLE_TV_PLUS: "Apple TV+",
APP_APPLE_TV_PLUS_FIRETV: "Apple TV+ (Fire TV)",
APP_APPLE_TV_PLUS_SONY: "Apple TV+ (Sony)",
APP_ATV_LAUNCHER: "Android TV Launcher",
APP_BELL_FIBE: "Bell Fibe",
APP_CBC_GEM: "CBC Gem",
APP_COMEDY_CENTRAL: "Comedy Central",
APP_CRAVE: "Crave",
APP_DAILYMOTION: "Dailymotion",
APP_DEEZER: "Deezer",
APP_DISNEY_PLUS: "Disney+",
APP_DISNEY_PLUS_HOTSTAR: "Disney+ Hotstar",
APP_DS_PHOTO: "DS photo",
APP_DS_VIDEO: "DS video",
APP_ES_FILE_EXPLORER: "ES File Explorer",
APP_FACEBOOK: "Facebook Watch",
APP_FAWESOME: "Fawsome",
APP_FIREFOX: "Firefox",
APP_FIRETV_STORE: "FireTV Store",
APP_FOOD_NETWORK_GO: "Food Network GO",
APP_FRANCE_TV: "France TV",
APP_GLOBAL_TV: "Global TV",
APP_GOOGLE_CAST: "Google Cast",
APP_GOOGLE_TV_LAUNCHER: "Google TV Launcher",
APP_HAYSTACK_NEWS: "Haystack News",
APP_HBO_GO: "HBO GO",
APP_HBO_GO_2: "HBO GO (2)",
APP_HOICHOI: "Hoichoi",
APP_HULU: "Hulu",
APP_HUNGAMA_PLAY: "Hungama Play",
APP_IMDB_TV: "IMDb TV",
APP_IPTV: "IPTV",
APP_IPTV_SMARTERS_PRO: "IPTV Smarters Pro",
APP_JELLYFIN_TV: "Jellyfin",
APP_JIO_CINEMA: "Jio Cinema",
APP_KODI: "Kodi",
APP_LIVE_CHANNELS: "Live Channels",
APP_MIJN_RADIO: "Mijn Radio",
APP_MOLOTOV: "Molotov",
APP_MRMC: "MrMC",
APP_MRMC_LITE: "MrMC Lite",
APP_MX_PLAYER: "MX Player",
APP_NETFLIX: "Netflix",
APP_NLZIET: "NLZIET",
APP_NOS: "NOS",
APP_NPO: "NPO",
APP_OCS: "OCS",
APP_PLAY_GAMES: "Play Games",
APP_PLAY_MUSIC: "Play Music",
APP_PLAY_STORE: "Play Store",
APP_PLAY_VIDEOS: "Play Movies & TV",
APP_PLEX: "Plex",
APP_PRIME_VIDEO: "Prime Video",
APP_PRIME_VIDEO_FIRETV: "Prime Video (FireTV)",
APP_SETTINGS: "Settings",
APP_SMART_YOUTUBE_TV: "Smart YouTube TV",
APP_SONY_ACTION_MENU: "Action Menu",
APP_SONY_ALBUM: "Album",
APP_SONY_BRAVIA_SYNC_MENU: "Sync Menu",
APP_SONY_BRAVIA_TUTORIALS: "BRAVIA Tutorials",
APP_SONY_DISCOVER: "Discover",
APP_SONY_HELP: "Help",
APP_SONY_INTERNET_BROWSER: "Internet Browser",
APP_SONY_LIV: "SonyLIV",
APP_SONY_MUSIC: "Music",
APP_SONY_SCREEN_MIRRORING: "Screen mirroring",
APP_SONY_SELECT: "Sony Select",
APP_SONY_TIMERS: "Timers",
APP_SONY_TV: "TV",
APP_SONY_VIDEO: "Video",
APP_SPORT1: "Sport 1",
APP_SPOTIFY: "Spotify",
APP_STEAM_LINK: "Steam Link",
APP_SYFY: "Syfy",
APP_T2: "T-2 TV",
APP_TED: "TED",
APP_TUNEIN: "TuneIn Radio",
APP_TVHEADEND: "DreamPlayer TVHeadend",
APP_TWITCH: "Twitch",
APP_TWITCH_FIRETV: "Twitch (FireTV)",
APP_VEVO: "Vevo",
APP_VH1: "VH1",
APP_VIMEO: "Vimeo",
APP_VLC: "VLC",
APP_VOYO: "VOYO",
APP_VRV: "VRV",
APP_WAIPU_TV: "Waipu TV",
APP_WATCH_TNT: "Watch TNT",
APP_YOUTUBE: "YouTube",
APP_YOUTUBE_FIRETV: "YouTube (FireTV)",
APP_YOUTUBE_KIDS: "YouTube Kids",
APP_YOUTUBE_KIDS_FIRETV: "YouTube Kids (FireTV)",
APP_YOUTUBE_MUSIC: "YouTube Music",
APP_YOUTUBE_TV: "YouTube TV",
APP_ZEE5: "ZEE5",
APP_ZIGGO_GO_TV: "Ziggo GO TV",
}
# Regular expressions
REGEX_MEDIA_SESSION_STATE = re.compile(r"state=(?P<state>[0-9]+)", re.MULTILINE)
REGEX_WAKE_LOCK_SIZE = re.compile(r"size=(?P<size>[0-9]+)")
# Regular expression patterns
DEVICE_REGEX_PATTERN = r"Devices: (.*?)\W"
MAC_REGEX_PATTERN = "ether (.*?) brd"
MAX_VOLUME_REGEX_PATTERN = r"Max: (\d{1,})"
MUTED_REGEX_PATTERN = r"Muted: (.*?)\W"
STREAM_MUSIC_REGEX_PATTERN = "STREAM_MUSIC(.*?)- STREAM"
VOLUME_REGEX_PATTERN = r"\): (\d{1,})"
#: Default authentication timeout (in s) for :meth:`adb_shell.handle.tcp_handle.TcpHandle.connect` and :meth:`adb_shell.handle.tcp_handle_async.TcpHandleAsync.connect`
DEFAULT_AUTH_TIMEOUT_S = 10.0
#: Default transport timeout (in s) for :meth:`adb_shell.handle.tcp_handle.TcpHandle.connect` and :meth:`adb_shell.handle.tcp_handle_async.TcpHandleAsync.connect`
DEFAULT_TRANSPORT_TIMEOUT_S = 1.0
#: Default timeout (in s) for :class:`adb_shell.handle.tcp_handle.TcpHandle` and :class:`adb_shell.handle.tcp_handle_async.TcpHandleAsync`
DEFAULT_ADB_TIMEOUT_S = 9.0
#: Default timeout for acquiring the lock that protects ADB commands
DEFAULT_LOCK_TIMEOUT_S = 3.0
| 34.338684 | 179 | 0.72865 |
import re
import sys
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
from enum import IntEnum, unique
else:
IntEnum = object
def unique(cls):
"""A class decorator that does nothing."""
return cls
@unique
class DeviceEnum(IntEnum):
BASETV = 0
ANDROIDTV = 1
FIRETV = 2
INTENT_LAUNCH = "android.intent.category.LEANBACK_LAUNCHER"
INTENT_LAUNCH_FIRETV = "android.intent.category.LAUNCHER"
INTENT_HOME = "android.intent.category.HOME"
CUSTOM_AUDIO_STATE = "audio_state"
CUSTOM_CURRENT_APP = "current_app"
CUSTOM_CURRENT_APP_MEDIA_SESSION_STATE = "current_app_media_session_state"
CUSTOM_HDMI_INPUT = "hdmi_input"
CUSTOM_LAUNCH_APP = "launch_app"
CUSTOM_RUNNING_APPS = "running_apps"
CUSTOM_TURN_OFF = "turn_off"
CUSTOM_TURN_ON = "turn_on"
CUSTOMIZABLE_COMMANDS = {
CUSTOM_AUDIO_STATE,
CUSTOM_CURRENT_APP,
CUSTOM_CURRENT_APP_MEDIA_SESSION_STATE,
CUSTOM_HDMI_INPUT,
CUSTOM_LAUNCH_APP,
CUSTOM_RUNNING_APPS,
CUSTOM_TURN_OFF,
CUSTOM_TURN_ON,
}
HA_CUSTOMIZABLE_COMMANDS = (
CUSTOM_AUDIO_STATE,
CUSTOM_CURRENT_APP_MEDIA_SESSION_STATE,
CUSTOM_HDMI_INPUT,
CUSTOM_LAUNCH_APP,
CUSTOM_RUNNING_APPS,
CUSTOM_TURN_OFF,
CUSTOM_TURN_ON,
)
CMD_SUCCESS1 = r" && echo -e '1\c'"
CMD_SUCCESS1_FAILURE0 = r" && echo -e '1\c' || echo -e '0\c'"
CMD_AUDIO_STATE = r"dumpsys audio | grep paused | grep -qv 'Buffer Queue' && echo -e '1\c' || (dumpsys audio | grep started | grep -qv 'Buffer Queue' && echo '2\c' || echo '0\c')"
CMD_AUDIO_STATE11 = (
"CURRENT_AUDIO_STATE=$(dumpsys audio | sed -r -n '/[0-9]{2}-[0-9]{2}.*player piid:.*state:.*$/h; ${x;p;}') && "
+ r"echo $CURRENT_AUDIO_STATE | grep -q paused && echo -e '1\c' || { echo $CURRENT_AUDIO_STATE | grep -q started && echo '2\c' || echo '0\c' ; }"
)
CMD_AWAKE = "dumpsys power | grep mWakefulness | grep -q Awake"
CMD_PARSE_CURRENT_APP = "CURRENT_APP=${CURRENT_APP#*ActivityRecord{* * } && CURRENT_APP=${CURRENT_APP#*{* * } && CURRENT_APP=${CURRENT_APP%%/*} && CURRENT_APP=${CURRENT_APP%\\}*}"
CMD_PARSE_CURRENT_APP11 = "CURRENT_APP=${CURRENT_APP%%/*} && CURRENT_APP=${CURRENT_APP##* }"
CMD_DEFINE_CURRENT_APP_VARIABLE = (
"CURRENT_APP=$(dumpsys window windows | grep -E 'mCurrentFocus|mFocusedApp') && " + CMD_PARSE_CURRENT_APP
)
CMD_DEFINE_CURRENT_APP_VARIABLE11 = (
"CURRENT_APP=$(dumpsys window windows | grep 'Window #1') && " + CMD_PARSE_CURRENT_APP11
)
CMD_CURRENT_APP = CMD_DEFINE_CURRENT_APP_VARIABLE + " && echo $CURRENT_APP"
CMD_CURRENT_APP11 = CMD_DEFINE_CURRENT_APP_VARIABLE11 + " && echo $CURRENT_APP"
CMD_DEFINE_CURRENT_APP_VARIABLE_GOOGLE_TV = (
"CURRENT_APP=$(dumpsys activity a . | grep mResumedActivity) && " + CMD_PARSE_CURRENT_APP
)
CMD_CURRENT_APP_GOOGLE_TV = CMD_DEFINE_CURRENT_APP_VARIABLE_GOOGLE_TV + " && echo $CURRENT_APP"
CMD_HDMI_INPUT = (
"dumpsys activity starter | grep -E -o '(ExternalTv|HDMI)InputService/HW[0-9]' -m 1 | grep -o 'HW[0-9]'"
)
CMD_HDMI_INPUT11 = (
"(HDMI=$(dumpsys tv_input | grep 'ResourceClientProfile {.*}' | grep -o -E '(hdmi_port=[0-9]|TV)') && { echo ${HDMI/hdmi_port=/HW} | cut -d' ' -f1 ; }) || "
+ CMD_HDMI_INPUT
)
CMD_LAUNCH_APP_CONDITION = (
"if [ $CURRENT_APP != '{0}' ]; then monkey -p {0} -c " + INTENT_LAUNCH + " --pct-syskeys 0 1; fi"
)
CMD_LAUNCH_APP_CONDITION_FIRETV = (
"if [ $CURRENT_APP != '{0}' ]; then monkey -p {0} -c " + INTENT_LAUNCH_FIRETV + " --pct-syskeys 0 1; fi"
)
CMD_LAUNCH_APP = (
CMD_DEFINE_CURRENT_APP_VARIABLE.replace("{", "{{").replace("}", "}}") + " && " + CMD_LAUNCH_APP_CONDITION
)
CMD_LAUNCH_APP11 = (
CMD_DEFINE_CURRENT_APP_VARIABLE11.replace("{", "{{").replace("}", "}}") + " && " + CMD_LAUNCH_APP_CONDITION
)
CMD_LAUNCH_APP_FIRETV = (
CMD_DEFINE_CURRENT_APP_VARIABLE.replace("{", "{{").replace("}", "}}") + " && " + CMD_LAUNCH_APP_CONDITION_FIRETV
)
CMD_LAUNCH_APP_GOOGLE_TV = (
CMD_DEFINE_CURRENT_APP_VARIABLE_GOOGLE_TV.replace("{", "{{").replace("}", "}}") + " && " + CMD_LAUNCH_APP_CONDITION
)
CMD_MEDIA_SESSION_STATE = "dumpsys media_session | grep -A 100 'Sessions Stack' | grep -A 100 $CURRENT_APP | grep -m 1 'state=PlaybackState {'"
CMD_CURRENT_APP_MEDIA_SESSION_STATE = CMD_CURRENT_APP + " && " + CMD_MEDIA_SESSION_STATE
CMD_CURRENT_APP_MEDIA_SESSION_STATE11 = CMD_CURRENT_APP11 + " && " + CMD_MEDIA_SESSION_STATE
CMD_CURRENT_APP_MEDIA_SESSION_STATE_GOOGLE_TV = CMD_CURRENT_APP_GOOGLE_TV + " && " + CMD_MEDIA_SESSION_STATE
CMD_RUNNING_APPS_ANDROIDTV = "ps -A | grep u0_a"
CMD_RUNNING_APPS_FIRETV = "ps | grep u0_a"
CMD_INSTALLED_APPS = "pm list packages"
CMD_SCREEN_ON = (
"(dumpsys power | grep 'Display Power' | grep -q 'state=ON' || dumpsys power | grep -q 'mScreenOn=true')"
)
CMD_STREAM_MUSIC = r"dumpsys audio | grep '\- STREAM_MUSIC:' -A 11"
CMD_TURN_OFF_ANDROIDTV = CMD_SCREEN_ON + " && input keyevent 26"
CMD_TURN_OFF_FIRETV = CMD_SCREEN_ON + " && input keyevent 223"
CMD_TURN_ON_ANDROIDTV = CMD_SCREEN_ON + " || input keyevent 26"
CMD_TURN_ON_FIRETV = CMD_SCREEN_ON + " || (input keyevent 26 && input keyevent 3)"
CMD_WAKE_LOCK_SIZE = "dumpsys power | grep Locks | grep 'size='"
CMD_SCREEN_ON_AWAKE_WAKE_LOCK_SIZE = (
CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && " + CMD_WAKE_LOCK_SIZE
)
CMD_MANUFACTURER = "getprop ro.product.manufacturer"
CMD_MODEL = "getprop ro.product.model"
CMD_SERIALNO = "getprop ro.serialno"
CMD_VERSION = "getprop ro.build.version.release"
CMD_MAC_WLAN0 = "ip addr show wlan0 | grep -m 1 ether"
CMD_MAC_ETH0 = "ip addr show eth0 | grep -m 1 ether"
CMD_DEVICE_PROPERTIES = CMD_MANUFACTURER + " && " + CMD_MODEL + " && " + CMD_SERIALNO + " && " + CMD_VERSION
KEY_BACK = 4
KEY_BLUE = 186
KEY_CENTER = 23
KEY_COMPONENT1 = 249
KEY_COMPONENT2 = 250
KEY_COMPOSITE1 = 247
KEY_COMPOSITE2 = 248
KEY_DOWN = 20
KEY_END = 123
KEY_ENTER = 66
KEY_ESCAPE = 111
KEY_FAST_FORWARD = 90
KEY_GREEN = 184
KEY_HDMI1 = 243
KEY_HDMI2 = 244
KEY_HDMI3 = 245
KEY_HDMI4 = 246
KEY_HOME = 3
KEY_INPUT = 178
KEY_LEFT = 21
KEY_MENU = 82
KEY_MOVE_HOME = 122
KEY_MUTE = 164
KEY_NEXT = 87
KEY_PAIRING = 225
KEY_PAUSE = 127
KEY_PLAY = 126
KEY_PLAY_PAUSE = 85
KEY_POWER = 26
KEY_PREVIOUS = 88
KEY_RED = 183
KEY_RESUME = 224
KEY_REWIND = 89
KEY_RIGHT = 22
KEY_SAT = 237
KEY_SEARCH = 84
KEY_SETTINGS = 176
KEY_SLEEP = 223
KEY_SPACE = 62
KEY_STOP = 86
KEY_SUSPEND = 276
KEY_SYSDOWN = 281
KEY_SYSLEFT = 282
KEY_SYSRIGHT = 283
KEY_SYSUP = 280
KEY_TEXT = 233
KEY_TOP = 122
KEY_UP = 19
KEY_VGA = 251
KEY_VOLUME_DOWN = 25
KEY_VOLUME_UP = 24
KEY_WAKEUP = 224
KEY_YELLOW = 185
KEY_0 = 7
KEY_1 = 8
KEY_2 = 9
KEY_3 = 10
KEY_4 = 11
KEY_5 = 12
KEY_6 = 13
KEY_7 = 14
KEY_8 = 15
KEY_9 = 16
KEY_A = 29
KEY_B = 30
KEY_C = 31
KEY_D = 32
KEY_E = 33
KEY_F = 34
KEY_G = 35
KEY_H = 36
KEY_I = 37
KEY_J = 38
KEY_K = 39
KEY_L = 40
KEY_M = 41
KEY_N = 42
KEY_O = 43
KEY_P = 44
KEY_Q = 45
KEY_R = 46
KEY_S = 47
KEY_T = 48
KEY_U = 49
KEY_V = 50
KEY_W = 51
KEY_X = 52
KEY_Y = 53
KEY_Z = 54
KEYS = {
"BACK": KEY_BACK,
"BLUE": KEY_BLUE,
"CENTER": KEY_CENTER,
"COMPONENT1": KEY_COMPONENT1,
"COMPONENT2": KEY_COMPONENT2,
"COMPOSITE1": KEY_COMPOSITE1,
"COMPOSITE2": KEY_COMPOSITE2,
"DOWN": KEY_DOWN,
"END": KEY_END,
"ENTER": KEY_ENTER,
"ESCAPE": KEY_ESCAPE,
"FAST_FORWARD": KEY_FAST_FORWARD,
"GREEN": KEY_GREEN,
"HDMI1": KEY_HDMI1,
"HDMI2": KEY_HDMI2,
"HDMI3": KEY_HDMI3,
"HDMI4": KEY_HDMI4,
"HOME": KEY_HOME,
"INPUT": KEY_INPUT,
"LEFT": KEY_LEFT,
"MENU": KEY_MENU,
"MOVE_HOME": KEY_MOVE_HOME,
"MUTE": KEY_MUTE,
"PAIRING": KEY_PAIRING,
"POWER": KEY_POWER,
"RED": KEY_RED,
"RESUME": KEY_RESUME,
"REWIND": KEY_REWIND,
"RIGHT": KEY_RIGHT,
"SAT": KEY_SAT,
"SEARCH": KEY_SEARCH,
"SETTINGS": KEY_SETTINGS,
"SLEEP": KEY_SLEEP,
"SUSPEND": KEY_SUSPEND,
"SYSDOWN": KEY_SYSDOWN,
"SYSLEFT": KEY_SYSLEFT,
"SYSRIGHT": KEY_SYSRIGHT,
"SYSUP": KEY_SYSUP,
"TEXT": KEY_TEXT,
"TOP": KEY_TOP,
"UP": KEY_UP,
"VGA": KEY_VGA,
"VOLUME_DOWN": KEY_VOLUME_DOWN,
"VOLUME_UP": KEY_VOLUME_UP,
"WAKEUP": KEY_WAKEUP,
"YELLOW": KEY_YELLOW,
}
STATE_ON = "on"
STATE_IDLE = "idle"
STATE_OFF = "off"
STATE_PLAYING = "playing"
STATE_PAUSED = "paused"
STATE_STANDBY = "standby"
STATE_STOPPED = "stopped"
STATE_UNKNOWN = "unknown"
VALID_STATES = (STATE_IDLE, STATE_OFF, STATE_PLAYING, STATE_PAUSED, STATE_STANDBY)
VALID_STATE_PROPERTIES = ("audio_state", "media_session_state")
VALID_PROPERTIES = VALID_STATE_PROPERTIES + ("wake_lock_size",)
VALID_PROPERTIES_TYPES = {"audio_state": str, "media_session_state": int, "wake_lock_size": int}
MEDIA_SESSION_STATES = {0: None, 1: STATE_STOPPED, 2: STATE_PAUSED, 3: STATE_PLAYING}
APP_AE_TV = "com.aetn.aetv.watch"
APP_AMAZON_PRIME_VIDEO = "com.amazon.avod.thirdpartyclient"
APP_AMAZON_VIDEO = "com.amazon.avod"
APP_APPLE_TV_PLUS = "com.apple.atve.android.appletv"
APP_APPLE_TV_PLUS_FIRETV = "com.apple.atve.amazon.appletv"
APP_APPLE_TV_PLUS_SONY = "com.apple.atve.sony.appletv"
APP_ATV_LAUNCHER = "com.google.android.tvlauncher"
APP_BELL_FIBE = "com.quickplay.android.bellmediaplayer"
APP_CBC_GEM = "ca.cbc.android.cbctv"
APP_COMEDY_CENTRAL = "com.vmn.android.comedycentral"
APP_CRAVE = "ca.bellmedia.cravetv"
APP_DAILYMOTION = "com.dailymotion.dailymotion"
APP_DEEZER = "deezer.android.tv"
APP_DISNEY_PLUS = "com.disney.disneyplus"
APP_DISNEY_PLUS_HOTSTAR = "in.startv.hotstar"
APP_DS_PHOTO = "com.synology.dsphoto"
APP_DS_VIDEO = "com.synology.dsvideo"
APP_ES_FILE_EXPLORER = "com.estrongs.android.pop"
APP_FACEBOOK = "com.facebook.katana"
APP_FAWESOME = "com.future.moviesByFawesomeAndroidTV"
APP_FIREFOX = "org.mozilla.tv.firefox"
APP_FIRETV_PACKAGE_LAUNCHER = "com.amazon.tv.launcher"
APP_FIRETV_PACKAGE_SETTINGS = "com.amazon.tv.settings"
APP_FIRETV_STORE = "com.amazon.venezia"
APP_FOOD_NETWORK_GO = "tv.accedo.foodnetwork"
APP_FRANCE_TV = "fr.francetv.pluzz"
APP_GLOBAL_TV = "com.shawmedia.smglobal"
APP_GOOGLE_CAST = "com.google.android.apps.mediashell"
APP_GOOGLE_TV_LAUNCHER = "com.google.android.apps.tv.launcherx"
APP_HAYSTACK_NEWS = "com.haystack.android"
APP_HBO_GO = "eu.hbogo.androidtv.production"
APP_HBO_GO_2 = "com.HBO"
APP_HOICHOI = "com.viewlift.hoichoi"
APP_HULU = "com.hulu.plus"
APP_HUNGAMA_PLAY = "com.hungama.movies.tv"
APP_IMDB_TV = "com.amazon.imdb.tv.android.app"
APP_IPTV = "ru.iptvremote.android.iptv"
APP_IPTV_SMARTERS_PRO = "com.nst.iptvsmarterstvbox"
APP_JELLYFIN_TV = "org.jellyfin.androidtv"
APP_JIO_CINEMA = "com.jio.media.stb.ondemand"
APP_KODI = "org.xbmc.kodi"
APP_LIVE_CHANNELS = "com.google.android.tv"
APP_MIJN_RADIO = "org.samsonsen.nederlandse.radio.holland.nl"
APP_MOLOTOV = "tv.molotov.app"
APP_MRMC = "tv.mrmc.mrmc"
APP_MRMC_LITE = "tv.mrmc.mrmc.lite"
APP_MX_PLAYER = "com.mxtech.videoplayer.ad"
APP_NETFLIX = "com.netflix.ninja"
APP_NLZIET = "nl.nlziet"
APP_NOS = "nl.nos.app"
APP_NPO = "nl.uitzendinggemist"
APP_OCS = "com.orange.ocsgo"
APP_PLAY_GAMES = "com.google.android.play.games"
APP_PLAY_MUSIC = "com.google.android.music"
APP_PLAY_STORE = "com.android.vending"
APP_PLAY_VIDEOS = "com.google.android.videos"
APP_PLEX = "com.plexapp.android"
APP_PRIME_VIDEO = "com.amazon.amazonvideo.livingroom"
APP_PRIME_VIDEO_FIRETV = "com.amazon.firebat"
APP_SETTINGS = "com.android.tv.settings"
APP_SMART_YOUTUBE_TV = "com.liskovsoft.videomanager"
APP_SONY_ACTION_MENU = "com.sony.dtv.scrums.action"
APP_SONY_ALBUM = "com.sony.dtv.osat.album"
APP_SONY_BRAVIA_SYNC_MENU = "com.sony.dtv.braviasyncmenu"
APP_SONY_BRAVIA_TUTORIALS = "com.sony.dtv.bravialifehack"
APP_SONY_DISCOVER = "com.sony.dtv.discovery"
APP_SONY_HELP = "com.sony.dtv.smarthelp"
APP_SONY_INTERNET_BROWSER = "com.vewd.core.integration.dia"
APP_SONY_LIV = "com.sonyliv"
APP_SONY_MUSIC = "com.sony.dtv.osat.music"
APP_SONY_SCREEN_MIRRORING = "com.sony.dtv.networkapp.wifidirect"
APP_SONY_SELECT = "com.sony.dtv.sonyselect"
APP_SONY_TIMERS = "com.sony.dtv.timers"
APP_SONY_TV = "com.sony.dtv.tvx"
APP_SONY_VIDEO = "com.sony.dtv.osat.video"
APP_SPORT1 = "de.sport1.firetv.video"
APP_SPOTIFY = "com.spotify.tv.android"
APP_STEAM_LINK = "com.valvesoftware.steamlink"
APP_SYFY = "com.amazon.webapps.nbc.syfy"
APP_T2 = "tv.perception.clients.tv.android"
APP_TED = "com.ted.android.tv"
APP_TUNEIN = "tunein.player"
APP_TVHEADEND = "de.cyberdream.dreamepg.tvh.tv.player"
APP_TWITCH = "tv.twitch.android.app"
APP_TWITCH_FIRETV = "tv.twitch.android.viewer"
APP_VEVO = "com.vevo.tv"
APP_VH1 = "com.mtvn.vh1android"
APP_VIMEO = "com.vimeo.android.videoapp"
APP_VLC = "org.videolan.vlc"
APP_VOYO = "com.phonegap.voyo"
APP_VRV = "com.ellation.vrv"
APP_WAIPU_TV = "de.exaring.waipu.firetv.live"
APP_WATCH_TNT = "com.turner.tnt.android.networkapp"
APP_YOUTUBE = "com.google.android.youtube.tv"
APP_YOUTUBE_FIRETV = "com.amazon.firetv.youtube"
APP_YOUTUBE_KIDS = "com.google.android.youtube.tvkids"
APP_YOUTUBE_KIDS_FIRETV = "com.amazon.firetv.youtube.kids"
APP_YOUTUBE_MUSIC = "com.google.android.youtube.tvmusic"
APP_YOUTUBE_TV = "com.google.android.youtube.tvunplugged"
APP_ZEE5 = "com.graymatrix.did"
APP_ZIGGO_GO_TV = "com.ziggo.tv"
APPS = {
APP_AE_TV: "A&E",
APP_AMAZON_PRIME_VIDEO: "Amazon Prime Video",
APP_AMAZON_VIDEO: "Amazon Video",
APP_APPLE_TV_PLUS: "Apple TV+",
APP_APPLE_TV_PLUS_FIRETV: "Apple TV+ (Fire TV)",
APP_APPLE_TV_PLUS_SONY: "Apple TV+ (Sony)",
APP_ATV_LAUNCHER: "Android TV Launcher",
APP_BELL_FIBE: "Bell Fibe",
APP_CBC_GEM: "CBC Gem",
APP_COMEDY_CENTRAL: "Comedy Central",
APP_CRAVE: "Crave",
APP_DAILYMOTION: "Dailymotion",
APP_DEEZER: "Deezer",
APP_DISNEY_PLUS: "Disney+",
APP_DISNEY_PLUS_HOTSTAR: "Disney+ Hotstar",
APP_DS_PHOTO: "DS photo",
APP_DS_VIDEO: "DS video",
APP_ES_FILE_EXPLORER: "ES File Explorer",
APP_FACEBOOK: "Facebook Watch",
APP_FAWESOME: "Fawsome",
APP_FIREFOX: "Firefox",
APP_FIRETV_STORE: "FireTV Store",
APP_FOOD_NETWORK_GO: "Food Network GO",
APP_FRANCE_TV: "France TV",
APP_GLOBAL_TV: "Global TV",
APP_GOOGLE_CAST: "Google Cast",
APP_GOOGLE_TV_LAUNCHER: "Google TV Launcher",
APP_HAYSTACK_NEWS: "Haystack News",
APP_HBO_GO: "HBO GO",
APP_HBO_GO_2: "HBO GO (2)",
APP_HOICHOI: "Hoichoi",
APP_HULU: "Hulu",
APP_HUNGAMA_PLAY: "Hungama Play",
APP_IMDB_TV: "IMDb TV",
APP_IPTV: "IPTV",
APP_IPTV_SMARTERS_PRO: "IPTV Smarters Pro",
APP_JELLYFIN_TV: "Jellyfin",
APP_JIO_CINEMA: "Jio Cinema",
APP_KODI: "Kodi",
APP_LIVE_CHANNELS: "Live Channels",
APP_MIJN_RADIO: "Mijn Radio",
APP_MOLOTOV: "Molotov",
APP_MRMC: "MrMC",
APP_MRMC_LITE: "MrMC Lite",
APP_MX_PLAYER: "MX Player",
APP_NETFLIX: "Netflix",
APP_NLZIET: "NLZIET",
APP_NOS: "NOS",
APP_NPO: "NPO",
APP_OCS: "OCS",
APP_PLAY_GAMES: "Play Games",
APP_PLAY_MUSIC: "Play Music",
APP_PLAY_STORE: "Play Store",
APP_PLAY_VIDEOS: "Play Movies & TV",
APP_PLEX: "Plex",
APP_PRIME_VIDEO: "Prime Video",
APP_PRIME_VIDEO_FIRETV: "Prime Video (FireTV)",
APP_SETTINGS: "Settings",
APP_SMART_YOUTUBE_TV: "Smart YouTube TV",
APP_SONY_ACTION_MENU: "Action Menu",
APP_SONY_ALBUM: "Album",
APP_SONY_BRAVIA_SYNC_MENU: "Sync Menu",
APP_SONY_BRAVIA_TUTORIALS: "BRAVIA Tutorials",
APP_SONY_DISCOVER: "Discover",
APP_SONY_HELP: "Help",
APP_SONY_INTERNET_BROWSER: "Internet Browser",
APP_SONY_LIV: "SonyLIV",
APP_SONY_MUSIC: "Music",
APP_SONY_SCREEN_MIRRORING: "Screen mirroring",
APP_SONY_SELECT: "Sony Select",
APP_SONY_TIMERS: "Timers",
APP_SONY_TV: "TV",
APP_SONY_VIDEO: "Video",
APP_SPORT1: "Sport 1",
APP_SPOTIFY: "Spotify",
APP_STEAM_LINK: "Steam Link",
APP_SYFY: "Syfy",
APP_T2: "T-2 TV",
APP_TED: "TED",
APP_TUNEIN: "TuneIn Radio",
APP_TVHEADEND: "DreamPlayer TVHeadend",
APP_TWITCH: "Twitch",
APP_TWITCH_FIRETV: "Twitch (FireTV)",
APP_VEVO: "Vevo",
APP_VH1: "VH1",
APP_VIMEO: "Vimeo",
APP_VLC: "VLC",
APP_VOYO: "VOYO",
APP_VRV: "VRV",
APP_WAIPU_TV: "Waipu TV",
APP_WATCH_TNT: "Watch TNT",
APP_YOUTUBE: "YouTube",
APP_YOUTUBE_FIRETV: "YouTube (FireTV)",
APP_YOUTUBE_KIDS: "YouTube Kids",
APP_YOUTUBE_KIDS_FIRETV: "YouTube Kids (FireTV)",
APP_YOUTUBE_MUSIC: "YouTube Music",
APP_YOUTUBE_TV: "YouTube TV",
APP_ZEE5: "ZEE5",
APP_ZIGGO_GO_TV: "Ziggo GO TV",
}
# Patterns for scraping device state out of adb/dumpsys-style text output.
# NOTE(review): the exact commands whose output these parse are not visible
# in this file — confirm against the callers before relying on these notes.
REGEX_MEDIA_SESSION_STATE = re.compile(r"state=(?P<state>[0-9]+)", re.MULTILINE)
REGEX_WAKE_LOCK_SIZE = re.compile(r"size=(?P<size>[0-9]+)")
# Raw (uncompiled) patterns; capture group 1 carries the value of interest.
DEVICE_REGEX_PATTERN = r"Devices: (.*?)\W"
MAC_REGEX_PATTERN = "ether (.*?) brd"
MAX_VOLUME_REGEX_PATTERN = r"Max: (\d{1,})"
MUTED_REGEX_PATTERN = r"Muted: (.*?)\W"
STREAM_MUSIC_REGEX_PATTERN = "STREAM_MUSIC(.*?)- STREAM"
VOLUME_REGEX_PATTERN = r"\): (\d{1,})"
# Timeouts in seconds; names suggest stages of the ADB connection life-cycle
# (auth handshake, transport, command, lock acquisition) — verify at call sites.
DEFAULT_AUTH_TIMEOUT_S = 10.0
DEFAULT_TRANSPORT_TIMEOUT_S = 1.0
DEFAULT_ADB_TIMEOUT_S = 9.0
DEFAULT_LOCK_TIMEOUT_S = 3.0
| true | true |
f71bfc3874575acdd3b3cfdfd9209ae815ab7e10 | 7,497 | py | Python | tf/experiment6.py | wichtounet/frameworks | e0cac9d4ffbbf0b1e9d2491eb70bf2c6154f313b | [
"MIT"
] | 20 | 2016-12-01T17:39:04.000Z | 2019-08-25T12:50:05.000Z | tf/experiment6.py | wichtounet/frameworks | e0cac9d4ffbbf0b1e9d2491eb70bf2c6154f313b | [
"MIT"
] | 4 | 2017-10-08T13:56:21.000Z | 2019-01-21T12:49:09.000Z | tf/experiment6.py | wichtounet/frameworks | e0cac9d4ffbbf0b1e9d2491eb70bf2c6154f313b | [
"MIT"
] | 7 | 2018-05-09T01:29:20.000Z | 2019-02-09T10:49:39.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import argparse
import gzip
import os
import sys
import time
import os
import math
import numpy
from PIL import Image
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Training configuration for the ImageNet CNN experiment below.
batch_size = 128
# Number of evaluation batches the final accuracy is averaged over.
# NOTE(review): hard-coded — must match how many batches the evaluation
# loop in main() actually runs; confirm against the dataset size.
batches = 10009
num_epochs = 5
num_classes = 1000
FLAGS = None
from urllib.request import urlretrieve
from os.path import isfile, isdir
import tarfile
import pickle
def data_type():
    """Return the floating-point dtype (tf.float32) used for all tensors."""
    dtype = tf.float32
    return dtype
def get_batch():
    """Assemble the next training minibatch from the global image index.

    Reads up to ``batch_size`` images starting at the module-level cursor
    ``current_index`` out of ``training_images``, scales pixels to [0, 1],
    and one-hot encodes the matching ``training_labels``.  Unreadable
    images are skipped (the cursor still advances past them).

    Returns:
        tuple: ``(B, labels)`` where ``B`` has shape
        (batch_size, 256, 256, 3) and ``labels`` is one-hot with
        ``num_classes`` columns.
    """
    index = 0
    global current_index
    global training_images
    global training_labels
    B = numpy.zeros(shape=(batch_size, 256, 256, 3))
    L = numpy.zeros(shape=(batch_size))
    while index < batch_size:
        try:
            img = load_img(training_images[current_index])
            B[index] = img_to_array(img)
            B[index] /= 255
            L[index] = training_labels[current_index]
            index = index + 1
            current_index = current_index + 1
        except Exception:
            # Skip corrupt/unreadable images but keep the cursor moving.
            # Was a bare ``except:`` which also swallowed KeyboardInterrupt;
            # narrowed to Exception.  NOTE(review): if the index list is
            # exhausted mid-batch this still loops on IndexError — callers
            # must ensure at least batch_size images remain (the training
            # loop's ``current_index + batch_size < len(...)`` guard does).
            print("Ignore image {}".format(training_images[current_index]))
            current_index = current_index + 1
    return B, keras.utils.to_categorical(L, num_classes)
def main(_):
    """Build and train a small 5-block CNN on resized ImageNet with raw TF1 ops.

    Indexes images from a hard-coded directory (one class per top-level
    folder), shuffles the index, trains for ``num_epochs`` with momentum
    SGD, then runs a second pass to report accuracy.
    NOTE(review): the final evaluation loop re-reads the *training* images,
    so the printed "Test accuracy" is measured on training data.
    """
    global current_index
    global training_images
    global training_labels
    label_counter = 0
    training_images = []
    training_labels = []
    # Index every file; each top-level class folder gets one integer label.
    for subdir, dirs, files in os.walk('/data/datasets/imagenet_resized/train/'):
        for folder in dirs:
            for folder_subdir, folder_dirs, folder_files in os.walk(os.path.join(subdir, folder)):
                for file in folder_files:
                    training_images.append(os.path.join(folder_subdir, file))
                    training_labels.append(label_counter)
            label_counter = label_counter + 1
    # Largest multiple of batch_size that fits in the dataset.
    nice_n = math.floor(len(training_images) / batch_size) * batch_size
    print(nice_n)
    print(len(training_images))
    print(len(training_labels))
    import random
    # Shuffle images and labels with the same permutation so pairs stay aligned.
    perm = list(range(len(training_images)))
    random.shuffle(perm)
    training_images = [training_images[index] for index in perm]
    training_labels = [training_labels[index] for index in perm]
    print("Data is ready...")
    # Graph inputs: one minibatch of 256x256 RGB images and one-hot labels.
    train_data_node = tf.placeholder(data_type(), shape=(batch_size, 256, 256, 3))
    train_labels_node = tf.placeholder(tf.int64, shape=(batch_size,1000))
    # Convolutional weights
    conv1_weights = tf.Variable(tf.truncated_normal([3, 3, 3, 16], stddev=0.1, dtype=data_type()))
    conv1_biases = tf.Variable(tf.zeros([16], dtype=data_type()))
    conv2_weights = tf.Variable(tf.truncated_normal([3, 3, 16, 16], stddev=0.1, dtype=data_type()))
    conv2_biases = tf.Variable(tf.zeros([16], dtype=data_type()))
    conv3_weights = tf.Variable(tf.truncated_normal([3, 3, 16, 32], stddev=0.1, dtype=data_type()))
    conv3_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
    conv4_weights = tf.Variable(tf.truncated_normal([3, 3, 32, 32], stddev=0.1, dtype=data_type()))
    conv4_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
    conv5_weights = tf.Variable(tf.truncated_normal([3, 3, 32, 32], stddev=0.1, dtype=data_type()))
    conv5_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
    # Fully connected weights
    fc1_weights = tf.Variable(tf.truncated_normal([2048, 2048], stddev=0.1, dtype=data_type()))
    fc1_biases = tf.Variable(tf.constant(0.1, shape=[2048], dtype=data_type()))
    fc2_weights = tf.Variable(tf.truncated_normal([2048, 1000], stddev=0.1, dtype=data_type()))
    fc2_biases = tf.Variable(tf.constant(0.1, shape=[1000], dtype=data_type()))
    def model(data):
        """Forward pass: five conv+relu+maxpool blocks, then two FC layers.

        Each 2x2 pool halves the spatial size (256 -> 8 after five pools),
        giving 8*8*32 = 2048 features for the FC layers.  Returns logits.
        """
        # Conv 1
        conv = tf.nn.conv2d(data, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
        pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        # Conv 2
        conv = tf.nn.conv2d(pool, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
        pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        # Conv 3
        conv = tf.nn.conv2d(pool, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu = tf.nn.relu(tf.nn.bias_add(conv, conv3_biases))
        pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        # Conv 4
        conv = tf.nn.conv2d(pool, conv4_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu = tf.nn.relu(tf.nn.bias_add(conv, conv4_biases))
        pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        # Conv 5
        conv = tf.nn.conv2d(pool, conv5_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu = tf.nn.relu(tf.nn.bias_add(conv, conv5_biases))
        pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        # Fully Connected
        reshape = tf.reshape(pool, [batch_size, 2048])
        hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
        return tf.matmul(hidden, fc2_weights) + fc2_biases
    # Training computation: logits + cross-entropy loss.
    logits = model(train_data_node)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = train_labels_node))
    # Use simple momentum for the optimization.
    optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9).minimize(loss)
    acc_pred = tf.equal(tf.argmax(logits,1), tf.argmax(train_labels_node,1))
    accuracy = tf.reduce_mean(tf.cast(acc_pred, tf.float32))
    # Predictions for the current training minibatch.
    # train_prediction = tf.nn.softmax(logits)
    # Create a local session to run the training.
    with tf.Session() as sess:
        # Run all the initializers to prepare the trainable parameters.
        tf.global_variables_initializer().run(session = sess)
        print('Initialized!')
        for epoch in range(0, num_epochs):
            current_index = 0
            while current_index + batch_size < len(training_images):
                start_time = time.time()
                b, l = get_batch()
                feed_dict = {train_data_node: b, train_labels_node: l}
                # Run the optimizer to update weights.
                _, batch_loss, batch_accuracy = sess.run([optimizer, loss, accuracy], feed_dict=feed_dict)
                end_time = time.time()
                print('batch {}/{} loss: {} accuracy: {} duration: {}ms'.format(int(current_index / batch_size), int(nice_n / batch_size), batch_loss, batch_accuracy, 1000 * (end_time - start_time)), flush = True)
            print('epoch {}/{}'.format(epoch, num_epochs))
        # Finally print the result!
        current_index = 0
        acc = 0.0
        while current_index + batch_size < len(training_images):
            b, l = get_batch()
            feed_dict = {train_data_node: b, train_labels_node: l}
            [batch_accuracy] = sess.run([accuracy], feed_dict=feed_dict)
            print('Test batch accuracy:', batch_accuracy, flush = True)
            acc += batch_accuracy
        # NOTE(review): divides by the module constant ``batches`` (10009),
        # not the number of batches actually evaluated above — verify match.
        acc /= batches
        # NOTE(review): ``acc`` is a fraction in [0, 1] but is formatted with
        # a percent sign and no *100 — confirm the intended output.
        print('Test accuracy: %.1f%%' % acc)
# Entry point: tf.app.run parses flags and invokes main() with leftover argv.
tf.app.run(main=main, argv=[sys.argv[0]])
| 35.870813 | 213 | 0.652528 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import argparse
import gzip
import os
import sys
import time
import os
import math
import numpy
from PIL import Image
import numpy
from six.moves import urllib
from six.moves import xrange
import tensorflow as tf
batch_size = 128
batches = 10009
num_epochs = 5
num_classes = 1000
FLAGS = None
from urllib.request import urlretrieve
from os.path import isfile, isdir
import tarfile
import pickle
def data_type():
return tf.float32
def get_batch():
index = 0
global current_index
global training_images
global training_labels
B = numpy.zeros(shape=(batch_size, 256, 256, 3))
L = numpy.zeros(shape=(batch_size))
while index < batch_size:
try:
img = load_img(training_images[current_index])
B[index] = img_to_array(img)
B[index] /= 255
L[index] = training_labels[current_index]
index = index + 1
current_index = current_index + 1
except:
print("Ignore image {}".format(training_images[current_index]))
current_index = current_index + 1
return B, keras.utils.to_categorical(L, num_classes)
def main(_):
global current_index
global training_images
global training_labels
label_counter = 0
training_images = []
training_labels = []
for subdir, dirs, files in os.walk('/data/datasets/imagenet_resized/train/'):
for folder in dirs:
for folder_subdir, folder_dirs, folder_files in os.walk(os.path.join(subdir, folder)):
for file in folder_files:
training_images.append(os.path.join(folder_subdir, file))
training_labels.append(label_counter)
label_counter = label_counter + 1
nice_n = math.floor(len(training_images) / batch_size) * batch_size
print(nice_n)
print(len(training_images))
print(len(training_labels))
import random
perm = list(range(len(training_images)))
random.shuffle(perm)
training_images = [training_images[index] for index in perm]
training_labels = [training_labels[index] for index in perm]
print("Data is ready...")
train_data_node = tf.placeholder(data_type(), shape=(batch_size, 256, 256, 3))
train_labels_node = tf.placeholder(tf.int64, shape=(batch_size,1000))
conv1_weights = tf.Variable(tf.truncated_normal([3, 3, 3, 16], stddev=0.1, dtype=data_type()))
conv1_biases = tf.Variable(tf.zeros([16], dtype=data_type()))
conv2_weights = tf.Variable(tf.truncated_normal([3, 3, 16, 16], stddev=0.1, dtype=data_type()))
conv2_biases = tf.Variable(tf.zeros([16], dtype=data_type()))
conv3_weights = tf.Variable(tf.truncated_normal([3, 3, 16, 32], stddev=0.1, dtype=data_type()))
conv3_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
conv4_weights = tf.Variable(tf.truncated_normal([3, 3, 32, 32], stddev=0.1, dtype=data_type()))
conv4_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
conv5_weights = tf.Variable(tf.truncated_normal([3, 3, 32, 32], stddev=0.1, dtype=data_type()))
conv5_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
fc1_weights = tf.Variable(tf.truncated_normal([2048, 2048], stddev=0.1, dtype=data_type()))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[2048], dtype=data_type()))
fc2_weights = tf.Variable(tf.truncated_normal([2048, 1000], stddev=0.1, dtype=data_type()))
fc2_biases = tf.Variable(tf.constant(0.1, shape=[1000], dtype=data_type()))
def model(data):
conv = tf.nn.conv2d(data, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv = tf.nn.conv2d(pool, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv = tf.nn.conv2d(pool, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv3_biases))
pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv = tf.nn.conv2d(pool, conv4_weights, strides=[1, 1, 1, 1], padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv4_biases))
pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv = tf.nn.conv2d(pool, conv5_weights, strides=[1, 1, 1, 1], padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv5_biases))
pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
reshape = tf.reshape(pool, [batch_size, 2048])
hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
return tf.matmul(hidden, fc2_weights) + fc2_biases
logits = model(train_data_node)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = train_labels_node))
optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9).minimize(loss)
acc_pred = tf.equal(tf.argmax(logits,1), tf.argmax(train_labels_node,1))
accuracy = tf.reduce_mean(tf.cast(acc_pred, tf.float32))
with tf.Session() as sess:
tf.global_variables_initializer().run(session = sess)
print('Initialized!')
for epoch in range(0, num_epochs):
current_index = 0
while current_index + batch_size < len(training_images):
start_time = time.time()
b, l = get_batch()
feed_dict = {train_data_node: b, train_labels_node: l}
_, batch_loss, batch_accuracy = sess.run([optimizer, loss, accuracy], feed_dict=feed_dict)
end_time = time.time()
print('batch {}/{} loss: {} accuracy: {} duration: {}ms'.format(int(current_index / batch_size), int(nice_n / batch_size), batch_loss, batch_accuracy, 1000 * (end_time - start_time)), flush = True)
print('epoch {}/{}'.format(epoch, num_epochs))
current_index = 0
acc = 0.0
while current_index + batch_size < len(training_images):
b, l = get_batch()
feed_dict = {train_data_node: b, train_labels_node: l}
[batch_accuracy] = sess.run([accuracy], feed_dict=feed_dict)
print('Test batch accuracy:', batch_accuracy, flush = True)
acc += batch_accuracy
acc /= batches
print('Test accuracy: %.1f%%' % acc)
tf.app.run(main=main, argv=[sys.argv[0]])
| true | true |
f71bfd19f426d8f03872939dfd8bb1e2d6d2dcdf | 332 | py | Python | week1/the_real_deal/is_prime.py | sevgo/Programming101 | ac25c4d9695563b449a629c60ec77a739c9f5be3 | [
"BSD-3-Clause"
] | null | null | null | week1/the_real_deal/is_prime.py | sevgo/Programming101 | ac25c4d9695563b449a629c60ec77a739c9f5be3 | [
"BSD-3-Clause"
] | 1 | 2021-09-16T05:44:31.000Z | 2021-09-16T05:44:31.000Z | week1/the_real_deal/is_prime.py | sevgo/Programming101 | ac25c4d9695563b449a629c60ec77a739c9f5be3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from sum_of_divisors import sum_of_divisors
def is_prime(n):
    """Check primality via the divisor sum.

    A prime's only divisors are 1 and n itself, so the sum of all of its
    divisors equals n + 1 exactly when n is prime.
    """
    return sum_of_divisors(n) == n + 1
if __name__ == "__main__":
    # Simple CLI: read an integer from stdin and report whether it is prime.
    number = int(input("Number: "))
    print (is_prime(number))
| 22.133333 | 46 | 0.653614 |
from sum_of_divisors import sum_of_divisors
def is_prime(n):
return n + 1 == sum_of_divisors(n)
if __name__ == "__main__":
number = int(input("Number: "))
print (is_prime(number))
| true | true |
f71bfd37000184d8a9e59d68561043e6514fa6d5 | 4,211 | py | Python | src/data_imputation_paper/imputation/dl.py | se-jaeger/data-imputation-paper | 498d2d871302d917f58ecf6a9576e3a3451c5faa | [
"Apache-2.0"
] | 2 | 2022-01-18T09:59:01.000Z | 2022-02-02T10:01:45.000Z | src/data_imputation_paper/imputation/dl.py | se-jaeger/data-imputation-paper | 498d2d871302d917f58ecf6a9576e3a3451c5faa | [
"Apache-2.0"
] | null | null | null | src/data_imputation_paper/imputation/dl.py | se-jaeger/data-imputation-paper | 498d2d871302d917f58ecf6a9576e3a3451c5faa | [
"Apache-2.0"
] | null | null | null | import logging
from typing import Dict, List, Optional, Tuple
import pandas as pd
from autokeras import StructuredDataClassifier, StructuredDataRegressor
from tensorflow.keras import Model
from ._base import BaseImputer
logger = logging.getLogger()
class AutoKerasImputer(BaseImputer):
    """Imputer that trains one AutoKeras model per target column.

    For each target column a ``StructuredDataRegressor`` (numerical target)
    or ``StructuredDataClassifier`` (categorical target) is fitted on the
    rows where that column is observed, using all other columns as
    features.  ``transform`` then predicts the missing entries in place.
    """
    def __init__(
        self,
        max_trials: Optional[int] = 10,
        tuner: Optional[str] = None,
        validation_split: Optional[float] = 0.2,
        epochs: Optional[int] = 10,
        seed: Optional[int] = None
    ):
        """
        Deep-learning based imputation method. Uses AutoKeras to search for a good architecture/hyperparameters.
        Args:
            max_trials (Optional[int], optional): maximum number of trials for model selection. Defaults to 10.
            tuner (Optional[str], optional): AutoKeras hyperparameter tuning strategy. Defaults to None.
            validation_split (Optional[float], optional): validation split for AutoKeras fit. Defaults to 0.2.
            epochs (Optional[int], optional): number of epochs for AutoKeras fit. Defaults to 10.
            seed (Optional[int], optional): Seed to make behavior deterministic. Defaults to None.
        """
        super().__init__(
            seed=seed
        )
        self.max_trials = max_trials
        self.epochs = epochs
        self.validation_split = validation_split
        self.tuner = tuner
        # One fitted AutoKeras model per target column, keyed by column name.
        self._predictors: Dict[str, Model] = {}
    def get_best_hyperparameters(self):
        """Return the best hyperparameter values found for each target column.

        Returns:
            dict: maps column name to the value dict of the best trial
            reported by that column's AutoKeras tuner.
        """
        super().get_best_hyperparameters()
        return {
            column: self._predictors[column].tuner.get_best_hyperparameters()[0].values
            for column in self._predictors.keys()
        }
    def fit(self, data: pd.DataFrame, target_columns: List[str]) -> BaseImputer:
        """Fit one AutoKeras model per target column on the observed rows.

        Args:
            data (pd.DataFrame): training data; rows where a target column
                is missing are excluded from that column's model.
            target_columns (List[str]): columns to learn imputation models for.

        Returns:
            BaseImputer: ``self``, with ``_predictors`` populated.
        """
        super().fit(data=data, target_columns=target_columns)
        # cast categorical columns to strings fixes problems where categories are integer values and treated as regression task
        data = self._categorical_columns_to_string(data.copy()) # We don't want to change the input dataframe -> copy it
        for target_column in self._target_columns:
            missing_mask = data[target_column].isna()
            feature_cols = [c for c in self._categorical_columns + self._numerical_columns if c != target_column]
            # Regression for numerical targets, classification for categorical.
            # NOTE(review): if a target is in neither list,
            # StructuredDataModelSearch stays unbound and a NameError follows.
            if target_column in self._numerical_columns:
                StructuredDataModelSearch = StructuredDataRegressor
            elif target_column in self._categorical_columns:
                StructuredDataModelSearch = StructuredDataClassifier
            self._predictors[target_column] = StructuredDataModelSearch(
                column_names=feature_cols,
                overwrite=True,
                max_trials=self.max_trials,
                tuner=self.tuner,
                directory="../models"
            )
            self._predictors[target_column].fit(
                x=data.loc[~missing_mask, feature_cols],
                y=data.loc[~missing_mask, target_column],
                epochs=self.epochs
            )
        self._fitted = True
        return self
    def transform(self, data: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """Impute missing values in the target columns of ``data``.

        Args:
            data (pd.DataFrame): data with potential missing values.

        Returns:
            Tuple[pd.DataFrame, pd.DataFrame]: the imputed copy of ``data``
            (original dtypes restored) and the boolean mask of entries that
            were missing on input, i.e. those that got imputed.
        """
        super().transform(data=data)
        imputed_mask = data[self._target_columns].isna()
        # save the original dtypes because ..
        dtypes = data.dtypes
        # ... dtypes of data need to be same as for fitting
        data = self._categorical_columns_to_string(data.copy()) # We don't want to change the input dataframe -> copy it
        for target_column in self._target_columns:
            feature_cols = [c for c in self._categorical_columns + self._numerical_columns if c != target_column]
            missing_mask = data[target_column].isna()
            amount_missing_in_columns = missing_mask.sum()
            if amount_missing_in_columns > 0:
                data.loc[missing_mask, target_column] = self._predictors[target_column].predict(data.loc[missing_mask, feature_cols])[:, 0]
                logger.debug(f'Imputed {amount_missing_in_columns} values in column {target_column}')
        self._restore_dtype(data, dtypes)
        return data, imputed_mask
| 37.265487 | 139 | 0.654001 | import logging
from typing import Dict, List, Optional, Tuple
import pandas as pd
from autokeras import StructuredDataClassifier, StructuredDataRegressor
from tensorflow.keras import Model
from ._base import BaseImputer
logger = logging.getLogger()
class AutoKerasImputer(BaseImputer):
def __init__(
self,
max_trials: Optional[int] = 10,
tuner: Optional[str] = None,
validation_split: Optional[float] = 0.2,
epochs: Optional[int] = 10,
seed: Optional[int] = None
):
super().__init__(
seed=seed
)
self.max_trials = max_trials
self.epochs = epochs
self.validation_split = validation_split
self.tuner = tuner
self._predictors: Dict[str, Model] = {}
def get_best_hyperparameters(self):
super().get_best_hyperparameters()
return {
column: self._predictors[column].tuner.get_best_hyperparameters()[0].values
for column in self._predictors.keys()
}
def fit(self, data: pd.DataFrame, target_columns: List[str]) -> BaseImputer:
super().fit(data=data, target_columns=target_columns)
data = self._categorical_columns_to_string(data.copy())
for target_column in self._target_columns:
missing_mask = data[target_column].isna()
feature_cols = [c for c in self._categorical_columns + self._numerical_columns if c != target_column]
if target_column in self._numerical_columns:
StructuredDataModelSearch = StructuredDataRegressor
elif target_column in self._categorical_columns:
StructuredDataModelSearch = StructuredDataClassifier
self._predictors[target_column] = StructuredDataModelSearch(
column_names=feature_cols,
overwrite=True,
max_trials=self.max_trials,
tuner=self.tuner,
directory="../models"
)
self._predictors[target_column].fit(
x=data.loc[~missing_mask, feature_cols],
y=data.loc[~missing_mask, target_column],
epochs=self.epochs
)
self._fitted = True
return self
def transform(self, data: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
super().transform(data=data)
imputed_mask = data[self._target_columns].isna()
# save the original dtypes because ..
dtypes = data.dtypes
# ... dtypes of data need to be same as for fitting
data = self._categorical_columns_to_string(data.copy()) # We don't want to change the input dataframe -> copy it
for target_column in self._target_columns:
feature_cols = [c for c in self._categorical_columns + self._numerical_columns if c != target_column]
missing_mask = data[target_column].isna()
amount_missing_in_columns = missing_mask.sum()
if amount_missing_in_columns > 0:
data.loc[missing_mask, target_column] = self._predictors[target_column].predict(data.loc[missing_mask, feature_cols])[:, 0]
logger.debug(f'Imputed {amount_missing_in_columns} values in column {target_column}')
self._restore_dtype(data, dtypes)
return data, imputed_mask
| true | true |
f71bfd5b0f2615891de5ac70368d3a37c96767b7 | 17,386 | py | Python | HER_mod/rl_modules/get_path_costs.py | schrammlb2/policy-guided-sst | 8dce6619b9c771c39915c60fe9c54270ea1e621e | [
"Apache-2.0"
] | null | null | null | HER_mod/rl_modules/get_path_costs.py | schrammlb2/policy-guided-sst | 8dce6619b9c771c39915c60fe9c54270ea1e621e | [
"Apache-2.0"
] | null | null | null | HER_mod/rl_modules/get_path_costs.py | schrammlb2/policy-guided-sst | 8dce6619b9c771c39915c60fe9c54270ea1e621e | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
from scipy import stats
from HER_mod.rl_modules.tsp import generate_path
from HER_mod.rl_modules.hyperparams import NUM_GOALS, NUM_AGENTS
# Gradient-descent step budgets tried when optimizing velocity targets.
gd_step_list = [0,2,5, 10, 20, 40]
# NUM_AGENTS = 3
# Number of random multi-goal trials evaluated per trained agent.
N=200
def get_path_costs(train_pos_agent, train_vel_agent, perm_search=True):
    """Compare HER agents with and without velocity-target optimization.

    Trains ``NUM_AGENTS`` pairs of agents (one position-only baseline, one
    velocity-aware), runs each pair on ``N`` randomly generated multi-goal
    paths, and records trajectory length for every gradient-step budget in
    ``gd_step_list``.  Raw results are pickled to the working directory and
    a plot of the mean relative improvement (with a 95% t-interval across
    agents) is written to ``results/Relative Improvement.png``.

    Args:
        train_pos_agent: zero-arg factory returning a trained position agent.
        train_vel_agent: zero-arg factory returning a trained velocity agent.
        perm_search: forwarded to ``find_shortest_path``; whether to search
            over goal-visit permutations.
    """
    num_agents = NUM_AGENTS
    num_goals = NUM_GOALS
    n = N
    pos_time_list = []
    vel_time_list = []
    for _ in range(num_agents):
        pos_agent = train_pos_agent()
        vel_agent = train_vel_agent()
        pos_agent_time_list = []
        vel_agent_time_list = []
        for _ in range(n):
            # Sample a start position plus num_goals intermediate goals.
            goals = generate_path(num_goals + 1)
            pos = goals[0]
            goals = goals[1:-1]
            # The baseline ignores gd_steps, so its cost is replicated once
            # per budget to align with the velocity agent's result array.
            min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = \
                pos_agent.find_shortest_path(pos, goals, gd_steps=0, perm_search=perm_search)
            pos_agent_time_list.append([len(min_trajectory)] * len(gd_step_list))
            vel_test_time_list = []
            for gd_steps in gd_step_list:
                min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = \
                    vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, perm_search=perm_search)
                vel_test_time_list.append(len(min_trajectory))
            vel_agent_time_list.append(vel_test_time_list)
        pos_time_list.append(pos_agent_time_list)
        vel_time_list.append(vel_agent_time_list)
    vel_time_list = np.array(vel_time_list).squeeze()
    pos_time_list = np.array(pos_time_list).squeeze()
    # Per-trial relative change, then averaged over trials for each agent.
    relative_time_change = (vel_time_list - pos_time_list) / pos_time_list
    relative_time_change = np.mean(relative_time_change, axis=1)
    # Best-effort persistence of the raw numbers; a save failure should not
    # abort the experiment.  (Was a bare ``except:`` that dropped into pdb,
    # and the file handles were never closed — now context-managed.)
    try:
        with open("velocity_target.pkl", 'wb') as f:
            pickle.dump(vel_time_list, f)
        with open("no_velocity_target.pkl", 'wb') as f:
            pickle.dump(pos_time_list, f)
        with open("relative_time_change.pkl", 'wb') as f:
            pickle.dump(relative_time_change, f)
    except Exception as exc:
        print("pickle failure: {}".format(exc))
    # Mean and 95% confidence interval across the trained agents.
    mean = relative_time_change.mean(axis=0)
    t_score = stats.t.ppf(.975, num_agents)
    ci = t_score * relative_time_change.std(axis=0) / (num_agents ** .5)
    steps = np.array(gd_step_list)
    plt.plot(steps, mean)
    plt.fill_between(steps, mean + ci, mean - ci, alpha=.4)
    plt.xlabel("Gradient steps")
    plt.ylabel("Relative Improvement vs standard HER")
    plt.title("Relative Improvement")
    plt.savefig(os.path.join('results', "Relative Improvement" + '.png'))
    plt.close()
# def method_comparison(train_pos_agent, train_vel_agent):
# # method_list = ['random search', "gradient descent", "gradient descent (40 steps)", "random", "0 velocity target"]
# method_list = ['random search', "gradient descent", "random", "0 velocity target"]
# method_runtime_dict = {'greedy': []}
# for method in method_list:
# method_runtime_dict[method] = []
# num_agents = NUM_AGENTS
# num_goals=NUM_GOALS
# n=N
# pos_time_list = []
# vel_time_list = []
# for _ in range(num_agents):
# pos_agent = train_pos_agent()
# vel_agent = train_vel_agent()
# for method in method_runtime_dict.keys():
# method_runtime_dict[method].append([])
# for i in range(n):
# # goals = [np.random.rand(2)*2-1 for i in range(num_goals)]
# # pos = np.random.rand(2)*2-1
# goals = generate_path(num_goals + 1)
# pos = goals[0]
# goals = goals[1:-1]
# # pos_agent_time_list = []
# min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.select_path(pos, goals, method="0 velocity target")
# # pos_test_time_list = [len(min_trajectory)]*len(gd_step_list)
# method_runtime_dict['greedy'][-1].append(len(min_trajectory))
# # vel_test_time_list = []
# for method in method_list:
# min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.select_path(pos, goals, method=method)
# method_runtime_dict[method][-1].append(len(min_trajectory))
# # vel_agent_time_list.append(vel_test_time_list)
# greedy = method_runtime_dict['greedy']
# method_runtime_dict = {method: np.array(method_runtime_dict[method]) for method in method_runtime_dict.keys()}
# performance_dict = {method: (method_runtime_dict[method].mean(), 2*(method_runtime_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_runtime_dict.keys()}
# relative_time_dict = {method: (method_runtime_dict[method] - greedy)/greedy for method in method_list}
# improvement_dict = {method: (relative_time_dict[method].mean(), 2*(relative_time_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_list}
# performance_list = [performance_dict[m][0] for m in method_runtime_dict.keys()]
# performance_ci_list = [performance_dict[m][1] for m in method_runtime_dict.keys()]
# relative_time_list = [improvement_dict[m][0] for m in method_list]
# relative_time_ci_list = [improvement_dict[m][1] for m in method_list]
# plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))
# plt.xlabel("Method")
# plt.ylabel('Time to complete')
# plt.title('Comparison of velocity target-setting methods')
# plt.bar(range(len(performance_list)), performance_list, yerr=performance_ci_list)
# plt.savefig(os.path.join('results', "Method comparison -- Performance" + '.png'))
# plt.close()
# plt.xticks(range(len(method_list)), method_list)
# plt.xlabel("Method")
# plt.ylabel('Cost reduction over greedy baseline')
# plt.title('Comparison of velocity target-setting methods')
# plt.bar(range(len(relative_time_list)), relative_time_list, yerr=relative_time_ci_list)
# plt.savefig(os.path.join('results', "Method comparison -- Relative Improvement" + '.png'))
# plt.close()
def method_comparison(train_pos_agent, train_vel_agent):
    """Benchmark several velocity target-setting methods against a greedy baseline.

    For each of NUM_AGENTS freshly trained (position, velocity) agent pairs,
    runs N randomly generated goal sequences through every method, recording
    episode lengths (the string "NULL" marks a failed run).  Saves three bar
    charts under results/: per-method success rate, mean time-to-complete
    (with 95% t confidence intervals), and relative cost reduction versus
    the greedy baseline.

    train_pos_agent / train_vel_agent: zero-argument callables that each
    return a trained agent exposing select_path(pos, goals, method=...).
    """
    method_list = ['random search', "gradient descent", "gradient descent (40 steps)", "random", "0 velocity target"]
    # 'greedy' is the baseline produced by the position agent below.
    method_runtime_dict = {'greedy': []}
    for method in method_list:
        method_runtime_dict[method] = []
    num_agents = NUM_AGENTS
    num_goals=NUM_GOALS
    n=N
    pos_time_list = []
    vel_time_list = []
    failed_counter_dict = {'greedy': 0}
    for method in method_list:
        failed_counter_dict[method] = 0
    for _ in range(num_agents):
        pos_agent = train_pos_agent()
        vel_agent = train_vel_agent()
        # One fresh per-agent list per method.
        for method in method_runtime_dict.keys():
            method_runtime_dict[method].append([])
        for i in range(n):
            # First point of the generated path is the start position; the
            # last point is dropped, the remainder are the goals to visit.
            goals = generate_path(num_goals + 1)
            pos = goals[0]
            goals = goals[1:-1]
            min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.select_path(pos, goals, method="0 velocity target")
            if successful:
                method_runtime_dict['greedy'][-1].append(len(min_trajectory))
            else:
                # "NULL" sentinels are filtered out before averaging below.
                method_runtime_dict['greedy'][-1].append("NULL")
                failed_counter_dict['greedy'] += 1
            for method in method_list:
                min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.select_path(pos, goals, method=method)
                if successful:
                    method_runtime_dict[method][-1].append(len(min_trajectory))
                else:
                    method_runtime_dict[method][-1].append("NULL")
                    failed_counter_dict[method] += 1
    success_rates = {method: 1-failed_counter_dict[method]/(num_agents*n) for method in failed_counter_dict.keys()}
    greedy = method_runtime_dict['greedy']
    agent_performance_dict = {}
    mean_performance_dict = {}
    ci_performance_dict = {}
    improvement_dict = {}
    mean_improvement_dict = {}
    ci_improvement_dict = {}
    # Two-sided 95% critical value; CIs below are t * sqrt(SS) / n.
    t_score = stats.t.ppf(.975, num_agents)
    for method in method_runtime_dict.keys():
        # Per-agent mean episode length, computed over successful runs only.
        agent_performance_dict[method] = [[time for time in agent_list if time != "NULL"] for agent_list in method_runtime_dict[method]]
        agent_performance_dict[method] = [sum(agent_list)/len(agent_list) for agent_list in agent_performance_dict[method]]
        mean = sum(agent_performance_dict[method])/len(agent_performance_dict[method])
        mean_performance_dict[method] = mean
        ci_performance_dict[method] = t_score*sum([(v-mean)**2 for v in agent_performance_dict[method]])**.5/len(agent_performance_dict[method])
        improvement_list = []
        mean_list = []
        for agent_ind in range(num_agents):
            agent_list = method_runtime_dict[method][agent_ind]
            greedy_list = greedy[agent_ind]
            # Relative change vs. greedy, using only trials where BOTH the
            # method and the greedy baseline succeeded.
            improvement_list.append([(agent_list[i] - greedy_list[i])/greedy_list[i] for i in range(n) if (agent_list[i] != "NULL" and greedy_list[i]!= "NULL")])
            mean_list.append(sum(improvement_list[agent_ind])/len(improvement_list[agent_ind]))
        mean = sum(mean_list)/len(mean_list)
        mean_improvement_dict[method] = mean
        ci_improvement_dict[method] = t_score*sum([(v-mean)**2 for v in mean_list])**.5/len(mean_list)
    performance_list = [mean_performance_dict[m] for m in method_runtime_dict.keys()]
    performance_ci_list = [ci_performance_dict[m] for m in method_runtime_dict.keys()]
    relative_time_list = [mean_improvement_dict[m] for m in method_list]
    relative_time_ci_list = [ci_improvement_dict[m] for m in method_list]
    sr_list = [success_rates[m] for m in method_runtime_dict.keys()]
    # --- success-rate bar chart (all methods, including greedy) ---
    plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))
    plt.xlabel("Method")
    plt.ylabel('Success rate')
    plt.title('Comparison of velocity target-setting methods')
    plt.bar(range(len(sr_list)), sr_list)
    plt.savefig(os.path.join('results', "Method comparison -- Success Rate" + '.png'))
    plt.close()
    # --- mean time-to-complete bar chart (all methods, including greedy) ---
    plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))
    plt.xlabel("Method")
    plt.ylabel('Time to complete')
    plt.title('Comparison of velocity target-setting methods')
    plt.bar(range(len(performance_list)), performance_list, yerr=performance_ci_list)
    plt.savefig(os.path.join('results', "Method comparison -- Performance" + '.png'))
    plt.close()
    # --- relative improvement over greedy (velocity methods only) ---
    plt.xticks(range(len(method_list)), method_list)
    plt.xlabel("Method")
    plt.ylabel('Cost reduction over greedy baseline')
    plt.title('Comparison of velocity target-setting methods')
    plt.bar(range(len(relative_time_list)), relative_time_list, yerr=relative_time_ci_list)
    plt.savefig(os.path.join('results', "Method comparison -- Relative Improvement" + '.png'))
    plt.close()
def get_random_search_costs(train_vel_agent, perm_search=True):
    """Compare gradient-descent target refinement against random search.

    Trains NUM_AGENTS velocity agents, evaluates each on N randomly
    generated goal sequences for every step budget in gd_step_list, and
    writes two plots under results/: the mean relative improvement of
    gradient descent over random search, and both methods' absolute
    episode lengths with confidence bands.
    """
    agents = NUM_AGENTS
    trials = N
    random_times = []
    gd_times = []
    for _ in range(agents):
        agent = train_vel_agent()
        agent_random_times = []
        agent_gd_times = []
        for _ in range(trials):
            # First waypoint is the start; the final one is dropped.
            waypoints = generate_path(NUM_GOALS + 1)
            start = waypoints[0]
            goals = waypoints[1:-1]
            trial_random = []
            trial_gd = []
            for steps in gd_step_list:
                result = agent.find_shortest_path(start, goals, gd_steps=steps, random_start=False, perm_search=perm_search)
                print("GD: " + str(result[0]))
                trial_gd.append(len(result[1]))
                result = agent.find_shortest_path(start, goals, gd_steps=steps, random_search=True, perm_search=perm_search)
                print("random_search: " + str(result[0]))
                trial_random.append(len(result[1]))
            agent_random_times.append(trial_random)
            agent_gd_times.append(trial_gd)
        random_times.append(agent_random_times)
        gd_times.append(agent_gd_times)
    random_times = np.array(random_times).squeeze()
    gd_times = np.array(gd_times).squeeze()
    # Per-agent mean relative change of GD vs. random search, per step count.
    rel_change = np.mean((gd_times - random_times) / random_times, axis=1)
    mean = rel_change.mean(axis=0)
    ci = 2 * rel_change.std(axis=0) / (agents ** .5)
    steps_axis = np.array(gd_step_list)
    plt.plot(steps_axis, mean)
    plt.fill_between(steps_axis, mean + ci, mean - ci, alpha=.4)
    plt.xlabel("Gradient steps")
    plt.ylabel("Relative Improvement vs random search")
    plt.title("Relative Improvement vs random search")
    plt.savefig(os.path.join('results', "Improvement vs random search" + '.png'))
    plt.close()
    # Absolute episode lengths with 95% t confidence bands.
    t_score = stats.t.ppf(.975, agents)
    rand_by_agent = random_times.mean(axis=1)
    rand_mean = rand_by_agent.mean(axis=0)
    rand_ci = t_score * rand_by_agent.std(axis=0) / (agents ** .5)
    gd_by_agent = gd_times.mean(axis=1)
    gd_mean = gd_by_agent.mean(axis=0)
    gd_ci = t_score * gd_by_agent.std(axis=0) / (agents ** .5)
    plt.plot(steps_axis, rand_mean, color='red', label='Random Search')
    plt.fill_between(steps_axis, rand_mean + rand_ci, rand_mean - rand_ci, alpha=.4, color='red')
    plt.plot(steps_axis, gd_mean, color='blue', label='Gradient Descent')
    plt.fill_between(steps_axis, gd_mean + gd_ci, gd_mean - gd_ci, alpha=.4, color='blue')
    plt.legend()
    plt.xlabel("Gradient steps")
    plt.ylabel("Relative Improvement vs random search")
    plt.title("Relative Improvement vs random search")
    plt.savefig(os.path.join('results', "Gradient Descent vs random search" + '.png'))
    plt.close()
| 44.352041 | 195 | 0.673588 | import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
from scipy import stats
from HER_mod.rl_modules.tsp import generate_path
from HER_mod.rl_modules.hyperparams import NUM_GOALS, NUM_AGENTS
gd_step_list = [0,2,5, 10, 20, 40]
N=200
def get_path_costs(train_pos_agent, train_vel_agent, perm_search=True):
pos_run_time_list = []
vel_run_time_list = []
num_agents = NUM_AGENTS
num_goals=NUM_GOALS
n=N
pos_time_list = []
vel_time_list = []
for _ in range(num_agents):
pos_agent = train_pos_agent()
vel_agent = train_vel_agent()
pos_agent_time_list = []
vel_agent_time_list = []
for i in range(n):
goals = generate_path(num_goals + 1)
pos = goals[0]
goals = goals[1:-1]
min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.find_shortest_path(pos, goals, gd_steps=0, perm_search=perm_search)
pos_test_time_list = [len(min_trajectory)]*len(gd_step_list)
pos_agent_time_list.append(pos_test_time_list)
vel_test_time_list = []
for gd_steps in gd_step_list:
min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, perm_search=perm_search)
vel_test_time_list.append(len(min_trajectory))
vel_agent_time_list.append(vel_test_time_list)
pos_time_list.append(pos_agent_time_list)
vel_time_list.append(vel_agent_time_list)
vel_time_list = np.array(vel_time_list).squeeze()
pos_time_list = np.array(pos_time_list).squeeze()
relative_time_change = (vel_time_list-pos_time_list)/pos_time_list
relative_time_change = np.mean(relative_time_change, axis=1)
try:
pickle.dump(vel_time_list, open("velocity_target.pkl", 'wb'))
pickle.dump(pos_time_list, open("no_velocity_target.pkl", 'wb'))
pickle.dump(relative_time_change, open("relative_time_change.pkl", 'wb'))
except:
print("pickle failure")
import pdb
pdb.set_trace()
mean = relative_time_change.mean(axis=0)
t_score = stats.t.ppf(.975, num_agents)
ci = t_score*relative_time_change.std(axis=0)/(num_agents**.5)
steps = np.array(gd_step_list)
plt.plot(steps, mean)
plt.fill_between(steps, mean+ci, mean-ci, alpha=.4)
plt.xlabel("Gradient steps")
plt.ylabel("Relative Improvement vs standard HER")
plt.title("Relative Improvement")
plt.savefig(os.path.join('results', "Relative Improvement" + '.png'))
plt.close()
[]
num_agents = NUM_AGENTS
num_goals=NUM_GOALS
n=N
pos_time_list = []
vel_time_list = []
failed_counter_dict = {'greedy': 0}
for method in method_list:
failed_counter_dict[method] = 0
for _ in range(num_agents):
pos_agent = train_pos_agent()
vel_agent = train_vel_agent()
for method in method_runtime_dict.keys():
method_runtime_dict[method].append([])
for i in range(n):
goals = generate_path(num_goals + 1)
pos = goals[0]
goals = goals[1:-1]
min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.select_path(pos, goals, method="0 velocity target")
if successful:
method_runtime_dict['greedy'][-1].append(len(min_trajectory))
else:
method_runtime_dict['greedy'][-1].append("NULL")
failed_counter_dict['greedy'] += 1
for method in method_list:
min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.select_path(pos, goals, method=method)
if successful:
method_runtime_dict[method][-1].append(len(min_trajectory))
else:
method_runtime_dict[method][-1].append("NULL")
failed_counter_dict[method] += 1
success_rates = {method: 1-failed_counter_dict[method]/(num_agents*n) for method in failed_counter_dict.keys()}
greedy = method_runtime_dict['greedy']
agent_performance_dict = {}
mean_performance_dict = {}
ci_performance_dict = {}
improvement_dict = {}
mean_improvement_dict = {}
ci_improvement_dict = {}
t_score = stats.t.ppf(.975, num_agents)
for method in method_runtime_dict.keys():
agent_performance_dict[method] = [[time for time in agent_list if time != "NULL"] for agent_list in method_runtime_dict[method]]
agent_performance_dict[method] = [sum(agent_list)/len(agent_list) for agent_list in agent_performance_dict[method]]
mean = sum(agent_performance_dict[method])/len(agent_performance_dict[method])
mean_performance_dict[method] = mean
ci_performance_dict[method] = t_score*sum([(v-mean)**2 for v in agent_performance_dict[method]])**.5/len(agent_performance_dict[method])
improvement_list = []
mean_list = []
for agent_ind in range(num_agents):
agent_list = method_runtime_dict[method][agent_ind]
greedy_list = greedy[agent_ind]
improvement_list.append([(agent_list[i] - greedy_list[i])/greedy_list[i] for i in range(n) if (agent_list[i] != "NULL" and greedy_list[i]!= "NULL")])
mean_list.append(sum(improvement_list[agent_ind])/len(improvement_list[agent_ind]))
mean = sum(mean_list)/len(mean_list)
mean_improvement_dict[method] = mean
ci_improvement_dict[method] = t_score*sum([(v-mean)**2 for v in mean_list])**.5/len(mean_list)
performance_list = [mean_performance_dict[m] for m in method_runtime_dict.keys()]
performance_ci_list = [ci_performance_dict[m] for m in method_runtime_dict.keys()]
relative_time_list = [mean_improvement_dict[m] for m in method_list]
relative_time_ci_list = [ci_improvement_dict[m] for m in method_list]
sr_list = [success_rates[m] for m in method_runtime_dict.keys()]
plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))
plt.xlabel("Method")
plt.ylabel('Success rate')
plt.title('Comparison of velocity target-setting methods')
plt.bar(range(len(sr_list)), sr_list)
plt.savefig(os.path.join('results', "Method comparison -- Success Rate" + '.png'))
plt.close()
plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))
plt.xlabel("Method")
plt.ylabel('Time to complete')
plt.title('Comparison of velocity target-setting methods')
plt.bar(range(len(performance_list)), performance_list, yerr=performance_ci_list)
plt.savefig(os.path.join('results', "Method comparison -- Performance" + '.png'))
plt.close()
plt.xticks(range(len(method_list)), method_list)
plt.xlabel("Method")
plt.ylabel('Cost reduction over greedy baseline')
plt.title('Comparison of velocity target-setting methods')
plt.bar(range(len(relative_time_list)), relative_time_list, yerr=relative_time_ci_list)
plt.savefig(os.path.join('results', "Method comparison -- Relative Improvement" + '.png'))
plt.close()
def get_random_search_costs(train_vel_agent, perm_search=True):
pos_run_time_list = []
vel_run_time_list = []
num_agents = NUM_AGENTS
num_goals=NUM_GOALS
n=N
rand_time_list = []
gd_time_list = []
for _ in range(num_agents):
vel_agent = train_vel_agent()
rand_search_time_list = []
gd_search_time_list = []
for i in range(n):
goals = generate_path(num_goals + 1)
pos = goals[0]
goals = goals[1:-1]
rand_test_time_list = []
gd_test_time_list = []
for gd_steps in gd_step_list:
min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, random_start=False, perm_search=perm_search)
print("GD: " + str(min_time))
gd_test_time_list.append(len(min_trajectory))
min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, random_search=True, perm_search=perm_search)
print("random_search: " + str(min_time))
rand_test_time_list.append(len(min_trajectory))
rand_search_time_list.append(rand_test_time_list)
gd_search_time_list.append(gd_test_time_list)
rand_time_list.append(rand_search_time_list)
gd_time_list.append(gd_search_time_list)
rand_time_list = np.array(rand_time_list).squeeze()
gd_time_list = np.array(gd_time_list).squeeze()
relative_time_change = (gd_time_list-rand_time_list)/rand_time_list
relative_time_change = np.mean(relative_time_change, axis=1)
mean = relative_time_change.mean(axis=0)
ci = 2*relative_time_change.std(axis=0)/(num_agents**.5)
steps = np.array(gd_step_list)
plt.plot(steps, mean)
plt.fill_between(steps, mean+ci, mean-ci, alpha=.4)
plt.xlabel("Gradient steps")
plt.ylabel("Relative Improvement vs random search")
plt.title("Relative Improvement vs random search")
plt.savefig(os.path.join('results', "Improvement vs random search" + '.png'))
plt.close()
t_score = stats.t.ppf(.975, num_agents)
rands = rand_time_list.mean(axis=1)
rand_mean = rands.mean(axis=0)
rand_ci = t_score*rands.std(axis=0)/(num_agents**.5)
gds = gd_time_list.mean(axis=1)
gd_mean = gds.mean(axis=0)
gd_ci = t_score*gds.std(axis=0)/(num_agents**.5)
plt.plot(steps, rand_mean, color='red', label='Random Search')
plt.fill_between(steps, rand_mean+rand_ci, rand_mean-rand_ci, alpha=.4, color='red')
plt.plot(steps, gd_mean, color='blue', label='Gradient Descent')
plt.fill_between(steps, gd_mean+gd_ci, gd_mean-gd_ci, alpha=.4, color='blue')
plt.legend()
plt.xlabel("Gradient steps")
plt.ylabel("Relative Improvement vs random search")
plt.title("Relative Improvement vs random search")
plt.savefig(os.path.join('results', "Gradient Descent vs random search" + '.png'))
plt.close()
| true | true |
f71bfd9867454fe2f4dc8062137be298a7bcf44a | 40,230 | py | Python | python/ccxt/async_support/coinbasepro.py | dgdiginex/ccxt | cccd590576cbf48d26cf9e3f65cc54fdd466a139 | [
"MIT"
] | 1 | 2021-02-08T21:56:13.000Z | 2021-02-08T21:56:13.000Z | python/ccxt/async_support/coinbasepro.py | yucelalbar/ccxt | 672510401fba809172fac8272e1af463c778358a | [
"MIT"
] | null | null | null | python/ccxt/async_support/coinbasepro.py | yucelalbar/ccxt | 672510401fba809172fac8272e1af463c778358a | [
"MIT"
] | 2 | 2020-10-13T03:24:08.000Z | 2020-10-15T06:25:07.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import TICK_SIZE
class coinbasepro(Exchange):
    def describe(self):
        """Return the static exchange descriptor merged over the base-class defaults.

        Declares capabilities ('has'), OHLCV timeframes (granularity in
        seconds), REST endpoints, required credentials, fee schedule, and
        the error-message -> exception mapping used by handle_errors.
        """
        return self.deep_extend(super(coinbasepro, self).describe(), {
            'id': 'coinbasepro',
            'name': 'Coinbase Pro',
            'countries': ['US'],
            'rateLimit': 1000,
            'userAgent': self.userAgents['chrome'],
            'pro': True,
            'has': {
                'cancelAllOrders': True,
                'cancelOrder': True,
                'CORS': True,
                'createDepositAddress': True,
                'createOrder': True,
                'deposit': True,
                'fetchAccounts': True,
                'fetchBalance': True,
                'fetchCurrencies': True,
                'fetchClosedOrders': True,
                'fetchDepositAddress': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrders': True,
                'fetchOrderTrades': True,
                'fetchTime': True,
                'fetchTicker': True,
                'fetchTrades': True,
                'fetchTransactions': True,
                'withdraw': True,
            },
            # Candle granularity in seconds, as expected by the API.
            'timeframes': {
                '1m': 60,
                '5m': 300,
                '15m': 900,
                '1h': 3600,
                '6h': 21600,
                '1d': 86400,
            },
            'urls': {
                'test': {
                    'public': 'https://api-public.sandbox.pro.coinbase.com',
                    'private': 'https://api-public.sandbox.pro.coinbase.com',
                },
                'logo': 'https://user-images.githubusercontent.com/1294454/41764625-63b7ffde-760a-11e8-996d-a6328fa9347a.jpg',
                'api': {
                    'public': 'https://api.pro.coinbase.com',
                    'private': 'https://api.pro.coinbase.com',
                },
                'www': 'https://pro.coinbase.com/',
                'doc': 'https://docs.pro.coinbase.com',
                'fees': [
                    'https://docs.pro.coinbase.com/#fees',
                    'https://support.pro.coinbase.com/customer/en/portal/articles/2945310-fees',
                ],
            },
            # All three credentials are needed to sign private requests.
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'password': True,
            },
            'api': {
                'public': {
                    'get': [
                        'currencies',
                        'products',
                        'products/{id}',
                        'products/{id}/book',
                        'products/{id}/candles',
                        'products/{id}/stats',
                        'products/{id}/ticker',
                        'products/{id}/trades',
                        'time',
                    ],
                },
                'private': {
                    'get': [
                        'accounts',
                        'accounts/{id}',
                        'accounts/{id}/holds',
                        'accounts/{id}/ledger',
                        'accounts/{id}/transfers',
                        'coinbase-accounts',
                        'coinbase-accounts/{id}/addresses',
                        'fills',
                        'funding',
                        'fees',
                        'margin/profile_information',
                        'margin/buying_power',
                        'margin/withdrawal_power',
                        'margin/withdrawal_power_all',
                        'margin/exit_plan',
                        'margin/liquidation_history',
                        'margin/position_refresh_amounts',
                        'margin/status',
                        'oracle',
                        'orders',
                        'orders/{id}',
                        'orders/client:{client_oid}',
                        'otc/orders',
                        'payment-methods',
                        'position',
                        'profiles',
                        'profiles/{id}',
                        'reports/{report_id}',
                        'transfers',
                        'transfers/{transfer_id}',
                        'users/self/trailing-volume',
                        'users/self/exchange-limits',
                        'withdrawals/fee-estimate',
                    ],
                    'post': [
                        'conversions',
                        'deposits/coinbase-account',
                        'deposits/payment-method',
                        'coinbase-accounts/{id}/addresses',
                        'funding/repay',
                        'orders',
                        'position/close',
                        'profiles/margin-transfer',
                        'profiles/transfer',
                        'reports',
                        'withdrawals/coinbase',
                        'withdrawals/coinbase-account',
                        'withdrawals/crypto',
                        'withdrawals/payment-method',
                    ],
                    'delete': [
                        'orders',
                        'orders/client:{client_oid}',
                        'orders/{id}',
                    ],
                },
            },
            # Precision values are tick sizes (e.g. 0.000001), not digit counts.
            'precisionMode': TICK_SIZE,
            'fees': {
                'trading': {
                    'tierBased': True,  # complicated tier system per coin
                    'percentage': True,
                    'maker': 0.5 / 100,  # highest fee of all tiers
                    'taker': 0.5 / 100,  # highest fee of all tiers
                },
                'funding': {
                    'tierBased': False,
                    'percentage': False,
                    'withdraw': {
                        'BCH': 0,
                        'BTC': 0,
                        'LTC': 0,
                        'ETH': 0,
                        'EUR': 0.15,
                        'USD': 25,
                    },
                    'deposit': {
                        'BCH': 0,
                        'BTC': 0,
                        'LTC': 0,
                        'ETH': 0,
                        'EUR': 0.15,
                        'USD': 10,
                    },
                },
            },
            # 'exact' keys are matched verbatim, 'broad' keys as substrings.
            'exceptions': {
                'exact': {
                    'Insufficient funds': InsufficientFunds,
                    'NotFound': OrderNotFound,
                    'Invalid API Key': AuthenticationError,
                    'invalid signature': AuthenticationError,
                    'Invalid Passphrase': AuthenticationError,
                    'Invalid order id': InvalidOrder,
                    'Private rate limit exceeded': RateLimitExceeded,
                    'Trading pair not available': PermissionDenied,
                    'Product not found': InvalidOrder,
                },
                'broad': {
                    'Order already done': OrderNotFound,
                    'order not found': OrderNotFound,
                    'price too small': InvalidOrder,
                    'price too precise': InvalidOrder,
                    'under maintenance': OnMaintenance,
                    'size is too small': InvalidOrder,
                    'Cancel only mode': OnMaintenance,  # https://github.com/ccxt/ccxt/issues/7690
                },
            },
        })
async def fetch_currencies(self, params={}):
response = await self.publicGetCurrencies(params)
#
# [
# {
# id: 'XTZ',
# name: 'Tezos',
# min_size: '0.000001',
# status: 'online',
# message: '',
# max_precision: '0.000001',
# convertible_to: [],
# details: {
# type: 'crypto',
# symbol: 'Τ',
# network_confirmations: 60,
# sort_order: 53,
# crypto_address_link: 'https://tzstats.com/{{address}}',
# crypto_transaction_link: 'https://tzstats.com/{{txId}}',
# push_payment_methods: ['crypto'],
# group_types: [],
# display_name: '',
# processing_time_seconds: 0,
# min_withdrawal_amount: 1
# }
# }
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'id')
name = self.safe_string(currency, 'name')
code = self.safe_currency_code(id)
details = self.safe_value(currency, 'details', {})
precision = self.safe_float(currency, 'max_precision')
status = self.safe_string(currency, 'status')
active = (status == 'online')
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': self.safe_string(details, 'type'),
'name': name,
'active': active,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(details, 'min_size'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_float(details, 'min_withdrawal_amount'),
'max': None,
},
},
}
return result
async def fetch_markets(self, params={}):
response = await self.publicGetProducts(params)
#
# [
# {
# "id":"ZEC-BTC",
# "base_currency":"ZEC",
# "quote_currency":"BTC",
# "base_min_size":"0.01000000",
# "base_max_size":"1500.00000000",
# "quote_increment":"0.00000100",
# "base_increment":"0.00010000",
# "display_name":"ZEC/BTC",
# "min_market_funds":"0.001",
# "max_market_funds":"30",
# "margin_enabled":false,
# "post_only":false,
# "limit_only":false,
# "cancel_only":false,
# "trading_disabled":false,
# "status":"online",
# "status_message":""
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
priceLimits = {
'min': self.safe_float(market, 'quote_increment'),
'max': None,
}
precision = {
'amount': self.safe_float(market, 'base_increment'),
'price': self.safe_float(market, 'quote_increment'),
}
status = self.safe_string(market, 'status')
active = (status == 'online')
result.append(self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'baseId': baseId,
'quoteId': quoteId,
'base': base,
'quote': quote,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(market, 'base_min_size'),
'max': self.safe_float(market, 'base_max_size'),
},
'price': priceLimits,
'cost': {
'min': self.safe_float(market, 'min_market_funds'),
'max': self.safe_float(market, 'max_market_funds'),
},
},
'active': active,
'info': market,
}))
return result
async def fetch_accounts(self, params={}):
response = await self.privateGetAccounts(params)
#
# [
# {
# id: '4aac9c60-cbda-4396-9da4-4aa71e95fba0',
# currency: 'BTC',
# balance: '0.0000000000000000',
# available: '0',
# hold: '0.0000000000000000',
# profile_id: 'b709263e-f42a-4c7d-949a-a95c83d065da'
# },
# {
# id: 'f75fa69a-1ad1-4a80-bd61-ee7faa6135a3',
# currency: 'USDC',
# balance: '0.0000000000000000',
# available: '0',
# hold: '0.0000000000000000',
# profile_id: 'b709263e-f42a-4c7d-949a-a95c83d065da'
# },
# ]
#
result = []
for i in range(0, len(response)):
account = response[i]
accountId = self.safe_string(account, 'id')
currencyId = self.safe_string(account, 'currency')
code = self.safe_currency_code(currencyId)
result.append({
'id': accountId,
'type': None,
'currency': code,
'info': account,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetAccounts(params)
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = {
'free': self.safe_float(balance, 'available'),
'used': self.safe_float(balance, 'hold'),
'total': self.safe_float(balance, 'balance'),
}
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
# level 1 - only the best bid and ask
# level 2 - top 50 bids and asks(aggregated)
# level 3 - full order book(non aggregated)
request = {
'id': self.market_id(symbol),
'level': 2, # 1 best bidask, 2 aggregated, 3 full
}
response = await self.publicGetProductsIdBook(self.extend(request, params))
#
# {
# "sequence":1924393896,
# "bids":[
# ["0.01825","24.34811287",2],
# ["0.01824","72.5463",3],
# ["0.01823","424.54298049",6],
# ],
# "asks":[
# ["0.01826","171.10414904",4],
# ["0.01827","22.60427028",1],
# ["0.01828","397.46018784",7],
# ]
# }
#
orderbook = self.parse_order_book(response)
orderbook['nonce'] = self.safe_integer(response, 'sequence')
return orderbook
def parse_ticker(self, ticker, market=None):
#
# publicGetProductsIdTicker
#
# {
# "trade_id":843439,
# "price":"0.997999",
# "size":"80.29769",
# "time":"2020-01-28T02:13:33.012523Z",
# "bid":"0.997094",
# "ask":"0.998",
# "volume":"1903188.03750000"
# }
#
# publicGetProductsIdStats
#
# {
# "open": "34.19000000",
# "high": "95.70000000",
# "low": "7.06000000",
# "volume": "2.41000000"
# }
#
timestamp = self.parse8601(self.safe_value(ticker, 'time'))
bid = self.safe_float(ticker, 'bid')
ask = self.safe_float(ticker, 'ask')
last = self.safe_float(ticker, 'price')
symbol = None if (market is None) else market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': bid,
'bidVolume': None,
'ask': ask,
'askVolume': None,
'vwap': None,
'open': self.safe_float(ticker, 'open'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
# publicGetProductsIdTicker or publicGetProductsIdStats
method = self.safe_string(self.options, 'fetchTickerMethod', 'publicGetProductsIdTicker')
response = await getattr(self, method)(self.extend(request, params))
#
# publicGetProductsIdTicker
#
# {
# "trade_id":843439,
# "price":"0.997999",
# "size":"80.29769",
# "time":"2020-01-28T02:13:33.012523Z",
# "bid":"0.997094",
# "ask":"0.998",
# "volume":"1903188.03750000"
# }
#
# publicGetProductsIdStats
#
# {
# "open": "34.19000000",
# "high": "95.70000000",
# "low": "7.06000000",
# "volume": "2.41000000"
# }
#
return self.parse_ticker(response, market)
    def parse_trade(self, trade, market=None):
        """Convert a raw trade or fill into a unified trade structure.

        Handles both public trades (products/{id}/trades) and the user's own
        fills (/fills); only fills carry 'order_id' and 'liquidity' fields.
        """
        timestamp = self.parse8601(self.safe_string_2(trade, 'time', 'created_at'))
        marketId = self.safe_string(trade, 'product_id')
        symbol = self.safe_symbol(marketId, market, '-')
        feeRate = None
        feeCurrency = None
        takerOrMaker = None
        if market is not None:
            # Fee is denominated in the quote currency; the rate comes from
            # the market's maker/taker schedule when liquidity info exists.
            feeCurrency = market['quote']
            if 'liquidity' in trade:
                takerOrMaker = 'taker' if (trade['liquidity'] == 'T') else 'maker'
                feeRate = market[takerOrMaker]
        feeCost = self.safe_float_2(trade, 'fill_fees', 'fee')
        fee = {
            'cost': feeCost,
            'currency': feeCurrency,
            'rate': feeRate,
        }
        type = None
        id = self.safe_string(trade, 'trade_id')
        side = 'sell' if (trade['side'] == 'buy') else 'buy'
        orderId = self.safe_string(trade, 'order_id')
        # Coinbase Pro returns inverted side to fetchMyTrades vs fetchTrades:
        # trades without an order_id (public endpoint) keep the flipped side
        # computed above; fills with an order_id use the reported side as-is.
        if orderId is not None:
            side = 'buy' if (trade['side'] == 'buy') else 'sell'
        price = self.safe_float(trade, 'price')
        amount = self.safe_float(trade, 'size')
        return {
            'id': id,
            'order': orderId,
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'type': type,
            'takerOrMaker': takerOrMaker,
            'side': side,
            'price': price,
            'amount': amount,
            'fee': fee,
            'cost': price * amount,
        }
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
# as of 2018-08-23
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['id'],
}
if limit is not None:
request['limit'] = limit
response = await self.privateGetFills(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'], # fixes issue #2
}
response = await self.publicGetProductsIdTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1591514160,
# 0.02507,
# 0.02507,
# 0.02507,
# 0.02507,
# 0.02816506
# ]
#
return [
self.safe_timestamp(ohlcv, 0),
self.safe_float(ohlcv, 3),
self.safe_float(ohlcv, 2),
self.safe_float(ohlcv, 1),
self.safe_float(ohlcv, 4),
self.safe_float(ohlcv, 5),
]
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles for a market.

        :param str timeframe: one of self.timeframes(values are granularity in seconds)
        :param int since: start timestamp in milliseconds; when given, an 'end'
            is derived from `limit` so the exchange returns exactly that window
        :param int limit: number of candles(capped at 300 by the exchange)
        """
        await self.load_markets()
        market = self.market(symbol)
        granularity = self.timeframes[timeframe]
        request = {
            'id': market['id'],
            'granularity': granularity,
        }
        if since is not None:
            request['start'] = self.iso8601(since)
            if limit is None:
                # the exchange serves at most 300 candles per request
                # https://docs.pro.coinbase.com/#get-historic-rates
                limit = 300  # max = 300
            # end = since + (limit - 1) candles; granularity is in seconds,
            # timestamps in milliseconds, hence the * 1000
            request['end'] = self.iso8601(self.sum((limit - 1) * granularity * 1000, since))
        response = await self.publicGetProductsIdCandles(self.extend(request, params))
        # response example:
        #
        #     [
        #         [1591514160,0.02507,0.02507,0.02507,0.02507,0.02816506],
        #         [1591514100,0.02507,0.02507,0.02507,0.02507,1.63830323],
        #         [1591514040,0.02505,0.02507,0.02505,0.02507,0.19918178]
        #     ]
        #
        return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_time(self, params={}):
response = await self.publicGetTime(params)
#
# {
# "iso":"2020-05-12T08:00:51.504Z",
# "epoch":1589270451.504
# }
#
return self.safe_timestamp(response, 'epoch')
def parse_order_status(self, status):
statuses = {
'pending': 'open',
'active': 'open',
'open': 'open',
'done': 'closed',
'canceled': 'canceled',
'canceling': 'open',
}
return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """Convert a raw Coinbase Pro order dict into a unified ccxt order structure.

        :param dict order: raw order payload from the orders endpoints
        :param dict market: unified market structure, if already known
        :returns dict: unified order structure
        """
        timestamp = self.parse8601(self.safe_string(order, 'created_at'))
        marketId = self.safe_string(order, 'product_id')
        market = self.safe_market(marketId, market, '-')
        status = self.parse_order_status(self.safe_string(order, 'status'))
        price = self.safe_float(order, 'price')
        filled = self.safe_float(order, 'filled_size')
        # market orders may omit 'size'; fall back to the filled amount
        amount = self.safe_float(order, 'size', filled)
        remaining = None
        if amount is not None:
            if filled is not None:
                remaining = amount - filled
        cost = self.safe_float(order, 'executed_value')
        feeCost = self.safe_float(order, 'fill_fees')
        fee = None
        if feeCost is not None:
            feeCurrencyCode = None
            if market is not None:
                # fees are charged in the quote currency
                feeCurrencyCode = market['quote']
            fee = {
                'cost': feeCost,
                'currency': feeCurrencyCode,
                'rate': None,
            }
        id = self.safe_string(order, 'id')
        type = self.safe_string(order, 'type')
        side = self.safe_string(order, 'side')
        return {
            'id': id,
            'clientOrderId': None,
            'info': order,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': market['symbol'],
            'type': type,
            'side': side,
            'price': price,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'fee': fee,
            'average': None,
            'trades': None,
        }
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privateGetOrdersId(self.extend(request, params))
return self.parse_order(response)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'order_id': id,
}
response = await self.privateGetFills(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'status': 'all',
}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
response = await self.privateGetOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
response = await self.privateGetOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'status': 'done',
}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
response = await self.privateGetOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
# oid = str(self.nonce())
request = {
'product_id': self.market_id(symbol),
'side': side,
'size': self.amount_to_precision(symbol, amount),
'type': type,
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
response = await self.privatePostOrders(self.extend(request, params))
return self.parse_order(response)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.privateDeleteOrdersId({'id': id})
    async def cancel_all_orders(self, symbol=None, params={}):
        # Cancels every open order on the account.  The `symbol` argument is
        # accepted for interface compatibility but not used here -- pass
        # params={'product_id': ...} to restrict cancellation to one market.
        return await self.privateDeleteOrders(params)
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = amount * price
currency = market['quote']
return {
'type': takerOrMaker,
'currency': currency,
'rate': rate,
'cost': float(self.currency_to_precision(currency, rate * cost)),
}
    async def fetch_payment_methods(self, params={}):
        # Raw pass-through to GET /payment-methods(linked bank accounts etc.);
        # the response is returned unparsed.
        return await self.privateGetPaymentMethods(params)
    async def deposit(self, code, amount, address, params={}):
        """Deposit funds into the Coinbase Pro account.

        Routing is chosen from `params`:
          - 'payment_method_id' -> deposit from a payment method(bank account)
          - 'coinbase_account_id' -> transfer in from a Coinbase account

        The `address` argument is accepted for interface compatibility but is
        not used -- deposits here always come from a linked account.

        :raises NotSupported: if neither routing param is supplied
        :raises ExchangeError: on an empty response
        """
        await self.load_markets()
        currency = self.currency(code)
        request = {
            'currency': currency['id'],
            'amount': amount,
        }
        method = 'privatePostDeposits'
        if 'payment_method_id' in params:
            # deposit from a payment_method, like a bank account
            method += 'PaymentMethod'
        elif 'coinbase_account_id' in params:
            # deposit into the Coinbase Pro account from a Coinbase account
            method += 'CoinbaseAccount'
        else:
            # otherwise we did not receive a supported deposit location
            # relevant docs link for the Googlers:
            # https://docs.pro.coinbase.com/#deposits
            raise NotSupported(self.id + ' deposit() requires one of `coinbase_account_id` or `payment_method_id` extra params')
        response = await getattr(self, method)(self.extend(request, params))
        if not response:
            raise ExchangeError(self.id + ' deposit() error: ' + self.json(response))
        return {
            'info': response,
            'id': response['id'],
        }
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': amount,
}
method = 'privatePostWithdrawals'
if 'payment_method_id' in params:
method += 'PaymentMethod'
elif 'coinbase_account_id' in params:
method += 'CoinbaseAccount'
else:
method += 'Crypto'
request['crypto_address'] = address
response = await getattr(self, method)(self.extend(request, params))
if not response:
raise ExchangeError(self.id + ' withdraw() error: ' + self.json(response))
return {
'info': response,
'id': response['id'],
}
    async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
        """Fetch deposit/withdrawal transfers for one account.

        The transfers endpoint is account-scoped, so either an explicit
        account id must be supplied in params['id'] or a currency `code` from
        which the account id can be resolved.

        :raises ArgumentsRequired: if neither code nor params['id'] is given
        :raises ExchangeError: if no account exists for the currency
        """
        await self.load_markets()
        await self.load_accounts()
        currency = None
        id = self.safe_string(params, 'id')  # account id
        if id is None:
            if code is None:
                raise ArgumentsRequired(self.id + ' fetchTransactions() requires a currency code argument if no account id specified in params')
            currency = self.currency(code)
            accountsByCurrencyCode = self.index_by(self.accounts, 'currency')
            account = self.safe_value(accountsByCurrencyCode, code)
            if account is None:
                raise ExchangeError(self.id + ' fetchTransactions() could not find account id for ' + code)
            id = account['id']
        request = {
            'id': id,
        }
        if limit is not None:
            request['limit'] = limit
        response = await self.privateGetAccountsIdTransfers(self.extend(request, params))
        # the raw transfers do not carry a currency field; stamp it on so
        # parse_transaction can resolve the currency code
        for i in range(0, len(response)):
            response[i]['currency'] = code
        return self.parse_transactions(response, currency, since, limit)
def parse_transaction_status(self, transaction):
canceled = self.safe_value(transaction, 'canceled_at')
if canceled:
return 'canceled'
processed = self.safe_value(transaction, 'processed_at')
completed = self.safe_value(transaction, 'completed_at')
if completed:
return 'ok'
elif processed and not completed:
return 'failed'
else:
return 'pending'
    def parse_transaction(self, transaction, currency=None):
        """Convert a raw account transfer into a unified ccxt transaction.

        :param dict transaction: raw transfer from GET /accounts/{id}/transfers
        :param dict currency: unified currency structure, if known
        :returns dict: unified transaction structure
        """
        details = self.safe_value(transaction, 'details', {})
        id = self.safe_string(transaction, 'id')
        txid = self.safe_string(details, 'crypto_transaction_hash')
        timestamp = self.parse8601(self.safe_string(transaction, 'created_at'))
        updated = self.parse8601(self.safe_string(transaction, 'processed_at'))
        currencyId = self.safe_string(transaction, 'currency')
        code = self.safe_currency_code(currencyId, currency)
        # the transfers endpoint does not report a fee
        fee = None
        status = self.parse_transaction_status(transaction)
        amount = self.safe_float(transaction, 'amount')
        type = self.safe_string(transaction, 'type')
        address = self.safe_string(details, 'crypto_address')
        tag = self.safe_string(details, 'destination_tag')
        # a top-level crypto_address, when present, takes precedence
        address = self.safe_string(transaction, 'crypto_address', address)
        if type == 'withdraw':
            # normalize the exchange's 'withdraw' to ccxt's 'withdrawal'
            type = 'withdrawal'
            address = self.safe_string(details, 'sent_to_address', address)
        return {
            'info': transaction,
            'id': id,
            'txid': txid,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'address': address,
            'tag': tag,
            'type': type,
            'amount': amount,
            'currency': code,
            'status': status,
            'updated': updated,
            'fee': fee,
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed request for a public or private API call.

        Private requests are authenticated per the Coinbase Pro scheme:
        HMAC-SHA256 over `timestamp + method + request_path + body`, keyed
        with the base64-decoded API secret, and sent base64-encoded in the
        CB-ACCESS-SIGN header.

        :returns dict: {'url', 'method', 'body', 'headers'}
        """
        request = '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if method == 'GET':
            if query:
                request += '?' + self.urlencode(query)
        url = self.urls['api'][api] + request
        if api == 'private':
            self.check_required_credentials()
            nonce = str(self.nonce())
            payload = ''
            if method != 'GET':
                if query:
                    body = self.json(query)
                    payload = body
            # the signature covers the exact bytes sent on the wire
            what = nonce + method + request + payload
            # the API secret is base64-encoded; decode before keying the HMAC
            secret = self.base64_to_binary(self.secret)
            signature = self.hmac(self.encode(what), secret, hashlib.sha256, 'base64')
            headers = {
                'CB-ACCESS-KEY': self.apiKey,
                'CB-ACCESS-SIGN': signature,
                'CB-ACCESS-TIMESTAMP': nonce,
                'CB-ACCESS-PASSPHRASE': self.password,
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    async def fetch_deposit_address(self, code, params={}):
        """Fetch a deposit address for `code` via the linked Coinbase account.

        The list of Coinbase accounts is fetched once and cached in
        self.options['coinbaseAccounts'] / ['coinbaseAccountsByCurrencyId'].

        :raises InvalidAddress: if no Coinbase account exists for the currency
        """
        await self.load_markets()
        currency = self.currency(code)
        accounts = self.safe_value(self.options, 'coinbaseAccounts')
        if accounts is None:
            accounts = await self.privateGetCoinbaseAccounts()
            self.options['coinbaseAccounts'] = accounts  # cache it
            self.options['coinbaseAccountsByCurrencyId'] = self.index_by(accounts, 'currency')
        currencyId = currency['id']
        account = self.safe_value(self.options['coinbaseAccountsByCurrencyId'], currencyId)
        if account is None:
            # eslint-disable-next-line quotes
            raise InvalidAddress(self.id + " fetchDepositAddress() could not find currency code " + code + " with id = " + currencyId + " in self.options['coinbaseAccountsByCurrencyId']")
        request = {
            'id': account['id'],
        }
        response = await self.privateGetCoinbaseAccountsIdAddresses(self.extend(request, params))
        address = self.safe_string(response, 'address')
        tag = self.safe_string(response, 'destination_tag')
        return {
            'currency': code,
            'address': self.check_address(address),
            'tag': tag,
            'info': response,
        }
async def create_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
accounts = self.safe_value(self.options, 'coinbaseAccounts')
if accounts is None:
accounts = await self.privateGetCoinbaseAccounts()
self.options['coinbaseAccounts'] = accounts # cache it
self.options['coinbaseAccountsByCurrencyId'] = self.index_by(accounts, 'currency')
currencyId = currency['id']
account = self.safe_value(self.options['coinbaseAccountsByCurrencyId'], currencyId)
if account is None:
# eslint-disable-next-line quotes
raise InvalidAddress(self.id + " fetchDepositAddress() could not find currency code " + code + " with id = " + currencyId + " in self.options['coinbaseAccountsByCurrencyId']")
request = {
'id': account['id'],
}
response = await self.privatePostCoinbaseAccountsIdAddresses(self.extend(request, params))
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'destination_tag')
return {
'currency': code,
'address': self.check_address(address),
'tag': tag,
'info': response,
}
    def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Map HTTP error responses onto typed ccxt exceptions.

        Coinbase Pro returns structured JSON errors with a 'message' field on
        400/404; other status codes are left to the generic base handling.
        """
        if (code == 400) or (code == 404):
            # only attempt JSON-based matching when the body looks like JSON
            if body[0] == '{':
                message = self.safe_string(response, 'message')
                feedback = self.id + ' ' + message
                self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
                self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
                raise ExchangeError(feedback)  # unknown message
            raise ExchangeError(self.id + ' ' + body)
    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Perform a request and promote soft errors to exceptions.

        Some error responses come back with a 2xx status but a 'message'
        field in the JSON body; those are raised as ExchangeError here.
        """
        response = await self.fetch2(path, api, method, params, headers, body)
        if not isinstance(response, basestring):
            if 'message' in response:
                raise ExchangeError(self.id + ' ' + self.json(response))
        return response
| 39.402547 | 187 | 0.492667 |
rt.base.exchange import Exchange
try:
basestring
except NameError:
basestring = str
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import TICK_SIZE
class coinbasepro(Exchange):
def describe(self):
return self.deep_extend(super(coinbasepro, self).describe(), {
'id': 'coinbasepro',
'name': 'Coinbase Pro',
'countries': ['US'],
'rateLimit': 1000,
'userAgent': self.userAgents['chrome'],
'pro': True,
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': True,
'createDepositAddress': True,
'createOrder': True,
'deposit': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchCurrencies': True,
'fetchClosedOrders': True,
'fetchDepositAddress': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchTime': True,
'fetchTicker': True,
'fetchTrades': True,
'fetchTransactions': True,
'withdraw': True,
},
'timeframes': {
'1m': 60,
'5m': 300,
'15m': 900,
'1h': 3600,
'6h': 21600,
'1d': 86400,
},
'urls': {
'test': {
'public': 'https://api-public.sandbox.pro.coinbase.com',
'private': 'https://api-public.sandbox.pro.coinbase.com',
},
'logo': 'https://user-images.githubusercontent.com/1294454/41764625-63b7ffde-760a-11e8-996d-a6328fa9347a.jpg',
'api': {
'public': 'https://api.pro.coinbase.com',
'private': 'https://api.pro.coinbase.com',
},
'www': 'https://pro.coinbase.com/',
'doc': 'https://docs.pro.coinbase.com',
'fees': [
'https://docs.pro.coinbase.com/#fees',
'https://support.pro.coinbase.com/customer/en/portal/articles/2945310-fees',
],
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'api': {
'public': {
'get': [
'currencies',
'products',
'products/{id}',
'products/{id}/book',
'products/{id}/candles',
'products/{id}/stats',
'products/{id}/ticker',
'products/{id}/trades',
'time',
],
},
'private': {
'get': [
'accounts',
'accounts/{id}',
'accounts/{id}/holds',
'accounts/{id}/ledger',
'accounts/{id}/transfers',
'coinbase-accounts',
'coinbase-accounts/{id}/addresses',
'fills',
'funding',
'fees',
'margin/profile_information',
'margin/buying_power',
'margin/withdrawal_power',
'margin/withdrawal_power_all',
'margin/exit_plan',
'margin/liquidation_history',
'margin/position_refresh_amounts',
'margin/status',
'oracle',
'orders',
'orders/{id}',
'orders/client:{client_oid}',
'otc/orders',
'payment-methods',
'position',
'profiles',
'profiles/{id}',
'reports/{report_id}',
'transfers',
'transfers/{transfer_id}',
'users/self/trailing-volume',
'users/self/exchange-limits',
'withdrawals/fee-estimate',
],
'post': [
'conversions',
'deposits/coinbase-account',
'deposits/payment-method',
'coinbase-accounts/{id}/addresses',
'funding/repay',
'orders',
'position/close',
'profiles/margin-transfer',
'profiles/transfer',
'reports',
'withdrawals/coinbase',
'withdrawals/coinbase-account',
'withdrawals/crypto',
'withdrawals/payment-method',
],
'delete': [
'orders',
'orders/client:{client_oid}',
'orders/{id}',
],
},
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'maker': 0.5 / 100,
'taker': 0.5 / 100,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BCH': 0,
'BTC': 0,
'LTC': 0,
'ETH': 0,
'EUR': 0.15,
'USD': 25,
},
'deposit': {
'BCH': 0,
'BTC': 0,
'LTC': 0,
'ETH': 0,
'EUR': 0.15,
'USD': 10,
},
},
},
'exceptions': {
'exact': {
'Insufficient funds': InsufficientFunds,
'NotFound': OrderNotFound,
'Invalid API Key': AuthenticationError,
'invalid signature': AuthenticationError,
'Invalid Passphrase': AuthenticationError,
'Invalid order id': InvalidOrder,
'Private rate limit exceeded': RateLimitExceeded,
'Trading pair not available': PermissionDenied,
'Product not found': InvalidOrder,
},
'broad': {
'Order already done': OrderNotFound,
'order not found': OrderNotFound,
'price too small': InvalidOrder,
'price too precise': InvalidOrder,
'under maintenance': OnMaintenance,
'size is too small': InvalidOrder,
'Cancel only mode': OnMaintenance,
},
},
})
async def fetch_currencies(self, params={}):
response = await self.publicGetCurrencies(params)
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'id')
name = self.safe_string(currency, 'name')
code = self.safe_currency_code(id)
details = self.safe_value(currency, 'details', {})
precision = self.safe_float(currency, 'max_precision')
status = self.safe_string(currency, 'status')
active = (status == 'online')
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': self.safe_string(details, 'type'),
'name': name,
'active': active,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(details, 'min_size'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_float(details, 'min_withdrawal_amount'),
'max': None,
},
},
}
return result
async def fetch_markets(self, params={}):
response = await self.publicGetProducts(params)
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
priceLimits = {
'min': self.safe_float(market, 'quote_increment'),
'max': None,
}
precision = {
'amount': self.safe_float(market, 'base_increment'),
'price': self.safe_float(market, 'quote_increment'),
}
status = self.safe_string(market, 'status')
active = (status == 'online')
result.append(self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'baseId': baseId,
'quoteId': quoteId,
'base': base,
'quote': quote,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(market, 'base_min_size'),
'max': self.safe_float(market, 'base_max_size'),
},
'price': priceLimits,
'cost': {
'min': self.safe_float(market, 'min_market_funds'),
'max': self.safe_float(market, 'max_market_funds'),
},
},
'active': active,
'info': market,
}))
return result
async def fetch_accounts(self, params={}):
response = await self.privateGetAccounts(params)
result = []
for i in range(0, len(response)):
account = response[i]
accountId = self.safe_string(account, 'id')
currencyId = self.safe_string(account, 'currency')
code = self.safe_currency_code(currencyId)
result.append({
'id': accountId,
'type': None,
'currency': code,
'info': account,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetAccounts(params)
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = {
'free': self.safe_float(balance, 'available'),
'used': self.safe_float(balance, 'hold'),
'total': self.safe_float(balance, 'balance'),
}
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'id': self.market_id(symbol),
'level': 2,
}
response = await self.publicGetProductsIdBook(self.extend(request, params))
orderbook = self.parse_order_book(response)
orderbook['nonce'] = self.safe_integer(response, 'sequence')
return orderbook
def parse_ticker(self, ticker, market=None):
timestamp = self.parse8601(self.safe_value(ticker, 'time'))
bid = self.safe_float(ticker, 'bid')
ask = self.safe_float(ticker, 'ask')
last = self.safe_float(ticker, 'price')
symbol = None if (market is None) else market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': bid,
'bidVolume': None,
'ask': ask,
'askVolume': None,
'vwap': None,
'open': self.safe_float(ticker, 'open'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
method = self.safe_string(self.options, 'fetchTickerMethod', 'publicGetProductsIdTicker')
response = await getattr(self, method)(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
timestamp = self.parse8601(self.safe_string_2(trade, 'time', 'created_at'))
marketId = self.safe_string(trade, 'product_id')
symbol = self.safe_symbol(marketId, market, '-')
feeRate = None
feeCurrency = None
takerOrMaker = None
if market is not None:
feeCurrency = market['quote']
if 'liquidity' in trade:
takerOrMaker = 'taker' if (trade['liquidity'] == 'T') else 'maker'
feeRate = market[takerOrMaker]
feeCost = self.safe_float_2(trade, 'fill_fees', 'fee')
fee = {
'cost': feeCost,
'currency': feeCurrency,
'rate': feeRate,
}
type = None
id = self.safe_string(trade, 'trade_id')
side = 'sell' if (trade['side'] == 'buy') else 'buy'
orderId = self.safe_string(trade, 'order_id')
if orderId is not None:
side = 'buy' if (trade['side'] == 'buy') else 'sell'
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'size')
return {
'id': id,
'order': orderId,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'fee': fee,
'cost': price * amount,
}
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['id'],
}
if limit is not None:
request['limit'] = limit
response = await self.privateGetFills(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'], }
response = await self.publicGetProductsIdTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
return [
self.safe_timestamp(ohlcv, 0),
self.safe_float(ohlcv, 3),
self.safe_float(ohlcv, 2),
self.safe_float(ohlcv, 1),
self.safe_float(ohlcv, 4),
self.safe_float(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
granularity = self.timeframes[timeframe]
request = {
'id': market['id'],
'granularity': granularity,
}
if since is not None:
request['start'] = self.iso8601(since)
if limit is None:
mit = 300
request['end'] = self.iso8601(self.sum((limit - 1) * granularity * 1000, since))
response = await self.publicGetProductsIdCandles(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_time(self, params={}):
response = await self.publicGetTime(params)
return self.safe_timestamp(response, 'epoch')
def parse_order_status(self, status):
statuses = {
'pending': 'open',
'active': 'open',
'open': 'open',
'done': 'closed',
'canceled': 'canceled',
'canceling': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
marketId = self.safe_string(order, 'product_id')
market = self.safe_market(marketId, market, '-')
status = self.parse_order_status(self.safe_string(order, 'status'))
price = self.safe_float(order, 'price')
filled = self.safe_float(order, 'filled_size')
amount = self.safe_float(order, 'size', filled)
remaining = None
if amount is not None:
if filled is not None:
remaining = amount - filled
cost = self.safe_float(order, 'executed_value')
feeCost = self.safe_float(order, 'fill_fees')
fee = None
if feeCost is not None:
feeCurrencyCode = None
if market is not None:
feeCurrencyCode = market['quote']
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
'rate': None,
}
id = self.safe_string(order, 'id')
type = self.safe_string(order, 'type')
side = self.safe_string(order, 'side')
return {
'id': id,
'clientOrderId': None,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': market['symbol'],
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'fee': fee,
'average': None,
'trades': None,
}
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privateGetOrdersId(self.extend(request, params))
return self.parse_order(response)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'order_id': id,
}
response = await self.privateGetFills(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'status': 'all',
}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
response = await self.privateGetOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
response = await self.privateGetOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'status': 'done',
}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
response = await self.privateGetOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
request = {
'product_id': self.market_id(symbol),
'side': side,
'size': self.amount_to_precision(symbol, amount),
'type': type,
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
response = await self.privatePostOrders(self.extend(request, params))
return self.parse_order(response)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.privateDeleteOrdersId({'id': id})
async def cancel_all_orders(self, symbol=None, params={}):
return await self.privateDeleteOrders(params)
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = amount * price
currency = market['quote']
return {
'type': takerOrMaker,
'currency': currency,
'rate': rate,
'cost': float(self.currency_to_precision(currency, rate * cost)),
}
async def fetch_payment_methods(self, params={}):
return await self.privateGetPaymentMethods(params)
async def deposit(self, code, amount, address, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': amount,
}
method = 'privatePostDeposits'
if 'payment_method_id' in params:
method += 'PaymentMethod'
elif 'coinbase_account_id' in params:
method += 'CoinbaseAccount'
else:
raise NotSupported(self.id + ' deposit() requires one of `coinbase_account_id` or `payment_method_id` extra params')
response = await getattr(self, method)(self.extend(request, params))
if not response:
raise ExchangeError(self.id + ' deposit() error: ' + self.json(response))
return {
'info': response,
'id': response['id'],
}
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': amount,
}
method = 'privatePostWithdrawals'
if 'payment_method_id' in params:
method += 'PaymentMethod'
elif 'coinbase_account_id' in params:
method += 'CoinbaseAccount'
else:
method += 'Crypto'
request['crypto_address'] = address
response = await getattr(self, method)(self.extend(request, params))
if not response:
raise ExchangeError(self.id + ' withdraw() error: ' + self.json(response))
return {
'info': response,
'id': response['id'],
}
    async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
        """Fetch transfers for one account.

        The account id is taken from ``params['id']`` when given; otherwise it
        is resolved from ``code`` via the cached account list, which is why a
        currency code is required in that case.
        """
        await self.load_markets()
        await self.load_accounts()
        currency = None
        id = self.safe_string(params, 'id')
        if id is None:
            if code is None:
                raise ArgumentsRequired(self.id + ' fetchTransactions() requires a currency code argument if no account id specified in params')
            currency = self.currency(code)
            accountsByCurrencyCode = self.index_by(self.accounts, 'currency')
            account = self.safe_value(accountsByCurrencyCode, code)
            if account is None:
                raise ExchangeError(self.id + ' fetchTransactions() could not find account id for ' + code)
            id = account['id']
        request = {
            'id': id,
        }
        if limit is not None:
            request['limit'] = limit
        response = await self.privateGetAccountsIdTransfers(self.extend(request, params))
        # stamp each row with the requested currency code before parsing
        for i in range(0, len(response)):
            response[i]['currency'] = code
        return self.parse_transactions(response, currency, since, limit)
def parse_transaction_status(self, transaction):
canceled = self.safe_value(transaction, 'canceled_at')
if canceled:
return 'canceled'
processed = self.safe_value(transaction, 'processed_at')
completed = self.safe_value(transaction, 'completed_at')
if completed:
return 'ok'
elif processed and not completed:
return 'failed'
else:
return 'pending'
    def parse_transaction(self, transaction, currency=None):
        """Convert an exchange transfer record into the unified transaction dict.

        ``fee`` is always ``None`` here (this endpoint's records carry no fee
        field that the method reads).
        """
        details = self.safe_value(transaction, 'details', {})
        id = self.safe_string(transaction, 'id')
        txid = self.safe_string(details, 'crypto_transaction_hash')
        timestamp = self.parse8601(self.safe_string(transaction, 'created_at'))
        updated = self.parse8601(self.safe_string(transaction, 'processed_at'))
        currencyId = self.safe_string(transaction, 'currency')
        code = self.safe_currency_code(currencyId, currency)
        fee = None
        status = self.parse_transaction_status(transaction)
        amount = self.safe_float(transaction, 'amount')
        type = self.safe_string(transaction, 'type')
        address = self.safe_string(details, 'crypto_address')
        tag = self.safe_string(details, 'destination_tag')
        # a top-level crypto_address overrides the one found in details
        address = self.safe_string(transaction, 'crypto_address', address)
        if type == 'withdraw':
            # unify the exchange's 'withdraw' into ccxt's 'withdrawal'
            type = 'withdrawal'
            address = self.safe_string(details, 'sent_to_address', address)
        return {
            'info': transaction,
            'id': id,
            'txid': txid,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'address': address,
            'tag': tag,
            'type': type,
            'amount': amount,
            'currency': code,
            'status': status,
            'updated': updated,
            'fee': fee,
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the final URL, body and headers for an API call.

        Private calls carry the CB-ACCESS-* headers: the signature is an
        HMAC-SHA256 (base64) of ``nonce + method + request_path + payload``
        keyed with the base64-decoded API secret.
        """
        request = '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if method == 'GET':
            if query:
                request += '?' + self.urlencode(query)
        url = self.urls['api'][api] + request
        if api == 'private':
            self.check_required_credentials()
            nonce = str(self.nonce())
            payload = ''
            if method != 'GET':
                if query:
                    # non-GET parameters travel in the JSON body and are signed
                    body = self.json(query)
                    payload = body
            what = nonce + method + request + payload
            secret = self.base64_to_binary(self.secret)
            signature = self.hmac(self.encode(what), secret, hashlib.sha256, 'base64')
            headers = {
                'CB-ACCESS-KEY': self.apiKey,
                'CB-ACCESS-SIGN': signature,
                'CB-ACCESS-TIMESTAMP': nonce,
                'CB-ACCESS-PASSPHRASE': self.password,
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    async def fetch_deposit_address(self, code, params={}):
        """Fetch a deposit address for `code` from the linked Coinbase account.

        The Coinbase account list is fetched once and cached in
        ``self.options`` (also indexed by currency id) for later lookups.
        """
        await self.load_markets()
        currency = self.currency(code)
        accounts = self.safe_value(self.options, 'coinbaseAccounts')
        if accounts is None:
            accounts = await self.privateGetCoinbaseAccounts()
            self.options['coinbaseAccounts'] = accounts
            self.options['coinbaseAccountsByCurrencyId'] = self.index_by(accounts, 'currency')
        currencyId = currency['id']
        account = self.safe_value(self.options['coinbaseAccountsByCurrencyId'], currencyId)
        if account is None:
            raise InvalidAddress(self.id + " fetchDepositAddress() could not find currency code " + code + " with id = " + currencyId + " in self.options['coinbaseAccountsByCurrencyId']")
        request = {
            'id': account['id'],
        }
        response = await self.privateGetCoinbaseAccountsIdAddresses(self.extend(request, params))
        address = self.safe_string(response, 'address')
        tag = self.safe_string(response, 'destination_tag')
        return {
            'currency': code,
            'address': self.check_address(address),
            'tag': tag,
            'info': response,
        }
async def create_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
accounts = self.safe_value(self.options, 'coinbaseAccounts')
if accounts is None:
accounts = await self.privateGetCoinbaseAccounts()
self.options['coinbaseAccounts'] = accounts
self.options['coinbaseAccountsByCurrencyId'] = self.index_by(accounts, 'currency')
currencyId = currency['id']
account = self.safe_value(self.options['coinbaseAccountsByCurrencyId'], currencyId)
if account is None:
raise InvalidAddress(self.id + " fetchDepositAddress() could not find currency code " + code + " with id = " + currencyId + " in self.options['coinbaseAccountsByCurrencyId']")
request = {
'id': account['id'],
}
response = await self.privatePostCoinbaseAccountsIdAddresses(self.extend(request, params))
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'destination_tag')
return {
'currency': code,
'address': self.check_address(address),
'tag': tag,
'info': response,
}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if (code == 400) or (code == 404):
if body[0] == '{':
message = self.safe_string(response, 'message')
feedback = self.id + ' ' + message
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback)
raise ExchangeError(self.id + ' ' + body)
    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Send the request; raise ExchangeError when a JSON body carries `message`."""
        response = await self.fetch2(path, api, method, params, headers, body)
        # NOTE(review): `basestring` is presumably the py2/py3 compatibility alias
        # defined at the ccxt module level — confirm; it is not a Python 3 builtin.
        if not isinstance(response, basestring):
            if 'message' in response:
                raise ExchangeError(self.id + ' ' + self.json(response))
        return response
| true | true |
f71bff11a394180fe27dbab6c598aaf8b04768c6 | 10,059 | py | Python | mlflow/spacy.py | Roffild/mlflow | 8351d82e6ad4103bc58159175b29b406abb1e052 | [
"Apache-2.0"
] | null | null | null | mlflow/spacy.py | Roffild/mlflow | 8351d82e6ad4103bc58159175b29b406abb1e052 | [
"Apache-2.0"
] | null | null | null | mlflow/spacy.py | Roffild/mlflow | 8351d82e6ad4103bc58159175b29b406abb1e052 | [
"Apache-2.0"
] | null | null | null | """
The ``mlflow.spacy`` module provides an API for logging and loading spaCy models.
This module exports spacy models with the following flavors:
spaCy (native) format
This is the main flavor that can be loaded back into spaCy.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and batch inference.
"""
from __future__ import absolute_import
import logging
import os
import pandas as pd
import yaml
import mlflow
from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model, ModelSignature
from mlflow.models.utils import ModelInputExample
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration
FLAVOR_NAME = "spacy"
_logger = logging.getLogger(__name__)
def get_default_conda_env():
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    import spacy

    # pin the exact spaCy version that produced the model
    pip_deps = ["spacy=={}".format(spacy.__version__)]
    return _mlflow_conda_env(
        additional_conda_deps=None,
        additional_pip_deps=pip_deps,
        additional_conda_channels=None)
def save_model(spacy_model, path, conda_env=None, mlflow_model=None):
    """
    Save a spaCy model to a path on the local file system.

    :param spacy_model: spaCy model to be saved.
    :param path: Local path where the model is to be saved.
    :param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
                      this model should be run in. At minimum, it should specify the dependencies
                      contained in :func:`get_default_conda_env()`. If ``None``, the default
                      :func:`get_default_conda_env()` environment is added to the model.
    :param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
                         If ``None``, a fresh :py:class:`Model` is created.
    """
    import spacy

    # Bug fix: the old `mlflow_model=Model()` mutable default was shared across
    # calls, accumulating flavors from unrelated saves. Create a fresh one here.
    if mlflow_model is None:
        mlflow_model = Model()
    path = os.path.abspath(path)
    if os.path.exists(path):
        raise MlflowException("Unable to save MLflow model to {path} - path '{path}' "
                              "already exists".format(path=path))
    model_data_subpath = "model.spacy"
    model_data_path = os.path.join(path, model_data_subpath)
    os.makedirs(model_data_path)

    # Save spacy-model
    spacy_model.to_disk(path=model_data_path)

    conda_env_subpath = "conda.yaml"
    if conda_env is None:
        conda_env = get_default_conda_env()
    elif not isinstance(conda_env, dict):
        with open(conda_env, "r") as f:
            conda_env = yaml.safe_load(f)
    with open(os.path.join(path, conda_env_subpath), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)

    # Save the pyfunc flavor if at least one text categorizer in spaCy pipeline
    if any([isinstance(pipe_component[1], spacy.pipeline.TextCategorizer)
            for pipe_component in spacy_model.pipeline]):
        pyfunc.add_to_model(mlflow_model, loader_module="mlflow.spacy",
                            data=model_data_subpath, env=conda_env_subpath)
    else:
        _logger.warning(
            "Generating only the spacy flavor for the provided spacy model. This means the model "
            "can be loaded back via `mlflow.spacy.load_model`, but cannot be loaded back using "
            "pyfunc APIs like `mlflow.pyfunc.load_model` or via the `mlflow models` CLI commands. "
            "MLflow will only generate the pyfunc flavor for spacy models containing a pipeline "
            "component that is an instance of spacy.pipeline.TextCategorizer.")

    mlflow_model.add_flavor(FLAVOR_NAME, spacy_version=spacy.__version__, data=model_data_subpath)
    mlflow_model.save(os.path.join(path, "MLmodel"))
def log_model(spacy_model, artifact_path, conda_env=None, registered_model_name=None,
              signature: ModelSignature = None, input_example: ModelInputExample = None, **kwargs):
    """
    Log a spaCy model as an MLflow artifact for the current run.

    Thin wrapper around :py:meth:`Model.log` with ``flavor=mlflow.spacy``.

    :param spacy_model: spaCy model to be saved.
    :param artifact_path: Run-relative artifact path.
    :param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
                      this model should be run in. At minimum, it should specify the dependencies
                      contained in :func:`get_default_conda_env()`. If ``None``, the default
                      :func:`get_default_conda_env()` environment is added to the model.
    :param registered_model_name: (Experimental) If given, create a model version under
                                  ``registered_model_name``, also creating a registered model if one
                                  with the given name does not exist.
    :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
                      from datasets with valid model input (e.g. the training dataset) and valid
                      model output (e.g. model predictions generated on the training dataset).
    :param input_example: (Experimental) Input example provides one or several instances of valid
                          model input. The example can be used as a hint of what data to feed the
                          model. The given example will be converted to a Pandas DataFrame and then
                          serialized to json using the Pandas split-oriented format. Bytes are
                          base64-encoded.
    :param kwargs: additional kwargs forwarded to :py:meth:`Model.log` / ``save_model``.
    """
    Model.log(artifact_path=artifact_path, flavor=mlflow.spacy,
              registered_model_name=registered_model_name,
              spacy_model=spacy_model, conda_env=conda_env,
              signature=signature, input_example=input_example, **kwargs)
def _load_model(path):
    """Load the spaCy model stored at the given local directory path."""
    import spacy

    return spacy.load(os.path.abspath(path))
class _SpacyModelWrapper:
def __init__(self, spacy_model):
self.spacy_model = spacy_model
def predict(self, dataframe):
"""
Only works for predicting using text categorizer.
Not suitable for other pipeline components (e.g: parser)
:param dataframe: pandas dataframe containing texts to be categorized
expected shape is (n_rows,1 column)
:return: dataframe with predictions
"""
if len(dataframe.columns) != 1:
raise MlflowException('Shape of input dataframe must be (n_rows, 1column)')
return pd.DataFrame({
'predictions': dataframe.ix[:, 0].apply(lambda text: self.spacy_model(text).cats)
})
def _load_pyfunc(path):
    """
    Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.

    :param path: Local filesystem path to the MLflow Model with the ``spacy`` flavor.
    """
    spacy_model = _load_model(path)
    return _SpacyModelWrapper(spacy_model)
def load_model(model_uri):
    """
    Load a spaCy model from a local file or a run.

    :param model_uri: The location, in URI format, of the MLflow model. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :return: A spaCy loaded model
    """
    local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
    # Flavor configurations for models saved in MLflow version <= 0.8.0 may not contain a
    # `data` key; in this case, we assume the model artifact path to be `model.spacy`
    spacy_model_file_path = os.path.join(local_model_path, flavor_conf.get("data", "model.spacy"))
    return _load_model(path=spacy_model_file_path)
| 43.357759 | 100 | 0.617457 |
from __future__ import absolute_import
import logging
import os
import pandas as pd
import yaml
import mlflow
from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model, ModelSignature
from mlflow.models.utils import ModelInputExample
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration
FLAVOR_NAME = "spacy"
_logger = logging.getLogger(__name__)
def get_default_conda_env():
    """Return the default Conda environment for models saved by this flavor."""
    import spacy
    return _mlflow_conda_env(
        additional_conda_deps=None,
        additional_pip_deps=[
            "spacy=={}".format(spacy.__version__),
        ],
        additional_conda_channels=None)
def save_model(spacy_model, path, conda_env=None, mlflow_model=None):
    """Save a spaCy model to ``path`` on the local file system.

    Adds the pyfunc flavor only when the pipeline contains a TextCategorizer.
    :param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being
                         added to; a fresh one is created when ``None``.
    """
    import spacy
    # Bug fix: a mutable default (`mlflow_model=Model()`) would be shared
    # across calls and accumulate flavors from unrelated saves.
    if mlflow_model is None:
        mlflow_model = Model()
    path = os.path.abspath(path)
    if os.path.exists(path):
        raise MlflowException("Unable to save MLflow model to {path} - path '{path}' "
                              "already exists".format(path=path))
    model_data_subpath = "model.spacy"
    model_data_path = os.path.join(path, model_data_subpath)
    os.makedirs(model_data_path)
    spacy_model.to_disk(path=model_data_path)
    conda_env_subpath = "conda.yaml"
    if conda_env is None:
        conda_env = get_default_conda_env()
    elif not isinstance(conda_env, dict):
        with open(conda_env, "r") as f:
            conda_env = yaml.safe_load(f)
    with open(os.path.join(path, conda_env_subpath), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
    if any([isinstance(pipe_component[1], spacy.pipeline.TextCategorizer)
            for pipe_component in spacy_model.pipeline]):
        pyfunc.add_to_model(mlflow_model, loader_module="mlflow.spacy",
                            data=model_data_subpath, env=conda_env_subpath)
    else:
        _logger.warning(
            "Generating only the spacy flavor for the provided spacy model. This means the model "
            "can be loaded back via `mlflow.spacy.load_model`, but cannot be loaded back using "
            "pyfunc APIs like `mlflow.pyfunc.load_model` or via the `mlflow models` CLI commands. "
            "MLflow will only generate the pyfunc flavor for spacy models containing a pipeline "
            "component that is an instance of spacy.pipeline.TextCategorizer.")
    mlflow_model.add_flavor(FLAVOR_NAME, spacy_version=spacy.__version__, data=model_data_subpath)
    mlflow_model.save(os.path.join(path, "MLmodel"))
def log_model(spacy_model, artifact_path, conda_env=None, registered_model_name=None,
              signature: ModelSignature = None, input_example: ModelInputExample = None, **kwargs):
    """Log a spaCy model as an artifact for the current run (delegates to Model.log)."""
    Model.log(artifact_path=artifact_path, flavor=mlflow.spacy,
              registered_model_name=registered_model_name,
              spacy_model=spacy_model, conda_env=conda_env,
              signature=signature, input_example=input_example, **kwargs)
def _load_model(path):
    """Load the spaCy model stored at the given local path."""
    import spacy
    path = os.path.abspath(path)
    return spacy.load(path)
class _SpacyModelWrapper:
def __init__(self, spacy_model):
self.spacy_model = spacy_model
def predict(self, dataframe):
if len(dataframe.columns) != 1:
raise MlflowException('Shape of input dataframe must be (n_rows, 1column)')
return pd.DataFrame({
'predictions': dataframe.ix[:, 0].apply(lambda text: self.spacy_model(text).cats)
})
def _load_pyfunc(path):
    """Load the PyFunc implementation; called by ``pyfunc.load_pyfunc``."""
    return _SpacyModelWrapper(_load_model(path))
def load_model(model_uri):
    """Load a spaCy model from a local path or an MLflow artifact URI."""
    local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
    # models saved with MLflow <= 0.8.0 may lack a `data` key; default to model.spacy
    spacy_model_file_path = os.path.join(local_model_path, flavor_conf.get("data", "model.spacy"))
    return _load_model(path=spacy_model_file_path)
f71bff714d7bf1d24454e59616e23e72f9782452 | 629 | py | Python | December Month Challenge/2LinkedListRandomNodeReservoirSampling.py | adesh-gadge/LeetCodePractice | 4b142c102e64ec93465af7f4193762e8fd2866ec | [
"MIT"
] | null | null | null | December Month Challenge/2LinkedListRandomNodeReservoirSampling.py | adesh-gadge/LeetCodePractice | 4b142c102e64ec93465af7f4193762e8fd2866ec | [
"MIT"
] | null | null | null | December Month Challenge/2LinkedListRandomNodeReservoirSampling.py | adesh-gadge/LeetCodePractice | 4b142c102e64ec93465af7f4193762e8fd2866ec | [
"MIT"
] | null | null | null | import random
class Solution:
def __init__(self, head: ListNode):
"""
@param head The linked list's head.
Note that the head is guaranteed to be not null, so it contains at least one node.
"""
self.head = head
def getRandom(self) -> int:
"""
Returns a random node's value.
"""
scope = 1
chosen_value = 0
curr = self.head
while curr:
if random.random() < 1/scope:
chosen_value = curr.val
curr = curr.next
scope +=1
return chosen_value
| 23.296296 | 90 | 0.497615 | import random
class Solution:
def __init__(self, head: ListNode):
self.head = head
def getRandom(self) -> int:
scope = 1
chosen_value = 0
curr = self.head
while curr:
if random.random() < 1/scope:
chosen_value = curr.val
curr = curr.next
scope +=1
return chosen_value
| true | true |
f71bff92a942ff8d052d7208bd78c572f2d01c55 | 132 | py | Python | artistapp/artist/admin.py | fallprojects/ArtistApp | 5564a1f7f4fc95261beb462abfa4ca53f3e5c17f | [
"MIT"
] | null | null | null | artistapp/artist/admin.py | fallprojects/ArtistApp | 5564a1f7f4fc95261beb462abfa4ca53f3e5c17f | [
"MIT"
] | null | null | null | artistapp/artist/admin.py | fallprojects/ArtistApp | 5564a1f7f4fc95261beb462abfa4ca53f3e5c17f | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register([Content,Profile,Comment])
| 22 | 46 | 0.787879 | from django.contrib import admin
from .models import *
admin.site.register([Content,Profile,Comment])
| true | true |
f71c00b21518c6757cb661f74b98f03394d9f8a6 | 749 | py | Python | parqueo/urls.py | gersonjuarez/Laboratorio | 3ed5dc57136ec593e3edb8ef8ca3b29abeb7dabc | [
"bzip2-1.0.6"
] | null | null | null | parqueo/urls.py | gersonjuarez/Laboratorio | 3ed5dc57136ec593e3edb8ef8ca3b29abeb7dabc | [
"bzip2-1.0.6"
] | null | null | null | parqueo/urls.py | gersonjuarez/Laboratorio | 3ed5dc57136ec593e3edb8ef8ca3b29abeb7dabc | [
"bzip2-1.0.6"
] | null | null | null | """parqueo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
    # Django admin interface, served at /admin/
    path('admin/', admin.site.urls),
]
| 34.045455 | 77 | 0.708945 | from django.contrib import admin
from django.urls import path
urlpatterns = [
    # Django admin interface, served at /admin/
    path('admin/', admin.site.urls),
]
| true | true |
f71c0110baa8d07d6722fcdc95decbbb7f63ec63 | 2,576 | py | Python | arviz/plots/backends/matplotlib/distplot.py | StanczakDominik/arviz | ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287 | [
"Apache-2.0"
] | null | null | null | arviz/plots/backends/matplotlib/distplot.py | StanczakDominik/arviz | ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287 | [
"Apache-2.0"
] | null | null | null | arviz/plots/backends/matplotlib/distplot.py | StanczakDominik/arviz | ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287 | [
"Apache-2.0"
] | null | null | null | """Matplotlib distplot."""
import warnings
import matplotlib.pyplot as plt
import numpy as np
from . import backend_show
from ...kdeplot import plot_kde
from ...plot_utils import matplotlib_kwarg_dealiaser
from ....numeric_utils import get_bins
def plot_dist(
    values,
    values2,
    color,
    kind,
    cumulative,
    label,
    rotated,
    rug,
    bw,
    quantiles,
    contour,
    fill_last,
    textsize,
    plot_kwargs,
    fill_kwargs,
    rug_kwargs,
    contour_kwargs,
    contourf_kwargs,
    pcolormesh_kwargs,
    hist_kwargs,
    ax,
    backend_kwargs,
    show,
):
    """Matplotlib distplot.

    Draws either a histogram (``kind="hist"``) or a KDE (``kind="kde"``) of
    ``values`` (optionally joint with ``values2``) on ``ax``.
    """
    if backend_kwargs is not None:
        # backend_kwargs is accepted for cross-backend API symmetry only.
        # Bug fix: the two message fragments were concatenated with no separator,
        # producing "...matplotlib.plot_distSupplied value won't be used".
        warnings.warn(
            "Argument backend_kwargs has no effect in matplotlib.plot_dist. "
            "Supplied value won't be used"
        )
        backend_kwargs = None
    if ax is None:
        ax = plt.gca()

    if kind == "hist":
        ax = _histplot_mpl_op(
            values=values, values2=values2, rotated=rotated, ax=ax, hist_kwargs=hist_kwargs
        )
    elif kind == "kde":
        plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, "plot")
        plot_kwargs.setdefault("color", color)
        # only add a legend when the caller supplied a label
        legend = label is not None

        ax = plot_kde(
            values,
            values2,
            cumulative=cumulative,
            rug=rug,
            label=label,
            bw=bw,
            quantiles=quantiles,
            rotated=rotated,
            contour=contour,
            legend=legend,
            fill_last=fill_last,
            textsize=textsize,
            plot_kwargs=plot_kwargs,
            fill_kwargs=fill_kwargs,
            rug_kwargs=rug_kwargs,
            contour_kwargs=contour_kwargs,
            contourf_kwargs=contourf_kwargs,
            pcolormesh_kwargs=pcolormesh_kwargs,
            ax=ax,
            backend="matplotlib",
            backend_kwargs=backend_kwargs,
            show=show,
        )

    if backend_show(show):
        plt.show()

    return ax
def _histplot_mpl_op(values, values2, rotated, ax, hist_kwargs):
    """Add a histogram for the data to the axes."""
    if values2 is not None:
        raise NotImplementedError("Insert hexbin plot here")

    bins = hist_kwargs.pop("bins")
    if bins is None:
        bins = get_bins(values)
    flat_values = np.asarray(values).flatten()
    ax.hist(flat_values, bins=bins, **hist_kwargs)
    # place the value-axis ticks at the bin edges
    tick_setter = ax.set_yticks if rotated else ax.set_xticks
    tick_setter(bins[:-1])
    if hist_kwargs.get("label") is not None:
        ax.legend()
    return ax
| 24.301887 | 91 | 0.588898 | import warnings
import matplotlib.pyplot as plt
import numpy as np
from . import backend_show
from ...kdeplot import plot_kde
from ...plot_utils import matplotlib_kwarg_dealiaser
from ....numeric_utils import get_bins
def plot_dist(
    values,
    values2,
    color,
    kind,
    cumulative,
    label,
    rotated,
    rug,
    bw,
    quantiles,
    contour,
    fill_last,
    textsize,
    plot_kwargs,
    fill_kwargs,
    rug_kwargs,
    contour_kwargs,
    contourf_kwargs,
    pcolormesh_kwargs,
    hist_kwargs,
    ax,
    backend_kwargs,
    show,
):
    """Matplotlib distplot: histogram (``kind="hist"``) or KDE (``kind="kde"``)."""
    if backend_kwargs is not None:
        # Bug fix: the fragments were concatenated without a separator,
        # producing "...plot_distSupplied value won't be used".
        warnings.warn(
            "Argument backend_kwargs has no effect in matplotlib.plot_dist. "
            "Supplied value won't be used"
        )
        backend_kwargs = None
    if ax is None:
        ax = plt.gca()
    if kind == "hist":
        ax = _histplot_mpl_op(
            values=values, values2=values2, rotated=rotated, ax=ax, hist_kwargs=hist_kwargs
        )
    elif kind == "kde":
        plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, "plot")
        plot_kwargs.setdefault("color", color)
        legend = label is not None
        ax = plot_kde(
            values,
            values2,
            cumulative=cumulative,
            rug=rug,
            label=label,
            bw=bw,
            quantiles=quantiles,
            rotated=rotated,
            contour=contour,
            legend=legend,
            fill_last=fill_last,
            textsize=textsize,
            plot_kwargs=plot_kwargs,
            fill_kwargs=fill_kwargs,
            rug_kwargs=rug_kwargs,
            contour_kwargs=contour_kwargs,
            contourf_kwargs=contourf_kwargs,
            pcolormesh_kwargs=pcolormesh_kwargs,
            ax=ax,
            backend="matplotlib",
            backend_kwargs=backend_kwargs,
            show=show,
        )
    if backend_show(show):
        plt.show()
    return ax
def _histplot_mpl_op(values, values2, rotated, ax, hist_kwargs):
    """Add a histogram of `values` to `ax`; value-axis ticks sit at the bin edges."""
    if values2 is not None:
        raise NotImplementedError("Insert hexbin plot here")
    bins = hist_kwargs.pop("bins")
    if bins is None:
        # fall back to the project helper for automatic binning
        bins = get_bins(values)
    ax.hist(np.asarray(values).flatten(), bins=bins, **hist_kwargs)
    if rotated:
        ax.set_yticks(bins[:-1])
    else:
        ax.set_xticks(bins[:-1])
    if hist_kwargs.get("label") is not None:
        ax.legend()
    return ax
| true | true |
f71c01c109a11f63936b4d9b8326f9b1b1a2d93f | 452 | py | Python | cookies/ex2/main.py | acandreani/ads_web_exercicios | a97ee7ebd0dba9e308b8e2d2318e577903f83f72 | [
"MIT"
] | 1 | 2019-03-13T14:33:28.000Z | 2019-03-13T14:33:28.000Z | cookies/ex2/main.py | acandreani/ads_web_exercicios | a97ee7ebd0dba9e308b8e2d2318e577903f83f72 | [
"MIT"
] | 1 | 2021-06-23T20:56:49.000Z | 2021-06-23T20:56:49.000Z | cookies/ex2/main.py | acandreani/ads_web_exercicios | a97ee7ebd0dba9e308b8e2d2318e577903f83f72 | [
"MIT"
] | 1 | 2019-04-24T13:10:58.000Z | 2019-04-24T13:10:58.000Z | from flask import Flask, render_template, request, session,redirect, url_for, escape, request, make_response
app = Flask(__name__)
# configure a chave secreta
app.secret_key = "segredo"
@app.route('/')
def index():
return render_template("index.html")
# use cookies.get(key) instead of cookies[key] to not get a
# KeyError if the cookie is missing.
if __name__== "__main__":
app.run(host="0.0.0.0",debug= True)
| 22.6 | 110 | 0.679204 | from flask import Flask, render_template, request, session,redirect, url_for, escape, request, make_response
app = Flask(__name__)
app.secret_key = "segredo"
@app.route('/')
def index():
return render_template("index.html")
if __name__== "__main__":
app.run(host="0.0.0.0",debug= True)
| true | true |
f71c0215c62089a43b38c7a560870eff98f3266c | 540 | py | Python | Exercicios-Python/exercicios-curso-em-video/d004.py | PedroGoes16/Estudos | 142a697a1d375590bb76847a74ed2b8f9fa44a9d | [
"MIT"
] | null | null | null | Exercicios-Python/exercicios-curso-em-video/d004.py | PedroGoes16/Estudos | 142a697a1d375590bb76847a74ed2b8f9fa44a9d | [
"MIT"
] | null | null | null | Exercicios-Python/exercicios-curso-em-video/d004.py | PedroGoes16/Estudos | 142a697a1d375590bb76847a74ed2b8f9fa44a9d | [
"MIT"
] | null | null | null | n = input('Digite algo: ')
print('É composto por número e letras? ',n.isalnum())
print('É composto somente por letras maiúsculas? ',n.isupper())
print('É composto somente por letras? ',n.isalpha())
print('É composto somente por números? ',n.isnumeric())
print('É um número decimal? ',n.isdecimal())
print('É composto somente por dígitos? ',n.isdigit())
print('É composto somente por letras minúsculas? ',n.islower())
print('É imprimível? ',n.isprintable())
print('É somente um espaço vazio? ',n.isspace())
print('É um título? ',n.istitle()) | 49.090909 | 63 | 0.709259 | n = input('Digite algo: ')
# Report each str.is*() predicate for the previously read string `n`.
print('É composto por número e letras? ',n.isalnum())
print('É composto somente por letras maiúsculas? ',n.isupper())
print('É composto somente por letras? ',n.isalpha())
print('É composto somente por números? ',n.isnumeric())
print('É um número decimal? ',n.isdecimal())
print('É composto somente por dígitos? ',n.isdigit())
print('É composto somente por letras minúsculas? ',n.islower())
print('É imprimível? ',n.isprintable())
print('É somente um espaço vazio? ',n.isspace())
print('É um título? ',n.istitle()) | true | true |
f71c027cc59d99ded5c1d7d4bb2ac9fc391c9882 | 9,319 | py | Python | squares.py | IAmUnStTV/Tetris-Python | 4a676b6f72ceabce796592611f2541665e4010be | [
"Apache-2.0"
] | 16 | 2019-03-20T12:33:53.000Z | 2021-10-16T12:13:39.000Z | squares.py | IAmUnStTV/Tetris-Python | 4a676b6f72ceabce796592611f2541665e4010be | [
"Apache-2.0"
] | 2 | 2019-04-01T14:07:07.000Z | 2021-07-15T14:08:58.000Z | squares.py | IAmUnStTV/Tetris-Python | 4a676b6f72ceabce796592611f2541665e4010be | [
"Apache-2.0"
] | 12 | 2019-03-30T11:32:30.000Z | 2021-09-15T02:49:00.000Z | from random import randrange
from pygame import Rect, draw
from clock import Clock
class Squares:
    """Methods for manipulating the squares (falling piece + settled map) in the game."""
    def __init__(self, st, status, screen):
        # st: settings object (board dimensions, shape table, timing constants)
        # status: shared game state (key flags, score, game_status)
        # screen: surface the board is drawn onto
        self.st = st
        self.status = status
        self.screen = screen
        # one board row; the string 'none' marks an empty cell
        self.empty_line = ['none' for i in range(st.square_num_x)]
        self.squares = [self.empty_line.copy() for i in range(st.square_num_y)]
        self.new_sq(self)
        self.clock = Clock(st)
    # draw all squares
    def draw_squares(self):
        """Redraw the board: fill the background, then the tip / existing /
        current-square layers (drawn by helpers defined elsewhere in the class)."""
        self.screen.fill(self.st.space_color)
        self.draw_tip(self)
        self.draw_exist_sq(self)
        self.draw_curr_sq(self)
    # update squares' information
    def update(self):
        """Advance one game tick.

        Applies, in order: vertical drop (straight drop takes precedence over
        the forced timer drop, which takes precedence over the player's quick
        drop), then rotation, then horizontal movement, then crash detection.
        Each action is gated by the Clock's per-action timers.

        :return: True if anything changed and the screen should be redrawn
        """
        updated = False # for update screen
        # vertical drop, straight drop
        if self.status.straight_drop and self.clock.is_time_to_straight_drop():
            updated = True
            self.drop_straight(self)
            self.clock.update_straight_drop()
        # vertical drop, force drop
        elif self.clock.is_time_to_drop():
            updated = True
            self.drop(self)
            self.clock.update_drop()
        # vertical drop, quick drop
        elif self.status.down and self.clock.is_time_to_quick_drop():
            updated = True
            self.drop(self)
            self.clock.update_quick_drop()
        # rotation
        if self.status.rotate and self.clock.is_time_to_rotate():
            updated = True
            self.rotate(self)
            self.clock.update_rotate()
        # horizontal move
        if self.status.right:
            updated = True
            if self.clock.is_time_to_move() or self.clock.is_time_to_quick_right():
                self.right(self)
                self.clock.update_move()
        if self.status.left:
            updated = True
            if self.clock.is_time_to_move() or self.clock.is_time_to_quick_left():
                self.left(self)
                self.clock.update_move()
        # crash detection
        if self.should_stop(self):
            updated = True
            self.stop(self)
        return updated
    # renew current square
    @staticmethod
    def new_sq(self):
        """Spawn a new falling piece at the configured start position.

        If the spawn position is already occupied the game is over.
        """
        self.curr_sq = self.st.new.copy()
        shape = self.get_shape(self)
        self.origin_shape = shape['pos']
        self.curr_shape = shape['pos']
        self.curr_color = shape['color']
        self.rotate_limit = shape['rotate']
        self.rotate_curr = 1
        # if new squares are crashed, game over.
        if not self.valid(self, self.curr_sq, self.curr_shape):
            self.status.game_status = self.status.GAMEOVER
# return a random shape dictionary
@staticmethod
def get_shape(self):
shape_index = randrange(0, self.st.shape_num)
return self.st.shapes[shape_index].copy()
    @staticmethod
    def drop_straight(self):
        """Hard drop: move the piece down until it would collide."""
        while not self.should_stop(self):
            self.curr_sq[0] += 1
@staticmethod
def drop(self):
new_sq = self.curr_sq.copy()
new_sq[0] += 1
if self.valid(self, new_sq, self.curr_shape):
self.curr_sq = new_sq
@staticmethod
def rotate(self):
new_shape = self.get_rotated_shape(self)
# regular check
if self.valid(self, self.curr_sq, new_shape):
self.curr_shape = new_shape
# move horizontally if not valid
else:
tolerance = 2
for i in range(tolerance):
# left
new_sq_left = self.curr_sq.copy()
new_sq_left[1] -= 1
if self.valid(self, new_sq_left, new_shape):
self.curr_sq = new_sq_left
self.curr_shape = new_shape
return
# right
new_sq_right = self.curr_sq.copy()
new_sq_right[1] += 1
if self.valid(self, new_sq_right, new_shape):
self.curr_sq = new_sq_right
self.curr_shape = new_shape
return
@staticmethod
def get_rotated_shape(self):
# rotation limit must not exceed, if exceed, reset it
if self.rotate_curr >= self.rotate_limit:
self.rotate_curr = 1
new_shape = self.origin_shape
else:
self.rotate_curr += 1
new_shape = []
for sq in self.curr_shape:
new_shape.append([sq[1], -sq[0]])
return new_shape
@staticmethod
def right(self):
new_sq = self.curr_sq.copy()
new_sq[1] += 1
if self.valid(self, new_sq, self.curr_shape):
self.curr_sq = new_sq
@staticmethod
def left(self):
new_sq = self.curr_sq.copy()
new_sq[1] -= 1
if self.valid(self, new_sq, self.curr_shape):
self.curr_sq = new_sq
@staticmethod
def stop(self):
# wait for a moment before stop, give player time to adjust
if not self.clock.is_time_to_stop():
self.clock.update_should_stop(True)
return
else:
self.clock.update_should_stop(None)
self.clock.update_stop()
# copy squares to map
for sq in self.curr_shape:
x = sq[1] + self.curr_sq[1]
y = sq[0] + self.curr_sq[0]
if y >= 0:
self.squares[y][x] = self.curr_color
x = self.curr_sq[1]
y = self.curr_sq[0]
if y >= 0:
self.squares[y][x] = self.curr_color
full_lines = self.clean_full_lines(self)
self.status.score += full_lines # add score
self.new_sq(self)
# delete full lines and insert empty lines at the front
@staticmethod
def clean_full_lines(self):
full_lines = 0
for index, line in enumerate(self.squares):
if line.count('none') == 0:
full_lines += 1
self.st.time_drop *= self.st.time_drop_adjust # adjust time
self.squares.pop(index)
self.squares.insert(0, self.empty_line.copy())
return full_lines
    # check whether the current piece (shape squares and center) would collide after dropping one more row
@staticmethod
def should_stop(self):
# check shape squares
for sq in self.curr_shape:
x = sq[1] + self.curr_sq[1]
y = sq[0] + self.curr_sq[0] + 1
if y - 1 >= 0 and not self.valid_sq(self, [y, x]):
return True
# check center square
x = self.curr_sq[1]
y = self.curr_sq[0] + 1
return not (self.valid_sq(self, [y, x]))
    # validate the given center square and the shape squares relative to that center
@staticmethod
def valid(self, square, shape):
# check shape squares
for sq in shape:
x = sq[1] + square[1]
y = sq[0] + square[0]
if y >= 0 and not (self.valid_sq(self, [y, x])):
return False
# check center square
return self.valid_sq(self, square)
@staticmethod
def valid_sq(self, sq):
# check border
if sq[0] >= self.st.square_num_y or \
sq[1] >= self.st.square_num_x or \
sq[1] < 0:
return False
# check crash
return self.squares[sq[0]][sq[1]] == 'none'
@staticmethod
def draw_exist_sq(self):
for y, row in enumerate(self.squares):
for x, square in enumerate(row):
color = self.st.colors[self.squares[y][x]]
self.draw_square(self, y, x, color)
@staticmethod
def draw_tip(self):
# find the lowrest position
curr_sq = self.curr_sq.copy()
while not self.should_stop(self):
self.curr_sq[0] += 1
curr_sq, self.curr_sq = self.curr_sq, curr_sq
# draw their tips
color = self.st.colors['tip']
self.draw_square(self, curr_sq[0], curr_sq[1], color, True)
self.draw_square(self, curr_sq[0], curr_sq[1], self.st.colors['none'])
for y, x in self.curr_shape:
curr_y, curr_x = curr_sq[0], curr_sq[1]
self.draw_square(self, y + curr_y, x + curr_x, color, True)
self.draw_square(self, y + curr_y, x + curr_x, self.st.colors['none'])
@staticmethod
def draw_curr_sq(self):
# draw center
color = self.st.colors[self.curr_color]
self.draw_square(self, self.curr_sq[0], self.curr_sq[1], color)
# draw shapes
curr_y, curr_x = self.curr_sq[0], self.curr_sq[1]
for y, x in self.curr_shape:
self.draw_square(self, y + curr_y, x + curr_x, color)
# draw one single square with given information
@staticmethod
def draw_square(self, y, x, color, border=False):
x_pos = x * (self.st.square_space + self.st.square_length)
y_pos = y * (self.st.square_space + self.st.square_length)
length = self.st.square_length
# adding borders borders
if border:
y_pos -= self.st.square_space
x_pos -= self.st.square_space
length += 2 * self.st.square_space
rect = Rect(x_pos + self.st.square_space, y_pos + self.st.square_space, length, length)
draw.rect(self.screen, color, rect) | 35.166038 | 95 | 0.570877 | from random import randrange
from pygame import Rect, draw
from clock import Clock
class Squares:
def __init__(self, st, status, screen):
self.st = st
self.status = status
self.screen = screen
self.empty_line = ['none' for i in range(st.square_num_x)]
self.squares = [self.empty_line.copy() for i in range(st.square_num_y)]
self.new_sq(self)
self.clock = Clock(st)
def draw_squares(self):
self.screen.fill(self.st.space_color)
self.draw_tip(self)
self.draw_exist_sq(self)
self.draw_curr_sq(self)
def update(self):
updated = False # for update screen
# vertical drop, straight drop
if self.status.straight_drop and self.clock.is_time_to_straight_drop():
updated = True
self.drop_straight(self)
self.clock.update_straight_drop()
# vertical drop, force drop
elif self.clock.is_time_to_drop():
updated = True
self.drop(self)
self.clock.update_drop()
# vertical drop, quick drop
elif self.status.down and self.clock.is_time_to_quick_drop():
updated = True
self.drop(self)
self.clock.update_quick_drop()
# rotation
if self.status.rotate and self.clock.is_time_to_rotate():
updated = True
self.rotate(self)
self.clock.update_rotate()
# horizontal move
if self.status.right:
updated = True
if self.clock.is_time_to_move() or self.clock.is_time_to_quick_right():
self.right(self)
self.clock.update_move()
if self.status.left:
updated = True
if self.clock.is_time_to_move() or self.clock.is_time_to_quick_left():
self.left(self)
self.clock.update_move()
# crash detection
if self.should_stop(self):
updated = True
self.stop(self)
return updated
# renew current square
@staticmethod
def new_sq(self):
self.curr_sq = self.st.new.copy()
shape = self.get_shape(self)
self.origin_shape = shape['pos']
self.curr_shape = shape['pos']
self.curr_color = shape['color']
self.rotate_limit = shape['rotate']
self.rotate_curr = 1
# if new squares are crashed, game over.
if not self.valid(self, self.curr_sq, self.curr_shape):
self.status.game_status = self.status.GAMEOVER
# return a random shape dictionary
@staticmethod
def get_shape(self):
shape_index = randrange(0, self.st.shape_num)
return self.st.shapes[shape_index].copy()
@staticmethod
def drop_straight(self):
while not self.should_stop(self):
self.curr_sq[0] += 1
@staticmethod
def drop(self):
new_sq = self.curr_sq.copy()
new_sq[0] += 1
if self.valid(self, new_sq, self.curr_shape):
self.curr_sq = new_sq
@staticmethod
def rotate(self):
new_shape = self.get_rotated_shape(self)
# regular check
if self.valid(self, self.curr_sq, new_shape):
self.curr_shape = new_shape
# move horizontally if not valid
else:
tolerance = 2
for i in range(tolerance):
# left
new_sq_left = self.curr_sq.copy()
new_sq_left[1] -= 1
if self.valid(self, new_sq_left, new_shape):
self.curr_sq = new_sq_left
self.curr_shape = new_shape
return
# right
new_sq_right = self.curr_sq.copy()
new_sq_right[1] += 1
if self.valid(self, new_sq_right, new_shape):
self.curr_sq = new_sq_right
self.curr_shape = new_shape
return
@staticmethod
def get_rotated_shape(self):
# rotation limit must not exceed, if exceed, reset it
if self.rotate_curr >= self.rotate_limit:
self.rotate_curr = 1
new_shape = self.origin_shape
else:
self.rotate_curr += 1
new_shape = []
for sq in self.curr_shape:
new_shape.append([sq[1], -sq[0]])
return new_shape
@staticmethod
def right(self):
new_sq = self.curr_sq.copy()
new_sq[1] += 1
if self.valid(self, new_sq, self.curr_shape):
self.curr_sq = new_sq
@staticmethod
def left(self):
new_sq = self.curr_sq.copy()
new_sq[1] -= 1
if self.valid(self, new_sq, self.curr_shape):
self.curr_sq = new_sq
@staticmethod
def stop(self):
# wait for a moment before stop, give player time to adjust
if not self.clock.is_time_to_stop():
self.clock.update_should_stop(True)
return
else:
self.clock.update_should_stop(None)
self.clock.update_stop()
# copy squares to map
for sq in self.curr_shape:
x = sq[1] + self.curr_sq[1]
y = sq[0] + self.curr_sq[0]
if y >= 0:
self.squares[y][x] = self.curr_color
x = self.curr_sq[1]
y = self.curr_sq[0]
if y >= 0:
self.squares[y][x] = self.curr_color
full_lines = self.clean_full_lines(self)
self.status.score += full_lines # add score
self.new_sq(self)
# delete full lines and insert empty lines at the front
@staticmethod
def clean_full_lines(self):
full_lines = 0
for index, line in enumerate(self.squares):
if line.count('none') == 0:
full_lines += 1
self.st.time_drop *= self.st.time_drop_adjust # adjust time
self.squares.pop(index)
self.squares.insert(0, self.empty_line.copy())
return full_lines
# validate current squares of shapes relative to center with with one drop vertically
@staticmethod
def should_stop(self):
# check shape squares
for sq in self.curr_shape:
x = sq[1] + self.curr_sq[1]
y = sq[0] + self.curr_sq[0] + 1
if y - 1 >= 0 and not self.valid_sq(self, [y, x]):
return True
# check center square
x = self.curr_sq[1]
y = self.curr_sq[0] + 1
return not (self.valid_sq(self, [y, x]))
# validate the given center square and shape squires relative to center square
@staticmethod
def valid(self, square, shape):
# check shape squares
for sq in shape:
x = sq[1] + square[1]
y = sq[0] + square[0]
if y >= 0 and not (self.valid_sq(self, [y, x])):
return False
# check center square
return self.valid_sq(self, square)
@staticmethod
def valid_sq(self, sq):
# check border
if sq[0] >= self.st.square_num_y or \
sq[1] >= self.st.square_num_x or \
sq[1] < 0:
return False
# check crash
return self.squares[sq[0]][sq[1]] == 'none'
@staticmethod
def draw_exist_sq(self):
for y, row in enumerate(self.squares):
for x, square in enumerate(row):
color = self.st.colors[self.squares[y][x]]
self.draw_square(self, y, x, color)
@staticmethod
def draw_tip(self):
# find the lowrest position
curr_sq = self.curr_sq.copy()
while not self.should_stop(self):
self.curr_sq[0] += 1
curr_sq, self.curr_sq = self.curr_sq, curr_sq
# draw their tips
color = self.st.colors['tip']
self.draw_square(self, curr_sq[0], curr_sq[1], color, True)
self.draw_square(self, curr_sq[0], curr_sq[1], self.st.colors['none'])
for y, x in self.curr_shape:
curr_y, curr_x = curr_sq[0], curr_sq[1]
self.draw_square(self, y + curr_y, x + curr_x, color, True)
self.draw_square(self, y + curr_y, x + curr_x, self.st.colors['none'])
@staticmethod
def draw_curr_sq(self):
# draw center
color = self.st.colors[self.curr_color]
self.draw_square(self, self.curr_sq[0], self.curr_sq[1], color)
# draw shapes
curr_y, curr_x = self.curr_sq[0], self.curr_sq[1]
for y, x in self.curr_shape:
self.draw_square(self, y + curr_y, x + curr_x, color)
# draw one single square with given information
@staticmethod
def draw_square(self, y, x, color, border=False):
x_pos = x * (self.st.square_space + self.st.square_length)
y_pos = y * (self.st.square_space + self.st.square_length)
length = self.st.square_length
# adding borders borders
if border:
y_pos -= self.st.square_space
x_pos -= self.st.square_space
length += 2 * self.st.square_space
rect = Rect(x_pos + self.st.square_space, y_pos + self.st.square_space, length, length)
draw.rect(self.screen, color, rect) | true | true |
f71c030abc2d29b0f256a337f0e78e71b90e4000 | 3,839 | py | Python | spirit/topic/views.py | BinaryTree0/fer3 | 85c3bbf2f328e69ad4d7c01b6e2c8d4ef1d9e0a3 | [
"MIT"
] | null | null | null | spirit/topic/views.py | BinaryTree0/fer3 | 85c3bbf2f328e69ad4d7c01b6e2c8d4ef1d9e0a3 | [
"MIT"
] | 5 | 2021-06-08T21:03:58.000Z | 2022-03-12T00:18:43.000Z | spirit/topic/views.py | BinaryTree0/fer3 | 85c3bbf2f328e69ad4d7c01b6e2c8d4ef1d9e0a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponsePermanentRedirect
from djconfig import config
from ..core.utils.views import is_post, post_data
from ..core.utils.paginator import paginate, yt_paginate
from ..core.utils.ratelimit.decorators import ratelimit
from ..category.models import Category
from ..comment.models import MOVED
from ..comment.forms import CommentForm
from ..comment.utils import comment_posted
from ..comment.models import Comment
from .models import Topic
from .forms import TopicForm
from . import utils
@login_required
@ratelimit(rate='1/10s')
def publish(request, category_id=None):
if category_id:
get_object_or_404(
Category.objects.visible(),
pk=category_id)
user = request.user
form = TopicForm(
user=user,
data=post_data(request),
initial={'category': category_id})
cform = CommentForm(
user=user,
data=post_data(request))
if (is_post(request) and
all([form.is_valid(), cform.is_valid()]) and
not request.is_limited()):
if not user.st.update_post_hash(form.get_topic_hash()):
return redirect(
request.POST.get('next', None) or
form.get_category().get_absolute_url())
# wrap in transaction.atomic?
topic = form.save()
cform.topic = topic
comment = cform.save()
comment_posted(comment=comment, mentions=cform.mentions)
return redirect(topic.get_absolute_url())
return render(
request=request,
template_name='spirit/topic/publish.html',
context={'form': form, 'cform': cform})
@login_required
def update(request, pk):
topic = Topic.objects.for_update_or_404(pk, request.user)
category_id = topic.category_id
form = TopicForm(
user=request.user,
data=post_data(request),
instance=topic)
if is_post(request) and form.is_valid():
topic = form.save()
if topic.category_id != category_id:
Comment.create_moderation_action(
user=request.user, topic=topic, action=MOVED)
return redirect(request.POST.get('next', topic.get_absolute_url()))
return render(
request=request,
template_name='spirit/topic/update.html',
context={'form': form})
def detail(request, pk, slug):
topic = Topic.objects.get_public_or_404(pk, request.user)
if topic.slug != slug:
return HttpResponsePermanentRedirect(topic.get_absolute_url())
utils.topic_viewed(request=request, topic=topic)
comments = (
Comment.objects
.for_topic(topic=topic)
.with_likes(user=request.user)
.with_polls(user=request.user)
.order_by('date'))
comments = paginate(
comments,
per_page=config.comments_per_page,
page_number=request.GET.get('page', 1))
return render(
request=request,
template_name='spirit/topic/detail.html',
context={
'topic': topic,
'comments': comments})
def index_active(request):
categories = (
Category.objects
.visible()
.parents())
topics = (
Topic.objects
.visible()
.global_()
.with_bookmarks(user=request.user)
.order_by('-is_globally_pinned', '-last_active')
.select_related('category'))
topics = yt_paginate(
topics,
per_page=config.topics_per_page,
page_number=request.GET.get('page', 1))
return render(
request=request,
template_name='spirit/topic/active.html',
context={
'categories': categories,
'topics': topics})
| 29.75969 | 75 | 0.64496 |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponsePermanentRedirect
from djconfig import config
from ..core.utils.views import is_post, post_data
from ..core.utils.paginator import paginate, yt_paginate
from ..core.utils.ratelimit.decorators import ratelimit
from ..category.models import Category
from ..comment.models import MOVED
from ..comment.forms import CommentForm
from ..comment.utils import comment_posted
from ..comment.models import Comment
from .models import Topic
from .forms import TopicForm
from . import utils
@login_required
@ratelimit(rate='1/10s')
def publish(request, category_id=None):
if category_id:
get_object_or_404(
Category.objects.visible(),
pk=category_id)
user = request.user
form = TopicForm(
user=user,
data=post_data(request),
initial={'category': category_id})
cform = CommentForm(
user=user,
data=post_data(request))
if (is_post(request) and
all([form.is_valid(), cform.is_valid()]) and
not request.is_limited()):
if not user.st.update_post_hash(form.get_topic_hash()):
return redirect(
request.POST.get('next', None) or
form.get_category().get_absolute_url())
topic = form.save()
cform.topic = topic
comment = cform.save()
comment_posted(comment=comment, mentions=cform.mentions)
return redirect(topic.get_absolute_url())
return render(
request=request,
template_name='spirit/topic/publish.html',
context={'form': form, 'cform': cform})
@login_required
def update(request, pk):
topic = Topic.objects.for_update_or_404(pk, request.user)
category_id = topic.category_id
form = TopicForm(
user=request.user,
data=post_data(request),
instance=topic)
if is_post(request) and form.is_valid():
topic = form.save()
if topic.category_id != category_id:
Comment.create_moderation_action(
user=request.user, topic=topic, action=MOVED)
return redirect(request.POST.get('next', topic.get_absolute_url()))
return render(
request=request,
template_name='spirit/topic/update.html',
context={'form': form})
def detail(request, pk, slug):
topic = Topic.objects.get_public_or_404(pk, request.user)
if topic.slug != slug:
return HttpResponsePermanentRedirect(topic.get_absolute_url())
utils.topic_viewed(request=request, topic=topic)
comments = (
Comment.objects
.for_topic(topic=topic)
.with_likes(user=request.user)
.with_polls(user=request.user)
.order_by('date'))
comments = paginate(
comments,
per_page=config.comments_per_page,
page_number=request.GET.get('page', 1))
return render(
request=request,
template_name='spirit/topic/detail.html',
context={
'topic': topic,
'comments': comments})
def index_active(request):
categories = (
Category.objects
.visible()
.parents())
topics = (
Topic.objects
.visible()
.global_()
.with_bookmarks(user=request.user)
.order_by('-is_globally_pinned', '-last_active')
.select_related('category'))
topics = yt_paginate(
topics,
per_page=config.topics_per_page,
page_number=request.GET.get('page', 1))
return render(
request=request,
template_name='spirit/topic/active.html',
context={
'categories': categories,
'topics': topics})
| true | true |
f71c03b58cbe884de8ef7d23450e58f05962f59e | 1,242 | py | Python | src/alocacao/camada_servicos/unit_of_work.py | ralphribeiro/APWP-T2 | 1ed5552a32ae9320eadbbd0489c2082a6f8750a8 | [
"MIT"
] | null | null | null | src/alocacao/camada_servicos/unit_of_work.py | ralphribeiro/APWP-T2 | 1ed5552a32ae9320eadbbd0489c2082a6f8750a8 | [
"MIT"
] | null | null | null | src/alocacao/camada_servicos/unit_of_work.py | ralphribeiro/APWP-T2 | 1ed5552a32ae9320eadbbd0489c2082a6f8750a8 | [
"MIT"
] | null | null | null | from __future__ import annotations
import abc
from alocacao.adapters import repository
from alocacao.config import DEFAULT_SESSION_FACTORY
class AbstractUOW(abc.ABC):
produtos: repository.AbstractRepository
def __enter__(self) -> AbstractUOW:
return self
def __exit__(self, *args):
self.rollback()
def commit(self):
self._commit()
def collect_new_messages(self):
for produto in self.produtos.seen:
while produto.eventos:
yield produto.eventos.pop(0)
@abc.abstractmethod
def _commit(self):
pass
@abc.abstractmethod
def rollback(self):
pass
class SQLAlchemyUOW(AbstractUOW):
def __init__(self, session_factory=DEFAULT_SESSION_FACTORY):
self.session_factory = session_factory
def __enter__(self):
self.session = self.session_factory()
self.produtos = repository.TrackingRepository(
repository.SQLAlchemyRepository(self.session)
)
return super().__enter__()
def __exit__(self, *args):
super().__exit__(*args)
self.session.close()
def _commit(self):
self.session.commit()
def rollback(self):
self.session.rollback()
| 23 | 64 | 0.661031 | from __future__ import annotations
import abc
from alocacao.adapters import repository
from alocacao.config import DEFAULT_SESSION_FACTORY
class AbstractUOW(abc.ABC):
produtos: repository.AbstractRepository
def __enter__(self) -> AbstractUOW:
return self
def __exit__(self, *args):
self.rollback()
def commit(self):
self._commit()
def collect_new_messages(self):
for produto in self.produtos.seen:
while produto.eventos:
yield produto.eventos.pop(0)
@abc.abstractmethod
def _commit(self):
pass
@abc.abstractmethod
def rollback(self):
pass
class SQLAlchemyUOW(AbstractUOW):
def __init__(self, session_factory=DEFAULT_SESSION_FACTORY):
self.session_factory = session_factory
def __enter__(self):
self.session = self.session_factory()
self.produtos = repository.TrackingRepository(
repository.SQLAlchemyRepository(self.session)
)
return super().__enter__()
def __exit__(self, *args):
super().__exit__(*args)
self.session.close()
def _commit(self):
self.session.commit()
def rollback(self):
self.session.rollback()
| true | true |
f71c045146eab77490951be322d1c2d7585e636d | 1,143 | py | Python | ISMLnextGen/ipTest.py | Ravenclaw-OIer/ISML_auto_voter | 9c53bd87530697d374163f571186542c3fc9734b | [
"MIT"
] | 128 | 2020-11-16T09:28:17.000Z | 2022-03-14T10:38:52.000Z | ISMLnextGen/ipTest.py | Ravenclaw-OIer/ISML_auto_voter | 9c53bd87530697d374163f571186542c3fc9734b | [
"MIT"
] | 7 | 2020-11-27T14:45:19.000Z | 2022-02-15T09:47:12.000Z | ISMLnextGen/ipTest.py | Ravenclaw-OIer/ISML_auto_voter | 9c53bd87530697d374163f571186542c3fc9734b | [
"MIT"
] | 11 | 2020-12-11T12:24:38.000Z | 2022-02-20T12:42:31.000Z | #coding:utf-8
#访问这个服务器会获得一些'ip:端口'字符串。仅用于测试
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def get(self, *args, **kwargs):
self.write('''
<!DOCTYPE html><html>
<head><meta charset="utf-8" />
<title>html<br>标签换行符详细介绍</title></head>
<body bgcolor="burlywood">
<p>我是一个段落。
<br>我是一个段落。<br/>
我是一个段落。</p>
<p>
<br>192.168.1.1:99999\n<br/>
<br>192.168.1.1:91241\n<br/>
<br>192.168.1.1:91343\n<br/>
<br>192.168.1.1:94223\n<br/>
<br>192.168.1.1:97546\n<br/>
<br>192.168.1.1:92342\n<br/>
</p>
</body></html>
''')
app=tornado.web.Application([
(r'/',MainHandler),
])
if __name__ == '__main__':
print('访问这个服务器:55556会获得一些“ip:端口”字符串。仅用于测试')
app.listen(55556)
tornado.ioloop.IOLoop.instance().start()
| 30.891892 | 59 | 0.451444 |
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def get(self, *args, **kwargs):
self.write('''
<!DOCTYPE html><html>
<head><meta charset="utf-8" />
<title>html<br>标签换行符详细介绍</title></head>
<body bgcolor="burlywood">
<p>我是一个段落。
<br>我是一个段落。<br/>
我是一个段落。</p>
<p>
<br>192.168.1.1:99999\n<br/>
<br>192.168.1.1:91241\n<br/>
<br>192.168.1.1:91343\n<br/>
<br>192.168.1.1:94223\n<br/>
<br>192.168.1.1:97546\n<br/>
<br>192.168.1.1:92342\n<br/>
</p>
</body></html>
''')
app=tornado.web.Application([
(r'/',MainHandler),
])
if __name__ == '__main__':
print('访问这个服务器:55556会获得一些“ip:端口”字符串。仅用于测试')
app.listen(55556)
tornado.ioloop.IOLoop.instance().start()
| true | true |
f71c05e483d33dc4be29042dc38a37ccadce4386 | 6,756 | py | Python | Sensors/softskin.py | zhaocy14/SmartWalker | b025a7b4a2b305838a22fe4e6116ddb951c4d7bf | [
"MIT"
] | 2 | 2021-11-13T14:16:06.000Z | 2022-01-12T06:07:32.000Z | Sensors/softskin.py | zhaocy14/SmartWalker | b025a7b4a2b305838a22fe4e6116ddb951c4d7bf | [
"MIT"
] | null | null | null | Sensors/softskin.py | zhaocy14/SmartWalker | b025a7b4a2b305838a22fe4e6116ddb951c4d7bf | [
"MIT"
] | 3 | 2021-08-30T04:40:39.000Z | 2022-01-09T11:34:04.000Z | import serial
import serial.tools.list_ports
import numpy as np
import math
import threading
import re
import os
import sys
import time
import matplotlib.pyplot as plt
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
data_path = os.path.abspath(
os.path.dirname(os.path.abspath(__file__)) + os.path.sep + ".." +
os.path.sep + "data")
def print_serial(port):
print("---------------[ %s ]---------------" % port.name)
print("Path: %s" % port.device)
print("Descript: %s" % port.description)
print("HWID: %s" % port.hwid)
if not None == port.manufacturer:
print("Manufacture: %s" % port.manufacturer)
if not None == port.product:
print("Product: %s" % port.product)
if not None == port.interface:
print("Interface: %s" % port.interface)
print()
def detect_serials(location="1-1.1:1.0", vid=0x10c4, pid=0xea60):
ports = serial.tools.list_ports.comports()
for port in ports:
print_serial(port)
if port.location.__contains__(location):
port_path = port.device
return port_path
else:
print("Cannot find the target device: %s" % location)
return None
class SoftSkin(object):
def __init__(self, is_STM32: bool = True):
port_name = detect_serials("1-1.3:1.0") # Arduino Mega 2560 ttyACM0
baud_rate = 115200
print(port_name, baud_rate)
self.serial = serial.Serial(port_name, baud_rate, timeout=None)
self.pwd = os.path.abspath(os.path.abspath(__file__))
self.father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
self.serial = serial.Serial(port_name, baud_rate, timeout=None)
self.raw_data = [] # 保存一帧数据
self.base_data = [] # 建立一组基准值用于初始化
self.temp_data = []
self.port_num = 32
self.average_length = 10
self.average_buffer = np.zeros((self.average_length, self.port_num))
# detect abnormal signal
self.max_pressure = 0
self.safe_change_rate = 10
self.emergency_change_rate = 50
self.detect_length = 10
self.detect_buffer = np.zeros((self.detect_length, self.port_num))
self.skin_unlock_event = threading.Event()
self.skin_unlock_event.clear()
self.build_base_line_data()
pass
def read_data(self, is_shown=1):
try:
one_line_data = self.serial.readline().decode("utf-8")
# print(one_line_data)
one_line_data = one_line_data.strip('SS')
one_line_data = one_line_data.strip('\n')
one_line_data = one_line_data.strip('\r')
one_line_data = one_line_data.split('|')
# print(one_line_data)
if is_shown == 1:
print(one_line_data)
if len(one_line_data) == self.port_num:
one_line_data = list(map(float, one_line_data))
one_line_data = list(map(int, one_line_data))
self.raw_data = one_line_data
# print(self.raw_data, type(self.raw_data), type(self.raw_data[0]))
except BaseException as be:
print("Data Error:", be)
def build_base_line_data(self, initial_size=10):
"""
expired, no use
1.建立一组基准数值
检测异常值
取平均值
:return:
not in use because the original signals are stable enough
"""
base_list = []
for i in range(initial_size):
self.read_data(0)
if len(self.raw_data) == self.port_num:
temp_raw_data = self.raw_data
base_list += temp_raw_data
mean_base_list = np.array(base_list).reshape([-1, self.port_num])
add_col = np.ones(mean_base_list.shape[0]).reshape([1, -1])
mean_base_list = add_col.dot(mean_base_list) / mean_base_list.shape[0]
self.base_data = mean_base_list.tolist()[0]
self.base_data = list(map(lambda x: int(x) - 1, self.base_data))
print("base line data: ", self.base_data)
pass
def read_and_record(self, record=False, show=False, plot=False, plot_num=30):
file_path = data_path + os.path.sep + "Softskin.txt"
plot_array = np.zeros((plot_num, self.port_num))
if record:
file = open(file_path, 'w')
while True:
try:
# self.serial.flushInput()
self.read_data(0)
if len(self.raw_data) == len(self.base_data):
temp_data = np.array(self.raw_data) - np.array(self.base_data)
if show:
print(temp_data)
print(self.max_pressure)
if record:
time_index = time.time()
write_data = temp_data.tolist()
write_data.insert(0, time_index)
file.write(str(write_data) + '\n')
file.flush()
self.temp_data = temp_data
self.max_pressure = self.temp_data.max()
self.detect_buffer[0:-1, :] = self.detect_buffer[1:self.detect_length, :]
self.detect_buffer[-1, :] = np.array(self.temp_data)
if plot:
# plt.ion()
plot_array[0:plot_num - 1, :] = plot_array[1:plot_num, :]
plot_array[plot_num - 1, :] = np.array(temp_data)
plt.clf()
plt.xlabel('Time')
plt.ylabel('pressure')
plt.ylim((-10, 270))
plt.plot(range(0, plot_num), plot_array)
# plt.ioff()
# plt.show()
# plt.draw()
plt.pause(0.0000000001)
except BaseException as be:
print("Data Error:", be)
def update_from_STM32(self, STM32_data: np.ndarray):
try:
self.raw_data = STM32_data
except:
pass
def unlock(self):
while True:
change_rate = self.detect_buffer[-1, :] - self.detect_buffer[0, :]
change_rate = change_rate.max()
if self.safe_change_rate <= change_rate < self.emergency_change_rate:
print("unlock!")
break
time.sleep(0.1)
if __name__ == '__main__':
skin = SoftSkin()
# skin.build_base_line_data()
thread_reading = threading.Thread(target=skin.read_and_record, args=())
time.sleep(1)
thread_reading.start()
skin.unlock()
| 36.518919 | 93 | 0.558467 | import serial
import serial.tools.list_ports
import numpy as np
import math
import threading
import re
import os
import sys
import time
import matplotlib.pyplot as plt
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
data_path = os.path.abspath(
os.path.dirname(os.path.abspath(__file__)) + os.path.sep + ".." +
os.path.sep + "data")
def print_serial(port):
print("---------------[ %s ]---------------" % port.name)
print("Path: %s" % port.device)
print("Descript: %s" % port.description)
print("HWID: %s" % port.hwid)
if not None == port.manufacturer:
print("Manufacture: %s" % port.manufacturer)
if not None == port.product:
print("Product: %s" % port.product)
if not None == port.interface:
print("Interface: %s" % port.interface)
print()
def detect_serials(location="1-1.1:1.0", vid=0x10c4, pid=0xea60):
ports = serial.tools.list_ports.comports()
for port in ports:
print_serial(port)
if port.location.__contains__(location):
port_path = port.device
return port_path
else:
print("Cannot find the target device: %s" % location)
return None
class SoftSkin(object):
def __init__(self, is_STM32: bool = True):
port_name = detect_serials("1-1.3:1.0")
baud_rate = 115200
print(port_name, baud_rate)
self.serial = serial.Serial(port_name, baud_rate, timeout=None)
self.pwd = os.path.abspath(os.path.abspath(__file__))
self.father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
self.serial = serial.Serial(port_name, baud_rate, timeout=None)
self.raw_data = []
self.base_data = []
self.temp_data = []
self.port_num = 32
self.average_length = 10
self.average_buffer = np.zeros((self.average_length, self.port_num))
self.max_pressure = 0
self.safe_change_rate = 10
self.emergency_change_rate = 50
self.detect_length = 10
self.detect_buffer = np.zeros((self.detect_length, self.port_num))
self.skin_unlock_event = threading.Event()
self.skin_unlock_event.clear()
self.build_base_line_data()
pass
def read_data(self, is_shown=1):
try:
one_line_data = self.serial.readline().decode("utf-8")
one_line_data = one_line_data.strip('SS')
one_line_data = one_line_data.strip('\n')
one_line_data = one_line_data.strip('\r')
one_line_data = one_line_data.split('|')
if is_shown == 1:
print(one_line_data)
if len(one_line_data) == self.port_num:
one_line_data = list(map(float, one_line_data))
one_line_data = list(map(int, one_line_data))
self.raw_data = one_line_data
except BaseException as be:
print("Data Error:", be)
def build_base_line_data(self, initial_size=10):
base_list = []
for i in range(initial_size):
self.read_data(0)
if len(self.raw_data) == self.port_num:
temp_raw_data = self.raw_data
base_list += temp_raw_data
mean_base_list = np.array(base_list).reshape([-1, self.port_num])
add_col = np.ones(mean_base_list.shape[0]).reshape([1, -1])
mean_base_list = add_col.dot(mean_base_list) / mean_base_list.shape[0]
self.base_data = mean_base_list.tolist()[0]
self.base_data = list(map(lambda x: int(x) - 1, self.base_data))
print("base line data: ", self.base_data)
pass
def read_and_record(self, record=False, show=False, plot=False, plot_num=30):
file_path = data_path + os.path.sep + "Softskin.txt"
plot_array = np.zeros((plot_num, self.port_num))
if record:
file = open(file_path, 'w')
while True:
try:
self.read_data(0)
if len(self.raw_data) == len(self.base_data):
temp_data = np.array(self.raw_data) - np.array(self.base_data)
if show:
print(temp_data)
print(self.max_pressure)
if record:
time_index = time.time()
write_data = temp_data.tolist()
write_data.insert(0, time_index)
file.write(str(write_data) + '\n')
file.flush()
self.temp_data = temp_data
self.max_pressure = self.temp_data.max()
self.detect_buffer[0:-1, :] = self.detect_buffer[1:self.detect_length, :]
self.detect_buffer[-1, :] = np.array(self.temp_data)
if plot:
plot_array[0:plot_num - 1, :] = plot_array[1:plot_num, :]
plot_array[plot_num - 1, :] = np.array(temp_data)
plt.clf()
plt.xlabel('Time')
plt.ylabel('pressure')
plt.ylim((-10, 270))
plt.plot(range(0, plot_num), plot_array)
plt.pause(0.0000000001)
except BaseException as be:
print("Data Error:", be)
def update_from_STM32(self, STM32_data: np.ndarray):
try:
self.raw_data = STM32_data
except:
pass
    def unlock(self):
        """Block until a moderate press is detected on any sensor port.

        Polls the rolling ``detect_buffer`` at 10 Hz; a change between the
        oldest and newest buffered frame inside
        [``safe_change_rate``, ``emergency_change_rate``) counts as a
        deliberate press and releases the lock.
        """
        while True:
            change_rate = self.detect_buffer[-1, :] - self.detect_buffer[0, :]
            change_rate = change_rate.max()
            if self.safe_change_rate <= change_rate < self.emergency_change_rate:
                print("unlock!")
                break
            time.sleep(0.1)
if __name__ == '__main__':
    # Demo entry point: stream sensor data on a background thread, then
    # block the main thread until a press unlocks.
    skin = SoftSkin()
    thread_reading = threading.Thread(target=skin.read_and_record, args=())
    time.sleep(1)
    thread_reading.start()
    skin.unlock()
| true | true |
f71c0620c464a57b3ebe69ac3d9aba39ae33da92 | 1,518 | py | Python | olass/models/base_query.py | ufbmi/olass-client | 2fc949d4d59959e7e3ba5ec737b20d8db856b54b | [
"MIT"
] | null | null | null | olass/models/base_query.py | ufbmi/olass-client | 2fc949d4d59959e7e3ba5ec737b20d8db856b54b | [
"MIT"
] | 1 | 2016-08-12T20:52:09.000Z | 2016-08-12T20:59:35.000Z | olass/models/base_query.py | ufbmi/olass-client | 2fc949d4d59959e7e3ba5ec737b20d8db856b54b | [
"MIT"
] | 1 | 2016-06-28T16:43:09.000Z | 2016-06-28T16:43:09.000Z | """
Goal: Provides paginate() function
"""
from sqlalchemy import orm
from olass.models.pagination import Pagination
class BaseQuery(orm.Query):
    """
    Query subclass adding Flask-SQLAlchemy-style pagination.

    @see: flask-sqlalchemy/flask_sqlalchemy/__init__.py
    """
    def paginate(self, page=None, per_page=None, error_out=True):
        """Return ``per_page`` items from page ``page``.

        If no items are found and ``page`` is greater than 1, or if ``page``
        is less than 1, an ``Exception`` is raised.  This behavior can be
        disabled by passing ``error_out=False``.

        If ``page`` or ``per_page`` are ``None``, they default to 1 and 20
        respectively.

        Returns a :class:`Pagination` object.
        """
        if page is None:
            page = 1
        if per_page is None:
            per_page = 20
        if error_out and page < 1:
            raise Exception("Pagination error: page < 1")
        items = self.limit(per_page).offset(
            (page - 1) * per_page).all()
        if not items and page != 1 and error_out:
            raise Exception("Pagination error: no items and page != 1")
        # No need to count if we're on the first page and there are fewer
        # items than we expected.
        if page == 1 and len(items) < per_page:
            total = len(items)
        else:
            total = self.order_by(None).count()
        return Pagination(self, page, per_page, total, items)
| 30.36 | 79 | 0.597497 | from sqlalchemy import orm
from olass.models.pagination import Pagination
class BaseQuery(orm.Query):
def paginate(self, page=None, per_page=None, error_out=True):
if page is None:
page = 1
if per_page is None:
per_page = 20
if error_out and page < 1:
raise Exception("Pagination error: page < 1")
items = self.limit(per_page).offset(
(page - 1) * per_page).all()
if not items and page != 1 and error_out:
raise Exception("Pagination error: no items and page != 1")
# items than we expected.
if page == 1 and len(items) < per_page:
total = len(items)
else:
total = self.order_by(None).count()
return Pagination(self, page, per_page, total, items)
| true | true |
f71c06f54e6701786b0f29d7880b93fa9b637655 | 649 | py | Python | gui.py | eduardokimmel/ofx_to_xlsx | f36dc430ca2424055feba8f04c7f48cd4741d82c | [
"MIT"
] | null | null | null | gui.py | eduardokimmel/ofx_to_xlsx | f36dc430ca2424055feba8f04c7f48cd4741d82c | [
"MIT"
] | null | null | null | gui.py | eduardokimmel/ofx_to_xlsx | f36dc430ca2424055feba8f04c7f48cd4741d82c | [
"MIT"
] | null | null | null | from tkinter.filedialog import askopenfilename
from tkinter import *
import cli
import gettext
window = Tk()
window.title("ofx_to_xlsx")
def close_window():
window.destroy()
def callback():
ofx = askopenfilename()
cli.run(ofx)
gettext.install('ofx_to_xlsx')
t = gettext.translation('gui_i18n', 'locale', fallback=True)
_ = t.gettext
frame = Frame(window)
frame.pack()
w1 = Label (frame,text = _("Select a OFX file to convert it to Excel"))
w1.pack()
arq = Button (frame, text = _("Select File"), command = callback)
arq.pack()
sair = Button (frame, text = _("Quit"), command = close_window)
sair.pack()
window.mainloop()
exit()
| 19.088235 | 71 | 0.702619 | from tkinter.filedialog import askopenfilename
from tkinter import *
import cli
import gettext
window = Tk()
window.title("ofx_to_xlsx")
def close_window():
window.destroy()
def callback():
ofx = askopenfilename()
cli.run(ofx)
gettext.install('ofx_to_xlsx')
t = gettext.translation('gui_i18n', 'locale', fallback=True)
_ = t.gettext
frame = Frame(window)
frame.pack()
w1 = Label (frame,text = _("Select a OFX file to convert it to Excel"))
w1.pack()
arq = Button (frame, text = _("Select File"), command = callback)
arq.pack()
sair = Button (frame, text = _("Quit"), command = close_window)
sair.pack()
window.mainloop()
exit()
| true | true |
f71c071affad74a0e7aea1a05a898c897c918ab8 | 691 | py | Python | emtract/model_inference.py | dvamossy/EmTract | 68a00e3d63fbc2c401b0d2b297bf96ffb75940e8 | [
"MIT"
] | 16 | 2021-12-02T18:59:56.000Z | 2022-03-31T11:42:12.000Z | emtract/model_inference.py | dvamossy/EmTract | 68a00e3d63fbc2c401b0d2b297bf96ffb75940e8 | [
"MIT"
] | null | null | null | emtract/model_inference.py | dvamossy/EmTract | 68a00e3d63fbc2c401b0d2b297bf96ffb75940e8 | [
"MIT"
] | 1 | 2021-12-09T06:05:22.000Z | 2021-12-09T06:05:22.000Z | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from emtract.model import Model, ModelType
import pandas as pd
class ModelInference:
    """Thin wrapper around :class:`Model` for single-text and CSV inference."""

    MODEL_BASE_PATH = 'build/models/'
    DATA_BASE_PATH = './emtract/data/'

    def __init__(self, model_type):
        # 'twitter' selects the Twitter-trained weights; any other value
        # falls back to the StockTwits model.
        chosen = ModelType.TWITTER if model_type == 'twitter' else ModelType.STOCK_TWITS
        self.model = Model(chosen)

    def inference(self, text):
        """Run the model on a single piece of text."""
        return self.model.predict([text])

    def file_inference(self, file_name, output):
        """Run the model on column 0 of a header-less CSV; write results to ``output``."""
        frame = pd.read_csv(file_name, header=None)
        self.model.predict(frame.iloc[:, 0].values).to_csv(output, index=False)
| 27.64 | 62 | 0.662808 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from emtract.model import Model, ModelType
import pandas as pd
class ModelInference:
MODEL_BASE_PATH = 'build/models/'
DATA_BASE_PATH = './emtract/data/'
def __init__(self, model_type):
if model_type == 'twitter':
self.model = Model(ModelType.TWITTER)
else:
self.model = Model(ModelType.STOCK_TWITS)
def inference(self, text):
return self.model.predict([text])
def file_inference(self, file_name, output):
df = pd.read_csv(file_name, header=None)
predictions = self.model.predict(df.iloc[:, 0].values)
predictions.to_csv(output, index=False)
| true | true |
f71c07a140ce757dc105e33553450a64b401d600 | 147 | py | Python | students/k33422/Alexandrin_Anton/Lr2/homework/main/admin.py | aytakr/ITMO_ICT_WebDevelopment_2021-2022 | 57c0eef5e1f413c7f031ee001d59e5122f990f26 | [
"MIT"
] | 7 | 2021-09-02T08:20:58.000Z | 2022-01-12T11:48:07.000Z | students/k33422/Alexandrin_Anton/Lr2/homework/main/admin.py | aytakr/ITMO_ICT_WebDevelopment_2021-2022 | 57c0eef5e1f413c7f031ee001d59e5122f990f26 | [
"MIT"
] | 76 | 2021-09-17T23:01:50.000Z | 2022-03-18T16:42:03.000Z | students/k33422/Alexandrin_Anton/Lr2/homework/main/admin.py | aytakr/ITMO_ICT_WebDevelopment_2021-2022 | 57c0eef5e1f413c7f031ee001d59e5122f990f26 | [
"MIT"
] | 60 | 2021-09-04T16:47:39.000Z | 2022-03-21T04:41:27.000Z | from django.contrib import admin
from .models import *
admin.site.register(Student)
admin.site.register(Homework)
admin.site.register(Assignment)
| 21 | 32 | 0.816327 | from django.contrib import admin
from .models import *
admin.site.register(Student)
admin.site.register(Homework)
admin.site.register(Assignment)
| true | true |
f71c0829da8f53bf5fb8a4964502e266c6e624a6 | 1,918 | py | Python | tests/test_rabbitmq_consumer_command.py | LaEmma/sparrow_cloud | fb9f76ea70b3ba5782c33f3b3379e2ffe4bab08c | [
"MIT"
] | null | null | null | tests/test_rabbitmq_consumer_command.py | LaEmma/sparrow_cloud | fb9f76ea70b3ba5782c33f3b3379e2ffe4bab08c | [
"MIT"
] | null | null | null | tests/test_rabbitmq_consumer_command.py | LaEmma/sparrow_cloud | fb9f76ea70b3ba5782c33f3b3379e2ffe4bab08c | [
"MIT"
] | null | null | null | import os
import unittest
from django.conf.urls import url
from django.http import HttpResponse
def task(*args, **kwargs):
    """Dummy consumer target: print a visible separator; arguments are ignored."""
    print("*" * 10)
def detail(request, question_id):
    """Trivial Django view used as a target for the test URL patterns."""
    body = "You're looking at question %s." % question_id
    return HttpResponse(body)
# Minimal URLConf so this module can act as ROOT_URLCONF for the tests below.
urlpatterns = [
    url(r'^/ssss/xxx/$', detail),
    url(r'^/ssuuu/xxddx/$', detail),
]
class RestClientTestCase(unittest.TestCase):
    """Scaffolding for the rabbitmq_consumer management-command test.

    The actual command invocation is commented out below; what remains is
    the environment bootstrap (``setUp``) and the settings helper it uses.
    """
    def setUp(self):
        # Point sparrow-cloud service discovery at local test endpoints.
        os.environ["SPARROW_BROKER_HOST"] = "127.0.0.1:8001"
        os.environ["SPARROW_BACKEND_HOST"] = "127.0.0.1:8002"
        os.environ["DJANGO_SETTINGS_MODULE"] = "tests.mock_settings"
    # @mock.patch('rabbitmq_consumer.RabbitMQConsumer.target_func_map', return_value='')
    # @mock.patch('rabbitmq_consumer.RabbitMQConsumer.consume', return_value='task received successfully')
    # def test_consumer_command(self, mock_target_func_map, mock_consume):
    #     from django.conf import settings
    #     self.setup_settings(settings)
    #     django.setup()
    #     out = StringIO()
    #     call_command('rabbitmq_consumer', '--queue', 'QUEUE_CONF', stdout=out)
    #     self.assertEqual(out.read(), '')
    def setup_settings(self, settings):
        """Populate a Django settings module with consumer test config."""
        settings.XX = "1"
        settings.SECRET_KEY = "ss"
        settings.SPARROW_RABBITMQ_CONSUMER_CONF = {
            "MESSAGE_BROKER_CONF": {
                "USER_NAME": "test_name",
                "PASSWORD": "test_password",
                "VIRTUAL_HOST": "test_virtual",
                "BROKER_SERVICE_CONF": "sparrow-test:8001",
            },
            "MESSAGE_BACKEND_CONF": {
                "BACKEND_SERVICE_CONF": "sparrow-test:8001",
                "API_PATH": "/api/sparrow_test/task/test_update/"
            }
        }
        settings.QUEUE_CONF={
            "QUEUE": "TEST_QUEUE",
            "TARGET_FUNC_MAP": {
                "ORDER_PAY_SUC_ONLINE": "./task",
            }
        }
        # Use this very module (its urlpatterns above) as the URLConf.
        settings.ROOT_URLCONF = __name__
| 31.442623 | 88 | 0.606361 | import os
import unittest
from django.conf.urls import url
from django.http import HttpResponse
def task(*args, **kwargs):
print('*'*10)
def detail(request, question_id):
return HttpResponse("You're looking at question %s." % question_id)
urlpatterns = [
url(r'^/ssss/xxx/$', detail),
url(r'^/ssuuu/xxddx/$', detail),
]
class RestClientTestCase(unittest.TestCase):
def setUp(self):
os.environ["SPARROW_BROKER_HOST"] = "127.0.0.1:8001"
os.environ["SPARROW_BACKEND_HOST"] = "127.0.0.1:8002"
os.environ["DJANGO_SETTINGS_MODULE"] = "tests.mock_settings"
# @mock.patch('rabbitmq_consumer.RabbitMQConsumer.target_func_map', return_value='')
# @mock.patch('rabbitmq_consumer.RabbitMQConsumer.consume', return_value='接收任务成功')
# def test_consumer_command(self, mock_target_func_map, mock_consume):
# from django.conf import settings
# self.setup_settings(settings)
# django.setup()
# out = StringIO()
# call_command('rabbitmq_consumer', '--queue', 'QUEUE_CONF', stdout=out)
# self.assertEqual(out.read(), '')
def setup_settings(self, settings):
settings.XX = "1"
settings.SECRET_KEY = "ss"
settings.SPARROW_RABBITMQ_CONSUMER_CONF = {
"MESSAGE_BROKER_CONF": {
"USER_NAME": "test_name",
"PASSWORD": "test_password",
"VIRTUAL_HOST": "test_virtual",
"BROKER_SERVICE_CONF": "sparrow-test:8001",
},
"MESSAGE_BACKEND_CONF": {
"BACKEND_SERVICE_CONF": "sparrow-test:8001",
"API_PATH": "/api/sparrow_test/task/test_update/"
}
}
settings.QUEUE_CONF={
"QUEUE": "TEST_QUEUE",
"TARGET_FUNC_MAP": {
"ORDER_PAY_SUC_ONLINE": "./task",
}
}
settings.ROOT_URLCONF = __name__
| true | true |
f71c08e2095d6c92591d4e24b87dfa59366adf76 | 1,108 | py | Python | manage.py | cjmabry/PoliChart | 787d987669de4891b1b1ac5f8ebc0ecd38ac2785 | [
"BSD-3-Clause"
] | null | null | null | manage.py | cjmabry/PoliChart | 787d987669de4891b1b1ac5f8ebc0ecd38ac2785 | [
"BSD-3-Clause"
] | null | null | null | manage.py | cjmabry/PoliChart | 787d987669de4891b1b1ac5f8ebc0ecd38ac2785 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import os
from flask.ext.script import Manager
from polichart import create_app, polling
from polichart.extensions import db
from polichart.utils import MALE
from flask import url_for
app = create_app()
manager = Manager(app)
@app.context_processor
def override_url_for():
    # Make Jinja templates resolve url_for to the cache-busting wrapper below.
    return dict(url_for=dated_url_for)
def dated_url_for(endpoint, **values):
    """``url_for`` wrapper that appends ``?q=<mtime>`` to static assets so
    browser caches are invalidated whenever the file changes on disk."""
    if endpoint == 'static':
        filename = values.get('filename', None)
        if filename:
            file_path = os.path.join(app.root_path,
                                 endpoint, filename)
            values['q'] = int(os.stat(file_path).st_mtime)
    return url_for(endpoint, **values)
@manager.command
def run():
    """Run in local machine."""
    app.run()
@manager.command
def initdb():
    """Init/reset database."""
    # Destructive: drops every table before recreating and seeding them.
    db.drop_all()
    db.create_all()
    db.session.commit()
    polling.populate_db()
manager.add_option('-c', '--config',
dest="config",
required=False,
help="config file")
if __name__ == "__main__":
manager.run()
| 21.307692 | 58 | 0.617329 |
import os
from flask.ext.script import Manager
from polichart import create_app, polling
from polichart.extensions import db
from polichart.utils import MALE
from flask import url_for
app = create_app()
manager = Manager(app)
@app.context_processor
def override_url_for():
return dict(url_for=dated_url_for)
def dated_url_for(endpoint, **values):
if endpoint == 'static':
filename = values.get('filename', None)
if filename:
file_path = os.path.join(app.root_path,
endpoint, filename)
values['q'] = int(os.stat(file_path).st_mtime)
return url_for(endpoint, **values)
@manager.command
def run():
app.run()
@manager.command
def initdb():
db.drop_all()
db.create_all()
db.session.commit()
polling.populate_db()
manager.add_option('-c', '--config',
dest="config",
required=False,
help="config file")
if __name__ == "__main__":
manager.run()
| true | true |
f71c093c62e3d49d16d3c9cbf3b0a2a8b7fd68d6 | 2,128 | py | Python | integrationtest/vm/installation/upgrade/test_zs_upgd_1.3_latest_on_cos7.py | bgerxx/woodpecker | fdc51245945cc9be4d1f028988079213eb99b2ad | [
"Apache-2.0"
] | null | null | null | integrationtest/vm/installation/upgrade/test_zs_upgd_1.3_latest_on_cos7.py | bgerxx/woodpecker | fdc51245945cc9be4d1f028988079213eb99b2ad | [
"Apache-2.0"
] | null | null | null | integrationtest/vm/installation/upgrade/test_zs_upgd_1.3_latest_on_cos7.py | bgerxx/woodpecker | fdc51245945cc9be4d1f028988079213eb99b2ad | [
"Apache-2.0"
] | null | null | null | '''
@author: MengLai
'''
import os
import tempfile
import uuid
import time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.operations.scenario_operations as scen_ops
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
tmp_file = '/tmp/%s' % uuid.uuid1().get_hex()
vm_inv = None
def test():
    """End-to-end check: boot a pre-baked ZStack 1.3 image in a scenario VM,
    verify the installation, upgrade to the latest build, and verify again.
    All endpoints/paths come from environment variables set by the harness.
    """
    global vm_inv
    test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
    image_name = os.environ.get('imageName_i_c7_z_1.3')
    iso_path = os.environ.get('iso_path')
    zstack_latest_version = os.environ.get('zstackLatestVersion')
    zstack_latest_path = os.environ.get('zstackLatestInstaller')
    vm_name = os.environ.get('vmName')
    upgrade_script_path = os.environ.get('upgradeScript')
    # Boot the 1.3 image and wait until SSH is reachable.
    vm_inv = test_stub.create_vm_scenario(image_name, vm_name)
    vm_ip = vm_inv.vmNics[0].ip
    test_lib.lib_wait_target_up(vm_ip, 22)
    test_stub.make_ssh_no_password(vm_ip, tmp_file)
    test_util.test_logger('Update MN IP')
    test_stub.update_mn_ip(vm_ip, vm_ip, tmp_file)
    test_stub.reset_rabbitmq_for_13(vm_ip, tmp_file)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_installation(vm_ip, tmp_file)
    test_util.test_logger('Upgrade zstack to latest')
    test_stub.update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path)
    test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file)
    test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_installation(vm_ip, tmp_file)
    # Success: remove the temp SSH helper file and tear down the VM.
    os.system('rm -f %s' % tmp_file)
    test_stub.destroy_vm_scenario(vm_inv.uuid)
    test_util.test_pass('ZStack upgrade Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort cleanup of the temp file and scenario VM after a failure."""
    global vm_inv
    os.system('rm -f %s' % tmp_file)
    if vm_inv:
        test_stub.destroy_vm_scenario(vm_inv.uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
| 33.777778 | 75 | 0.740132 | import os
import tempfile
import uuid
import time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.operations.scenario_operations as scen_ops
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
tmp_file = '/tmp/%s' % uuid.uuid1().get_hex()
vm_inv = None
def test():
global vm_inv
test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
image_name = os.environ.get('imageName_i_c7_z_1.3')
iso_path = os.environ.get('iso_path')
zstack_latest_version = os.environ.get('zstackLatestVersion')
zstack_latest_path = os.environ.get('zstackLatestInstaller')
vm_name = os.environ.get('vmName')
upgrade_script_path = os.environ.get('upgradeScript')
vm_inv = test_stub.create_vm_scenario(image_name, vm_name)
vm_ip = vm_inv.vmNics[0].ip
test_lib.lib_wait_target_up(vm_ip, 22)
test_stub.make_ssh_no_password(vm_ip, tmp_file)
test_util.test_logger('Update MN IP')
test_stub.update_mn_ip(vm_ip, vm_ip, tmp_file)
test_stub.reset_rabbitmq_for_13(vm_ip, tmp_file)
test_stub.start_mn(vm_ip, tmp_file)
test_stub.check_installation(vm_ip, tmp_file)
test_util.test_logger('Upgrade zstack to latest')
test_stub.update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path)
test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file)
test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version)
test_stub.start_mn(vm_ip, tmp_file)
test_stub.check_installation(vm_ip, tmp_file)
os.system('rm -f %s' % tmp_file)
test_stub.destroy_vm_scenario(vm_inv.uuid)
test_util.test_pass('ZStack upgrade Test Success')
def error_cleanup():
global vm_inv
os.system('rm -f %s' % tmp_file)
if vm_inv:
test_stub.destroy_vm_scenario(vm_inv.uuid)
test_lib.lib_error_cleanup(test_obj_dict)
| true | true |
f71c0a5782bf191b5540c2eb75fdf8f2a7d65eb4 | 4,189 | py | Python | tools/ISM_ticket_creation/ISM_ticket_creation.py | thahasinab/risksense_tools | 55399f21c680735e3c557484ec8788a33c1525e7 | [
"Apache-2.0"
] | null | null | null | tools/ISM_ticket_creation/ISM_ticket_creation.py | thahasinab/risksense_tools | 55399f21c680735e3c557484ec8788a33c1525e7 | [
"Apache-2.0"
] | null | null | null | tools/ISM_ticket_creation/ISM_ticket_creation.py | thahasinab/risksense_tools | 55399f21c680735e3c557484ec8788a33c1525e7 | [
"Apache-2.0"
] | null | null | null | import requests
import json
import time
import os
import toml
import zipfile
import csv
import pandas as pd
from datetime import date
from datetime import datetime
import logging
def incident_create(ism_url,ism_key,ism_attachment_url,final_directory,tag_name_list,flag_AH,assignee,assignee_desc,profile_link):
    """Create Ivanti ISM incidents from the exported RiskSense CSVs.

    For every row of ``Ticket_Findings_<tag>.csv`` an incident is POSTed to
    ``ism_url`` and the Assets/Ticket_Findings CSVs are attached to it via
    ``ism_attachment_url``.

    NOTE: the per-finding detail lists below accumulate across iterations,
    so each successive incident carries the details of all rows seen so far
    (kept as-is to preserve existing behaviour).

    :param flag_AH: "A" for application assets, "H" for host assets; selects
        which plugin-id / address columns are read from the CSVs.
    :returns: the IncidentNumber of the last incident created, or ``None``
        if no incident could be created.
    """
    Patches_list=[]
    Solution_list=[]
    VRR_group_list=[]
    VRR_list=[]
    Plugin_id_list=[]
    Asset_info_list=[]
    Scanner_name_list=[]
    Scanner_title_list=[]
    # Initialised so the final return cannot raise NameError when the
    # findings CSV is empty or every request fails.
    Incident_num = None
    df_App = pd.read_csv(final_directory+"/Assets_"+str(tag_name_list)+".csv", low_memory=False)
    # df_Host is never referenced afterwards, but reading it preserves the
    # historical "fail fast if the Findings export is missing" behaviour.
    df_Host = pd.read_csv(final_directory+"/Findings_"+str(tag_name_list)+".csv", low_memory=False)
    df_multiple = pd.read_csv(final_directory+"/Ticket_Findings_"+str(tag_name_list)+".csv", low_memory=False)
    for j in range(len(df_multiple.axes[0])):
        time.sleep(1)  # throttle so the ISM API is not hammered
        Scanner_name_list.append(df_multiple.iloc[j]['Scanner Name'])
        if(flag_AH == "A"):
            Plugin_id_list.append(df_multiple.iloc[j]["Scanner Plugin"])
            Asset_info_list.append(df_App.iloc[0]["Address"])
        elif(flag_AH == "H"):
            Plugin_id_list.append(df_multiple.iloc[j]["Scanner Plugin ID"])
            Asset_info_list.append(df_App.iloc[0]["IP Address"])
        Scanner_title_list.append(df_multiple.iloc[j]["Vulnerability"])
        VRR_list.append(df_multiple.iloc[j]["Vulnerability Risk Rating"])
        VRR_group_list.append(df_multiple.iloc[j]["VRR Group"])
        Solution_list.append(df_multiple.iloc[j]['Possible Solution'])
        Patches_list.append(df_multiple.iloc[j]['Possible Patches'])
        payload = json.dumps({"Category": "Account Lockout","Impact": "Medium","Priority": "3","ProfileLink": profile_link,"Service": "Email Service","Source": "Phone","Status": "Active","Subject": "Scanner Name : " + ' , '.join(map(str, Scanner_name_list)) + "|" + " Scanner Plugin ID : " + ' , '.join(map(str, Plugin_id_list)) + "|" + " Scanner Title : " + ' , '.join(map(str, Scanner_title_list)) ,"Symptom": 'Plugin information : \n----------------------------\nPlugin ID : ' + ' , '.join(map(str, Plugin_id_list)) + "\n\nVRR : " + ' , '.join(map(str, VRR_list)) + "|" + ' , '.join(map(str, VRR_group_list)) + "\n\n----------------------------------------------------------------------------------------------------\nAsset Information : \n----------------------------\n" + "Hostname : " + ' , '.join(map(str, Asset_info_list)) + "\n\nSolution : \n\n*) " + '\n*) '.join(map(str, Solution_list)) + "\n\nPatches : \n\n*) " + '\n*) '.join(map(str, Patches_list)),"Urgency": "Medium","Owner": assignee,"OwnerTeam": "Service Desk"})
        headers = {
            'Authorization': ism_key,
            'Content-Type': 'application/json',
            'Cookie': 'SID='
        }
        try:
            response = requests.request("POST", ism_url, headers=headers, data=payload)
        except Exception as e:
            # BUGFIX: the old handler printed ``response.text`` here, but
            # ``response`` is unbound when the request itself raises, which
            # turned every network error into a NameError.  Log the real
            # error and move on to the next finding.
            print("Incident creation request failed:", e)
            logging.error("Incident creation request failed: %s", e)
            continue
        Rec_id_json = response.json()
        Rec_id = Rec_id_json["RecId"]
        Incident_num = Rec_id_json["IncidentNumber"]
        ####### Attachment #######
        files = [('file', open(final_directory+"/Assets_"+str(tag_name_list)+".csv",'rb') ), ('file',open(final_directory+"/Ticket_Findings_"+str(tag_name_list)+".csv",'rb') )]
        payload={"ObjectID":Rec_id,"ObjectType":"incident#"}
        headers = {
            'Authorization': ism_key,
            'Cookie': 'SID='
        }
        response = requests.request("POST", ism_attachment_url, headers=headers, data=payload,files=files)
        if(response.status_code == 200):
            print("Incident is created and attachment is included...")
            logging.info("Incident is created and attachment is included...\n")
            print(assignee_desc)
            logging.info(assignee_desc)
        else:
            print("There is a problem in attaching the files to the ticket")
            logging.error("There is a problem in attaching the files to the ticket\n")
            print(assignee_desc)
            logging.info(assignee_desc)
    return Incident_num
| 46.544444 | 1,031 | 0.627596 | import requests
import json
import time
import os
import toml
import zipfile
import csv
import pandas as pd
from datetime import date
from datetime import datetime
import logging
def incident_create(ism_url,ism_key,ism_attachment_url,final_directory,tag_name_list,flag_AH,assignee,assignee_desc,profile_link):
Patches_list=[]
Solution_list=[]
VRR_group_list=[]
VRR_list=[]
Plugin_id_list=[]
Asset_info_list=[]
Scanner_name_list=[]
Scanner_title_list=[]
df_App = pd.read_csv(final_directory+"/Assets_"+str(tag_name_list)+".csv", low_memory=False)
df_Host = pd.read_csv(final_directory+"/Findings_"+str(tag_name_list)+".csv", low_memory=False)
df_multiple = pd.read_csv(final_directory+"/Ticket_Findings_"+str(tag_name_list)+".csv", low_memory=False)
for j in range(len(df_multiple.axes[0])):
time.sleep(1)
flag = False
create = False
Scanner_name_list.append(df_multiple.iloc[j]['Scanner Name'])
if(flag_AH == "A"):
Plugin_id_list.append(df_multiple.iloc[j]["Scanner Plugin"])
Asset_info_list.append(df_App.iloc[0]["Address"])
elif(flag_AH == "H"):
Plugin_id_list.append(df_multiple.iloc[j]["Scanner Plugin ID"])
Asset_info_list.append(df_App.iloc[0]["IP Address"])
Scanner_title_list.append(df_multiple.iloc[j]["Vulnerability"])
VRR_list.append(df_multiple.iloc[j]["Vulnerability Risk Rating"])
VRR_group_list.append(df_multiple.iloc[j]["VRR Group"])
Solution_list.append(df_multiple.iloc[j]['Possible Solution'])
Patches_list.append(df_multiple.iloc[j]['Possible Patches'])
payload = json.dumps({"Category": "Account Lockout","Impact": "Medium","Priority": "3","ProfileLink": profile_link,"Service": "Email Service","Source": "Phone","Status": "Active","Subject": "Scanner Name : " + ' , '.join(map(str, Scanner_name_list)) + "|" + " Scanner Plugin ID : " + ' , '.join(map(str, Plugin_id_list)) + "|" + " Scanner Title : " + ' , '.join(map(str, Scanner_title_list)) ,"Symptom": 'Plugin information : \n----------------------------\nPlugin ID : ' + ' , '.join(map(str, Plugin_id_list)) + "\n\nVRR : " + ' , '.join(map(str, VRR_list)) + "|" + ' , '.join(map(str, VRR_group_list)) + "\n\n----------------------------------------------------------------------------------------------------\nAsset Information : \n----------------------------\n" + "Hostname : " + ' , '.join(map(str, Asset_info_list)) + "\n\nSolution : \n\n*) " + '\n*) '.join(map(str, Solution_list)) + "\n\nPatches : \n\n*) " + '\n*) '.join(map(str, Patches_list)),"Urgency": "Medium","Owner": assignee,"OwnerTeam": "Service Desk"})
headers = {
'Authorization': ism_key,
'Content-Type': 'application/json',
'Cookie': 'SID='
}
try:
response = requests.request("POST", ism_url, headers=headers, data=payload)
except Exception as e:
print(e,response.text)
logging.error(e,response.text)
Rec_id_json = response.json()
Rec_id = Rec_id_json["RecId"]
Incident_num = Rec_id_json["IncidentNumber"]
        files = [('file', open(final_directory+"/Assets_"+str(tag_name_list)+".csv",'rb') ), ('file',open(final_directory+"/Ticket_Findings_"+str(tag_name_list)+".csv",'rb') )]
payload={"ObjectID":Rec_id,"ObjectType":"incident#"}
headers = {
'Authorization': ism_key,
'Cookie': 'SID='
}
response = requests.request("POST", ism_attachment_url, headers=headers, data=payload,files=files)
if(response.status_code == 200):
print("Incident is created and attachment is included...")
logging.info("Incident is created and attachment is included...\n")
print(assignee_desc)
logging.info(assignee_desc)
else:
print("There is a problem in attaching the files to the ticket")
logging.error("There is a problem in attaching the files to the ticket\n")
print(assignee_desc)
logging.info(assignee_desc)
return Incident_num
| true | true |
f71c0b020116ae97bd345db15ac8c2ee8e6c6d43 | 28,999 | py | Python | venv/Lib/site-packages/pip/_internal/cli/cmdoptions.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 38,667 | 2015-01-01T00:15:34.000Z | 2022-03-31T22:57:03.000Z | venv/Lib/site-packages/pip/_internal/cli/cmdoptions.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 1,192 | 2015-01-03T07:59:34.000Z | 2022-03-31T13:22:26.000Z | venv/Lib/site-packages/pip/_internal/cli/cmdoptions.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 11,269 | 2015-01-01T08:41:17.000Z | 2022-03-31T16:12:52.000Z | """
shared options and groups
The principle here is to define options once, but *not* instantiate them
globally. One reason being that options with action='append' can carry state
between parses. pip parses general options twice internally, and shouldn't
pass on state. To be consistent, all options will follow this design.
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import os
import textwrap
import warnings
from functools import partial
from optparse import SUPPRESS_HELP, Option, OptionGroup, OptionParser, Values
from textwrap import dedent
from typing import Any, Callable, Dict, Optional, Tuple
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cli.parser import ConfigOptionParser
from pip._internal.cli.progress_bars import BAR_TYPES
from pip._internal.exceptions import CommandError
from pip._internal.locations import USER_CACHE_DIR, get_src_prefix
from pip._internal.models.format_control import FormatControl
from pip._internal.models.index import PyPI
from pip._internal.models.target_python import TargetPython
from pip._internal.utils.hashes import STRONG_HASHES
from pip._internal.utils.misc import strtobool
def raise_option_error(parser, option, msg):
    # type: (OptionParser, Option, str) -> None
    """
    Raise an option parsing error using parser.error().

    Args:
      parser: an OptionParser instance.
      option: an Option instance.
      msg: the error text.
    """
    # Collapse internal whitespace, then re-wrap to terminal-friendly lines.
    full_msg = " ".join(f"{option} error: {msg}".split())
    parser.error(textwrap.fill(full_msg))
def make_option_group(group, parser):
    # type: (Dict[str, Any], ConfigOptionParser) -> OptionGroup
    """
    Return an OptionGroup object
    group  -- assumed to be dict with 'name' and 'options' keys
    parser -- an optparse Parser
    """
    grp = OptionGroup(parser, group["name"])
    # Each entry is a zero-argument factory (a functools.partial of Option).
    for opt in (factory() for factory in group["options"]):
        grp.add_option(opt)
    return grp
def check_install_build_global(options, check_options=None):
    # type: (Values, Optional[Values]) -> None
    """Disable wheels if per-setup.py call options are set.

    :param options: The OptionParser options to update.
    :param check_options: The options to check, if not supplied defaults to
        options.
    """
    source = options if check_options is None else check_options
    has_setup_py_options = any(
        getattr(source, attr, None)
        for attr in ("build_options", "global_options", "install_options")
    )
    if has_setup_py_options:
        options.format_control.disallow_binaries()
        warnings.warn(
            "Disabling all use of wheels due to the use of --build-option "
            "/ --global-option / --install-option.",
            stacklevel=2,
        )
def check_dist_restriction(options, check_target=False):
    # type: (Values, bool) -> None
    """Function for determining if custom platform options are allowed.

    :param options: The OptionParser options.
    :param check_target: Whether or not to check if --target is being used.
    """
    restricting_dists = bool(
        options.python_version
        or options.platforms
        or options.abis
        or options.implementation
    )

    binary_only = FormatControl(set(), {":all:"})
    allows_sdists = (
        options.format_control != binary_only and not options.ignore_dependencies
    )

    # Installations or downloads using dist restrictions must not combine
    # source distributions and dist-specific wheels, as they are not
    # guaranteed to be locally compatible.
    if restricting_dists and allows_sdists:
        raise CommandError(
            "When restricting platform and interpreter constraints using "
            "--python-version, --platform, --abi, or --implementation, "
            "either --no-deps must be set, or --only-binary=:all: must be "
            "set and --no-binary must not be set (or must be set to "
            ":none:)."
        )

    if check_target and restricting_dists and not options.target_dir:
        raise CommandError(
            "Can not use any platform or abi specific options unless "
            "installing via '--target'"
        )
def _path_option_check(option, opt, value):
    # type: (Option, str, str) -> str
    """Normalize a "path"-typed option value by expanding a leading "~"."""
    return os.path.expanduser(value)
def _package_name_option_check(option, opt, value):
    # type: (Option, str, str) -> str
    """Canonicalize a "package_name"-typed option value (PEP 503 rules)."""
    return canonicalize_name(value)
class PipOption(Option):
    """optparse Option subclass adding pip-specific value types.

    "path" values get "~" expanded; "package_name" values are canonicalized
    so comparisons are case- and separator-insensitive.
    """
    TYPES = Option.TYPES + ("path", "package_name")
    TYPE_CHECKER = Option.TYPE_CHECKER.copy()
    TYPE_CHECKER["package_name"] = _package_name_option_check
    TYPE_CHECKER["path"] = _path_option_check
###########
# options #
###########
help_ = partial(
Option,
"-h",
"--help",
dest="help",
action="help",
help="Show help.",
) # type: Callable[..., Option]
isolated_mode = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
) # type: Callable[..., Option]
require_virtualenv = partial(
Option,
# Run only if inside a virtualenv, bail if not.
"--require-virtualenv",
"--require-venv",
dest="require_venv",
action="store_true",
default=False,
help=SUPPRESS_HELP,
) # type: Callable[..., Option]
verbose = partial(
Option,
"-v",
"--verbose",
dest="verbose",
action="count",
default=0,
help="Give more output. Option is additive, and can be used up to 3 times.",
) # type: Callable[..., Option]
no_color = partial(
Option,
"--no-color",
dest="no_color",
action="store_true",
default=False,
help="Suppress colored output.",
) # type: Callable[..., Option]
version = partial(
Option,
"-V",
"--version",
dest="version",
action="store_true",
help="Show version and exit.",
) # type: Callable[..., Option]
quiet = partial(
Option,
"-q",
"--quiet",
dest="quiet",
action="count",
default=0,
help=(
"Give less output. Option is additive, and can be used up to 3"
" times (corresponding to WARNING, ERROR, and CRITICAL logging"
" levels)."
),
) # type: Callable[..., Option]
progress_bar = partial(
Option,
"--progress-bar",
dest="progress_bar",
type="choice",
choices=list(BAR_TYPES.keys()),
default="on",
help=(
"Specify type of progress to be displayed ["
+ "|".join(BAR_TYPES.keys())
+ "] (default: %default)"
),
) # type: Callable[..., Option]
log = partial(
PipOption,
"--log",
"--log-file",
"--local-log",
dest="log",
metavar="path",
type="path",
help="Path to a verbose appending log.",
) # type: Callable[..., Option]
no_input = partial(
Option,
# Don't ask for input
"--no-input",
dest="no_input",
action="store_true",
default=False,
help="Disable prompting for input.",
) # type: Callable[..., Option]
proxy = partial(
Option,
"--proxy",
dest="proxy",
type="str",
default="",
help="Specify a proxy in the form [user:passwd@]proxy.server:port.",
) # type: Callable[..., Option]
retries = partial(
Option,
"--retries",
dest="retries",
type="int",
default=5,
help="Maximum number of retries each connection should attempt "
"(default %default times).",
) # type: Callable[..., Option]
timeout = partial(
Option,
"--timeout",
"--default-timeout",
metavar="sec",
dest="timeout",
type="float",
default=15,
help="Set the socket timeout (default %default seconds).",
) # type: Callable[..., Option]
def exists_action():
# type: () -> Option
return Option(
# Option when path already exist
"--exists-action",
dest="exists_action",
type="choice",
choices=["s", "i", "w", "b", "a"],
default=[],
action="append",
metavar="action",
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.",
)
cert = partial(
PipOption,
"--cert",
dest="cert",
type="path",
metavar="path",
help=(
"Path to PEM-encoded CA certificate bundle. "
"If provided, overrides the default. "
"See 'SSL Certificate Verification' in pip documentation "
"for more information."
),
) # type: Callable[..., Option]
client_cert = partial(
PipOption,
"--client-cert",
dest="client_cert",
type="path",
default=None,
metavar="path",
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.",
) # type: Callable[..., Option]
index_url = partial(
Option,
"-i",
"--index-url",
"--pypi-url",
dest="index_url",
metavar="URL",
default=PyPI.simple_url,
help="Base URL of the Python Package Index (default %default). "
"This should point to a repository compliant with PEP 503 "
"(the simple repository API) or a local directory laid out "
"in the same format.",
) # type: Callable[..., Option]
def extra_index_url():
# type: () -> Option
return Option(
"--extra-index-url",
dest="extra_index_urls",
metavar="URL",
action="append",
default=[],
help="Extra URLs of package indexes to use in addition to "
"--index-url. Should follow the same rules as "
"--index-url.",
)
no_index = partial(
Option,
"--no-index",
dest="no_index",
action="store_true",
default=False,
help="Ignore package index (only looking at --find-links URLs instead).",
) # type: Callable[..., Option]
def find_links():
# type: () -> Option
return Option(
"-f",
"--find-links",
dest="find_links",
action="append",
default=[],
metavar="url",
help="If a URL or path to an html file, then parse for links to "
"archives such as sdist (.tar.gz) or wheel (.whl) files. "
"If a local path or file:// URL that's a directory, "
"then look for archives in the directory listing. "
"Links to VCS project URLs are not supported.",
)
def trusted_host():
# type: () -> Option
return Option(
"--trusted-host",
dest="trusted_hosts",
action="append",
metavar="HOSTNAME",
default=[],
help="Mark this host or host:port pair as trusted, even though it "
"does not have valid or any HTTPS.",
)
def constraints():
# type: () -> Option
return Option(
"-c",
"--constraint",
dest="constraints",
action="append",
default=[],
metavar="file",
help="Constrain versions using the given constraints file. "
"This option can be used multiple times.",
)
def requirements():
# type: () -> Option
return Option(
"-r",
"--requirement",
dest="requirements",
action="append",
default=[],
metavar="file",
help="Install from the given requirements file. "
"This option can be used multiple times.",
)
def editable():
# type: () -> Option
return Option(
"-e",
"--editable",
dest="editables",
action="append",
default=[],
metavar="path/url",
help=(
"Install a project in editable mode (i.e. setuptools "
'"develop mode") from a local project path or a VCS url.'
),
)
def _handle_src(option, opt_str, value, parser):
# type: (Option, str, str, OptionParser) -> None
value = os.path.abspath(value)
setattr(parser.values, option.dest, value)
src = partial(
PipOption,
"--src",
"--source",
"--source-dir",
"--source-directory",
dest="src_dir",
type="path",
metavar="dir",
default=get_src_prefix(),
action="callback",
callback=_handle_src,
help="Directory to check out editable projects into. "
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".',
) # type: Callable[..., Option]
def _get_format_control(values, option):
# type: (Values, Option) -> Any
"""Get a format_control object."""
return getattr(values, option.dest)
def _handle_no_binary(option, opt_str, value, parser):
# type: (Option, str, str, OptionParser) -> None
existing = _get_format_control(parser.values, option)
FormatControl.handle_mutual_excludes(
value,
existing.no_binary,
existing.only_binary,
)
def _handle_only_binary(option, opt_str, value, parser):
# type: (Option, str, str, OptionParser) -> None
existing = _get_format_control(parser.values, option)
FormatControl.handle_mutual_excludes(
value,
existing.only_binary,
existing.no_binary,
)
def no_binary():
# type: () -> Option
format_control = FormatControl(set(), set())
return Option(
"--no-binary",
dest="format_control",
action="callback",
callback=_handle_no_binary,
type="str",
default=format_control,
help="Do not use binary packages. Can be supplied multiple times, and "
'each time adds to the existing value. Accepts either ":all:" to '
'disable all binary packages, ":none:" to empty the set (notice '
"the colons), or one or more package names with commas between "
"them (no colons). Note that some packages are tricky to compile "
"and may fail to install when this option is used on them.",
)
def only_binary():
# type: () -> Option
format_control = FormatControl(set(), set())
return Option(
"--only-binary",
dest="format_control",
action="callback",
callback=_handle_only_binary,
type="str",
default=format_control,
help="Do not use source packages. Can be supplied multiple times, and "
'each time adds to the existing value. Accepts either ":all:" to '
'disable all source packages, ":none:" to empty the set, or one '
"or more package names with commas between them. Packages "
"without binary distributions will fail to install when this "
"option is used on them.",
)
platforms = partial(
Option,
"--platform",
dest="platforms",
metavar="platform",
action="append",
default=None,
help=(
"Only use wheels compatible with <platform>. Defaults to the "
"platform of the running system. Use this option multiple times to "
"specify multiple platforms supported by the target interpreter."
),
) # type: Callable[..., Option]
# This was made a separate function for unit-testing purposes.
def _convert_python_version(value):
# type: (str) -> Tuple[Tuple[int, ...], Optional[str]]
"""
Convert a version string like "3", "37", or "3.7.3" into a tuple of ints.
:return: A 2-tuple (version_info, error_msg), where `error_msg` is
non-None if and only if there was a parsing error.
"""
if not value:
# The empty string is the same as not providing a value.
return (None, None)
parts = value.split(".")
if len(parts) > 3:
return ((), "at most three version parts are allowed")
if len(parts) == 1:
# Then we are in the case of "3" or "37".
value = parts[0]
if len(value) > 1:
parts = [value[0], value[1:]]
try:
version_info = tuple(int(part) for part in parts)
except ValueError:
return ((), "each version part must be an integer")
return (version_info, None)
def _handle_python_version(option, opt_str, value, parser):
# type: (Option, str, str, OptionParser) -> None
"""
Handle a provided --python-version value.
"""
version_info, error_msg = _convert_python_version(value)
if error_msg is not None:
msg = "invalid --python-version value: {!r}: {}".format(
value,
error_msg,
)
raise_option_error(parser, option=option, msg=msg)
parser.values.python_version = version_info
python_version = partial(
Option,
"--python-version",
dest="python_version",
metavar="python_version",
action="callback",
callback=_handle_python_version,
type="str",
default=None,
help=dedent(
"""\
The Python interpreter version to use for wheel and "Requires-Python"
compatibility checks. Defaults to a version derived from the running
interpreter. The version can be specified using up to three dot-separated
integers (e.g. "3" for 3.0.0, "3.7" for 3.7.0, or "3.7.3"). A major-minor
version can also be given as a string without dots (e.g. "37" for 3.7.0).
"""
),
) # type: Callable[..., Option]
implementation = partial(
Option,
"--implementation",
dest="implementation",
metavar="implementation",
default=None,
help=(
"Only use wheels compatible with Python "
"implementation <implementation>, e.g. 'pp', 'jy', 'cp', "
" or 'ip'. If not specified, then the current "
"interpreter implementation is used. Use 'py' to force "
"implementation-agnostic wheels."
),
) # type: Callable[..., Option]
abis = partial(
Option,
"--abi",
dest="abis",
metavar="abi",
action="append",
default=None,
help=(
"Only use wheels compatible with Python abi <abi>, e.g. 'pypy_41'. "
"If not specified, then the current interpreter abi tag is used. "
"Use this option multiple times to specify multiple abis supported "
"by the target interpreter. Generally you will need to specify "
"--implementation, --platform, and --python-version when using this "
"option."
),
) # type: Callable[..., Option]
def add_target_python_options(cmd_opts):
# type: (OptionGroup) -> None
cmd_opts.add_option(platforms())
cmd_opts.add_option(python_version())
cmd_opts.add_option(implementation())
cmd_opts.add_option(abis())
def make_target_python(options):
# type: (Values) -> TargetPython
target_python = TargetPython(
platforms=options.platforms,
py_version_info=options.python_version,
abis=options.abis,
implementation=options.implementation,
)
return target_python
def prefer_binary():
# type: () -> Option
return Option(
"--prefer-binary",
dest="prefer_binary",
action="store_true",
default=False,
help="Prefer older binary packages over newer source packages.",
)
cache_dir = partial(
PipOption,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
type="path",
help="Store the cache data in <dir>.",
) # type: Callable[..., Option]
def _handle_no_cache_dir(option, opt, value, parser):
# type: (Option, str, str, OptionParser) -> None
"""
Process a value provided for the --no-cache-dir option.
This is an optparse.Option callback for the --no-cache-dir option.
"""
# The value argument will be None if --no-cache-dir is passed via the
# command-line, since the option doesn't accept arguments. However,
# the value can be non-None if the option is triggered e.g. by an
# environment variable, like PIP_NO_CACHE_DIR=true.
if value is not None:
# Then parse the string value to get argument error-checking.
try:
strtobool(value)
except ValueError as exc:
raise_option_error(parser, option=option, msg=str(exc))
# Originally, setting PIP_NO_CACHE_DIR to a value that strtobool()
# converted to 0 (like "false" or "no") caused cache_dir to be disabled
# rather than enabled (logic would say the latter). Thus, we disable
# the cache directory not just on values that parse to True, but (for
# backwards compatibility reasons) also on values that parse to False.
# In other words, always set it to False if the option is provided in
# some (valid) form.
parser.values.cache_dir = False
no_cache = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="callback",
callback=_handle_no_cache_dir,
help="Disable the cache.",
) # type: Callable[..., Option]
no_deps = partial(
Option,
"--no-deps",
"--no-dependencies",
dest="ignore_dependencies",
action="store_true",
default=False,
help="Don't install package dependencies.",
) # type: Callable[..., Option]
build_dir = partial(
PipOption,
"-b",
"--build",
"--build-dir",
"--build-directory",
dest="build_dir",
type="path",
metavar="dir",
help=SUPPRESS_HELP,
) # type: Callable[..., Option]
ignore_requires_python = partial(
Option,
"--ignore-requires-python",
dest="ignore_requires_python",
action="store_true",
help="Ignore the Requires-Python information.",
) # type: Callable[..., Option]
no_build_isolation = partial(
Option,
"--no-build-isolation",
dest="build_isolation",
action="store_false",
default=True,
help="Disable isolation when building a modern source distribution. "
"Build dependencies specified by PEP 518 must be already installed "
"if this option is used.",
) # type: Callable[..., Option]
def _handle_no_use_pep517(option, opt, value, parser):
# type: (Option, str, str, OptionParser) -> None
"""
Process a value provided for the --no-use-pep517 option.
This is an optparse.Option callback for the no_use_pep517 option.
"""
# Since --no-use-pep517 doesn't accept arguments, the value argument
# will be None if --no-use-pep517 is passed via the command-line.
# However, the value can be non-None if the option is triggered e.g.
# by an environment variable, for example "PIP_NO_USE_PEP517=true".
if value is not None:
msg = """A value was passed for --no-use-pep517,
probably using either the PIP_NO_USE_PEP517 environment variable
or the "no-use-pep517" config file option. Use an appropriate value
of the PIP_USE_PEP517 environment variable or the "use-pep517"
config file option instead.
"""
raise_option_error(parser, option=option, msg=msg)
# Otherwise, --no-use-pep517 was passed via the command-line.
parser.values.use_pep517 = False
use_pep517 = partial(
Option,
"--use-pep517",
dest="use_pep517",
action="store_true",
default=None,
help="Use PEP 517 for building source distributions "
"(use --no-use-pep517 to force legacy behaviour).",
) # type: Any
no_use_pep517 = partial(
Option,
"--no-use-pep517",
dest="use_pep517",
action="callback",
callback=_handle_no_use_pep517,
default=None,
help=SUPPRESS_HELP,
) # type: Any
install_options = partial(
Option,
"--install-option",
dest="install_options",
action="append",
metavar="options",
help="Extra arguments to be supplied to the setup.py install "
'command (use like --install-option="--install-scripts=/usr/local/'
'bin"). Use multiple --install-option options to pass multiple '
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.",
) # type: Callable[..., Option]
build_options = partial(
Option,
"--build-option",
dest="build_options",
metavar="options",
action="append",
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.",
) # type: Callable[..., Option]
global_options = partial(
Option,
"--global-option",
dest="global_options",
action="append",
metavar="options",
help="Extra global options to be supplied to the setup.py "
"call before the install or bdist_wheel command.",
) # type: Callable[..., Option]
no_clean = partial(
Option,
"--no-clean",
action="store_true",
default=False,
help="Don't clean up build directories.",
) # type: Callable[..., Option]
pre = partial(
Option,
"--pre",
action="store_true",
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.",
) # type: Callable[..., Option]
disable_pip_version_check = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.",
) # type: Callable[..., Option]
def _handle_merge_hash(option, opt_str, value, parser):
# type: (Option, str, str, OptionParser) -> None
"""Given a value spelled "algo:digest", append the digest to a list
pointed to in a dict by the algo name."""
if not parser.values.hashes:
parser.values.hashes = {}
try:
algo, digest = value.split(":", 1)
except ValueError:
parser.error(
"Arguments to {} must be a hash name " # noqa
"followed by a value, like --hash=sha256:"
"abcde...".format(opt_str)
)
if algo not in STRONG_HASHES:
parser.error(
"Allowed hash algorithms for {} are {}.".format( # noqa
opt_str, ", ".join(STRONG_HASHES)
)
)
parser.values.hashes.setdefault(algo, []).append(digest)
hash = partial(
Option,
"--hash",
# Hash values eventually end up in InstallRequirement.hashes due to
# __dict__ copying in process_line().
dest="hashes",
action="callback",
callback=_handle_merge_hash,
type="string",
help="Verify that the package's archive matches this "
"hash before installing. Example: --hash=sha256:abcdef...",
) # type: Callable[..., Option]
require_hashes = partial(
Option,
"--require-hashes",
dest="require_hashes",
action="store_true",
default=False,
help="Require a hash to check each requirement against, for "
"repeatable installs. This option is implied when any package in a "
"requirements file has a --hash option.",
) # type: Callable[..., Option]
list_path = partial(
PipOption,
"--path",
dest="path",
type="path",
action="append",
help="Restrict to the specified installation path for listing "
"packages (can be used multiple times).",
) # type: Callable[..., Option]
def check_list_path_option(options):
# type: (Values) -> None
if options.path and (options.user or options.local):
raise CommandError("Cannot combine '--path' with '--user' or '--local'")
list_exclude = partial(
PipOption,
"--exclude",
dest="excludes",
action="append",
metavar="package",
type="package_name",
help="Exclude specified package from the output",
) # type: Callable[..., Option]
no_python_version_warning = partial(
Option,
"--no-python-version-warning",
dest="no_python_version_warning",
action="store_true",
default=False,
help="Silence deprecation warnings for upcoming unsupported Pythons.",
) # type: Callable[..., Option]
use_new_feature = partial(
Option,
"--use-feature",
dest="features_enabled",
metavar="feature",
action="append",
default=[],
choices=["2020-resolver", "fast-deps", "in-tree-build"],
help="Enable new functionality, that may be backward incompatible.",
) # type: Callable[..., Option]
use_deprecated_feature = partial(
Option,
"--use-deprecated",
dest="deprecated_features_enabled",
metavar="feature",
action="append",
default=[],
choices=["legacy-resolver"],
help=("Enable deprecated functionality, that will be removed in the future."),
) # type: Callable[..., Option]
##########
# groups #
##########
general_group = {
"name": "General Options",
"options": [
help_,
isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
no_input,
proxy,
retries,
timeout,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
no_color,
no_python_version_warning,
use_new_feature,
use_deprecated_feature,
],
} # type: Dict[str, Any]
index_group = {
"name": "Package Index Options",
"options": [
index_url,
extra_index_url,
no_index,
find_links,
],
} # type: Dict[str, Any]
| 28.291707 | 82 | 0.626746 |
import os
import textwrap
import warnings
from functools import partial
from optparse import SUPPRESS_HELP, Option, OptionGroup, OptionParser, Values
from textwrap import dedent
from typing import Any, Callable, Dict, Optional, Tuple
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cli.parser import ConfigOptionParser
from pip._internal.cli.progress_bars import BAR_TYPES
from pip._internal.exceptions import CommandError
from pip._internal.locations import USER_CACHE_DIR, get_src_prefix
from pip._internal.models.format_control import FormatControl
from pip._internal.models.index import PyPI
from pip._internal.models.target_python import TargetPython
from pip._internal.utils.hashes import STRONG_HASHES
from pip._internal.utils.misc import strtobool
def raise_option_error(parser, option, msg):
msg = f"{option} error: {msg}"
msg = textwrap.fill(" ".join(msg.split()))
parser.error(msg)
def make_option_group(group, parser):
option_group = OptionGroup(parser, group["name"])
for option in group["options"]:
option_group.add_option(option())
return option_group
def check_install_build_global(options, check_options=None):
if check_options is None:
check_options = options
def getname(n):
return getattr(check_options, n, None)
names = ["build_options", "global_options", "install_options"]
if any(map(getname, names)):
control = options.format_control
control.disallow_binaries()
warnings.warn(
"Disabling all use of wheels due to the use of --build-option "
"/ --global-option / --install-option.",
stacklevel=2,
)
def check_dist_restriction(options, check_target=False):
dist_restriction_set = any(
[
options.python_version,
options.platforms,
options.abis,
options.implementation,
]
)
binary_only = FormatControl(set(), {":all:"})
sdist_dependencies_allowed = (
options.format_control != binary_only and not options.ignore_dependencies
)
if dist_restriction_set and sdist_dependencies_allowed:
raise CommandError(
"When restricting platform and interpreter constraints using "
"--python-version, --platform, --abi, or --implementation, "
"either --no-deps must be set, or --only-binary=:all: must be "
"set and --no-binary must not be set (or must be set to "
":none:)."
)
if check_target:
if dist_restriction_set and not options.target_dir:
raise CommandError(
"Can not use any platform or abi specific options unless "
"installing via '--target'"
)
def _path_option_check(option, opt, value):
return os.path.expanduser(value)
def _package_name_option_check(option, opt, value):
return canonicalize_name(value)
class PipOption(Option):
TYPES = Option.TYPES + ("path", "package_name")
TYPE_CHECKER = Option.TYPE_CHECKER.copy()
TYPE_CHECKER["package_name"] = _package_name_option_check
TYPE_CHECKER["path"] = _path_option_check
p.",
)
isolated_mode = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv = partial(
Option,
"--require-virtualenv",
"--require-venv",
dest="require_venv",
action="store_true",
default=False,
help=SUPPRESS_HELP,
)
verbose = partial(
Option,
"-v",
"--verbose",
dest="verbose",
action="count",
default=0,
help="Give more output. Option is additive, and can be used up to 3 times.",
)
no_color = partial(
Option,
"--no-color",
dest="no_color",
action="store_true",
default=False,
help="Suppress colored output.",
)
version = partial(
Option,
"-V",
"--version",
dest="version",
action="store_true",
help="Show version and exit.",
)
quiet = partial(
Option,
"-q",
"--quiet",
dest="quiet",
action="count",
default=0,
help=(
"Give less output. Option is additive, and can be used up to 3"
" times (corresponding to WARNING, ERROR, and CRITICAL logging"
" levels)."
),
)
progress_bar = partial(
Option,
"--progress-bar",
dest="progress_bar",
type="choice",
choices=list(BAR_TYPES.keys()),
default="on",
help=(
"Specify type of progress to be displayed ["
+ "|".join(BAR_TYPES.keys())
+ "] (default: %default)"
),
)
log = partial(
PipOption,
"--log",
"--log-file",
"--local-log",
dest="log",
metavar="path",
type="path",
help="Path to a verbose appending log.",
)
no_input = partial(
Option,
"--no-input",
dest="no_input",
action="store_true",
default=False,
help="Disable prompting for input.",
) # type: Callable[..., Option]
proxy = partial(
Option,
"--proxy",
dest="proxy",
type="str",
default="",
help="Specify a proxy in the form [user:passwd@]proxy.server:port.",
) # type: Callable[..., Option]
retries = partial(
Option,
"--retries",
dest="retries",
type="int",
default=5,
help="Maximum number of retries each connection should attempt "
"(default %default times).",
) # type: Callable[..., Option]
timeout = partial(
Option,
"--timeout",
"--default-timeout",
metavar="sec",
dest="timeout",
type="float",
default=15,
help="Set the socket timeout (default %default seconds).",
) # type: Callable[..., Option]
def exists_action():
# type: () -> Option
return Option(
# Option when path already exist
"--exists-action",
dest="exists_action",
type="choice",
choices=["s", "i", "w", "b", "a"],
default=[],
action="append",
metavar="action",
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.",
)
cert = partial(
PipOption,
"--cert",
dest="cert",
type="path",
metavar="path",
help=(
"Path to PEM-encoded CA certificate bundle. "
"If provided, overrides the default. "
"See 'SSL Certificate Verification' in pip documentation "
"for more information."
),
) # type: Callable[..., Option]
client_cert = partial(
PipOption,
"--client-cert",
dest="client_cert",
type="path",
default=None,
metavar="path",
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.",
) # type: Callable[..., Option]
index_url = partial(
Option,
"-i",
"--index-url",
"--pypi-url",
dest="index_url",
metavar="URL",
default=PyPI.simple_url,
help="Base URL of the Python Package Index (default %default). "
"This should point to a repository compliant with PEP 503 "
"(the simple repository API) or a local directory laid out "
"in the same format.",
) # type: Callable[..., Option]
def extra_index_url():
# type: () -> Option
return Option(
"--extra-index-url",
dest="extra_index_urls",
metavar="URL",
action="append",
default=[],
help="Extra URLs of package indexes to use in addition to "
"--index-url. Should follow the same rules as "
"--index-url.",
)
no_index = partial(
Option,
"--no-index",
dest="no_index",
action="store_true",
default=False,
help="Ignore package index (only looking at --find-links URLs instead).",
) # type: Callable[..., Option]
def find_links():
# type: () -> Option
return Option(
"-f",
"--find-links",
dest="find_links",
action="append",
default=[],
metavar="url",
help="If a URL or path to an html file, then parse for links to "
"archives such as sdist (.tar.gz) or wheel (.whl) files. "
"If a local path or file:// URL that's a directory, "
"then look for archives in the directory listing. "
"Links to VCS project URLs are not supported.",
)
def trusted_host():
return Option(
"--trusted-host",
dest="trusted_hosts",
action="append",
metavar="HOSTNAME",
default=[],
help="Mark this host or host:port pair as trusted, even though it "
"does not have valid or any HTTPS.",
)
def constraints():
return Option(
"-c",
"--constraint",
dest="constraints",
action="append",
default=[],
metavar="file",
help="Constrain versions using the given constraints file. "
"This option can be used multiple times.",
)
def requirements():
return Option(
"-r",
"--requirement",
dest="requirements",
action="append",
default=[],
metavar="file",
help="Install from the given requirements file. "
"This option can be used multiple times.",
)
def editable():
return Option(
"-e",
"--editable",
dest="editables",
action="append",
default=[],
metavar="path/url",
help=(
"Install a project in editable mode (i.e. setuptools "
'"develop mode") from a local project path or a VCS url.'
),
)
def _handle_src(option, opt_str, value, parser):
value = os.path.abspath(value)
setattr(parser.values, option.dest, value)
src = partial(
PipOption,
"--src",
"--source",
"--source-dir",
"--source-directory",
dest="src_dir",
type="path",
metavar="dir",
default=get_src_prefix(),
action="callback",
callback=_handle_src,
help="Directory to check out editable projects into. "
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".',
)
def _get_format_control(values, option):
return getattr(values, option.dest)
def _handle_no_binary(option, opt_str, value, parser):
existing = _get_format_control(parser.values, option)
FormatControl.handle_mutual_excludes(
value,
existing.no_binary,
existing.only_binary,
)
def _handle_only_binary(option, opt_str, value, parser):
existing = _get_format_control(parser.values, option)
FormatControl.handle_mutual_excludes(
value,
existing.only_binary,
existing.no_binary,
)
def no_binary():
format_control = FormatControl(set(), set())
return Option(
"--no-binary",
dest="format_control",
action="callback",
callback=_handle_no_binary,
type="str",
default=format_control,
help="Do not use binary packages. Can be supplied multiple times, and "
'each time adds to the existing value. Accepts either ":all:" to '
'disable all binary packages, ":none:" to empty the set (notice '
"the colons), or one or more package names with commas between "
"them (no colons). Note that some packages are tricky to compile "
"and may fail to install when this option is used on them.",
)
def only_binary():
format_control = FormatControl(set(), set())
return Option(
"--only-binary",
dest="format_control",
action="callback",
callback=_handle_only_binary,
type="str",
default=format_control,
help="Do not use source packages. Can be supplied multiple times, and "
'each time adds to the existing value. Accepts either ":all:" to '
'disable all source packages, ":none:" to empty the set, or one '
"or more package names with commas between them. Packages "
"without binary distributions will fail to install when this "
"option is used on them.",
)
platforms = partial(
Option,
"--platform",
dest="platforms",
metavar="platform",
action="append",
default=None,
help=(
"Only use wheels compatible with <platform>. Defaults to the "
"platform of the running system. Use this option multiple times to "
"specify multiple platforms supported by the target interpreter."
),
)
def _convert_python_version(value):
if not value:
return (None, None)
parts = value.split(".")
if len(parts) > 3:
return ((), "at most three version parts are allowed")
if len(parts) == 1:
value = parts[0]
if len(value) > 1:
parts = [value[0], value[1:]]
try:
version_info = tuple(int(part) for part in parts)
except ValueError:
return ((), "each version part must be an integer")
return (version_info, None)
def _handle_python_version(option, opt_str, value, parser):
version_info, error_msg = _convert_python_version(value)
if error_msg is not None:
msg = "invalid --python-version value: {!r}: {}".format(
value,
error_msg,
)
raise_option_error(parser, option=option, msg=msg)
parser.values.python_version = version_info
python_version = partial(
Option,
"--python-version",
dest="python_version",
metavar="python_version",
action="callback",
callback=_handle_python_version,
type="str",
default=None,
help=dedent(
"""\
The Python interpreter version to use for wheel and "Requires-Python"
compatibility checks. Defaults to a version derived from the running
interpreter. The version can be specified using up to three dot-separated
integers (e.g. "3" for 3.0.0, "3.7" for 3.7.0, or "3.7.3"). A major-minor
version can also be given as a string without dots (e.g. "37" for 3.7.0).
"""
),
)
implementation = partial(
Option,
"--implementation",
dest="implementation",
metavar="implementation",
default=None,
help=(
"Only use wheels compatible with Python "
"implementation <implementation>, e.g. 'pp', 'jy', 'cp', "
" or 'ip'. If not specified, then the current "
"interpreter implementation is used. Use 'py' to force "
"implementation-agnostic wheels."
),
)
abis = partial(
Option,
"--abi",
dest="abis",
metavar="abi",
action="append",
default=None,
help=(
"Only use wheels compatible with Python abi <abi>, e.g. 'pypy_41'. "
"If not specified, then the current interpreter abi tag is used. "
"Use this option multiple times to specify multiple abis supported "
"by the target interpreter. Generally you will need to specify "
"--implementation, --platform, and --python-version when using this "
"option."
),
)
def add_target_python_options(cmd_opts):
cmd_opts.add_option(platforms())
cmd_opts.add_option(python_version())
cmd_opts.add_option(implementation())
cmd_opts.add_option(abis())
def make_target_python(options):
target_python = TargetPython(
platforms=options.platforms,
py_version_info=options.python_version,
abis=options.abis,
implementation=options.implementation,
)
return target_python
def prefer_binary():
return Option(
"--prefer-binary",
dest="prefer_binary",
action="store_true",
default=False,
help="Prefer older binary packages over newer source packages.",
)
cache_dir = partial(
PipOption,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
type="path",
help="Store the cache data in <dir>.",
)
def _handle_no_cache_dir(option, opt, value, parser):
    """optparse callback for --no-cache-dir / PIP_NO_CACHE_DIR.

    Validates any string value (only present when the option comes from the
    environment or a config file) and then unconditionally disables the
    cache — see the backwards-compatibility note below.
    """
    # the value can be non-None if the option is triggered e.g. by an
    # environment variable, like PIP_NO_CACHE_DIR=true.
    if value is not None:
        # Then parse the string value to get argument error-checking.
        try:
            strtobool(value)
        except ValueError as exc:
            raise_option_error(parser, option=option, msg=str(exc))

    # Originally, setting PIP_NO_CACHE_DIR to a value that strtobool()
    # converted to 0 (like "false" or "no") caused cache_dir to be disabled
    # rather than enabled (logic would say the latter). Thus, we disable
    # the cache directory not just on values that parse to True, but (for
    # backwards compatibility reasons) also on values that parse to False.
    # In other words, always set it to False if the option is provided in
    # some (valid) form.
    parser.values.cache_dir = False
no_cache = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="callback",
callback=_handle_no_cache_dir,
help="Disable the cache.",
) # type: Callable[..., Option]
no_deps = partial(
Option,
"--no-deps",
"--no-dependencies",
dest="ignore_dependencies",
action="store_true",
default=False,
help="Don't install package dependencies.",
)
build_dir = partial(
PipOption,
"-b",
"--build",
"--build-dir",
"--build-directory",
dest="build_dir",
type="path",
metavar="dir",
help=SUPPRESS_HELP,
)
ignore_requires_python = partial(
Option,
"--ignore-requires-python",
dest="ignore_requires_python",
action="store_true",
help="Ignore the Requires-Python information.",
)
no_build_isolation = partial(
Option,
"--no-build-isolation",
dest="build_isolation",
action="store_false",
default=True,
help="Disable isolation when building a modern source distribution. "
"Build dependencies specified by PEP 518 must be already installed "
"if this option is used.",
)
def _handle_no_use_pep517(option, opt, value, parser):
    """optparse callback for --no-use-pep517.

    Rejects any explicit value (which can only arrive via the environment
    or a config file, never the command line) and otherwise records
    ``use_pep517 = False``.
    """
    # will be None if --no-use-pep517 is passed via the command-line.
    # However, the value can be non-None if the option is triggered e.g.
    # by an environment variable, for example "PIP_NO_USE_PEP517=true".
    if value is not None:
        msg = """A value was passed for --no-use-pep517,
        probably using either the PIP_NO_USE_PEP517 environment variable
        or the "no-use-pep517" config file option. Use an appropriate value
        of the PIP_USE_PEP517 environment variable or the "use-pep517"
        config file option instead.
        """
        raise_option_error(parser, option=option, msg=msg)

    # Otherwise, --no-use-pep517 was passed via the command-line.
    parser.values.use_pep517 = False
use_pep517 = partial(
Option,
"--use-pep517",
dest="use_pep517",
action="store_true",
default=None,
help="Use PEP 517 for building source distributions "
"(use --no-use-pep517 to force legacy behaviour).",
) # type: Any
no_use_pep517 = partial(
Option,
"--no-use-pep517",
dest="use_pep517",
action="callback",
callback=_handle_no_use_pep517,
default=None,
help=SUPPRESS_HELP,
) # type: Any
install_options = partial(
Option,
"--install-option",
dest="install_options",
action="append",
metavar="options",
help="Extra arguments to be supplied to the setup.py install "
'command (use like --install-option="--install-scripts=/usr/local/'
'bin"). Use multiple --install-option options to pass multiple '
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.",
) # type: Callable[..., Option]
build_options = partial(
Option,
"--build-option",
dest="build_options",
metavar="options",
action="append",
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.",
) # type: Callable[..., Option]
global_options = partial(
Option,
"--global-option",
dest="global_options",
action="append",
metavar="options",
help="Extra global options to be supplied to the setup.py "
"call before the install or bdist_wheel command.",
) # type: Callable[..., Option]
no_clean = partial(
Option,
"--no-clean",
action="store_true",
default=False,
help="Don't clean up build directories.",
)
pre = partial(
Option,
"--pre",
action="store_true",
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.",
)
disable_pip_version_check = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.",
) # type: Callable[..., Option]
def _handle_merge_hash(option, opt_str, value, parser):
    # type: (Option, str, str, OptionParser) -> None
    """optparse callback for --hash: validate an "algo:digest" argument and
    append the digest to ``parser.values.hashes[algo]``, creating the dict
    on first use so repeated --hash options accumulate."""
    if not parser.values.hashes:
        parser.values.hashes = {}
    try:
        algo, digest = value.split(":", 1)
    except ValueError:
        parser.error(
            "Arguments to {} must be a hash name " # noqa
            "followed by a value, like --hash=sha256:"
            "abcde...".format(opt_str)
        )
    # Only hash algorithms considered collision-resistant are accepted.
    if algo not in STRONG_HASHES:
        parser.error(
            "Allowed hash algorithms for {} are {}.".format( # noqa
                opt_str, ", ".join(STRONG_HASHES)
            )
        )
    parser.values.hashes.setdefault(algo, []).append(digest)
hash = partial(
Option,
"--hash",
# Hash values eventually end up in InstallRequirement.hashes due to
# __dict__ copying in process_line().
dest="hashes",
action="callback",
callback=_handle_merge_hash,
type="string",
help="Verify that the package's archive matches this "
"hash before installing. Example: --hash=sha256:abcdef...",
)
require_hashes = partial(
Option,
"--require-hashes",
dest="require_hashes",
action="store_true",
default=False,
help="Require a hash to check each requirement against, for "
"repeatable installs. This option is implied when any package in a "
"requirements file has a --hash option.",
)
list_path = partial(
PipOption,
"--path",
dest="path",
type="path",
action="append",
help="Restrict to the specified installation path for listing "
"packages (can be used multiple times).",
)
def check_list_path_option(options):
    """Reject the invalid combination of --path with --user or --local."""
    if options.path and (options.user or options.local):
        raise CommandError("Cannot combine '--path' with '--user' or '--local'")
list_exclude = partial(
PipOption,
"--exclude",
dest="excludes",
action="append",
metavar="package",
type="package_name",
help="Exclude specified package from the output",
)
no_python_version_warning = partial(
Option,
"--no-python-version-warning",
dest="no_python_version_warning",
action="store_true",
default=False,
help="Silence deprecation warnings for upcoming unsupported Pythons.",
)
use_new_feature = partial(
Option,
"--use-feature",
dest="features_enabled",
metavar="feature",
action="append",
default=[],
choices=["2020-resolver", "fast-deps", "in-tree-build"],
help="Enable new functionality, that may be backward incompatible.",
)
use_deprecated_feature = partial(
Option,
"--use-deprecated",
dest="deprecated_features_enabled",
metavar="feature",
action="append",
default=[],
choices=["legacy-resolver"],
help=("Enable deprecated functionality, that will be removed in the future."),
)
isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
no_input,
proxy,
retries,
timeout,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
no_color,
no_python_version_warning,
use_new_feature,
use_deprecated_feature,
],
}
index_group = {
"name": "Package Index Options",
"options": [
index_url,
extra_index_url,
no_index,
find_links,
],
}
| true | true |
f71c0e613c8658e9c5d1b5f8b68e473cd366b6c5 | 8,773 | py | Python | src/sst/elements/memHierarchy/tests/testsuite_default_memHierarchy_sdl.py | sudhanshu2/sst-elements | d658e5e4b26e5725488f9e93528506ddb22072ee | [
"BSD-3-Clause"
] | null | null | null | src/sst/elements/memHierarchy/tests/testsuite_default_memHierarchy_sdl.py | sudhanshu2/sst-elements | d658e5e4b26e5725488f9e93528506ddb22072ee | [
"BSD-3-Clause"
] | null | null | null | src/sst/elements/memHierarchy/tests/testsuite_default_memHierarchy_sdl.py | sudhanshu2/sst-elements | d658e5e4b26e5725488f9e93528506ddb22072ee | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from sst_unittest import *
from sst_unittest_support import *
import os.path
################################################################################
# Code to support a single instance module initialize, must be called setUp method
# 0 until the one-time module initialization has run; guarded by module_sema.
module_init = 0
# Serializes the first-time initialization across concurrently starting tests.
module_sema = threading.Semaphore()
def initializeTestModule_SingleInstance(class_inst):
    """Run one-time, thread-safe initialization for this test module.

    The first caller executes the (placeholder) init code; subsequent
    callers from any thread return immediately.  ``class_inst`` is the
    requesting test-case instance (currently unused by the init code).
    """
    global module_init
    global module_sema

    # Use the semaphore as a context manager so it is released even if the
    # init code raises, preventing a deadlock for every later caller.
    # (The original acquire()/release() pair leaked the semaphore on error.)
    with module_sema:
        if module_init != 1:
            try:
                # Put your single instance Init Code Here
                pass
            except Exception:
                # Placeholder scaffolding deliberately ignores init errors;
                # narrowed from a bare ``except:`` so KeyboardInterrupt and
                # SystemExit still propagate.
                pass
            module_init = 1
################################################################################
################################################################################
################################################################################
class testcase_memHierarchy_sdl(SSTTestCase):
    """Integration tests running the memHierarchy SDL configs under SST.

    Each ``test_*`` method launches one SDL python config through SST via
    ``memHierarchy_Template`` and diffs the captured output against a
    checked-in reference file, filtering out known DRAMSim / Ramulator /
    parallel-run noise.
    """
    def initializeClass(self, testName):
        """Per-test class-level hook invoked by the SST test framework."""
        super(type(self), self).initializeClass(testName)
        # Put test based setup code here. it is called before testing starts
        # NOTE: This method is called once for every test
    def setUp(self):
        """Run framework setUp plus the one-time module initialization."""
        super(type(self), self).setUp()
        initializeTestModule_SingleInstance(self)
        # Put test based setup code here. it is called once before every test
    def tearDown(self):
        """Run framework tearDown."""
        # Put test based teardown code here. it is called once after every test
        super(type(self), self).tearDown()
#####
    def test_memHierarchy_sdl_1(self):
        # sdl-1 Simple CPU + 1 level cache + Memory
        self.memHierarchy_Template("sdl-1")
    def test_memHierarchy_sdl_2(self):
        # sdl-2 Simple CPU + 1 level cache + DRAMSim Memory
        self.memHierarchy_Template("sdl-2")
    def test_memHierarchy_sdl_3(self):
        # sdl-3 Simple CPU + 1 level cache + DRAMSim Memory (alternate block size)
        self.memHierarchy_Template("sdl-3")
    def test_memHierarchy_sdl2_1(self):
        # sdl2-1 Simple CPU + 2 levels cache + Memory
        self.memHierarchy_Template("sdl2-1")
    def test_memHierarchy_sdl3_1(self):
        # sdl3-1 2 Simple CPUs + 2 levels cache + Memory
        self.memHierarchy_Template("sdl3-1")
    def test_memHierarchy_sdl3_2(self):
        # sdl3-2 2 Simple CPUs + 2 levels cache + DRAMSim Memory
        self.memHierarchy_Template("sdl3-2")
    def test_memHierarchy_sdl3_3(self):
        self.memHierarchy_Template("sdl3-3")
    def test_memHierarchy_sdl4_1(self):
        self.memHierarchy_Template("sdl4-1")
    @skip_on_sstsimulator_conf_empty_str("DRAMSIM", "LIBDIR", "DRAMSIM is not included as part of this build")
    def test_memHierarchy_sdl4_2_dramsim(self):
        # DRAMSim emits warnings on stderr, so the error file is not checked.
        self.memHierarchy_Template("sdl4-2", ignore_err_file=True)
    @skip_on_sstsimulator_conf_empty_str("RAMULATOR", "LIBDIR", "RAMULATOR is not included as part of this build")
    def test_memHierarchy_sdl4_2_ramulator(self):
        self.memHierarchy_Template("sdl4-2-ramulator")
    @skip_on_sstsimulator_conf_empty_str("DRAMSIM", "LIBDIR", "DRAMSIM is not included as part of this build")
    def test_memHierarchy_sdl5_1_dramsim(self):
        self.memHierarchy_Template("sdl5-1", ignore_err_file=True)
    @skip_on_sstsimulator_conf_empty_str("RAMULATOR", "LIBDIR", "RAMULATOR is not included as part of this build")
    def test_memHierarchy_sdl5_1_ramulator(self):
        # Multi-rank / multi-thread runs use the multi-controller ("_MC") variant.
        if testing_check_get_num_ranks() > 1 or testing_check_get_num_threads() > 1:
            self.memHierarchy_Template("sdl5-1-ramulator_MC")
        else:
            self.memHierarchy_Template("sdl5-1-ramulator")
    def test_memHierarchy_sdl8_1(self):
        self.memHierarchy_Template("sdl8-1")
    def test_memHierarchy_sdl8_3(self):
        self.memHierarchy_Template("sdl8-3")
    def test_memHierarchy_sdl8_4(self):
        self.memHierarchy_Template("sdl8-4")
    def test_memHierarchy_sdl9_1(self):
        self.memHierarchy_Template("sdl9-1")
    def test_memHierarchy_sdl9_2(self):
        self.memHierarchy_Template("sdl9-2")
#####
    def memHierarchy_Template(self, testcase, ignore_err_file=False):
        """Run one SDL config under SST and diff its output vs the reference.

        testcase        -- SDL config name, e.g. "sdl5-1-ramulator_MC".  A
                           "_MC" suffix selects the multi-controller variant
                           while sharing the base .py config file; dashes map
                           to underscores in the output/reference file names.
        ignore_err_file -- when True, a non-empty stderr capture is not
                           reported (used for DRAMSim-backed runs).
        """
        # Get the path to the test files
        test_path = self.get_testsuite_dir()
        outdir = self.get_test_output_run_dir()
        tmpdir = self.get_test_output_tmp_dir()
        # Some tweeking of file names are due to inconsistencys with testcase name
        testcasename_sdl = testcase.replace("_MC", "")
        testcasename_out = testcase.replace("-", "_")
        # Set the various file paths
        testDataFileName=("test_memHierarchy_{0}".format(testcasename_out))
        sdlfile = "{0}/{1}.py".format(test_path, testcasename_sdl)
        reffile = "{0}/refFiles/{1}.out".format(test_path, testDataFileName)
        outfile = "{0}/{1}.out".format(outdir, testDataFileName)
        errfile = "{0}/{1}.err".format(outdir, testDataFileName)
        mpioutfiles = "{0}/{1}.testfile".format(outdir, testDataFileName)
        log_debug("testcase = {0}".format(testcase))
        log_debug("sdl file = {0}".format(sdlfile))
        log_debug("ref file = {0}".format(reffile))
        # Run SST in the tests directory
        self.run_sst(sdlfile, outfile, errfile, set_cwd=test_path, mpi_out_files=mpioutfiles)
        # Lines to ignore
        # These are generated by DRAMSim
        ignore_lines = ["===== MemorySystem"]
        ignore_lines.append("TOTAL_STORAGE : 2048MB | 1 Ranks | 16 Devices per rank")
        ignore_lines.append("== Loading")
        ignore_lines.append("DRAMSim2 Clock Frequency =1Hz, CPU Clock Frequency=1Hz")
        ignore_lines.append("WARNING: UNKNOWN KEY 'DEBUG_TRANS_FLOW' IN INI FILE")
        # This is generated by SST when the number of ranks/threads > # of components
        ignore_lines.append("WARNING: No components are assigned to")
        #These are warnings/info generated by SST/memH in debug mode
        ignore_lines.append("Notice: memory controller's region is larger than the backend's mem_size")
        ignore_lines.append("Region: start=")
        # This may be present if ranks < 2
        ignore_lines.append("not aligned to the request size")
        # Statistics that count occupancy on each cycle sometimes diff in parallel execution
        # due to the synchronization interval sometimes allowing the clock to run ahead a cycle or so
        tol_stats = { "outstanding_requests" : [0, 0, 20, 0, 0], # Only diffs in number of cycles
                      "total_cycles" : [20, 'X', 20, 20, 20], # This stat is set once at the end of sim. May vary in all fields
                      "MSHR_occupancy" : [0, 0, 20, 0, 0] } # Only diffs in number of cycles
        filesAreTheSame, statDiffs, othDiffs = testing_stat_output_diff(outfile, reffile, ignore_lines, tol_stats, True)
        # Perform the tests
        if ignore_err_file is False:
            if os_test_file(errfile, "-s"):
                log_testing_note("memHierarchy SDL test {0} has a Non-Empty Error File {1}".format(testDataFileName, errfile))
        if filesAreTheSame:
            log_debug(" -- Output file {0} passed check against the Reference File {1}".format(outfile, reffile))
        else:
            diffdata = self._prettyPrintDiffs(statDiffs, othDiffs)
            log_failure(diffdata)
            self.assertTrue(filesAreTheSame, "Output file {0} does not pass check against the Reference File {1} ".format(outfile, reffile))
###
    # Remove lines containing any string found in 'remove_strs' from in_file
    # If out_file != None, output is out_file
    # Otherwise, in_file is overwritten
    def _remove_lines_cleanup_file(self, remove_strs, in_file, out_file = None, append = False):
        """Copy in_file to out_file (default: rewrite in place), dropping any
        line that contains one of the substrings in remove_strs.  With
        append=True the surviving lines are appended to out_file instead."""
        with open(in_file, 'r') as fp:
            lines = fp.readlines()
        if out_file == None:
            out_file = in_file
        if append == True:
            mode = 'a'
        else:
            mode = 'w'
        with open(out_file, mode) as fp:
            if not append:
                fp.truncate(0)
            for line in lines:
                skip = False
                for search in remove_strs:
                    if search in line:
                        skip = True
                        continue
                if not skip:
                    fp.write(line)
    def _prettyPrintDiffs(self, stat_diff, oth_diff):
        """Format the statistic and non-statistic diff lists (as returned by
        testing_stat_output_diff) into a human-readable failure message."""
        out = ""
        if len(stat_diff) != 0:
            out = "Statistic diffs:\n"
            for x in stat_diff:
                out += (x[0] + " " + ",".join(str(y) for y in x[1:]) + "\n")
        if len(oth_diff) != 0:
            out += "Non-statistic diffs:\n"
            for x in oth_diff:
                out += x[0] + " " + x[1] + "\n"
        return out
| 40.428571 | 140 | 0.619514 |
from sst_unittest import *
from sst_unittest_support import *
import os.path
| true | true |
f71c0f38bd33c235f501fe3157c2816cd7eb598d | 2,430 | py | Python | data/p4VQE/R4/benchmark/startQiskit_noisy62.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startQiskit_noisy62.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startQiskit_noisy62.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the QAOA-style benchmark circuit on n qubits.

    Relies on module-level globals assigned in ``__main__``: E (weighted
    edge list), V (vertex array) and the angles gamma / beta.
    NOTE(review): despite the ``n`` parameter, the body hardwires qubits
    0..3, so only n=4 is meaningful — confirm before generalizing.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)
    # Uniform superposition over all qubits, plus an extra Y on qubit 3.
    prog.h(input_qubit[0]) # number=1
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=5
    for edge in E:
        k = edge[0]
        l = edge[1]
        # Cost-layer entangler.  NOTE(review): cp() uses k-1/l-1 while the
        # p() calls below pass k and l directly as qubit indices — looks
        # like an off-by-one inconsistency in this generated benchmark;
        # preserved here, but worth confirming against the generator.
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        prog.p(gamma, k)
        prog.p(gamma, l)
    # Mixer layer applied across every vertex qubit.
    prog.rx(2 * beta, range(len(V)))
    # The two swaps cancel and the two Y gates cancel (benchmark padding).
    prog.swap(input_qubit[1],input_qubit[0]) # number=6
    prog.swap(input_qubit[1],input_qubit[0]) # number=7
    prog.y(input_qubit[3]) # number=8
    prog.y(input_qubit[3]) # number=9
    # circuit end
    return prog
if __name__ == '__main__':
    # Problem graph: 4 vertices, 5 unit-weight edges.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)
    # Grid search over (gamma, beta) in [0, pi) using the closed-form
    # p=1 QAOA expectation F1, then pick the maximizing angles.
    step_size = 0.1
    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
            1 + np.cos(4 * a_gamma) ** 2)
    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]
    gamma = a[0] * step_size
    beta = a[1] * step_size
    # Build the circuit with the chosen angles (read as module globals).
    prog = make_circuit(4)
    sample_shot =5600
    writefile = open("../data/startQiskit_noisy62.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # Transpile for, and execute on, the noisy FakeYorktown backend model.
    backend = FakeYorktown()
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| 27 | 118 | 0.631687 |
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.y(input_qubit[3])
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0])
prog.swap(input_qubit[1],input_qubit[0])
prog.y(input_qubit[3])
prog.y(input_qubit[3])
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_noisy62.csv", "w")
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| true | true |
f71c0f815fea461fc753f56d6e32829a423105fd | 431 | py | Python | project/tasks/sample_tasks.py | idjemaoune/django-celery | 411e854fc63a4d42be2a6e8861c4dc6b89969161 | [
"MIT"
] | null | null | null | project/tasks/sample_tasks.py | idjemaoune/django-celery | 411e854fc63a4d42be2a6e8861c4dc6b89969161 | [
"MIT"
] | null | null | null | project/tasks/sample_tasks.py | idjemaoune/django-celery | 411e854fc63a4d42be2a6e8861c4dc6b89969161 | [
"MIT"
] | null | null | null | # project/tasks/sample_tasks.py
import time
from celery import shared_task
@shared_task
def send_email(email_id, message):
    """Simulate sending an email: block for ten seconds, then log delivery."""
    delay_seconds = 10
    time.sleep(delay_seconds)
    confirmation = f"Email is sent to {email_id}. Message sent was - {message}"
    print(confirmation)
@shared_task
def get_micro_app_status(app):
    """Log that the given micro app is up (message text is in French)."""
    status_line = f"La micro app {app}. est UP"
    print(status_line)
@shared_task
def create_task(task_type):
    """Sleep 10 seconds per unit of ``task_type``, log, and return True."""
    duration = int(task_type) * 10
    time.sleep(duration)
    print("je suis execueter")
    return True
| 17.958333 | 71 | 0.716937 |
import time
from celery import shared_task
@shared_task
def send_email(email_id, message):
time.sleep(10)
print(f"Email is sent to {email_id}. Message sent was - {message}")
@shared_task
def get_micro_app_status(app):
print(f"La micro app {app}. est UP")
@shared_task
def create_task(task_type):
time.sleep(int(task_type) * 10)
print("je suis execueter")
return True
| true | true |
f71c0fb67deffb5ad5e92f615b5c852fabdd95ff | 9,938 | py | Python | docs/_downloads/d923ca53b1bfbeb3c222ae46d65d485e/transfer_learning_tutorial.py | pleiades-s/PyTorch-tutorials-kr | 3d749ea2fe67363b5d46340b742308b744fa0419 | [
"BSD-3-Clause"
] | 2 | 2021-01-18T04:59:05.000Z | 2021-03-20T00:56:24.000Z | docs/_downloads/d923ca53b1bfbeb3c222ae46d65d485e/transfer_learning_tutorial.py | pleiades-s/PyTorch-tutorials-kr | 3d749ea2fe67363b5d46340b742308b744fa0419 | [
"BSD-3-Clause"
] | null | null | null | docs/_downloads/d923ca53b1bfbeb3c222ae46d65d485e/transfer_learning_tutorial.py | pleiades-s/PyTorch-tutorials-kr | 3d749ea2fe67363b5d46340b742308b744fa0419 | [
"BSD-3-Clause"
] | 1 | 2022-02-27T10:47:39.000Z | 2022-02-27T10:47:39.000Z | # -*- coding: utf-8 -*-
"""
컴퓨터 비전(Vision)을 위한 전이학습(Transfer Learning)
=======================================================
**Author**: `Sasank Chilamkurthy <https://chsasank.github.io>`_
**번역**: `박정환 <http://github.com/9bow>`_
이 튜토리얼에서는 전이학습(Transfer Learning)을 이용하여 이미지 분류를 위한
합성곱 신경망을 어떻게 학습시키는지 배워보겠습니다. 전이학습에 대해서는
`CS231n 노트 <http://cs231n.github.io/transfer-learning/>`__ 에서 더 많은 내용을
읽어보실 수 있습니다.
위 노트를 인용해보면,
실제로 충분한 크기의 데이터셋을 갖추기는 상대적으로 드물기 때문에,
(무작위 초기화를 통해) 맨 처음부터 합성곱 신경망(Convolutional
Network) 전체를 학습하는 사람은 매우 적습니다. 대신, 매우 큰 데이터셋(예.
100가지 분류에 대해 120만개의 이미지가 포함된 ImageNet)에서 합성곱
신경망(ConvNet)을 미리 학습한 후, 이 합성곱 신경망을 관심있는 작업
을 위한 초기 설정 또는 고정된 특징 추출기(fixed feature extractor)로 사용합니다.
이러한 전이학습 시나리오의 주요한 2가지는 다음과 같습니다:
- **합성곱 신경망의 미세조정(finetuning)**: 무작위 초기화 대신, 신경망을
ImageNet 1000 데이터셋 등으로 미리 학습한 신경망으로 초기화합니다. 학습의 나머지
과정들은 평상시와 같습니다.
- **고정된 특징 추출기로써의 합성곱 신경망**: 여기서는 마지막에 완전히 연결
된 계층을 제외한 모든 신경망의 가중치를 고정합니다. 이 마지막의 완전히 연결된
계층은 새로운 무작위의 가중치를 갖는 계층으로 대체되어 이 계층만 학습합니다.
"""
# License: BSD
# Author: Sasank Chilamkurthy
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # 대화형 모드
######################################################################
# 데이터 불러오기
# ---------------
#
# 데이터를 불러오기 위해 torchvision과 torch.utils.data 패키지를 사용하겠습니다.
#
# 여기서 풀고자 하는 문제는 **개미** 와 **벌** 을 분류하는 모델을 학습하는 것입니다.
# 개미와 벌 각각의 학습용 이미지는 대략 120장 정도 있고, 75개의 검증용 이미지가
# 있습니다. 일반적으로 맨 처음부터 학습을 한다면 이는 일반화하기에는 아주 작은
# 데이터셋입니다. 하지만 우리는 전이학습을 할 것이므로, 일반화를 제법 잘 할 수 있을
# 것입니다.
#
# 이 데이터셋은 ImageNet의 아주 작은 일부입니다.
#
# .. Note ::
# 데이터를 `여기 <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`_
# 에서 다운로드 받아 현재 디렉토리에 압축을 푸십시오.
# Data augmentation and normalization (ImageNet statistics) for training;
# only resize/crop/normalization for validation.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
data_dir = 'data/hymenoptera_data'
# One ImageFolder per phase ('train'/'val'), each with its own transforms;
# class labels are inferred from the subdirectory names.
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
                                             shuffle=True, num_workers=4)
              for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
# Train on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
######################################################################
# 일부 이미지 시각화하기
# ^^^^^^^^^^^^^^^^^^^^^^^^^
# 데이터 증가를 이해하기 위해 일부 학습용 이미지를 시각화해보겠습니다.
def imshow(inp, title=None):
    """Display a (C, H, W) image tensor with ImageNet normalization undone."""
    img = inp.numpy().transpose((1, 2, 0))
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    img = np.clip(channel_std * img + channel_mean, 0, 1)
    plt.imshow(img)
    if title is not None:
        plt.title(title)
    # Pause briefly so the figure window gets a chance to redraw.
    plt.pause(0.001)
# 학습 데이터의 배치를 얻습니다.
inputs, classes = next(iter(dataloaders['train']))
# 배치로부터 격자 형태의 이미지를 만듭니다.
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
######################################################################
# 모델 학습하기
# --------------
#
# 이제 모델을 학습하기 위한 일반 함수를 작성해보겠습니다. 여기서는 다음 내용들을
# 설명합니다:
#
# - 학습율(learning rate) 관리(scheduling)
# - 최적의 모델 구하기
#
# 아래에서 ``scheduler`` 매개변수는 ``torch.optim.lr_scheduler`` 의 LR 스케쥴러
# 객체(Object)입니다.
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train ``model`` for ``num_epochs`` epochs and return the best weights.

    Runs a train and a validation phase per epoch using the module-level
    ``dataloaders``/``dataset_sizes``/``device``, steps the LR scheduler
    once per epoch, and keeps a deep copy of the weights with the highest
    validation accuracy, which is loaded back into the model at the end.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training phase followed by a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set the model to training mode
            else:
                model.eval()  # Set the model to evaluation mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over the data for this phase.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # Zero the parameter gradients
                optimizer.zero_grad()
                # Forward pass.
                # Track the computation history only while training.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # Backward pass + optimizer step, training phase only.
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # Statistics (loss is re-weighted by batch size).
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # Deep-copy the model weights when validation accuracy improves.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # Load the best model weights back before returning.
    model.load_state_dict(best_model_wts)
    return model
######################################################################
# 모델 예측값 시각화하기
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# 일부 이미지에 대한 예측값을 보여주는 일반화된 함수입니다.
#
def visualize_model(model, num_images=6):
    """Plot ``num_images`` validation images with the model's predictions.

    Temporarily switches the model to eval mode (restoring the previous
    training/eval mode before returning) and draws the images in a
    num_images//2 x 2 subplot grid via ``imshow``.
    """
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images//2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])
                # Stop (and restore the original mode) once enough images
                # have been drawn.
                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
######################################################################
# 합성곱 신경망 미세조정(finetuning)
# ----------------------------------
#
# 미리 학습한 모델을 불러온 후 마지막의 완전히 연결된 계층을 초기화합니다.
#
# Finetuning setup: start from an ImageNet-pretrained ResNet-18.
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
# The output size is set to 2 here (ants vs. bees).
# Alternatively it can be generalized to nn.Linear(num_ftrs, len(class_names)).
model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized.
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay the learning rate by a factor of 0.1 every 7 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
######################################################################
# 학습 및 평가하기
# ^^^^^^^^^^^^^^^^^^
#
# CPU에서는 15-25분 가량, GPU에서는 1분도 이내의 시간이 걸립니다.
#
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
######################################################################
#
visualize_model(model_ft)
######################################################################
# 고정된 특징 추출기로써의 합성곱 신경망
# ---------------------------------------
#
# 이제, 마지막 계층을 제외한 신경망의 모든 부분을 고정해야 합니다.
# ``requires_grad == False`` 로 설정하여 매개변수를 고정하여 ``backward()`` 중에
# 경사도가 계산되지 않도록 해야합니다.
#
# 이에 대한 문서는
# `여기 <http://pytorch.org/docs/notes/autograd.html#excluding-subgraphs-from-backward>`__
# 에서 확인할 수 있습니다.
#
# Fixed-feature-extractor setup: freeze the pretrained backbone so
# backward() computes no gradients for it.
model_conv = torchvision.models.resnet18(pretrained=True)
for param in model_conv.parameters():
    param.requires_grad = False
# Parameters of newly constructed modules have requires_grad=True by default.
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, 2)
model_conv = model_conv.to(device)
criterion = nn.CrossEntropyLoss()
# Unlike before, only the parameters of the final layer are optimized.
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)
# Decay the learning rate by a factor of 0.1 every 7 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
######################################################################
# 학습 및 평가하기
# ^^^^^^^^^^^^^^^^^
#
# CPU에서 실행하는 경우 이전과 비교했을 때 약 절반 가량의 시간만이 소요될 것입니다.
# 이는 대부분의 신경망에서 경사도를 계산할 필요가 없기 때문입니다. 하지만,
# 순전파는 계산이 필요합니다.
#
model_conv = train_model(model_conv, criterion, optimizer_conv,
exp_lr_scheduler, num_epochs=25)
######################################################################
#
visualize_model(model_conv)
plt.ioff()
plt.show()
######################################################################
# 더 배워볼 내용
# -----------------
#
# 전이학습의 응용 사례(application)들을 더 알아보려면,
# :doc:`/intermediate/quantized_transfer_learning_tutorial` 을 참조해보세요.
#
#
| 29.229412 | 88 | 0.553934 |
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion()
| true | true |
f71c100fdfb127051c66f6dd3ec6cfe317c4ad61 | 3,783 | py | Python | my3b1b/old/MicoaelPrimo.py | Micoael/3b1b-styled-video-code | 036b339573e48f807e215bc7c7be9c6fe32b601d | [
"Apache-2.0"
] | 7 | 2020-03-02T23:56:39.000Z | 2020-06-08T15:05:46.000Z | my3b1b/old/MicoaelPrimo.py | Micoael/3b1b-styled-video-code | 036b339573e48f807e215bc7c7be9c6fe32b601d | [
"Apache-2.0"
] | null | null | null | my3b1b/old/MicoaelPrimo.py | Micoael/3b1b-styled-video-code | 036b339573e48f807e215bc7c7be9c6fe32b601d | [
"Apache-2.0"
] | null | null | null | from manimlib.imports import *
class StartingScene(Scene):
def construct(_):
e = Text("Manim homework by mp",font="Consolas",color=BLUE)
_.play(Write(e),run_time=3)
_.wait()
_.play(Uncreate(e))
A = Dot().move_to(np.array([0-2,0,0]))
B = Dot().move_to(np.array([9/10-2,12/10,0]))
C = Dot().move_to(np.array([5/2-2,0,0]))
D = B.copy().shift(9/10*UP+6/5*LEFT)
E = A.copy().shift(9/10*UP+6/5*LEFT)
F = B.copy().shift(8/5*UP+6/5*RIGHT)
G = C.copy().shift(8/5*UP+6/5*RIGHT)
H = A.copy().shift(5/2*DOWN)
I = C.copy().shift(5/2*DOWN)
lab = VGroup()
labtxt = [TextMobject("A").next_to(A).scale(0.5),
TextMobject("B").next_to(B).scale(0.5),
TextMobject("C").next_to(C).scale(0.5),
TextMobject("D").next_to(D).scale(0.5),
TextMobject("E").next_to(E).scale(0.5),
TextMobject("F").next_to(F).scale(0.5),
TextMobject("G").next_to(G).scale(0.5),
TextMobject("H").next_to(H).scale(0.5),
TextMobject("I").next_to(I).scale(0.5),
]
for i in range(len(labtxt)):
lab.add(labtxt[i])
original_trangle = Polygon(A.get_center(),B.get_center(),C.get_center(),color=ORANGE,fill_color = ORANGE,fill_opacity=0.5)
rect1 = Polygon(A.get_center(),B.get_center(),D.get_center(),E.get_center(),color=GREEN,fill_color = GREEN,fill_opacity=0.5)
rect2 = Polygon(B.get_center(),F.get_center(),G.get_center(),C.get_center(),color=GREEN,fill_color = GREEN,fill_opacity=0.5)
rect3 = Polygon(A.get_center(),C.get_center(),I.get_center(),H.get_center(),color=GREEN,fill_color = GREEN,fill_opacity=0.5)
tran1 = Polygon(D.get_center(),F.get_center(),B.get_center(),color=YELLOW,fill_color = YELLOW,fill_opacity=0.5)
tran2 = Polygon(E.get_center(),A.get_center(),H.get_center(),color=YELLOW,fill_color = YELLOW,fill_opacity=0.5)
tran3 = Polygon(C.get_center(),G.get_center(),I.get_center(),color=YELLOW,fill_color = YELLOW,fill_opacity=0.5)
def getc1(obj):
obj.move_to(tran1.get_center())
def getc2(obj):
obj.move_to(tran2.get_center())
def getc3(obj):
obj.move_to(tran3.get_center())
S1 = TexMobject("S1").add_updater(getc1)
S2 = TexMobject("S2").add_updater(getc2)
S3 = TexMobject("S3").add_updater(getc3)
trans = VGroup(tran1,tran2,tran3,S1,S2,S3)
# _.add(A,B,C,D,E,F,G,H,I,lab,original_trangle,rect1,rect2,rect3,tran1,tran2,tran3,S1,S2,S3)
_.play(ShowCreation(original_trangle))
_.wait()
_.play(ShowCreation(rect1),ShowCreation(rect2),ShowCreation(rect3))
_.wait()
_.play(ShowCreation(tran1),ShowCreation(tran2),ShowCreation(tran3)
,Write(S1),Write(S2),Write(S3) ,)
_.wait()
_.play(FadeOut(rect1),FadeOut(rect2),FadeOut(rect3))
_.wait()
_.play(Rotate(tran1,PI/2,about_point = B.get_center()),
Rotate(tran2,PI/2,about_point = A.get_center()),
Rotate(tran3,PI/2,about_point = C.get_center()) )
_.play(Transform(tran1,original_trangle))
_.play(Transform(tran2,original_trangle))
_.play(Transform(tran3,original_trangle))
S1.clear_updaters()
S2.clear_updaters()
S3.clear_updaters()
_.play(S1.shift,2*UP+1.5*LEFT)
_.play(S2.shift,2*UP)
_.play(S3.shift,2*UP+1.5*RIGHT)
eq = TextMobject("=").next_to(S1)
eq2 = TextMobject("=").next_to(S2)
_.play(Write(eq),Write(eq2))
| 49.12987 | 133 | 0.576791 | from manimlib.imports import *
class StartingScene(Scene):
def construct(_):
e = Text("Manim homework by mp",font="Consolas",color=BLUE)
_.play(Write(e),run_time=3)
_.wait()
_.play(Uncreate(e))
A = Dot().move_to(np.array([0-2,0,0]))
B = Dot().move_to(np.array([9/10-2,12/10,0]))
C = Dot().move_to(np.array([5/2-2,0,0]))
D = B.copy().shift(9/10*UP+6/5*LEFT)
E = A.copy().shift(9/10*UP+6/5*LEFT)
F = B.copy().shift(8/5*UP+6/5*RIGHT)
G = C.copy().shift(8/5*UP+6/5*RIGHT)
H = A.copy().shift(5/2*DOWN)
I = C.copy().shift(5/2*DOWN)
lab = VGroup()
labtxt = [TextMobject("A").next_to(A).scale(0.5),
TextMobject("B").next_to(B).scale(0.5),
TextMobject("C").next_to(C).scale(0.5),
TextMobject("D").next_to(D).scale(0.5),
TextMobject("E").next_to(E).scale(0.5),
TextMobject("F").next_to(F).scale(0.5),
TextMobject("G").next_to(G).scale(0.5),
TextMobject("H").next_to(H).scale(0.5),
TextMobject("I").next_to(I).scale(0.5),
]
for i in range(len(labtxt)):
lab.add(labtxt[i])
original_trangle = Polygon(A.get_center(),B.get_center(),C.get_center(),color=ORANGE,fill_color = ORANGE,fill_opacity=0.5)
rect1 = Polygon(A.get_center(),B.get_center(),D.get_center(),E.get_center(),color=GREEN,fill_color = GREEN,fill_opacity=0.5)
rect2 = Polygon(B.get_center(),F.get_center(),G.get_center(),C.get_center(),color=GREEN,fill_color = GREEN,fill_opacity=0.5)
rect3 = Polygon(A.get_center(),C.get_center(),I.get_center(),H.get_center(),color=GREEN,fill_color = GREEN,fill_opacity=0.5)
tran1 = Polygon(D.get_center(),F.get_center(),B.get_center(),color=YELLOW,fill_color = YELLOW,fill_opacity=0.5)
tran2 = Polygon(E.get_center(),A.get_center(),H.get_center(),color=YELLOW,fill_color = YELLOW,fill_opacity=0.5)
tran3 = Polygon(C.get_center(),G.get_center(),I.get_center(),color=YELLOW,fill_color = YELLOW,fill_opacity=0.5)
def getc1(obj):
obj.move_to(tran1.get_center())
def getc2(obj):
obj.move_to(tran2.get_center())
def getc3(obj):
obj.move_to(tran3.get_center())
S1 = TexMobject("S1").add_updater(getc1)
S2 = TexMobject("S2").add_updater(getc2)
S3 = TexMobject("S3").add_updater(getc3)
trans = VGroup(tran1,tran2,tran3,S1,S2,S3)
_.play(ShowCreation(original_trangle))
_.wait()
_.play(ShowCreation(rect1),ShowCreation(rect2),ShowCreation(rect3))
_.wait()
_.play(ShowCreation(tran1),ShowCreation(tran2),ShowCreation(tran3)
,Write(S1),Write(S2),Write(S3) ,)
_.wait()
_.play(FadeOut(rect1),FadeOut(rect2),FadeOut(rect3))
_.wait()
_.play(Rotate(tran1,PI/2,about_point = B.get_center()),
Rotate(tran2,PI/2,about_point = A.get_center()),
Rotate(tran3,PI/2,about_point = C.get_center()) )
_.play(Transform(tran1,original_trangle))
_.play(Transform(tran2,original_trangle))
_.play(Transform(tran3,original_trangle))
S1.clear_updaters()
S2.clear_updaters()
S3.clear_updaters()
_.play(S1.shift,2*UP+1.5*LEFT)
_.play(S2.shift,2*UP)
_.play(S3.shift,2*UP+1.5*RIGHT)
eq = TextMobject("=").next_to(S1)
eq2 = TextMobject("=").next_to(S2)
_.play(Write(eq),Write(eq2))
| true | true |
f71c111b67dac5359468b1d2de3970e43bfa4ea3 | 5,551 | py | Python | leetcode_python/Array/longest-arithmetic-subsequence.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Array/longest-arithmetic-subsequence.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Array/longest-arithmetic-subsequence.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | """
1027. Longest Arithmetic Subsequence
Medium
Given an array nums of integers, return the length of the longest arithmetic subsequence in nums.
Recall that a subsequence of an array nums is a list nums[i1], nums[i2], ..., nums[ik] with 0 <= i1 < i2 < ... < ik <= nums.length - 1, and that a sequence seq is arithmetic if seq[i+1] - seq[i] are all the same value (for 0 <= i < seq.length - 1).
Example 1:
Input: nums = [3,6,9,12]
Output: 4
Explanation:
The whole array is an arithmetic sequence with steps of length = 3.
Example 2:
Input: nums = [9,4,7,2,10]
Output: 3
Explanation:
The longest arithmetic subsequence is [4,7,10].
Example 3:
Input: nums = [20,1,15,3,10,5,8]
Output: 4
Explanation:
The longest arithmetic subsequence is [20,15,10,5].
Constraints:
2 <= nums.length <= 1000
0 <= nums[i] <= 500
"""
# V0
# IDEA : DP
class Solution:
def longestArithSeqLength(self, A):
dp = {}
for i in range(len(A)):
for j in range(i + 1, len(A)):
dp[j, A[j] - A[i]] = dp.get((i, A[j] - A[i]), 1) + 1
return max(dp.values())
# V0'
# IDEA : HASH TABLE
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274657/Short-Python-solution
class Solution:
def longestArithSeqLength(self, A):
aux, cnt, prefix = {a : {} for a in A}, {}, set()
for a in A:
cnt[a] = cnt[a] + 1 if a in cnt else 1
for b in prefix:
if a != b:
aux[a][a - b] = 1 + aux[b][a - b] if a - b in aux[b] else 2
prefix.add(a)
max_const = max(cnt.values())
max_aux = max(max(d.values()) for a, d in aux.items() if d)
return max(max_const, max_aux, 2)
# V1
# https://www.796t.com/article.php?id=154559
# http://www.noteanddata.com/leetcode-1027-Longest-Arithmetic-Sequence-Google-Interview-Problem-java-solution-note.html
# https://blog.csdn.net/w5688414/article/details/109696664
# V1
# IDEA : HASH
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274657/Short-Python-solution
class Solution:
def longestArithSeqLength(self, A):
aux, cnt, prefix = {a : {} for a in A}, {}, set()
for a in A:
cnt[a] = cnt[a] + 1 if a in cnt else 1
for b in prefix:
if a != b:
aux[a][a - b] = 1 + aux[b][a - b] if a - b in aux[b] else 2
prefix.add(a)
max_const = max(cnt.values())
max_aux = max(max(d.values()) for a, d in aux.items() if d)
return max(max_const, max_aux, 2)
# V1'
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/275395/python-O(n**2)-solution
class Solution:
def longestArithSeqLength(self, A):
# Constant seq: '0000', O(len(A) )
ct = collections.Counter(A)
ans = max(2, max(ct[i] for i in ct))
# Increasing seq:'1234', O(len(A)**2 )
ansdic = {}
for i in range(len(A)):
for j in range(i):
a0, a1, a2 = A[j]*2-A[i], A[j], A[i]
if a0 == a1:continue
if (a0, a1) in ansdic:
ansdic[a1, a2] = ansdic[a0, a1] + 1
ans = max(ansdic[a1, a2], ans)
else:
ansdic[a1, a2] = 2
return ans
# V1''
# IDEA : HASH SET
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274625/simple-hash-Set-Python
class Solution(object):
def longestArithSeqLength(self, A):
res = 2
if len(A) <= 2:
return len(A)
cnt = {}
node = {}
mx = {}
curr = A[1] - A[0]
cnt[(curr,1)] = 2
node[curr] = set()
node[curr].add(1)
mx[curr] = 2
res = 2
for i in range(2,len(A)):
for j in range(i):
dis = A[i] - A[j]
if dis in node:
if j in node[dis]:
cnt[(dis,i)] = cnt[(dis,j)] + 1
#node[dis].remove(j)
node[dis].add(i)
mx[dis] = max(mx[dis], cnt[(dis,i)])
res = max(mx[dis],res)
else:
cnt[(dis,i)] = 2
node[dis].add(i)
else:
cnt[(dis,i)] = 2
node[dis] = set()
node[dis].add(i)
mx[dis] = 2
return res
# V1'''
# IDEA : DP
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274611/JavaC%2B%2BPython-DP
class Solution:
def longestArithSeqLength(self, A):
dp = {}
for i in range(len(A)):
for j in range(i + 1, len(A)):
dp[j, A[j] - A[i]] = dp.get((i, A[j] - A[i]), 1) + 1
return max(dp.values())
# V1''''
# IDEA : DP
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/514742/Python-DP
class Solution:
def longestArithSeqLength(self, A):
DP = {}
A_len = len(A)
for right in range(1, A_len):
for left in range(right):
diff = A[right] - A[left]
#if (diff, left) in DP:
# DP[(diff, right)] = DP[(diff, left)] + 1
#else:
# DP[(diff, right)] = 2
DP[(diff, right)] = DP.get((diff,left), 1) + 1
return max(DP.values())
# V2 | 31.361582 | 248 | 0.503513 |
class Solution:
def longestArithSeqLength(self, A):
dp = {}
for i in range(len(A)):
for j in range(i + 1, len(A)):
dp[j, A[j] - A[i]] = dp.get((i, A[j] - A[i]), 1) + 1
return max(dp.values())
# IDEA : HASH TABLE
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274657/Short-Python-solution
class Solution:
def longestArithSeqLength(self, A):
aux, cnt, prefix = {a : {} for a in A}, {}, set()
for a in A:
cnt[a] = cnt[a] + 1 if a in cnt else 1
for b in prefix:
if a != b:
aux[a][a - b] = 1 + aux[b][a - b] if a - b in aux[b] else 2
prefix.add(a)
max_const = max(cnt.values())
max_aux = max(max(d.values()) for a, d in aux.items() if d)
return max(max_const, max_aux, 2)
# V1
# https://www.796t.com/article.php?id=154559
# http://www.noteanddata.com/leetcode-1027-Longest-Arithmetic-Sequence-Google-Interview-Problem-java-solution-note.html
# https://blog.csdn.net/w5688414/article/details/109696664
# V1
# IDEA : HASH
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274657/Short-Python-solution
class Solution:
def longestArithSeqLength(self, A):
aux, cnt, prefix = {a : {} for a in A}, {}, set()
for a in A:
cnt[a] = cnt[a] + 1 if a in cnt else 1
for b in prefix:
if a != b:
aux[a][a - b] = 1 + aux[b][a - b] if a - b in aux[b] else 2
prefix.add(a)
max_const = max(cnt.values())
max_aux = max(max(d.values()) for a, d in aux.items() if d)
return max(max_const, max_aux, 2)
# V1'
class Solution:
def longestArithSeqLength(self, A):
ct = collections.Counter(A)
ans = max(2, max(ct[i] for i in ct))
ansdic = {}
for i in range(len(A)):
for j in range(i):
a0, a1, a2 = A[j]*2-A[i], A[j], A[i]
if a0 == a1:continue
if (a0, a1) in ansdic:
ansdic[a1, a2] = ansdic[a0, a1] + 1
ans = max(ansdic[a1, a2], ans)
else:
ansdic[a1, a2] = 2
return ans
class Solution(object):
def longestArithSeqLength(self, A):
res = 2
if len(A) <= 2:
return len(A)
cnt = {}
node = {}
mx = {}
curr = A[1] - A[0]
cnt[(curr,1)] = 2
node[curr] = set()
node[curr].add(1)
mx[curr] = 2
res = 2
for i in range(2,len(A)):
for j in range(i):
dis = A[i] - A[j]
if dis in node:
if j in node[dis]:
cnt[(dis,i)] = cnt[(dis,j)] + 1
node[dis].add(i)
mx[dis] = max(mx[dis], cnt[(dis,i)])
res = max(mx[dis],res)
else:
cnt[(dis,i)] = 2
node[dis].add(i)
else:
cnt[(dis,i)] = 2
node[dis] = set()
node[dis].add(i)
mx[dis] = 2
return res
# IDEA : DP
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274611/JavaC%2B%2BPython-DP
class Solution:
def longestArithSeqLength(self, A):
dp = {}
for i in range(len(A)):
for j in range(i + 1, len(A)):
dp[j, A[j] - A[i]] = dp.get((i, A[j] - A[i]), 1) + 1
return max(dp.values())
# V1''''
# IDEA : DP
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/514742/Python-DP
class Solution:
def longestArithSeqLength(self, A):
DP = {}
A_len = len(A)
for right in range(1, A_len):
for left in range(right):
diff = A[right] - A[left]
#if (diff, left) in DP:
# DP[(diff, right)] = DP[(diff, left)] + 1
#else:
# DP[(diff, right)] = 2
DP[(diff, right)] = DP.get((diff,left), 1) + 1
return max(DP.values())
# V2 | true | true |
f71c1149bd0ccd4c108d6852f0e7e33eb102e6e2 | 1,820 | py | Python | tests/util/test_i18n.py | zsluedem/MonkTrader | 760942a59919b34c876467bc0eb4afb30689cbc1 | [
"MIT"
] | 2 | 2018-11-17T06:39:36.000Z | 2019-01-18T13:14:15.000Z | tests/util/test_i18n.py | zsluedem/MonkTrader | 760942a59919b34c876467bc0eb4afb30689cbc1 | [
"MIT"
] | 37 | 2018-11-04T15:05:04.000Z | 2019-03-09T09:26:30.000Z | tests/util/test_i18n.py | zsluedem/MonkTrader | 760942a59919b34c876467bc0eb4afb30689cbc1 | [
"MIT"
] | null | null | null | #
# MIT License
#
# Copyright (c) 2018 WillQ
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
from unittest.mock import MagicMock, patch
from monkq.utils.i18n import LazyTranslation
def test_lazytranslation_not_setting() -> None:
with patch("monkq.utils.i18n.gettext", MagicMock()) as mockg:
mockg.find.return_value = None
trans = LazyTranslation()
trans.setup("CN")
trans.gettext("hello")
mockg.NullTranslations().gettext.assert_called()
def test_lazytranslation() -> None:
with patch("monkq.utils.i18n.gettext", MagicMock()) as mockg:
mockg.find.return_value = os.path.abspath(__file__)
trans = LazyTranslation()
trans.setup("CN")
trans.gettext("hello")
mockg.GNUTranslations().gettext.assert_called()
| 37.142857 | 79 | 0.737363 |
import os
from unittest.mock import MagicMock, patch
from monkq.utils.i18n import LazyTranslation
def test_lazytranslation_not_setting() -> None:
with patch("monkq.utils.i18n.gettext", MagicMock()) as mockg:
mockg.find.return_value = None
trans = LazyTranslation()
trans.setup("CN")
trans.gettext("hello")
mockg.NullTranslations().gettext.assert_called()
def test_lazytranslation() -> None:
with patch("monkq.utils.i18n.gettext", MagicMock()) as mockg:
mockg.find.return_value = os.path.abspath(__file__)
trans = LazyTranslation()
trans.setup("CN")
trans.gettext("hello")
mockg.GNUTranslations().gettext.assert_called()
| true | true |
f71c138dfb3853f24c17b2b530c0d786d88a9cf0 | 5,959 | py | Python | modelEpochs.py | JDMusc/Online-Bullying-Image-Classifcation | 9196c60c554cf160d68cb9e9c41fda124abebf63 | [
"MIT"
] | null | null | null | modelEpochs.py | JDMusc/Online-Bullying-Image-Classifcation | 9196c60c554cf160d68cb9e9c41fda124abebf63 | [
"MIT"
] | null | null | null | modelEpochs.py | JDMusc/Online-Bullying-Image-Classifcation | 9196c60c554cf160d68cb9e9c41fda124abebf63 | [
"MIT"
] | null | null | null | import copy
import numpy as np
from numpy import log10
import os
from toolz import pipe as p
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import numpy as np
import preprocessing as pp
def findParam(model, name_filter):
if callable(name_filter):
fn = name_filter
else:
name_filter = [name_filter] if type(name_filter) is str else name_filter
fn = lambda param_name: all(
component in param_name for component in name_filter)
return [(pn, pv) for (pn, pv) in model.named_parameters() if fn(pn)]
def setParameterRequiresGrad(model, requires_grad = False, params = None):
params = model.parameters() if params is None else params
for param in params:
param.requires_grad = requires_grad
def runEpochs(
model, criterion,
dataloaders, dataset_sizes, device,
log_params_verbose, num_epochs,
optimizer, scheduler,
writer):
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
prev_model_wts = best_model_wts
for epoch in range(num_epochs):
epoch_acc, model_wts = _run_epoch(
model,
criterion, dataloaders, dataset_sizes, device,
epoch, log_params_verbose, num_epochs,
optimizer, scheduler, writer)
_log_coef_diffs(writer, epoch, prev_model_wts, model_wts)
prev_model_wts = model_wts
if epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = model_wts
# load best model weights
model.load_state_dict(best_model_wts)
return (model, best_acc)
def viewParamsToBeUpdated(model):
return [n for (n,p) in model.named_parameters() if p.requires_grad == True]
def add_graph_model(writer, model, dataloaders, device):
inputs, classes = p(dataloaders['train'], iter, next)
inputs = inputs.to(device)
classes = classes.to(device)
writer.add_graph(model, inputs)
def _run_epoch(model,
criterion, dataloaders, dataset_sizes, device,
epoch, log_params_verbose, num_epochs,
optimizer, scheduler, writer):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
n_samples = {'train': 0, 'val': 0}
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
is_train = phase == 'train'
if is_train:
scheduler.step()
model.train()
else:
model.eval()
running_loss = 0.0
running_corrects = 0
for inputs, labels in dataloaders[phase]:
n_samples[phase] = n_samples[phase] + len(labels)
inputs = inputs.to(device)
labels = labels.to(device)
preds, loss = _take_step(
model, criterion, optimizer, inputs, labels, is_train)
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
_log_epoch_phase_stats(writer, epoch, phase, epoch_loss, epoch_acc)
if log_params_verbose:
_log_model_params_verbose(writer, model, epoch, phase)
# deep copy the model
model_wts = copy.deepcopy(model.state_dict())
_log_lr(writer, epoch, scheduler)
print('# training samples')
print(n_samples['train'])
print('# val samples')
print(n_samples['val'])
return epoch_acc, model_wts
def _take_step(model, criterion, optimizer, inputs, labels, is_train):
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(is_train):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if is_train:
loss.backward()
optimizer.step()
return preds, loss
def _add_scope(scope, k):
return scope + '/' + k
def _add_scope_gen(scope):
return lambda k: _add_scope(scope, k)
def _log_model_params_verbose(writer, model, run_num, scope, use_hist = False):
def write(tag, param):
fn = writer.add_histogram if use_hist else writer.add_scalar
param = param if use_hist else param.abs().mean()
return fn(tag, param, run_num)
with torch.no_grad():
for (name, param) in model.named_parameters():
p(name,
_add_scope_gen(scope),
lambda tag: write(tag, param)
)
def _log_lr(writer, epoch, scheduler):
lr = p(scheduler.get_lr(), np.array)[0]
p('lr',
_add_scope_gen('lr'),
lambda _: writer.add_scalar(_, lr, epoch)
)
p('log10_lr',
_add_scope_gen('lr'),
lambda _: writer.add_scalar(_, log10(lr), epoch)
)
def _log_epoch_phase_stats(writer, epoch, scope, epoch_loss, epoch_acc):
log_measure = lambda k, v: p(k,
_add_scope_gen(scope),
lambda _ : writer.add_scalar(_, v, epoch)
)
log_measure('loss', epoch_loss)
log_measure('accuracy', epoch_acc)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
scope, epoch_loss, epoch_acc))
def _log_coef_diffs(writer, epoch, prev_model_state, curr_model_state):
def write(name, curr):
diff = curr - prev_model_state[name]
p(name,
_add_scope_gen('params'),
lambda _: writer.add_scalar(
_ + '.diff', diff.abs().mean(), epoch)
)
with torch.no_grad():
for name in curr_model_state:
if ('weight' in name or 'bias' in name):
write(name, curr_model_state[name])
| 27.587963 | 80 | 0.614365 | import copy
import numpy as np
from numpy import log10
import os
from toolz import pipe as p
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import numpy as np
import preprocessing as pp
def findParam(model, name_filter):
if callable(name_filter):
fn = name_filter
else:
name_filter = [name_filter] if type(name_filter) is str else name_filter
fn = lambda param_name: all(
component in param_name for component in name_filter)
return [(pn, pv) for (pn, pv) in model.named_parameters() if fn(pn)]
def setParameterRequiresGrad(model, requires_grad = False, params = None):
params = model.parameters() if params is None else params
for param in params:
param.requires_grad = requires_grad
def runEpochs(
model, criterion,
dataloaders, dataset_sizes, device,
log_params_verbose, num_epochs,
optimizer, scheduler,
writer):
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
prev_model_wts = best_model_wts
for epoch in range(num_epochs):
epoch_acc, model_wts = _run_epoch(
model,
criterion, dataloaders, dataset_sizes, device,
epoch, log_params_verbose, num_epochs,
optimizer, scheduler, writer)
_log_coef_diffs(writer, epoch, prev_model_wts, model_wts)
prev_model_wts = model_wts
if epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = model_wts
model.load_state_dict(best_model_wts)
return (model, best_acc)
def viewParamsToBeUpdated(model):
return [n for (n,p) in model.named_parameters() if p.requires_grad == True]
def add_graph_model(writer, model, dataloaders, device):
inputs, classes = p(dataloaders['train'], iter, next)
inputs = inputs.to(device)
classes = classes.to(device)
writer.add_graph(model, inputs)
def _run_epoch(model,
criterion, dataloaders, dataset_sizes, device,
epoch, log_params_verbose, num_epochs,
optimizer, scheduler, writer):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
n_samples = {'train': 0, 'val': 0}
for phase in ['train', 'val']:
is_train = phase == 'train'
if is_train:
scheduler.step()
model.train()
else:
model.eval()
running_loss = 0.0
running_corrects = 0
for inputs, labels in dataloaders[phase]:
n_samples[phase] = n_samples[phase] + len(labels)
inputs = inputs.to(device)
labels = labels.to(device)
preds, loss = _take_step(
model, criterion, optimizer, inputs, labels, is_train)
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
_log_epoch_phase_stats(writer, epoch, phase, epoch_loss, epoch_acc)
if log_params_verbose:
_log_model_params_verbose(writer, model, epoch, phase)
model_wts = copy.deepcopy(model.state_dict())
_log_lr(writer, epoch, scheduler)
print('# training samples')
print(n_samples['train'])
print('# val samples')
print(n_samples['val'])
return epoch_acc, model_wts
def _take_step(model, criterion, optimizer, inputs, labels, is_train):
optimizer.zero_grad()
with torch.set_grad_enabled(is_train):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
if is_train:
loss.backward()
optimizer.step()
return preds, loss
def _add_scope(scope, k):
return scope + '/' + k
def _add_scope_gen(scope):
return lambda k: _add_scope(scope, k)
def _log_model_params_verbose(writer, model, run_num, scope, use_hist = False):
def write(tag, param):
fn = writer.add_histogram if use_hist else writer.add_scalar
param = param if use_hist else param.abs().mean()
return fn(tag, param, run_num)
with torch.no_grad():
for (name, param) in model.named_parameters():
p(name,
_add_scope_gen(scope),
lambda tag: write(tag, param)
)
def _log_lr(writer, epoch, scheduler):
lr = p(scheduler.get_lr(), np.array)[0]
p('lr',
_add_scope_gen('lr'),
lambda _: writer.add_scalar(_, lr, epoch)
)
p('log10_lr',
_add_scope_gen('lr'),
lambda _: writer.add_scalar(_, log10(lr), epoch)
)
def _log_epoch_phase_stats(writer, epoch, scope, epoch_loss, epoch_acc):
log_measure = lambda k, v: p(k,
_add_scope_gen(scope),
lambda _ : writer.add_scalar(_, v, epoch)
)
log_measure('loss', epoch_loss)
log_measure('accuracy', epoch_acc)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
scope, epoch_loss, epoch_acc))
def _log_coef_diffs(writer, epoch, prev_model_state, curr_model_state):
def write(name, curr):
diff = curr - prev_model_state[name]
p(name,
_add_scope_gen('params'),
lambda _: writer.add_scalar(
_ + '.diff', diff.abs().mean(), epoch)
)
with torch.no_grad():
for name in curr_model_state:
if ('weight' in name or 'bias' in name):
write(name, curr_model_state[name])
| true | true |
f71c143502daacb5d0cac62a6b711503065c58e7 | 505 | py | Python | functions/l3Consume.py | yuklia/serverless-lambda-chaining | dd24129933489c2f1a522b37d8f4c3e16eb47285 | [
"MIT"
] | null | null | null | functions/l3Consume.py | yuklia/serverless-lambda-chaining | dd24129933489c2f1a522b37d8f4c3e16eb47285 | [
"MIT"
] | null | null | null | functions/l3Consume.py | yuklia/serverless-lambda-chaining | dd24129933489c2f1a522b37d8f4c3e16eb47285 | [
"MIT"
] | null | null | null | import json
def handler(event, context):
message_from_publisher = json.loads(event['Records'][0]['Sns']['Message'])
my_param = message_from_publisher['myParamFromConsumerPublisher']
print("👷 Received paramater from ConsumerPublisher: '{0}'".format(my_param))
body = {
"message": "Go Serverless v1.0! Your function executed successfully!",
"input": event
}
response = {
"statusCode": 200,
"body": json.dumps(body)
}
return response
| 24.047619 | 80 | 0.637624 | import json
def handler(event, context):
message_from_publisher = json.loads(event['Records'][0]['Sns']['Message'])
my_param = message_from_publisher['myParamFromConsumerPublisher']
print("👷 Received paramater from ConsumerPublisher: '{0}'".format(my_param))
body = {
"message": "Go Serverless v1.0! Your function executed successfully!",
"input": event
}
response = {
"statusCode": 200,
"body": json.dumps(body)
}
return response
| true | true |
f71c145bb6baf46c8e813d23d04f55fadd9b6a4e | 112 | py | Python | test/nohtml.py | eaybek/nohtml | 9df8fc032891591516d8a719ebc15440d8cc7a0c | [
"MIT"
] | null | null | null | test/nohtml.py | eaybek/nohtml | 9df8fc032891591516d8a719ebc15440d8cc7a0c | [
"MIT"
] | null | null | null | test/nohtml.py | eaybek/nohtml | 9df8fc032891591516d8a719ebc15440d8cc7a0c | [
"MIT"
] | null | null | null | import unittest
class NohtmlTest(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()
| 12.444444 | 36 | 0.705357 | import unittest
class NohtmlTest(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()
| true | true |
f71c154fa91e7b9687d0c58c927b63ccf6253ccb | 3,682 | py | Python | ReUseModel/TestFAEModel.py | Eggiverse/FAE | 1b953ba6dfcced83e5929eeaa8f525ec4acde5ed | [
"MIT"
] | null | null | null | ReUseModel/TestFAEModel.py | Eggiverse/FAE | 1b953ba6dfcced83e5929eeaa8f525ec4acde5ed | [
"MIT"
] | null | null | null | ReUseModel/TestFAEModel.py | Eggiverse/FAE | 1b953ba6dfcced83e5929eeaa8f525ec4acde5ed | [
"MIT"
] | null | null | null |
import os
import csv
import numpy as np
from FAE.FeatureAnalysis.Normalizer import Normalizer
from FAE.DataContainer.DataContainer import DataContainer
from FAE.FeatureAnalysis.Classifier import Classifier
from FAE.Func.Metric import EstimateMetirc
from FAE.FeatureAnalysis.FeatureSelector import FeatureSelector
from FAE.FeatureAnalysis.CrossValidation import CrossValidation
def LoadTrainInfo(model_folder):
train_info = {}
##Load normalizaiton
normalizer = Normalizer()
normalization_path = ''
for sub_file in os.listdir(model_folder):
if sub_file.rfind('_normalization_training.csv') != -1:
normalization_path = os.path.join(model_folder, sub_file)
if not os.path.exists(normalization_path):
print('Check the normalization name : zero_center_normalization')
else:
normalizer.Load(normalization_path)
train_info['normalizer'] = normalizer
## Load selected features
selected_feature_path = os.path.join(model_folder, 'feature_select_info.csv')
selected_feature_list = []
with open(selected_feature_path, 'r', newline='') as f:
f_reader = csv.reader(f)
for index in f_reader:
if index[0] == 'selected_feature':
selected_feature_list = index[1:]
if selected_feature_list == []:
print('No selected features')
train_info['selected_features'] = selected_feature_list
## Load FAE model
classifier = Classifier()
classifier.Load(model_folder)
train_info['classifier'] = classifier
return train_info
def TestNewData(NewDataCsv, model_folder, result_save_path):
'''
:param NewDataCsv: New radiomics feature matrix csv file path
:param model_folder:The trained model path
:return:classification result
'''
train_info = LoadTrainInfo(model_folder)
new_data_container = DataContainer()
#Normlization
new_data_container.Load(NewDataCsv)
feature_selector = FeatureSelector()
feature_selector.SelectFeatureByName(new_data_container, train_info['selected_features'], is_replace=True)
new_data_container = train_info['normalizer'].Transform(new_data_container)
# data_frame = new_data_container.GetFrame()
# data_frame = data_frame[train_info['selected_features']]
# new_data_container.SetFrame(data_frame)
# new_data_container.UpdateDataByFrame()
##Model
train_info['classifier'].SetDataContainer(new_data_container)
model = train_info['classifier'].GetModel()
predict = model.predict_proba(new_data_container.GetArray())[:, 1]
label = new_data_container.GetLabel()
case_name = new_data_container.GetCaseName()
np.save(os.path.join(result_save_path, 'test_predict.npy'), predict)
np.save(os.path.join(result_save_path, 'test_label.npy'), label)
test_result_info = [['CaseName', 'Pred', 'Label']]
for index in range(len(label)):
test_result_info.append([case_name[index], predict[index], label[index]])
with open(os.path.join(result_save_path, 'test_info.csv'), 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(test_result_info)
metric = EstimateMetirc(predict, label)
info = {}
info.update(metric)
cv = CrossValidation()
cv.SaveResult(info, result_save_path)
# print(metric)
return metric
if __name__ == '__main__':
TestNewData(r'D:\hospital\Huangli\smote\test_numeric_feature.csv',
r'D:\hospital\Huangli\smote\process-result\Norm0Center_PCC_ANOVA_5_LR',
r'D:\MyScript\demo') | 32.875 | 111 | 0.69962 |
import os
import csv
import numpy as np
from FAE.FeatureAnalysis.Normalizer import Normalizer
from FAE.DataContainer.DataContainer import DataContainer
from FAE.FeatureAnalysis.Classifier import Classifier
from FAE.Func.Metric import EstimateMetirc
from FAE.FeatureAnalysis.FeatureSelector import FeatureSelector
from FAE.FeatureAnalysis.CrossValidation import CrossValidation
def LoadTrainInfo(model_folder):
    """Load the artifacts saved by a FAE training run from *model_folder*.

    :param model_folder: Folder produced by training, containing the
        ``*_normalization_training.csv`` file, ``feature_select_info.csv``
        and the stored classifier files.
    :return: Dict with keys ``'normalizer'`` (only when the normalization
        CSV was found), ``'selected_features'`` and ``'classifier'``.
    """
    train_info = {}
    # Normalizer: restored corrupted line — the instance must be assigned,
    # it is loaded and stored below.
    normalizer = Normalizer()
    normalization_path = ''
    for sub_file in os.listdir(model_folder):
        if sub_file.rfind('_normalization_training.csv') != -1:
            normalization_path = os.path.join(model_folder, sub_file)
    if not os.path.exists(normalization_path):
        print('Check the normalization name : zero_center_normalization')
    else:
        normalizer.Load(normalization_path)
        train_info['normalizer'] = normalizer
    # Selected feature names live in the row starting with 'selected_feature'
    # (restored corrupted identifier: the path variable had lost its prefix).
    selected_feature_path = os.path.join(model_folder, 'feature_select_info.csv')
    selected_feature_list = []
    with open(selected_feature_path, 'r', newline='') as f:
        f_reader = csv.reader(f)
        for index in f_reader:
            if index[0] == 'selected_feature':
                selected_feature_list = index[1:]
    if selected_feature_list == []:
        print('No selected features')
    train_info['selected_features'] = selected_feature_list
    # Trained classifier (restored corrupted line: '= Classifier()' had lost
    # its target and was a syntax error).
    classifier = Classifier()
    classifier.Load(model_folder)
    train_info['classifier'] = classifier
    return train_info
def TestNewData(NewDataCsv, model_folder, result_save_path):
    """Apply a trained FAE model to a new feature matrix and save the results.

    :param NewDataCsv: Path of the radiomics feature matrix CSV to score.
    :param model_folder: Folder holding the trained model artifacts.
    :param result_save_path: Folder that receives predictions and metrics.
    :return: Dict of classification metrics.
    """
    train_info = LoadTrainInfo(model_folder)
    new_data_container = DataContainer()
    new_data_container.Load(NewDataCsv)
    feature_selector = FeatureSelector()
    feature_selector.SelectFeatureByName(new_data_container, train_info['selected_features'], is_replace=True)
    new_data_container = train_info['normalizer'].Transform(new_data_container)
    # Fixed corrupted identifier: was 'ain_info', an undefined name here.
    train_info['classifier'].SetDataContainer(new_data_container)
    model = train_info['classifier'].GetModel()
    predict = model.predict_proba(new_data_container.GetArray())[:, 1]
    label = new_data_container.GetLabel()
    case_name = new_data_container.GetCaseName()
    np.save(os.path.join(result_save_path, 'test_predict.npy'), predict)
    np.save(os.path.join(result_save_path, 'test_label.npy'), label)
    test_result_info = [['CaseName', 'Pred', 'Label']]
    for index in range(len(label)):
        test_result_info.append([case_name[index], predict[index], label[index]])
    with open(os.path.join(result_save_path, 'test_info.csv'), 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(test_result_info)
    metric = EstimateMetirc(predict, label)
    info = {}
    info.update(metric)
    cv = CrossValidation()
    cv.SaveResult(info, result_save_path)
    return metric
if __name__ == '__main__':
TestNewData(r'D:\hospital\Huangli\smote\test_numeric_feature.csv',
r'D:\hospital\Huangli\smote\process-result\Norm0Center_PCC_ANOVA_5_LR',
r'D:\MyScript\demo') | true | true |
f71c159e39dcb104058740ebee5fb752312a3553 | 160 | py | Python | cranes/__init__.py | annehulsey/high-resolution_post-earthquake_recovery_simulation_of_safety_cordons | 8b8bedceee0343d22143f48992136fc2fc34e191 | [
"MIT"
] | null | null | null | cranes/__init__.py | annehulsey/high-resolution_post-earthquake_recovery_simulation_of_safety_cordons | 8b8bedceee0343d22143f48992136fc2fc34e191 | [
"MIT"
] | null | null | null | cranes/__init__.py | annehulsey/high-resolution_post-earthquake_recovery_simulation_of_safety_cordons | 8b8bedceee0343d22143f48992136fc2fc34e191 | [
"MIT"
] | null | null | null | from .base import *
from .mapping import *
from .community_damage_sampling import *
from .downtime_logistics import *
from .analysis_and_visualization import *
| 26.666667 | 41 | 0.8125 | from .base import *
from .mapping import *
from .community_damage_sampling import *
from .downtime_logistics import *
from .analysis_and_visualization import *
| true | true |
f71c1615d02213bb8e244cd957fa6b17a89b9787 | 2,153 | py | Python | plugins/zscaler/icon_zscaler/actions/lookup_url/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/zscaler/icon_zscaler/actions/lookup_url/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/zscaler/icon_zscaler/actions/lookup_url/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
    """Static metadata for the Lookup URL action."""
    DESCRIPTION = "Look up the categorization of a given set of URLs"
class Input:
    """Keys of the action's input JSON payload."""
    URLS = "urls"
class Output:
    """Keys of the action's output JSON payload."""
    URL_CATEGORIZATION = "url_categorization"
class LookupUrlInput(insightconnect_plugin_runtime.Input):
    """Schema-validated input for the Lookup URL action (requires 'urls')."""
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "urls": {
      "type": "array",
      "title": "URLs",
      "description": "The given set of URLs or domains to be looked up",
      "items": {
        "type": "string"
      },
      "order": 1
    }
  },
  "required": [
    "urls"
  ]
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
class LookupUrlOutput(insightconnect_plugin_runtime.Output):
    """Schema-validated output of the Lookup URL action.

    Each entry in 'url_categorization' carries the checked URL and its
    classification lists (with and without security alerts).
    """
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "url_categorization": {
      "type": "array",
      "title": "URL Categorization",
      "description": "Information about given URLs",
      "items": {
        "$ref": "#/definitions/url_categorization"
      },
      "order": 1
    }
  },
  "required": [
    "url_categorization"
  ],
  "definitions": {
    "url_categorization": {
      "type": "object",
      "title": "url_categorization",
      "properties": {
        "url": {
          "type": "string",
          "title": "URL",
          "description": "Checked URL",
          "order": 1
        },
        "urlClassifications": {
          "type": "array",
          "title": "URL Classifications",
          "description": "URL classifications",
          "items": {
            "type": "string"
          },
          "order": 2
        },
        "urlClassificationsWithSecurityAlert": {
          "type": "array",
          "title": "URL classifications with security alert",
          "description": "URL classifications with security alert",
          "items": {
            "type": "string"
          },
          "order": 3
        }
      }
    }
  }
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
| 21.53 | 72 | 0.533674 |
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Look up the categorization of a given set of URLs"
class Input:
URLS = "urls"
class Output:
URL_CATEGORIZATION = "url_categorization"
class LookupUrlInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"urls": {
"type": "array",
"title": "URLs",
"description": "The given set of URLs or domains to be looked up",
"items": {
"type": "string"
},
"order": 1
}
},
"required": [
"urls"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class LookupUrlOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"url_categorization": {
"type": "array",
"title": "URL Categorization",
"description": "Information about given URLs",
"items": {
"$ref": "#/definitions/url_categorization"
},
"order": 1
}
},
"required": [
"url_categorization"
],
"definitions": {
"url_categorization": {
"type": "object",
"title": "url_categorization",
"properties": {
"url": {
"type": "string",
"title": "URL",
"description": "Checked URL",
"order": 1
},
"urlClassifications": {
"type": "array",
"title": "URL Classifications",
"description": "URL classifications",
"items": {
"type": "string"
},
"order": 2
},
"urlClassificationsWithSecurityAlert": {
"type": "array",
"title": "URL classifications with security alert",
"description": "URL classifications with security alert",
"items": {
"type": "string"
},
"order": 3
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| true | true |
f71c1852ffabfd6520c29aa0e5896783707f3f65 | 9,759 | py | Python | pyhtmlcv.py | lietu/pyhtmlcv | 68e75d5b761f4cbf4315d9c573fdc39872abbc20 | [
"BSD-3-Clause"
] | 3 | 2017-02-15T14:02:57.000Z | 2019-04-30T23:33:55.000Z | pyhtmlcv.py | lietu/pyhtmlcv | 68e75d5b761f4cbf4315d9c573fdc39872abbc20 | [
"BSD-3-Clause"
] | null | null | null | pyhtmlcv.py | lietu/pyhtmlcv | 68e75d5b761f4cbf4315d9c573fdc39872abbc20 | [
"BSD-3-Clause"
] | 1 | 2017-06-01T15:58:09.000Z | 2017-06-01T15:58:09.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
PyHtmlCv is a tool to that can be used to generate HTML CVs from a
simple JSON configuration.
:copyright: (c) 2012-2019 Janne Enberg
:license: BSD
"""
from argparse import ArgumentParser, ArgumentTypeError
import codecs
from datetime import datetime
from jinja2 import Environment, FileSystemLoader
import json
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
import re
import sass
import six
import shutil
import sys
from time import sleep
TEMPLATE_PATH = Path("templates")
def str2bool(value):
    """Interpret a yes/no CLI string as a bool (case-insensitive).

    :param str value: text such as "yes", "t", "1", "no", "f", "0"
    :return bool: the parsed value
    :raises ArgumentTypeError: when the text matches neither set
    """
    truthy = ("yes", "true", "t", "y", "1")
    falsy = ("no", "false", "f", "n", "0")
    normalized = value.lower()
    if normalized in truthy:
        return True
    if normalized in falsy:
        return False
    raise ArgumentTypeError("Boolean value expected.")
def get_last_change(path):
    """Return the newest modification time (st_mtime) of *path*.

    For a directory, every entry beneath it (recursively) is considered
    as well, so the result reflects the most recent change anywhere in
    the tree.
    """
    candidates = [path]
    if path.is_dir():
        candidates.extend(path.glob("**/*"))
    return max(entry.stat().st_mtime for entry in candidates)
def run(options):
    """Load and validate the CV config, then generate the HTML output.

    Prints a short hint and re-raises when the JSON cannot be parsed
    (ValueError) or the config file is missing (IOError). On success the
    page is rendered into ``options.target`` using ``options.template``.
    """
    try:
        with codecs.open(options.source, encoding="utf-8") as f:
            config = json.load(f)
    except ValueError as e:
        # malformed JSON; message printed here, traceback follows from raise
        print("Error parsing config {}.".format(options.source))
        print("")
        raise
    except IOError as e:
        print("Configuration file not found: {}".format(options.source))
        print("")
        raise
    validate_config(config)
    process_config(config)
    generate_cv(options.target, options.template, config)
def generate_cv(destination, template, config):
    """Render the CV into *destination*.

    Copies the template directory there (wiping any previous output),
    compiles its Sass/SCSS sources to CSS, and writes the rendered
    index.html from the Jinja2 template.
    """
    # Load the Jinja2 template from templates/<template>/
    template_path = str(TEMPLATE_PATH / template)
    env = Environment(loader=FileSystemLoader(template_path))
    template = env.get_template("index.html")
    # Generate a few variables for the template
    now = datetime.now()
    current_time = now.strftime("%Y-%m-%d %H:%M:%S %z")
    year = now.strftime("%Y")
    navigation = generate_navigation(config)
    # Render the template into HTML
    html = template.render(
        name=config["name"],
        contact=config["contact"],
        sections=config["sections"],
        navigation=navigation,
        now=current_time,
        year=year,
    )
    # Start from a clean copy of the template directory
    dst_path = Path(destination)
    if dst_path.exists():
        shutil.rmtree(destination)
    shutil.copytree(template_path, destination)
    # Compile Sass/SCSS. Sheets whose name starts with "_" are not compiled
    # directly (Sass partial convention); all .scss files are remembered so
    # they can be removed from the output afterwards.
    scss_files = []
    for entry in dst_path.glob("**/*.scss"):
        scss_files.append(entry)
        entry_name = entry.name
        if entry_name.endswith(".scss") and not entry_name.startswith("_"):
            entry_str = str(entry)
            compiled = sass.compile(filename=entry_str)
            entry_css = Path(entry_str[:-5] + ".css")
            with entry_css.open("w", encoding="utf-8") as f:
                f.write(compiled)
            print("Compiled {} to {}".format(entry, entry_css))
    # The .scss sources are not needed in the generated output
    for entry in scss_files:
        entry.unlink()
    # ...nor are directories left empty by that cleanup (deepest first)
    for entry in reversed(list(dst_path.rglob("*"))):
        if entry.exists() and entry.is_dir():
            empty = True
            for _ in entry.iterdir():
                empty = False
                break
            if empty:
                entry.rmdir()
    # Write the result HTML
    full_path = Path(destination) / "index.html"
    with full_path.open("w", encoding="utf-8") as f:
        f.write(html)
    print("Generated CV HTML to {}".format(full_path))
def validate_config(config):
    """Validate the CV configuration structure.

    Prints one human-readable message per problem found and terminates the
    process with exit status 1 when anything is wrong; returns None for a
    valid configuration.
    """
    error = False
    if "name" not in config:
        print('Missing name definition, e.g. { "name": "Janne Enberg", ' "... }")
        error = True
    if "contact" not in config:
        print(
            "Missing contact definition, e.g. { ..., "
            '"contact": "+1 (2) 345 678 | contact@example.com", ... }'
        )
        error = True
    # process_config() reads config["mainHeading"] unconditionally, so a
    # missing key would otherwise surface later as a bare KeyError; report
    # it here together with the other configuration problems.
    if "mainHeading" not in config:
        print(
            "Missing mainHeading definition, e.g. { ..., "
            '"mainHeading": "Curriculum Vitae", ... }'
        )
        error = True
    if "sections" not in config:
        print("Missing sections definition, e.g. { ..., " '"sections": [ ... ] }')
        error = True
    else:
        for section in config["sections"]:
            # String sections become headings/page breaks later and need no
            # other validation. On python 3 (this script's shebang) `str`
            # is equivalent to the previous `six.string_types` check.
            if isinstance(section, str):
                continue
            if "title" not in section:
                print(
                    "Missing title from section definition, , "
                    'e.g. { ..., "sections": [ {"title": "Section '
                    'title", ...} ] }'
                )
                print("Found: {}".format(section))
                error = True
            if (
                "fields" not in section
                and "large" not in section
                and "largeList" not in section
            ):
                print(
                    "No fields, largeList or large definition for "
                    "section, , "
                    'e.g. { ..., "sections": [ {..., '
                    '"large": "Yadi yadi yada", ...} ] }'
                )
                error = True
            if "fields" in section:
                for field in section["fields"]:
                    if not isinstance(field, list) or len(field) != 2:
                        print(
                            "Invalid field definition, "
                            "it should have two items, e.g. { ..., "
                            '"sections": [ {..., "fields": [ ["Label",'
                            ' "Value"], ... }, ... ] }'
                        )
                        error = True
    if error:
        print("")
        print("Please fix errors in configuration file.")
        sys.exit(1)
def process_config(config):
    """Normalize the human-friendly config into the shape the template uses.

    In place, this:
      * turns bare-string sections into heading sections ("-" becomes a
        page break),
      * tags every dict section with type "normal",
      * converts ["Label", "Value"] field pairs into {"label", "value"}
        dicts,
      * renders "largeList" entries as an HTML bullet list under "large",
      * prepends a heading section built from config["mainHeading"].
    """
    for index, section in enumerate(config["sections"]):
        # Bare strings are shorthand. On python 3 (this script's shebang)
        # `str` is equivalent to the previous `six.string_types` check.
        if isinstance(section, str):
            if section == "-":
                config["sections"][index] = {"type": "page-break"}
            else:
                config["sections"][index] = {"type": "heading", "title": section}
            continue
        section["type"] = "normal"
        # Convert ["Label", "Value"] pairs to {"label": ..., "value": ...}
        if "fields" in section:
            section["fields"] = [
                {"label": pair[0], "value": pair[1]} for pair in section["fields"]
            ]
        # Render "largeList" entries as an HTML bullet list in "large"
        if "largeList" in section:
            section["large"] = (
                "<ul><li>" + "</li><li>".join(section["largeList"]) + "</li></ul>"
            )
            del section["largeList"]
    # The main heading always becomes the first section
    main_heading = {"type": "heading", "title": config["mainHeading"]}
    config["sections"] = [main_heading] + config["sections"]
def generate_navigation(config):
    """Build the navigation structure and assign anchor ids to sections.

    Returns a dict with key "headings" (heading titles in document order)
    plus one key per heading title mapping to the list of sections under
    it, the heading section itself first.

    Side effect: every non-page-break section gets a unique "id" used as
    the HTML anchor target. (The pointless ``enumerate`` whose index was
    discarded has been removed.)
    """
    counter = 1
    nav = {"headings": []}
    for section in config["sections"]:
        # Page breaks are purely visual: no anchor, no nav entry
        if section["type"] == "page-break":
            continue
        title = section["title"]
        section["id"] = make_id(title, counter)
        if section["type"] == "heading":
            nav[title] = [section]
            nav["headings"].append(title)
            heading = title
        else:
            # Relies on process_config() always prepending a heading first
            nav[heading].append(section)
        counter += 1
    return nav
def make_id(text, index):
    """Turn *text* into a string usable as an HTML id attribute.

    Characters outside [0-9a-zA-Z-_.:] become "-"; ids that would start
    with a digit are prefixed with "id-" and suffixed with *index* to
    keep them valid and unique.
    """
    cleaned = re.sub(r"[^0-9a-zA-Z\-_.:]", "-", text)
    # After the substitution only ASCII remains, so isdigit() matches
    # exactly the leading [0-9] the original regex checked for.
    if cleaned[:1].isdigit():
        cleaned = "id-{}-{}".format(cleaned, index)
    return cleaned
def main():
    """CLI entry point: parse arguments and build the CV, optionally watching
    the source and template for changes and rebuilding on every change."""
    ap = ArgumentParser()
    ap.add_argument("--source", default="cv.json", type=str, help="CV JSON source")
    ap.add_argument(
        "--target", type=str, help="Target directory, defaults to generated/<source>/"
    )
    ap.add_argument(
        "--template",
        type=str,
        default="default",
        help="One of the subfolders of templates/",
    )
    ap.add_argument(
        "--watch",
        type=str2bool,
        nargs="?",
        const=True,
        default=False,
        help="Keep watching for changes",
    )
    options = ap.parse_args()
    # Default target mirrors the source name under generated/
    if not options.target:
        options.target = str(Path("generated") / options.source)
    if options.watch:
        print("Press CTRL+C to stop monitoring for changes")
        last_change = 0
        source_path = Path(options.source)
        template_path = TEMPLATE_PATH / options.template
        # Poll source and template mtimes ~4x/second; rebuild on any change
        while True:
            changes = False
            source_change = get_last_change(source_path)
            if source_change > last_change:
                changes = True
            template_change = get_last_change(template_path)
            if template_change > last_change:
                changes = True
            if changes:
                last_change = max(template_change, source_change)
                try:
                    run(options)
                except Exception as e:
                    # Keep watching even if one rebuild fails
                    print(e)
                except SystemExit:
                    # validate_config() exits on a bad config; swallow it in
                    # watch mode so fixing the file triggers a new rebuild
                    pass
            sleep(0.25)
    else:
        run(options)
if __name__ == "__main__":
main()
| 28.043103 | 86 | 0.544216 |
from argparse import ArgumentParser, ArgumentTypeError
import codecs
from datetime import datetime
from jinja2 import Environment, FileSystemLoader
import json
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
import re
import sass
import six
import shutil
import sys
from time import sleep
TEMPLATE_PATH = Path("templates")
def str2bool(value):
if value.lower() in ("yes", "true", "t", "y", "1"):
return True
elif value.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError("Boolean value expected.")
def get_last_change(path):
last_changed = path.stat().st_mtime
if path.is_dir():
for entry in path.glob("**/*"):
entry_changed = entry.stat().st_mtime
if entry_changed > last_changed:
last_changed = entry_changed
return last_changed
def run(options):
try:
with codecs.open(options.source, encoding="utf-8") as f:
config = json.load(f)
except ValueError as e:
print("Error parsing config {}.".format(options.source))
print("")
raise
except IOError as e:
print("Configuration file not found: {}".format(options.source))
print("")
raise
validate_config(config)
process_config(config)
generate_cv(options.target, options.template, config)
def generate_cv(destination, template, config):
template_path = str(TEMPLATE_PATH / template)
env = Environment(loader=FileSystemLoader(template_path))
template = env.get_template("index.html")
now = datetime.now()
current_time = now.strftime("%Y-%m-%d %H:%M:%S %z")
year = now.strftime("%Y")
navigation = generate_navigation(config)
html = template.render(
name=config["name"],
contact=config["contact"],
sections=config["sections"],
navigation=navigation,
now=current_time,
year=year,
)
dst_path = Path(destination)
if dst_path.exists():
shutil.rmtree(destination)
shutil.copytree(template_path, destination)
scss_files = []
for entry in dst_path.glob("**/*.scss"):
scss_files.append(entry)
entry_name = entry.name
if entry_name.endswith(".scss") and not entry_name.startswith("_"):
entry_str = str(entry)
compiled = sass.compile(filename=entry_str)
entry_css = Path(entry_str[:-5] + ".css")
with entry_css.open("w", encoding="utf-8") as f:
f.write(compiled)
print("Compiled {} to {}".format(entry, entry_css))
for entry in scss_files:
entry.unlink()
for entry in reversed(list(dst_path.rglob("*"))):
if entry.exists() and entry.is_dir():
empty = True
for _ in entry.iterdir():
empty = False
break
if empty:
entry.rmdir()
full_path = Path(destination) / "index.html"
with full_path.open("w", encoding="utf-8") as f:
f.write(html)
print("Generated CV HTML to {}".format(full_path))
def validate_config(config):
error = False
if "name" not in config:
print('Missing name definition, e.g. { "name": "Janne Enberg", ' "... }")
error = True
if "contact" not in config:
print(
"Missing contact definition, e.g. { ..., "
'"contact": "+1 (2) 345 678 | contact@example.com", ... }'
)
error = True
if "sections" not in config:
print("Missing sections definition, e.g. { ..., " '"sections": [ ... ] }')
error = True
else:
for section in config["sections"]:
if isinstance(section, six.string_types):
continue
if "title" not in section:
print(
"Missing title from section definition, , "
'e.g. { ..., "sections": [ {"title": "Section '
'title", ...} ] }'
)
print("Found: {}".format(section))
error = True
if (
"fields" not in section
and "large" not in section
and "largeList" not in section
):
print(
"No fields, largeList or large definition for "
"section, , "
'e.g. { ..., "sections": [ {..., '
'"large": "Yadi yadi yada", ...} ] }'
)
error = True
if "fields" in section:
for field in section["fields"]:
if not isinstance(field, list) or len(field) != 2:
print(
"Invalid field definition, "
"it should have two items, e.g. { ..., "
'"sections": [ {..., "fields": [ ["Label",'
' "Value"], ... }, ... ] }'
)
error = True
if error:
print("")
print("Please fix errors in configuration file.")
sys.exit(1)
def process_config(config):
for index, section in enumerate(config["sections"]):
if isinstance(section, six.string_types):
if section == "-":
config["sections"][index] = {"type": "page-break"}
else:
config["sections"][index] = {"type": "heading", "title": section}
continue
section["type"] = "normal"
if "fields" in section:
fields = []
for fieldColumns in section["fields"]:
fields.append({"label": fieldColumns[0], "value": fieldColumns[1]})
section["fields"] = fields
if "largeList" in section:
section["large"] = (
"<ul><li>" + "</li><li>".join(section["largeList"]) + "</li></ul>"
)
del section["largeList"]
heading = config["mainHeading"]
main_heading = {"type": "heading", "title": heading}
config["sections"] = [main_heading] + config["sections"]
def generate_navigation(config):
i = 1
nav = {"headings": []}
for _, section in enumerate(config["sections"]):
if section["type"] == "page-break":
continue
name = section["title"]
section["id"] = make_id(name, i)
if section["type"] == "heading":
nav[name] = [section]
nav["headings"].append(name)
heading = name
else:
nav[heading].append(section)
i += 1
return nav
def make_id(text, index):
# Replace characters not valid in IDs
text = re.sub(r"[^0-9a-zA-Z\-_.:]", "-", text)
# Text must not begin with a number
if re.match(r"^[0-9]", text):
text = "id-{}-{}".format(text, index)
return text
def main():
ap = ArgumentParser()
ap.add_argument("--source", default="cv.json", type=str, help="CV JSON source")
ap.add_argument(
"--target", type=str, help="Target directory, defaults to generated/<source>/"
)
ap.add_argument(
"--template",
type=str,
default="default",
help="One of the subfolders of templates/",
)
ap.add_argument(
"--watch",
type=str2bool,
nargs="?",
const=True,
default=False,
help="Keep watching for changes",
)
options = ap.parse_args()
if not options.target:
options.target = str(Path("generated") / options.source)
if options.watch:
print("Press CTRL+C to stop monitoring for changes")
last_change = 0
source_path = Path(options.source)
template_path = TEMPLATE_PATH / options.template
while True:
changes = False
source_change = get_last_change(source_path)
if source_change > last_change:
changes = True
template_change = get_last_change(template_path)
if template_change > last_change:
changes = True
if changes:
last_change = max(template_change, source_change)
try:
run(options)
except Exception as e:
print(e)
except SystemExit:
pass
sleep(0.25)
else:
run(options)
if __name__ == "__main__":
main()
| true | true |
f71c190a14841ef42f527cfad6bd4742ebd5bc55 | 929 | py | Python | nlpaug/util/audio/loader.py | lucidworks/nlpaug | 8e47fa39200db17f4dc1d61567af1419bc389071 | [
"MIT"
] | 1 | 2021-06-09T20:07:30.000Z | 2021-06-09T20:07:30.000Z | nlpaug/util/audio/loader.py | lucidworks/nlpaug | 8e47fa39200db17f4dc1d61567af1419bc389071 | [
"MIT"
] | null | null | null | nlpaug/util/audio/loader.py | lucidworks/nlpaug | 8e47fa39200db17f4dc1d61567af1419bc389071 | [
"MIT"
] | null | null | null | try:
import librosa
except ImportError:
# No installation required if not using this function
pass
class AudioLoader:
    """Thin wrappers around librosa for loading audio and mel spectrograms.

    librosa is an optional dependency; each loader raises a descriptive
    ModuleNotFoundError when it is missing. The previously duplicated
    import/error boilerplate is factored into a single helper.
    """

    @staticmethod
    def _require_librosa():
        # Import lazily so the rest of the package works without librosa;
        # only these loaders actually need it.
        try:
            import librosa
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "Missed librosa library. Install import librosa by `pip install librosa`"
            )
        return librosa

    @staticmethod
    def load_audio(file_path):
        """Return librosa.load(file_path): (samples, sampling_rate)."""
        librosa = AudioLoader._require_librosa()
        return librosa.load(file_path)

    @staticmethod
    def load_mel_spectrogram(file_path, n_mels=128, fmax=8000):
        """Return the mel spectrogram of the audio file at *file_path*.

        :param n_mels: number of mel bands
        :param fmax: highest frequency (Hz) of the mel filter bank
        """
        librosa = AudioLoader._require_librosa()
        audio, sampling_rate = AudioLoader.load_audio(file_path)
        return librosa.feature.melspectrogram(
            y=audio, sr=sampling_rate, n_mels=n_mels, fmax=fmax
        )
| 28.151515 | 89 | 0.634015 | try:
import librosa
except ImportError:
pass
class AudioLoader:
@staticmethod
def load_audio(file_path):
try:
import librosa
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Missed librosa library. Install import librosa by `pip install librosa`"
)
return librosa.load(file_path)
@staticmethod
def load_mel_spectrogram(file_path, n_mels=128, fmax=8000):
try:
import librosa
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Missed librosa library. Install import librosa by `pip install librosa`"
)
audio, sampling_rate = AudioLoader.load_audio(file_path)
return librosa.feature.melspectrogram(
y=audio, sr=sampling_rate, n_mels=n_mels, fmax=fmax
)
| true | true |
f71c191c5eb6a1641c149fff6ae72d57a8d19cda | 4,286 | py | Python | dataset/DeepFakes/faceswap-master/lib/training_data.py | MrThiago/FaceForensics | 1806e70d0dd2294a12a8afd1c3f59d6ecac639bf | [
"MIT"
] | 1,930 | 2018-04-20T14:52:01.000Z | 2022-03-30T13:53:31.000Z | dataset/DeepFakes/faceswap-master/lib/training_data.py | chrisgorgo/FaceForensics | a815daa9ebb7c12240a4b7162c431af0e1b959fa | [
"MIT"
] | 68 | 2019-02-14T09:09:02.000Z | 2022-03-23T08:55:23.000Z | dataset/DeepFakes/faceswap-master/lib/training_data.py | chrisgorgo/FaceForensics | a815daa9ebb7c12240a4b7162c431af0e1b959fa | [
"MIT"
] | 499 | 2018-04-20T11:27:11.000Z | 2022-03-29T16:29:50.000Z | import cv2
import numpy
from random import shuffle
from .utils import BackgroundGenerator
from .umeyama import umeyama
class TrainingDataGenerator():
    """Produces endless batches of (warped, target) face-image pairs.

    Each source image is resized to 256x256, augmented with a random affine
    transform, then locally warped, so successive epochs see slightly
    different data.
    """
    def __init__(self, random_transform_args, coverage, scale=5, zoom=1): # TODO: these defaults should stay in the warp function
        # random_transform_args: kwargs for random_transform()
        # coverage: size (px) of the central area sampled by random_warp()
        self.random_transform_args = random_transform_args
        self.coverage = coverage
        self.scale = scale
        self.zoom = zoom

    def minibatchAB(self, images, batchsize):
        """Yield (epoch, warped_batch, target_batch) forever, prefetching
        one batch ahead on a background thread via BackgroundGenerator."""
        batch = BackgroundGenerator(self.minibatch(images, batchsize), 1)
        for ep1, warped_img, target_img in batch.iterator():
            yield ep1, warped_img, target_img

    # A generator function that yields epoch, batchsize of warped_img and batchsize of target_img
    def minibatch(self, data, batchsize):
        """Infinite generator over shuffled image paths; reshuffles and
        bumps the epoch counter whenever the batch window wraps around."""
        length = len(data)
        assert length >= batchsize, "Number of images is lower than batch-size (Note that too few images may lead to bad training). # images: {}, batch-size: {}".format(length, batchsize)
        epoch = i = 0
        shuffle(data)
        while True:
            size = batchsize
            if i+size > length:
                shuffle(data)
                i = 0
                epoch+=1
            # rtn has shape (batch, 2, H, W, C): axis 1 is (warped, target)
            rtn = numpy.float32([self.read_image(img) for img in data[i:i+size]])
            i+=size
            yield epoch, rtn[:,0,:,:,:], rtn[:,1,:,:,:]

    def color_adjust(self, img):
        """Scale 8-bit pixel values into [0, 1] floats."""
        return img / 255.0

    def read_image(self, fn):
        """Load one image file and return its augmented (warped, target) pair."""
        try:
            image = self.color_adjust(cv2.imread(fn))
        except TypeError:
            # cv2.imread returns None for unreadable files; None / 255.0
            # raises TypeError, converted here into a clearer error.
            raise Exception("Error while reading image", fn)
        image = cv2.resize(image, (256,256))
        image = self.random_transform( image, **self.random_transform_args )
        warped_img, target_img = self.random_warp( image, self.coverage, self.scale, self.zoom )
        return warped_img, target_img

    def random_transform(self, image, rotation_range, zoom_range, shift_range, random_flip):
        """Apply a random rotation/zoom/shift and, with probability
        *random_flip*, a horizontal flip."""
        h, w = image.shape[0:2]
        rotation = numpy.random.uniform(-rotation_range, rotation_range)
        scale = numpy.random.uniform(1 - zoom_range, 1 + zoom_range)
        tx = numpy.random.uniform(-shift_range, shift_range) * w
        ty = numpy.random.uniform(-shift_range, shift_range) * h
        mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale)
        mat[:, 2] += (tx, ty)
        result = cv2.warpAffine(
            image, mat, (w, h), borderMode=cv2.BORDER_REPLICATE)
        if numpy.random.random() < random_flip:
            result = result[:, ::-1]
        return result

    # get pair of random warped images from aligned face image
    def random_warp(self, image, coverage, scale = 5, zoom = 1):
        """Return (warped_image, target_image), both (64*zoom) square.

        A 5x5 grid over the central *coverage*-sized area is jittered with
        Gaussian noise to build the warp; the target is the same crop mapped
        back with the Umeyama similarity fit of the jittered grid.
        """
        assert image.shape == (256, 256, 3)
        range_ = numpy.linspace(128 - coverage//2, 128 + coverage//2, 5)
        mapx = numpy.broadcast_to(range_, (5, 5))
        mapy = mapx.T
        mapx = mapx + numpy.random.normal(size=(5,5), scale=scale)
        mapy = mapy + numpy.random.normal(size=(5,5), scale=scale)
        interp_mapx = cv2.resize(mapx, (80*zoom,80*zoom))[8*zoom:72*zoom,8*zoom:72*zoom].astype('float32')
        interp_mapy = cv2.resize(mapy, (80*zoom,80*zoom))[8*zoom:72*zoom,8*zoom:72*zoom].astype('float32')
        warped_image = cv2.remap(image, interp_mapx, interp_mapy, cv2.INTER_LINEAR)
        src_points = numpy.stack([mapx.ravel(), mapy.ravel() ], axis=-1)
        dst_points = numpy.mgrid[0:65*zoom:16*zoom,0:65*zoom:16*zoom].T.reshape(-1,2)
        mat = umeyama(src_points, dst_points, True)[0:2]
        target_image = cv2.warpAffine(image, mat, (64*zoom,64*zoom))
        return warped_image, target_image
def stack_images(images):
    """Collapse a grid of images stored in an n-D array into one image.

    The leading axes of *images* alternate between grid dimensions; pairs
    of them are interleaved and merged, the last axis (channels) is kept,
    so the result is a single 3-D image array.
    """
    def axis_groups(ndim):
        # The rank's parity decides which alternating axes form the first
        # merged dimension; the final axis always stays untouched.
        first = list(range(1 - ndim % 2, ndim - 1, 2))
        second = list(range(ndim % 2, ndim - 1, 2))
        return first, second, [ndim - 1]

    shape = numpy.array(images.shape)
    groups = axis_groups(len(shape))
    merged_shape = [numpy.prod(shape[g]) for g in groups]
    order = numpy.concatenate(groups)
    return numpy.transpose(images, axes=order).reshape(merged_shape)
| 40.819048 | 187 | 0.617592 | import cv2
import numpy
from random import shuffle
from .utils import BackgroundGenerator
from .umeyama import umeyama
class TrainingDataGenerator():
def __init__(self, random_transform_args, coverage, scale=5, zoom=1):
self.random_transform_args = random_transform_args
self.coverage = coverage
self.scale = scale
self.zoom = zoom
def minibatchAB(self, images, batchsize):
batch = BackgroundGenerator(self.minibatch(images, batchsize), 1)
for ep1, warped_img, target_img in batch.iterator():
yield ep1, warped_img, target_img
def minibatch(self, data, batchsize):
length = len(data)
assert length >= batchsize, "Number of images is lower than batch-size (Note that too few images may lead to bad training). # images: {}, batch-size: {}".format(length, batchsize)
epoch = i = 0
shuffle(data)
while True:
size = batchsize
if i+size > length:
shuffle(data)
i = 0
epoch+=1
rtn = numpy.float32([self.read_image(img) for img in data[i:i+size]])
i+=size
yield epoch, rtn[:,0,:,:,:], rtn[:,1,:,:,:]
def color_adjust(self, img):
return img / 255.0
def read_image(self, fn):
try:
image = self.color_adjust(cv2.imread(fn))
except TypeError:
raise Exception("Error while reading image", fn)
image = cv2.resize(image, (256,256))
image = self.random_transform( image, **self.random_transform_args )
warped_img, target_img = self.random_warp( image, self.coverage, self.scale, self.zoom )
return warped_img, target_img
def random_transform(self, image, rotation_range, zoom_range, shift_range, random_flip):
h, w = image.shape[0:2]
rotation = numpy.random.uniform(-rotation_range, rotation_range)
scale = numpy.random.uniform(1 - zoom_range, 1 + zoom_range)
tx = numpy.random.uniform(-shift_range, shift_range) * w
ty = numpy.random.uniform(-shift_range, shift_range) * h
mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale)
mat[:, 2] += (tx, ty)
result = cv2.warpAffine(
image, mat, (w, h), borderMode=cv2.BORDER_REPLICATE)
if numpy.random.random() < random_flip:
result = result[:, ::-1]
return result
def random_warp(self, image, coverage, scale = 5, zoom = 1):
assert image.shape == (256, 256, 3)
range_ = numpy.linspace(128 - coverage//2, 128 + coverage//2, 5)
mapx = numpy.broadcast_to(range_, (5, 5))
mapy = mapx.T
mapx = mapx + numpy.random.normal(size=(5,5), scale=scale)
mapy = mapy + numpy.random.normal(size=(5,5), scale=scale)
interp_mapx = cv2.resize(mapx, (80*zoom,80*zoom))[8*zoom:72*zoom,8*zoom:72*zoom].astype('float32')
interp_mapy = cv2.resize(mapy, (80*zoom,80*zoom))[8*zoom:72*zoom,8*zoom:72*zoom].astype('float32')
warped_image = cv2.remap(image, interp_mapx, interp_mapy, cv2.INTER_LINEAR)
src_points = numpy.stack([mapx.ravel(), mapy.ravel() ], axis=-1)
dst_points = numpy.mgrid[0:65*zoom:16*zoom,0:65*zoom:16*zoom].T.reshape(-1,2)
mat = umeyama(src_points, dst_points, True)[0:2]
target_image = cv2.warpAffine(image, mat, (64*zoom,64*zoom))
return warped_image, target_image
def stack_images(images):
def get_transpose_axes(n):
if n % 2 == 0:
y_axes = list(range(1, n - 1, 2))
x_axes = list(range(0, n - 1, 2))
else:
y_axes = list(range(0, n - 1, 2))
x_axes = list(range(1, n - 1, 2))
return y_axes, x_axes, [n - 1]
images_shape = numpy.array(images.shape)
new_axes = get_transpose_axes(len(images_shape))
new_shape = [numpy.prod(images_shape[x]) for x in new_axes]
return numpy.transpose(
images,
axes=numpy.concatenate(new_axes)
).reshape(new_shape)
| true | true |
f71c1b0d0a31e515a655f2a67f62120cc4232d70 | 1,106 | py | Python | setup.py | albertosottile/SmartGadget-gatt | b5b4002f3635afcb97de5106676cc7142b1e9ca5 | [
"MIT"
] | 2 | 2021-06-14T18:08:16.000Z | 2021-08-29T06:48:10.000Z | setup.py | albertosottile/SmartGadget-gatt | b5b4002f3635afcb97de5106676cc7142b1e9ca5 | [
"MIT"
] | null | null | null | setup.py | albertosottile/SmartGadget-gatt | b5b4002f3635afcb97de5106676cc7142b1e9ca5 | [
"MIT"
] | 1 | 2021-08-29T06:48:11.000Z | 2021-08-29T06:48:11.000Z | #!/usr/bin/env python3
import setuptools
def read(fname):
    """Return the entire contents of the text file *fname* as a string."""
    with open(fname, 'r') as stream:
        contents = stream.read()
    return contents
# Package metadata and build configuration for the `smartgadget` distribution.
setuptools.setup(
    name="smartgadget",
    version="0.1",
    author="Alberto Sottile",
    author_email="alby128@gmail.com",
    description=' '.join([
        'Interact with a Sensirion SHT31 Smart Gadget',
        'Development Kit using the Bluetooth GATT SDK for Python'
    ]),
    url="https://github.com/albertosottile/SmartGadget-gatt",
    packages=['smartgadget'],
    # Runtime dependencies are maintained in requirements.txt (one per line);
    # read() is defined above in this file.
    install_requires=read('requirements.txt').splitlines(),
    python_requires=">=3.5",
    include_package_data=True,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Education",
        "Intended Audience :: Manufacturing",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Scientific/Engineering"
    ],
)
| 30.722222 | 65 | 0.621157 |
import setuptools
def read(fname):
with open(fname, 'r') as f:
return f.read()
setuptools.setup(
name="smartgadget",
version="0.1",
author="Alberto Sottile",
author_email="alby128@gmail.com",
description=' '.join([
'Interact with a Sensirion SHT31 Smart Gadget',
'Development Kit using the Bluetooth GATT SDK for Python'
]),
url="https://github.com/albertosottile/SmartGadget-gatt",
packages=['smartgadget'],
install_requires=read('requirements.txt').splitlines(),
python_requires=">=3.5",
include_package_data=True,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Education",
"Intended Audience :: Manufacturing",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering"
],
)
| true | true |
f71c1b1f9c2288ed03520cd964f1b666fcfab9a4 | 1,302 | py | Python | CV_A4_/A4_compute_descriptors.py | pseudowasabi/computer-vision-exercises | 34b7c8402c32dbb00e484f90780ebb6546a3f8dc | [
"MIT"
] | null | null | null | CV_A4_/A4_compute_descriptors.py | pseudowasabi/computer-vision-exercises | 34b7c8402c32dbb00e484f90780ebb6546a3f8dc | [
"MIT"
] | null | null | null | CV_A4_/A4_compute_descriptors.py | pseudowasabi/computer-vision-exercises | 34b7c8402c32dbb00e484f90780ebb6546a3f8dc | [
"MIT"
] | null | null | null | '''
Computer vision assignment 4 by Yoseob Kim
A4_compute_descriptors.py
Compute similarity-reflected image descriptors with L1, L2 norm distances by using SIFT descriptors.
* Status: (working on it)
* GitHub Link: https://github.com/pseudowasabi/computer-vision-exercises/tree/master/CV_A4_
'''
import cv2
import numpy as np
import math
import time
import operator
import random
img = cv2.imread('ukbench00000.jpg', cv2.IMREAD_GRAYSCALE)
'''
my_min = np.inf
my_max = 0'''
# Load SIFT descriptors for the 1000 files sift100000 .. sift100999.
# Each file is a flat byte stream of concatenated 128-byte descriptors.
for i in range(1000):
    # Zero-pad the file index to three digits ('000' .. '999').
    offset = str(i).zfill(3)
    # Context manager guarantees the handle is closed even if parsing fails
    # (the original opened the file and only closed it at the loop's end).
    # reference - https://numpy.org/doc/stable/reference/generated/numpy.frombuffer.html
    with open('./sift/sift100' + offset, 'rb') as f:
        sift_des = np.frombuffer(f.read(), dtype=np.uint8)
    # One SIFT descriptor is 128 bytes -> reshape to (num_keypoints, 128).
    sift_des_reshaped = np.reshape(sift_des, (sift_des.shape[0] // 128, 128))
# N size
# min = 73, max = 2388
| 22.842105 | 100 | 0.669739 |
import cv2
import numpy as np
import math
import time
import operator
import random
img = cv2.imread('ukbench00000.jpg', cv2.IMREAD_GRAYSCALE)
for i in range(1000):
offset = '00' if i < 10 else '0' if i < 100 else ''
offset += str(i)
f = open('./sift/sift100'+offset, 'rb')
sift_des = np.frombuffer(f.read(), dtype=np.uint8)
sift_des_reshaped = np.reshape(sift_des, (sift_des.shape[0] // 128, 128))
f.close()
| true | true |
f71c1c5ae6f65d71e66399bf4776b2b7437ab5a8 | 713 | py | Python | audiophiler/util.py | Mstrodl/audiophiler | 2e3a8299b58ab92a851cae726cc9184a2dad05f8 | [
"MIT"
] | 5 | 2017-05-08T05:07:22.000Z | 2021-06-01T18:48:30.000Z | audiophiler/util.py | Mstrodl/audiophiler | 2e3a8299b58ab92a851cae726cc9184a2dad05f8 | [
"MIT"
] | 34 | 2017-09-11T19:18:40.000Z | 2021-08-28T21:38:15.000Z | audiophiler/util.py | Mstrodl/audiophiler | 2e3a8299b58ab92a851cae726cc9184a2dad05f8 | [
"MIT"
] | 20 | 2017-09-09T22:02:11.000Z | 2021-08-28T17:45:59.000Z | # File: util.py
# Audiophiler utility functions
# Credit to Liam Middlebrook and Ram Zallan
# https://github.com/liam-middlebrook/gallery
from functools import wraps
from flask import session
from audiophiler.models import Tour
def audiophiler_auth(func):
    """Decorator injecting an ``auth_dict`` keyword built from the OIDC session.

    ``auth_dict`` carries the caller's ``uuid`` (subject) and ``uid``
    (preferred username), both coerced to strings.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        kwargs["auth_dict"] = {
            "uuid": str(session["userinfo"].get("sub", "")),
            "uid": str(session["userinfo"].get("preferred_username", "")),
        }
        return func(*args, **kwargs)
    return wrapper
def get_tour_lock_status():
    """Return the tour lock flag stored on the first Tour row."""
    return Tour.query.first().tour_lock
| 27.423077 | 68 | 0.652174 |
from functools import wraps
from flask import session
from audiophiler.models import Tour
def audiophiler_auth(func):
@wraps(func)
def wrapped_function(*args, **kwargs):
uuid = str(session["userinfo"].get("sub", ""))
uid = str(session["userinfo"].get("preferred_username", ""))
auth_dict = {
"uuid": uuid,
"uid": uid
}
kwargs["auth_dict"] = auth_dict
return func(*args, **kwargs)
return wrapped_function
def get_tour_lock_status():
lock = Tour.query.first()
return lock.tour_lock
| true | true |
f71c1c8decca6e84cea60dec4d962fc15cc6ae66 | 4,251 | py | Python | qutip/hardware_info.py | kiuthed/qutip | b6fb8e5bbd9ffeae117b54e56313e8617038deab | [
"BSD-3-Clause"
] | null | null | null | qutip/hardware_info.py | kiuthed/qutip | b6fb8e5bbd9ffeae117b54e56313e8617038deab | [
"BSD-3-Clause"
] | null | null | null | qutip/hardware_info.py | kiuthed/qutip | b6fb8e5bbd9ffeae117b54e56313e8617038deab | [
"BSD-3-Clause"
] | null | null | null | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
__all__ = ['hardware_info']
import os
import sys
def _mac_hardware_info():
    """Collect basic hardware facts on macOS via ``sysctl hw``.

    Returns
    -------
    dict
        ``cpus`` (physical CPU count), ``cpu_freq`` (cpufrequency / 1e9),
        ``memsize`` (bytes / 2**20) and ``os``.
    """
    info = dict()
    results = dict()
    # Close the popen pipe deterministically (the original leaked it).
    with os.popen('sysctl hw') as pipe:
        lines = pipe.readlines()[1:20]
    for parts in [line.split(':') for line in lines]:
        key = parts[0].strip(' "').replace(' ', '_').lower()
        # Remove only the literal 'hw.' namespace prefix.  The previous
        # str.strip('hw.') stripped any run of the characters h/w/. from
        # BOTH ends of the key, silently mangling keys that start or end
        # with those characters.
        if key.startswith('hw.'):
            key = key[3:]
        info[key] = parts[1].strip('.\n ')
    results.update({'cpus': int(info['physicalcpu'])})
    results.update({'cpu_freq': int(info['cpufrequency']) / (1000. ** 3)})
    results.update({'memsize': int(info['memsize']) / (1024 ** 2)})
    # add OS information
    results.update({'os': 'Mac OSX'})
    return results
def _linux_hardware_info():
    """Collect basic hardware facts on Linux from ``lscpu``, sysfs and /proc.

    Returns
    -------
    dict
        ``cpus`` (sockets * cores per socket), ``cpu_freq`` (scaled from the
        sysfs max frequency, or from lscpu's 'CPU MHz' as a fallback),
        ``memsize`` (MemTotal in MB) and ``os``.
    """
    results = {}
    # get cpu number
    cpu_info = dict()
    # Context managers close the pipe/files deterministically (the original
    # left every handle to the garbage collector).
    with os.popen('lscpu') as pipe:
        for l in [l.split(':') for l in pipe.readlines()]:
            cpu_info[l[0]] = l[1].strip('.\n ').strip('kB')
    sockets = int(cpu_info['Socket(s)'])
    cores_per_socket = int(cpu_info['Core(s) per socket'])
    results.update({'cpus': sockets * cores_per_socket})
    # get cpu frequency directly (bypasses freq scaling)
    try:
        path = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq"
        with open(path) as f:
            cpu_freq = float(f.readlines()[0].strip('\n'))
        results.update({'cpu_freq': cpu_freq / (1000. ** 2)})
    except (OSError, IndexError, ValueError):
        # sysfs entry missing/unreadable or malformed -> fall back to the
        # (possibly scaled) value reported by lscpu.  The original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        cpu_freq = float(cpu_info['CPU MHz']) / 1000.
        results.update({'cpu_freq': cpu_freq})
    # get total amount of memory
    mem_info = dict()
    with open("/proc/meminfo") as f:
        for l in [l.split(':') for l in f.readlines()]:
            mem_info[l[0]] = l[1].strip('.\n ').strip('kB')
    results.update({'memsize': int(mem_info['MemTotal']) / 1024})
    # add OS information
    results.update({'os': 'Linux'})
    return results
def _win_hardware_info():
return {'os': 'Windows'}
def hardware_info():
    """
    Returns basic hardware information about the computer.

    Gives actual number of CPU's in the machine, even when hyperthreading is
    turned on.

    Returns
    -------
    info : dict
        Dictionary containing cpu and memory information.  Empty when the
        platform is unrecognised or probing fails.
    """
    try:
        if sys.platform == 'darwin':
            out = _mac_hardware_info()
        elif sys.platform == 'win32':
            out = _win_hardware_info()
        elif sys.platform in ['linux', 'linux2']:
            out = _linux_hardware_info()
        else:
            out = {}
    except Exception:
        # Probing shells out to platform tools and parses their output; any
        # failure (missing tool, unexpected format, ...) degrades to an empty
        # dict.  The original bare `except:` also trapped KeyboardInterrupt
        # and SystemExit, which should propagate.
        return {}
    return out
if __name__ == '__main__':
    # Smoke test: print the detected hardware info when run as a script.
    print(hardware_info())
| 36.646552 | 79 | 0.638908 | true | true | |
f71c1cb01d78b077e343818c4290f9abafc4925e | 1,900 | py | Python | cron_descriptor/GetText.py | nathmo/cron-descriptor | 2475065be9e203ed5cea49ec6ca365384f433cb6 | [
"MIT"
] | 3 | 2018-12-11T18:51:36.000Z | 2019-10-16T19:10:19.000Z | cron_descriptor/GetText.py | nathmo/cron-descriptor | 2475065be9e203ed5cea49ec6ca365384f433cb6 | [
"MIT"
] | 2 | 2019-04-14T04:14:31.000Z | 2019-10-15T03:23:54.000Z | cron_descriptor/cron_descriptor/GetText.py | michaelblyons/SublimeSyntax-Crontab | 54f1fa7ff0c9d18aea3790555dba6e533ce3749b | [
"MIT"
] | 2 | 2019-04-11T06:13:54.000Z | 2019-10-04T02:49:58.000Z | # The MIT License (MIT)
#
# Copyright (c) 2016 Adam Schubert
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gettext
import os
import logging
logger = logging.getLogger(__name__)
class GetText(object):
    """
    Handles language translations and installs the global ``_()`` function.
    """

    def __init__(self, locale_code):
        """
        Load the compiled catalogue for *locale_code* and install ``_()``.

        :param locale_code: selected locale; resolved to
            ``locale/<locale_code>.mo`` next to this module.  When the file
            is missing, identity (null) translations are installed instead.
        """
        filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                'locale', '{}.mo'.format(locale_code))
        try:
            # GNUTranslations reads the .mo file eagerly in its constructor,
            # so the handle can be closed immediately; the original passed an
            # open() result that was never closed (resource leak).
            with open(filename, "rb") as fp:
                trans = gettext.GNUTranslations(fp)
            logger.debug('{} Loaded'.format(filename))
        except IOError:
            # Missing/unreadable catalogue -> fall back to identity translations.
            logger.debug('Failed to find locale {}'.format(locale_code))
            trans = gettext.NullTranslations()
        trans.install()
| 36.538462 | 80 | 0.7 |
import gettext
import os
import logging
logger = logging.getLogger(__name__)
class GetText(object):
def __init__(self, locale_code):
try:
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'locale', '{}.mo'.format(locale_code))
trans = gettext.GNUTranslations(open(filename, "rb"))
logger.debug('{} Loaded'.format(filename))
except IOError:
logger.debug('Failed to find locale {}'.format(locale_code))
trans = gettext.NullTranslations()
trans.install()
| true | true |
f71c1cd7ddd442ab1b3e8652cdd667ec554c98f4 | 23,533 | py | Python | adnmtf/nmtf_core.py | Advestis/adnmtf | 7b36da64669894506071a75d8bd341edb0e75b9f | [
"MIT"
] | null | null | null | adnmtf/nmtf_core.py | Advestis/adnmtf | 7b36da64669894506071a75d8bd341edb0e75b9f | [
"MIT"
] | null | null | null | adnmtf/nmtf_core.py | Advestis/adnmtf | 7b36da64669894506071a75d8bd341edb0e75b9f | [
"MIT"
] | null | null | null | """Non-negative matrix and tensor factorization core functions
"""
# Author: Paul Fogel
# License: MIT
# Jan 4, '20
from typing import Tuple
import numpy as np
from .nmtf_utils import EPSILON, sparse_opt
import logging
logger = logging.getLogger(__name__)
# TODO (pcotte): typing
# TODO (pcotte): docstrings (with parameters and returns)
def ntf_stack(m, mmis, n_blocks):
    """Unfold tensor m for future use with NMF.

    The p columns of ``m`` are read as ``n_blocks`` consecutive blocks of
    p/n_blocks columns; block i is stacked column-by-column into column i of
    the result (each source column contributes n consecutive rows).

    Parameters
    ----------
    m: Input matrix (n x p); p must be a multiple of n_blocks.
    mmis: Missing-value mask (0 = missing cell, 1 = real cell); may be empty.
    n_blocks: Number of blocks.

    Returns
    -------
    mstacked: (n * p / n_blocks) x n_blocks stacked matrix.
    mmis_stacked: Stacked mask, or an empty array when ``mmis`` is empty.
    """
    n, p = m.shape
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the exact
    # equivalent the old alias pointed to.
    mmis = mmis.astype(int)
    n_mmis = mmis.shape[0]
    n_blocks = int(n_blocks)
    mstacked = np.zeros((int(n * p / n_blocks), n_blocks))
    if n_mmis > 0:
        mmis_stacked = np.zeros((int(n * p / n_blocks), n_blocks))
    else:
        mmis_stacked = np.array([])
    for i_block in range(0, n_blocks):
        for j in range(0, int(p / n_blocks)):
            # Rows i1:i2 of column i_block receive source column j of block i_block.
            i1 = j * n
            i2 = i1 + n
            mstacked[i1:i2, i_block] = m[:, int(i_block * p / n_blocks + j)]
            if n_mmis > 0:
                mmis_stacked[i1:i2, i_block] = mmis[:, int(i_block * p / n_blocks + j)]
    return mstacked, mmis_stacked
def ntf_solve(
    m,
    mmis,
    mt0,
    mw0,
    mb0,
    nc,
    tolerance,
    log_iter,
    status0,
    max_iterations,
    nmf_fix_user_lhe,
    nmf_fix_user_rhe,
    nmf_fix_user_bhe,
    nmf_sparse_level,
    ntf_unimodal,
    ntf_smooth,
    ntf_left_components,
    ntf_right_components,
    ntf_block_components,
    n_blocks,
    nmf_priors,
    my_status_box,
):
    """Interface to:
    - NTFSolve_simple

    When priors are supplied, their column count overrides ``nc`` and every
    strictly positive prior entry is binarized to 1 (in place) before the
    arguments are forwarded unchanged to :func:`ntf_solve_simple`.
    """
    if len(nmf_priors) > 0:
        # Rank follows the prior pattern; binarize the pattern in place.
        nc = nmf_priors.shape[1]
        nmf_priors[nmf_priors > 0] = 1
    return ntf_solve_simple(
        m=m,
        mmis=mmis,
        mt0=mt0,
        mw0=mw0,
        mb0=mb0,
        nc=nc,
        tolerance=tolerance,
        log_iter=log_iter,
        status0=status0,
        max_iterations=max_iterations,
        nmf_fix_user_lhe=nmf_fix_user_lhe,
        nmf_fix_user_rhe=nmf_fix_user_rhe,
        nmf_fix_user_bhe=nmf_fix_user_bhe,
        nmf_sparse_level=nmf_sparse_level,
        ntf_unimodal=ntf_unimodal,
        ntf_smooth=ntf_smooth,
        ntf_left_components=ntf_left_components,
        ntf_right_components=ntf_right_components,
        ntf_block_components=ntf_block_components,
        n_blocks=n_blocks,
        nmf_priors=nmf_priors,
        my_status_box=my_status_box,
    )
def ntf_solve_simple(
    m,
    mmis,
    mt0,
    mw0,
    mb0,
    nc,
    tolerance,
    log_iter,
    status0,
    max_iterations,
    nmf_fix_user_lhe,
    nmf_fix_user_rhe,
    nmf_fix_user_bhe,
    nmf_sparse_level,
    ntf_unimodal,
    ntf_smooth,
    ntf_left_components,
    ntf_right_components,
    ntf_block_components,
    n_blocks,
    nmf_priors,
    my_status_box,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, float, int]:
    """
    Estimate NTF matrices (HALS)

    Parameters
    ----------
    m: Input matrix
    mmis: Define missing values (0 = missing cell, 1 = real cell)
    mt0: Initial left hand matrix
    mw0: Initial right hand matrix
    mb0: Initial block hand matrix
    nc: NTF rank
    tolerance: Convergence threshold
    log_iter: Log results through iterations
    status0: Initial displayed status to be updated during iterations
    max_iterations: Max iterations
    nmf_fix_user_lhe: = 1 => fixed left hand matrix columns
    nmf_fix_user_rhe: = 1 => fixed right hand matrix columns
    nmf_fix_user_bhe: = 1 => fixed block hand matrix columns
    nmf_sparse_level: sparsity level (as defined by Hoyer); +/- = make RHE/LHe sparse
    ntf_unimodal: Apply Unimodal constraint on factoring vectors
    ntf_smooth: Apply Smooth constraint on factoring vectors
    ntf_left_components: Apply Unimodal/Smooth constraint on left hand matrix
    ntf_right_components: Apply Unimodal/Smooth constraint on right hand matrix
    ntf_block_components: Apply Unimodal/Smooth constraint on block hand matrix
    n_blocks: Number of NTF blocks
    nmf_priors: Elements in mw that should be updated (others remain 0)
    my_status_box: Progress reporter; also carries the user-cancellation flag

    Returns
    -------
    Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, float, int]\n
    * mt: Left hand matrix\n
    * mw: Right hand matrix\n
    * mb: Block hand matrix\n
    * diff: objective cost (NOTE(review): the early cancellation return below
      passes ``mres`` — the residual matrix — in this slot instead of the
      scalar ``diff``; confirm which one callers expect)\n
    * cancel_pressed\n

    Reference
    ---------
    A. Cichocki, A.-H. Phan, Fast local algorithms for large scale nonnegative matrix and tensor factorizations,
    IEICE Trans. Fundam. Electron. Commun. Comput. Sci. 92 (3) (2009) 708–721.
    """
    cancel_pressed = 0
    n, p0 = m.shape
    n_mmis = mmis.shape[0]
    nc = int(nc)
    n_blocks = int(n_blocks)
    # p: columns per block — the p0 columns of m are n_blocks concatenated blocks.
    p = int(p0 / n_blocks)
    nxp = int(n * p)
    nxp0 = int(n * p0)
    # Work on copies so the caller's initial factor matrices stay untouched.
    mt = np.copy(mt0)
    mw = np.copy(mw0)
    mb = np.copy(mb0)
    # step_iter = math.ceil(MaxIterations/10)
    step_iter = 1
    pbar_step = 100 * step_iter / max_iterations
    # id_blockp[i]: first column index of block i within m.
    id_blockp = np.arange(0, (n_blocks - 1) * p + 1, p)
    # Scratch buffers reused across ntf_update calls (smoothing accumulators).
    a = np.zeros(n)
    b = np.zeros(p)
    c = np.zeros(n_blocks)
    # Per-component sparsity parameter (only alpha[0] is driven initially).
    alpha = np.zeros(nc)
    # Compute Residual tensor
    mfit = np.zeros((n, p0))
    for k in range(0, nc):
        if n_blocks > 1:
            for i_block in range(0, n_blocks):
                mfit[:, id_blockp[i_block]: id_blockp[i_block] + p] += (
                    mb[i_block, k] * np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))
                )
        else:
            mfit[:, id_blockp[0]: id_blockp[0] + p] += np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))
    denomt = np.zeros(n)
    denomw = np.zeros(p)
    denom_block = np.zeros((n_blocks, nc))
    mt2 = np.zeros(n)
    mw2 = np.zeros(p)
    mt_mw = np.zeros(nxp)
    denom_cutoff = 0.1
    # Residual only counts observed cells when a missing-value mask is given.
    if n_mmis > 0:
        mres = (m - mfit) * mmis
    else:
        mres = m - mfit
    my_status_box.init_bar()
    # Loop
    cont = 1
    i_iter = 0
    diff0 = 1.0e99
    mpart = np.zeros((n, p0))
    # Seed alpha below the target so it can ramp up in the adaptive phase.
    if abs(nmf_sparse_level) < 1:
        alpha[0] = nmf_sparse_level * 0.8
    else:
        alpha[0] = nmf_sparse_level
    percent_zeros = 0
    iter_sparse = 0
    while (cont > 0) & (i_iter < max_iterations):
        # One HALS pass: update each component in place via ntf_update.
        for k in range(0, nc):
            (
                n_blocks,
                mpart,
                id_blockp,
                p,
                mb,
                k,
                mt,
                n,
                mw,
                n_mmis,
                mmis,
                mres,
                nmf_fix_user_lhe,
                denomt,
                mw2,
                denom_cutoff,
                alpha,
                ntf_unimodal,
                ntf_left_components,
                ntf_smooth,
                a,
                nmf_fix_user_rhe,
                denomw,
                mt2,
                ntf_right_components,
                b,
                nmf_fix_user_bhe,
                mt_mw,
                nxp,
                denom_block,
                ntf_block_components,
                c,
                mfit,
                nmf_priors,
            ) = ntf_update(
                n_blocks=n_blocks,
                mpart=mpart,
                id_blockp=id_blockp,
                p=p,
                mb=mb,
                k=k,
                mt=mt,
                n=n,
                mw=mw,
                n_mmis=n_mmis,
                mmis=mmis,
                mres=mres,
                nmf_fix_user_lhe=nmf_fix_user_lhe,
                denomt=denomt,
                mw2=mw2,
                denom_cutoff=denom_cutoff,
                alpha=alpha,
                ntf_unimodal=ntf_unimodal,
                ntf_left_components=ntf_left_components,
                ntf_smooth=ntf_smooth,
                a=a,
                nmf_fix_user_rhe=nmf_fix_user_rhe,
                denomw=denomw,
                mt2=mt2,
                ntf_right_components=ntf_right_components,
                b=b,
                nmf_fix_user_bhe=nmf_fix_user_bhe,
                mt_mw=mt_mw,
                nxp=nxp,
                denom_block=denom_block,
                ntf_block_components=ntf_block_components,
                c=c,
                mfit=mfit,
                nmf_priors=nmf_priors,
            )
        if i_iter % step_iter == 0:
            # Check convergence
            # diff: mean squared residual over all cells.
            diff = np.linalg.norm(mres) ** 2 / nxp0
            if (diff0 - diff) / diff0 < tolerance:
                cont = 0
            else:
                if diff > diff0:
                    my_status_box.my_print(f"{status0} Iter: {i_iter} MSR does not improve")
                diff0 = diff
            Status = f"{status0} Iteration: {i_iter}"
            if nmf_sparse_level != 0:
                Status = f"{Status} ; Achieved sparsity: {round(percent_zeros, 2)}; alpha: {round(alpha[0], 2)}"
                if log_iter == 1:
                    my_status_box.my_print(Status)
            my_status_box.update_status(status=Status)
            my_status_box.update_bar(step=pbar_step)
            if my_status_box.cancel_pressed:
                cancel_pressed = 1
                # NOTE(review): returns mres (matrix) where the final return
                # passes diff (scalar) — see docstring note.
                return np.array([]), mt, mw, mb, mres, cancel_pressed
            if log_iter == 1:
                my_status_box.my_print(status0 + " Iter: " + str(i_iter) + " MSR: " + str(diff))
        i_iter += 1
        # Adaptive sparsity: once converged (or out of iterations), measure the
        # achieved zero fraction and, if below target, raise alpha and restart.
        if cont == 0 or i_iter == max_iterations or (cont == 0 and abs(nmf_sparse_level) == 1):
            if 0 < nmf_sparse_level < 1:
                # Positive level: sparsify the right-hand matrix mw.
                sparse_test = np.zeros((nc, 1))
                percent_zeros0 = percent_zeros
                for k in range(0, nc):
                    sparse_test[k] = np.where(mw[:, k] == 0)[0].size
                percent_zeros = np.mean(sparse_test) / p
                if percent_zeros < percent_zeros0:
                    iter_sparse += 1
                else:
                    iter_sparse = 0
                if (percent_zeros < 0.99 * nmf_sparse_level) & (iter_sparse < 50):
                    alpha[0] *= min(1.05 * nmf_sparse_level / percent_zeros, 1.1)
                    if alpha[0] < 1:
                        i_iter = 0
                        cont = 1
            elif 0 > nmf_sparse_level > -1:
                # Negative level: sparsify the left-hand matrix mt.
                sparse_test = np.zeros((nc, 1))
                percent_zeros0 = percent_zeros
                for k in range(0, nc):
                    sparse_test[k] = np.where(mt[:, k] == 0)[0].size
                percent_zeros = np.mean(sparse_test) / n
                if percent_zeros < percent_zeros0:
                    iter_sparse += 1
                else:
                    iter_sparse = 0
                if (percent_zeros < 0.99 * abs(nmf_sparse_level)) & (iter_sparse < 50):
                    alpha[0] *= min(1.05 * abs(nmf_sparse_level) / percent_zeros, 1.1)
                    if abs(alpha[0]) < 1:
                        i_iter = 0
                        cont = 1
            elif abs(alpha[0]) == 1:
                # |alpha| saturated at 1: derive a per-component alpha from each
                # column's (||.||1/||.||2)^2 ratio (effective support size),
                # then restart the iterations with the refined values.
                if alpha[0] == -1:
                    for k in range(0, nc):
                        if np.max(mt[:, k]) > 0:
                            hhi = int(
                                np.round(
                                    (np.linalg.norm(mt[:, k], ord=1) / (np.linalg.norm(mt[:, k], ord=2) + EPSILON))
                                    ** 2,
                                    decimals=0,
                                )
                            )
                            alpha[k] = -1 - (n - hhi) / (n - 1)
                        else:
                            alpha[k] = 0
                else:
                    for k in range(0, nc):
                        if np.max(mw[:, k]) > 0:
                            hhi = int(
                                np.round(
                                    (np.linalg.norm(mw[:, k], ord=1) / (np.linalg.norm(mw[:, k], ord=2) + EPSILON))
                                    ** 2,
                                    decimals=0,
                                )
                            )
                            alpha[k] = 1 + (p - hhi) / (p - 1)
                        else:
                            alpha[k] = 0
                # Cap each component's value at twice the smallest one.
                if alpha[0] <= -1:
                    alpha_real = -(alpha + 1)
                    # noinspection PyTypeChecker
                    alpha_min = min(alpha_real)
                    for k in range(0, nc):
                        # noinspection PyUnresolvedReferences
                        alpha[k] = min(alpha_real[k], 2 * alpha_min)
                        alpha[k] = -alpha[k] - 1
                else:
                    alpha_real = alpha - 1
                    alpha_min = min(alpha_real)
                    for k in range(0, nc):
                        alpha[k] = min(alpha_real[k], 2 * alpha_min)
                        alpha[k] = alpha[k] + 1
                i_iter = 0
                cont = 1
                diff0 = 1.0e99
    # Log each component's effective support size (inverse HHI ratio).
    for k in range(0, nc):
        hhi = np.round((np.linalg.norm(mt[:, k], ord=1) / np.linalg.norm(mt[:, k], ord=2)) ** 2, decimals=0)
        logger.info(f"component: {k}, left hhi: {hhi}")
        hhi = np.round((np.linalg.norm(mw[:, k], ord=1) / np.linalg.norm(mw[:, k], ord=2)) ** 2, decimals=0)
        logger.info(f"component: {k} right hhi: {hhi}")
    # Undo the block-denominator scaling applied during masked updates.
    if (n_mmis > 0) & (nmf_fix_user_bhe == 0):
        mb *= denom_block
    # TODO (pcotte): mt and mw can be not yet referenced: fix that
    return np.array([]), mt, mw, mb, diff, cancel_pressed
def ntf_update(
    n_blocks,
    mpart,
    id_blockp,
    p,
    mb,
    k,
    mt,
    n,
    mw,
    n_mmis,
    mmis,
    mres,
    nmf_fix_user_lhe,
    denomt,
    mw2,
    denom_cutoff,
    alpha,
    ntf_unimodal,
    ntf_left_components,
    ntf_smooth,
    a,
    nmf_fix_user_rhe,
    denomw,
    mt2,
    ntf_right_components,
    b,
    nmf_fix_user_bhe,
    mt_mw,
    nxp,
    denom_block,
    ntf_block_components,
    c,
    mfit,
    nmf_priors,
):
    """Core updating code called by NTFSolve_simple & NTF Solve_conv

    Performs one HALS update of component ``k``: rebuilds that component's
    partial tensor plus the residual, then (unless fixed by the user) updates
    mt[:, k], mw[:, k] and mb[:, k] in turn — applying the optional sparsity
    (``alpha``), unimodality and smoothing constraints — and finally refreshes
    the residual tensor ``mres``.  Most array arguments are mutated in place.

    Input:
        All variables in the calling function used in the function
    Output:
        Same as Input
    """
    if len(nmf_priors) > 0:
        n_nmf_priors, nc = nmf_priors.shape
    else:
        n_nmf_priors = 0
    # Compute kth-part
    # mpart = component k's contribution + current residual: the target that
    # the updated rank-1 terms should best explain.
    if n_blocks > 1:
        for i_block in range(0, n_blocks):
            mpart[:, id_blockp[i_block]: id_blockp[i_block] + p] = (
                mb[i_block, k] * np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))
            )
    else:
        mpart[:, id_blockp[0]: id_blockp[0] + p] = np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))
    if n_mmis > 0:
        mpart *= mmis
    mpart += mres
    # Decide which factors get re-normalized after their update; exactly one
    # of the non-fixed factors absorbs the component's overall scale.
    if nmf_fix_user_bhe > 0:
        norm_bhe = True
        if nmf_fix_user_rhe == 0:
            norm_lhe = True
            norm_rhe = False
        else:
            norm_lhe = False
            norm_rhe = True
    else:
        norm_bhe = False
        norm_lhe = True
        norm_rhe = True
    # Normalize user-fixed columns up front.
    if (nmf_fix_user_lhe > 0) & norm_lhe:
        norm = np.linalg.norm(mt[:, k])
        if norm > 0:
            mt[:, k] /= norm
    if (nmf_fix_user_rhe > 0) & norm_rhe:
        norm = np.linalg.norm(mw[:, k])
        if norm > 0:
            mw[:, k] /= norm
    if (nmf_fix_user_bhe > 0) & norm_bhe & (n_blocks > 1):
        norm = np.linalg.norm(mb[:, k])
        if norm > 0:
            mb[:, k] /= norm
    if nmf_fix_user_lhe == 0:
        # Update Mt
        mt[:, k] = 0
        if n_blocks > 1:
            for i_block in range(0, n_blocks):
                mt[:, k] += mb[i_block, k] * mpart[:, id_blockp[i_block]: id_blockp[i_block] + p] @ mw[:, k]
        else:
            mt[:, k] += mpart[:, id_blockp[0]: id_blockp[0] + p] @ mw[:, k]
        if n_mmis > 0:
            denomt[:] = 0
            mw2[:] = mw[:, k] ** 2
            if n_blocks > 1:
                for i_block in range(0, n_blocks):
                    # Broadcast missing cells into Mw to calculate Mw.T * Mw
                    denomt += mb[i_block, k] ** 2 * mmis[:, id_blockp[i_block]: id_blockp[i_block] + p] @ mw2
            else:
                denomt += mmis[:, id_blockp[0]: id_blockp[0] + p] @ mw2
            # Clip small denominators to avoid blow-ups on sparsely observed rows.
            denomt /= np.max(denomt)
            denomt[denomt < denom_cutoff] = denom_cutoff
            mt[:, k] /= denomt
        # Non-negativity projection.
        mt[mt[:, k] < 0, k] = 0
        # Negative alpha sparsifies the left factor.
        if alpha[0] < 0:
            if alpha[0] <= -1:
                if (alpha[0] == -1) & (np.max(mt[:, k]) > 0):
                    # Hard-threshold: keep only the hhi largest entries, where
                    # hhi = round((||.||1/||.||2)^2) (effective support size).
                    t_threshold = mt[:, k]
                    hhi = int(
                        np.round(
                            (np.linalg.norm(t_threshold, ord=1) / (np.linalg.norm(t_threshold, ord=2) + EPSILON)) ** 2,
                            decimals=0,
                        )
                    )
                    t_rank = np.argsort(t_threshold)
                    t_threshold[t_rank[0: n - hhi]] = 0
                else:
                    mt[:, k] = sparse_opt(mt[:, k], -alpha[k] - 1, False)
            else:
                mt[:, k] = sparse_opt(mt[:, k], -alpha[0], False)
        if (ntf_unimodal > 0) & (ntf_left_components > 0):
            # Enforce unimodal distribution
            tmax = np.argmax(mt[:, k])
            for i in range(tmax + 1, n):
                mt[i, k] = min(mt[i - 1, k], mt[i, k])
            for i in range(tmax - 1, -1, -1):
                mt[i, k] = min(mt[i + 1, k], mt[i, k])
        if (ntf_smooth > 0) & (ntf_left_components > 0):
            # Smooth distribution
            # 3-point weighted moving average (buffer `a` avoids in-place bias).
            a[0] = 0.75 * mt[0, k] + 0.25 * mt[1, k]
            a[n - 1] = 0.25 * mt[n - 2, k] + 0.75 * mt[n - 1, k]
            for i in range(1, n - 1):
                a[i] = 0.25 * mt[i - 1, k] + 0.5 * mt[i, k] + 0.25 * mt[i + 1, k]
            mt[:, k] = a
        if norm_lhe:
            norm = np.linalg.norm(mt[:, k])
            if norm > 0:
                mt[:, k] /= norm
    if nmf_fix_user_rhe == 0:
        # Update Mw
        mw[:, k] = 0
        if n_blocks > 1:
            for i_block in range(0, n_blocks):
                mw[:, k] += mpart[:, id_blockp[i_block]: id_blockp[i_block] + p].T @ mt[:, k] * mb[i_block, k]
        else:
            mw[:, k] += mpart[:, id_blockp[0]: id_blockp[0] + p].T @ mt[:, k]
        if n_mmis > 0:
            denomw[:] = 0
            mt2[:] = mt[:, k] ** 2
            if n_blocks > 1:
                for i_block in range(0, n_blocks):
                    # Broadcast missing cells into Mw to calculate Mt.T * Mt
                    denomw += mb[i_block, k] ** 2 * mmis[:, id_blockp[i_block]: id_blockp[i_block] + p].T @ mt2
            else:
                denomw += mmis[:, id_blockp[0]: id_blockp[0] + p].T @ mt2
            # Clip small denominators to avoid blow-ups on sparsely observed columns.
            denomw /= np.max(denomw)
            denomw[denomw < denom_cutoff] = denom_cutoff
            mw[:, k] /= denomw
        # Non-negativity projection.
        mw[mw[:, k] < 0, k] = 0
        # Positive alpha sparsifies the right factor.
        if alpha[0] > 0:
            if alpha[0] >= 1:
                if (alpha[0] == 1) & (np.max(mw[:, k]) > 0):
                    # Hard-threshold: keep only the hhi largest entries (see mt branch).
                    w_threshold = mw[:, k]
                    hhi = int(
                        np.round(
                            (np.linalg.norm(w_threshold, ord=1) / (np.linalg.norm(w_threshold, ord=2) + EPSILON)) ** 2,
                            decimals=0,
                        )
                    )
                    w_rank = np.argsort(w_threshold)
                    w_threshold[w_rank[0: p - hhi]] = 0
                else:
                    mw[:, k] = sparse_opt(mw[:, k], alpha[k] - 1, False)
            else:
                mw[:, k] = sparse_opt(mw[:, k], alpha[0], False)
        if (ntf_unimodal > 0) & (ntf_right_components > 0):
            # Enforce unimodal distribution
            wmax = np.argmax(mw[:, k])
            for j in range(wmax + 1, p):
                mw[j, k] = min(mw[j - 1, k], mw[j, k])
            for j in range(wmax - 1, -1, -1):
                mw[j, k] = min(mw[j + 1, k], mw[j, k])
        if (ntf_smooth > 0) & (ntf_right_components > 0):
            # Smooth distribution
            # 3-point weighted moving average (buffer `b` avoids in-place bias).
            b[0] = 0.75 * mw[0, k] + 0.25 * mw[1, k]
            b[p - 1] = 0.25 * mw[p - 2, k] + 0.75 * mw[p - 1, k]
            for j in range(1, p - 1):
                b[j] = 0.25 * mw[j - 1, k] + 0.5 * mw[j, k] + 0.25 * mw[j + 1, k]
            mw[:, k] = b
        # Zero out entries outside the user-supplied prior pattern.
        if n_nmf_priors > 0:
            mw[:, k] = mw[:, k] * nmf_priors[:, k]
        if norm_rhe:
            norm = np.linalg.norm(mw[:, k])
            if norm > 0:
                mw[:, k] /= norm
    if nmf_fix_user_bhe == 0:
        # Update Mb
        mb[:, k] = 0
        # mt_mw: vectorized rank-1 term mt[:, k] mw[:, k]^T, reused per block.
        mt_mw[:] = np.reshape((np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))), nxp)
        for i_block in range(0, n_blocks):
            mb[i_block, k] = np.reshape(mpart[:, id_blockp[i_block]: id_blockp[i_block] + p], nxp).T @ mt_mw
        if n_mmis > 0:
            mt_mw[:] = mt_mw[:] ** 2
            for i_block in range(0, n_blocks):
                # Broadcast missing cells into Mb to calculate Mb.T * Mb
                denom_block[i_block, k] = (
                    np.reshape(mmis[:, id_blockp[i_block]: id_blockp[i_block] + p], (1, nxp)) @ mt_mw
                )
            # Clip small denominators relative to the largest block.
            maxdenom_block = np.max(denom_block[:, k])
            denom_block[denom_block[:, k] < denom_cutoff * maxdenom_block] = denom_cutoff * maxdenom_block
            mb[:, k] /= denom_block[:, k]
        # Non-negativity projection.
        mb[mb[:, k] < 0, k] = 0
        if (ntf_unimodal > 0) & (ntf_block_components > 0):
            # Enforce unimodal distribution
            bmax = np.argmax(mb[:, k])
            for i_block in range(bmax + 1, n_blocks):
                mb[i_block, k] = min(mb[i_block - 1, k], mb[i_block, k])
            for i_block in range(bmax - 1, -1, -1):
                mb[i_block, k] = min(mb[i_block + 1, k], mb[i_block, k])
        if (ntf_smooth > 0) & (ntf_block_components > 0):
            # Smooth distribution
            # 3-point weighted moving average (buffer `c` avoids in-place bias).
            c[0] = 0.75 * mb[0, k] + 0.25 * mb[1, k]
            c[n_blocks - 1] = 0.25 * mb[n_blocks - 2, k] + 0.75 * mb[n_blocks - 1, k]
            for i_block in range(1, n_blocks - 1):
                c[i_block] = 0.25 * mb[i_block - 1, k] + 0.5 * mb[i_block, k] + 0.25 * mb[i_block + 1, k]
            mb[:, k] = c
        if norm_bhe:
            norm = np.linalg.norm(mb[:, k])
            if norm > 0:
                mb[:, k] /= norm
    # Update residual tensor
    # mres = mpart minus the refreshed component-k fit.
    mfit[:, :] = 0
    if n_blocks > 1:
        for i_block in range(0, n_blocks):
            mfit[:, id_blockp[i_block]: id_blockp[i_block] + p] += (
                mb[i_block, k] * np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))
            )
    else:
        mfit[:, id_blockp[0]: id_blockp[0] + p] += np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))
    if n_mmis > 0:
        mres[:, :] = (mpart - mfit) * mmis
    else:
        mres[:, :] = mpart - mfit
    return (
        n_blocks,
        mpart,
        id_blockp,
        p,
        mb,
        k,
        mt,
        n,
        mw,
        n_mmis,
        mmis,
        mres,
        nmf_fix_user_lhe,
        denomt,
        mw2,
        denom_cutoff,
        alpha,
        ntf_unimodal,
        ntf_left_components,
        ntf_smooth,
        a,
        nmf_fix_user_rhe,
        denomw,
        mt2,
        ntf_right_components,
        b,
        nmf_fix_user_bhe,
        mt_mw,
        nxp,
        denom_block,
        ntf_block_components,
        c,
        mfit,
        nmf_priors,
    )
| 31.169536 | 119 | 0.470658 |
from typing import Tuple
import numpy as np
from .nmtf_utils import EPSILON, sparse_opt
import logging
logger = logging.getLogger(__name__)
# TODO (pcotte): typing
# TODO (pcotte): docstrings (with parameters and returns)
def ntf_stack(m, mmis, n_blocks):
n, p = m.shape
mmis = mmis.astype(np.int)
n_mmis = mmis.shape[0]
n_blocks = int(n_blocks)
mstacked = np.zeros((int(n * p / n_blocks), n_blocks))
if n_mmis > 0:
mmis_stacked = np.zeros((int(n * p / n_blocks), n_blocks))
else:
mmis_stacked = np.array([])
for i_block in range(0, n_blocks):
for j in range(0, int(p / n_blocks)):
i1 = j * n
i2 = i1 + n
mstacked[i1:i2, i_block] = m[:, int(i_block * p / n_blocks + j)]
if n_mmis > 0:
mmis_stacked[i1:i2, i_block] = mmis[:, int(i_block * p / n_blocks + j)]
return mstacked, mmis_stacked
def ntf_solve(
m,
mmis,
mt0,
mw0,
mb0,
nc,
tolerance,
log_iter,
status0,
max_iterations,
nmf_fix_user_lhe,
nmf_fix_user_rhe,
nmf_fix_user_bhe,
nmf_sparse_level,
ntf_unimodal,
ntf_smooth,
ntf_left_components,
ntf_right_components,
ntf_block_components,
n_blocks,
nmf_priors,
my_status_box,
):
if len(nmf_priors) > 0:
n_nmf_priors, nc = nmf_priors.shape
else:
n_nmf_priors = 0
if n_nmf_priors > 0:
nmf_priors[nmf_priors > 0] = 1
return ntf_solve_simple(
m=m,
mmis=mmis,
mt0=mt0,
mw0=mw0,
mb0=mb0,
nc=nc,
tolerance=tolerance,
log_iter=log_iter,
status0=status0,
max_iterations=max_iterations,
nmf_fix_user_lhe=nmf_fix_user_lhe,
nmf_fix_user_rhe=nmf_fix_user_rhe,
nmf_fix_user_bhe=nmf_fix_user_bhe,
nmf_sparse_level=nmf_sparse_level,
ntf_unimodal=ntf_unimodal,
ntf_smooth=ntf_smooth,
ntf_left_components=ntf_left_components,
ntf_right_components=ntf_right_components,
ntf_block_components=ntf_block_components,
n_blocks=n_blocks,
nmf_priors=nmf_priors,
my_status_box=my_status_box,
)
def ntf_solve_simple(
    m,
    mmis,
    mt0,
    mw0,
    mb0,
    nc,
    tolerance,
    log_iter,
    status0,
    max_iterations,
    nmf_fix_user_lhe,
    nmf_fix_user_rhe,
    nmf_fix_user_bhe,
    nmf_sparse_level,
    ntf_unimodal,
    ntf_smooth,
    ntf_left_components,
    ntf_right_components,
    ntf_block_components,
    n_blocks,
    nmf_priors,
    my_status_box,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, float, int]:
    """Estimate the NTF factor matrices with an iterative per-component update loop.

    Repeatedly calls ntf_update for every component k, monitoring the mean
    squared residual (MSR) until the relative improvement drops below
    `tolerance`, `max_iterations` is reached, or the user cancels through
    `my_status_box`. When `nmf_sparse_level` is non-zero the loop may be
    restarted with adjusted `alpha` values until the requested sparsity on
    mw (positive level) or mt (negative level) is achieved.

    Returns (empty array, mt, mw, mb, diff, cancel_pressed).
    NOTE(review): on user cancel, the 5th element is the residual matrix
    `mres`, not the scalar `diff` promised by the annotation — pre-existing
    inconsistency kept as-is.
    """
    cancel_pressed = 0
    n, p0 = m.shape
    n_mmis = mmis.shape[0]  # > 0 means a missing-data mask is in use
    nc = int(nc)
    n_blocks = int(n_blocks)
    p = int(p0 / n_blocks)
    nxp = int(n * p)
    nxp0 = int(n * p0)
    # Work on copies: the initial factor estimates must not be clobbered.
    mt = np.copy(mt0)
    mw = np.copy(mw0)
    mb = np.copy(mb0)
    # step_iter = math.ceil(MaxIterations/10)
    step_iter = 1
    pbar_step = 100 * step_iter / max_iterations
    # id_blockp[b] is the first column of block b inside the (n, p0) layout.
    id_blockp = np.arange(0, (n_blocks - 1) * p + 1, p)
    a = np.zeros(n)
    b = np.zeros(p)
    c = np.zeros(n_blocks)
    alpha = np.zeros(nc)
    # Compute Residual tensor
    mfit = np.zeros((n, p0))
    for k in range(0, nc):
        if n_blocks > 1:
            for i_block in range(0, n_blocks):
                mfit[:, id_blockp[i_block]: id_blockp[i_block] + p] += (
                    mb[i_block, k] * np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))
                )
        else:
            mfit[:, id_blockp[0]: id_blockp[0] + p] += np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))
    # Pre-allocated scratch buffers, shared with ntf_update (mutated in place there).
    denomt = np.zeros(n)
    denomw = np.zeros(p)
    denom_block = np.zeros((n_blocks, nc))
    mt2 = np.zeros(n)
    mw2 = np.zeros(p)
    mt_mw = np.zeros(nxp)
    denom_cutoff = 0.1
    if n_mmis > 0:
        mres = (m - mfit) * mmis
    else:
        mres = m - mfit
    my_status_box.init_bar()
    # Loop
    cont = 1
    i_iter = 0
    diff0 = 1.0e99
    mpart = np.zeros((n, p0))
    # Initial sparsity weight: sub-unit levels are damped by 0.8.
    if abs(nmf_sparse_level) < 1:
        alpha[0] = nmf_sparse_level * 0.8
    else:
        alpha[0] = nmf_sparse_level
    percent_zeros = 0
    iter_sparse = 0
    while (cont > 0) & (i_iter < max_iterations):
        for k in range(0, nc):
            (
                n_blocks,
                mpart,
                id_blockp,
                p,
                mb,
                k,
                mt,
                n,
                mw,
                n_mmis,
                mmis,
                mres,
                nmf_fix_user_lhe,
                denomt,
                mw2,
                denom_cutoff,
                alpha,
                ntf_unimodal,
                ntf_left_components,
                ntf_smooth,
                a,
                nmf_fix_user_rhe,
                denomw,
                mt2,
                ntf_right_components,
                b,
                nmf_fix_user_bhe,
                mt_mw,
                nxp,
                denom_block,
                ntf_block_components,
                c,
                mfit,
                nmf_priors,
            ) = ntf_update(
                n_blocks=n_blocks,
                mpart=mpart,
                id_blockp=id_blockp,
                p=p,
                mb=mb,
                k=k,
                mt=mt,
                n=n,
                mw=mw,
                n_mmis=n_mmis,
                mmis=mmis,
                mres=mres,
                nmf_fix_user_lhe=nmf_fix_user_lhe,
                denomt=denomt,
                mw2=mw2,
                denom_cutoff=denom_cutoff,
                alpha=alpha,
                ntf_unimodal=ntf_unimodal,
                ntf_left_components=ntf_left_components,
                ntf_smooth=ntf_smooth,
                a=a,
                nmf_fix_user_rhe=nmf_fix_user_rhe,
                denomw=denomw,
                mt2=mt2,
                ntf_right_components=ntf_right_components,
                b=b,
                nmf_fix_user_bhe=nmf_fix_user_bhe,
                mt_mw=mt_mw,
                nxp=nxp,
                denom_block=denom_block,
                ntf_block_components=ntf_block_components,
                c=c,
                mfit=mfit,
                nmf_priors=nmf_priors,
            )
        if i_iter % step_iter == 0:
            # Check convergence
            diff = np.linalg.norm(mres) ** 2 / nxp0
            if (diff0 - diff) / diff0 < tolerance:
                cont = 0
            else:
                if diff > diff0:
                    my_status_box.my_print(f"{status0} Iter: {i_iter} MSR does not improve")
                diff0 = diff
            Status = f"{status0} Iteration: {i_iter}"
            if nmf_sparse_level != 0:
                Status = f"{Status} ; Achieved sparsity: {round(percent_zeros, 2)}; alpha: {round(alpha[0], 2)}"
            if log_iter == 1:
                my_status_box.my_print(Status)
            my_status_box.update_status(status=Status)
            my_status_box.update_bar(step=pbar_step)
            if my_status_box.cancel_pressed:
                cancel_pressed = 1
                # NOTE(review): returns `mres` (matrix) in the diff position.
                return np.array([]), mt, mw, mb, mres, cancel_pressed
            if log_iter == 1:
                my_status_box.my_print(status0 + " Iter: " + str(i_iter) + " MSR: " + str(diff))
        i_iter += 1
        # On (tentative) termination, check whether the requested sparsity has
        # been reached; if not, increase alpha and restart the main loop.
        if cont == 0 or i_iter == max_iterations or (cont == 0 and abs(nmf_sparse_level) == 1):
            if 0 < nmf_sparse_level < 1:
                # Positive level: sparsity is measured on the right matrix mw.
                sparse_test = np.zeros((nc, 1))
                percent_zeros0 = percent_zeros
                for k in range(0, nc):
                    sparse_test[k] = np.where(mw[:, k] == 0)[0].size
                percent_zeros = np.mean(sparse_test) / p
                if percent_zeros < percent_zeros0:
                    iter_sparse += 1
                else:
                    iter_sparse = 0
                if (percent_zeros < 0.99 * nmf_sparse_level) & (iter_sparse < 50):
                    alpha[0] *= min(1.05 * nmf_sparse_level / percent_zeros, 1.1)
                    if alpha[0] < 1:
                        i_iter = 0
                        cont = 1
            elif 0 > nmf_sparse_level > -1:
                # Negative level: sparsity is measured on the left matrix mt.
                sparse_test = np.zeros((nc, 1))
                percent_zeros0 = percent_zeros
                for k in range(0, nc):
                    sparse_test[k] = np.where(mt[:, k] == 0)[0].size
                percent_zeros = np.mean(sparse_test) / n
                if percent_zeros < percent_zeros0:
                    iter_sparse += 1
                else:
                    iter_sparse = 0
                if (percent_zeros < 0.99 * abs(nmf_sparse_level)) & (iter_sparse < 50):
                    alpha[0] *= min(1.05 * abs(nmf_sparse_level) / percent_zeros, 1.1)
                    if abs(alpha[0]) < 1:
                        i_iter = 0
                        cont = 1
            elif abs(alpha[0]) == 1:
                # |level| == 1: derive a per-component alpha from each column's
                # HHI (Herfindahl-Hirschman index, a concentration measure),
                # then restart the whole loop with the adjusted alphas.
                if alpha[0] == -1:
                    for k in range(0, nc):
                        if np.max(mt[:, k]) > 0:
                            hhi = int(
                                np.round(
                                    (np.linalg.norm(mt[:, k], ord=1) / (np.linalg.norm(mt[:, k], ord=2) + EPSILON))
                                    ** 2,
                                    decimals=0,
                                )
                            )
                            alpha[k] = -1 - (n - hhi) / (n - 1)
                        else:
                            alpha[k] = 0
                else:
                    for k in range(0, nc):
                        if np.max(mw[:, k]) > 0:
                            hhi = int(
                                np.round(
                                    (np.linalg.norm(mw[:, k], ord=1) / (np.linalg.norm(mw[:, k], ord=2) + EPSILON))
                                    ** 2,
                                    decimals=0,
                                )
                            )
                            alpha[k] = 1 + (p - hhi) / (p - 1)
                        else:
                            alpha[k] = 0
                # Clamp each alpha towards twice the smallest one.
                if alpha[0] <= -1:
                    alpha_real = -(alpha + 1)
                    # noinspection PyTypeChecker
                    alpha_min = min(alpha_real)
                    for k in range(0, nc):
                        # noinspection PyUnresolvedReferences
                        alpha[k] = min(alpha_real[k], 2 * alpha_min)
                        alpha[k] = -alpha[k] - 1
                else:
                    alpha_real = alpha - 1
                    alpha_min = min(alpha_real)
                    for k in range(0, nc):
                        alpha[k] = min(alpha_real[k], 2 * alpha_min)
                        alpha[k] = alpha[k] + 1
                i_iter = 0
                cont = 1
                diff0 = 1.0e99
    # Log the final HHI (sparsity proxy) of every component.
    for k in range(0, nc):
        hhi = np.round((np.linalg.norm(mt[:, k], ord=1) / np.linalg.norm(mt[:, k], ord=2)) ** 2, decimals=0)
        logger.info(f"component: {k}, left hhi: {hhi}")
        hhi = np.round((np.linalg.norm(mw[:, k], ord=1) / np.linalg.norm(mw[:, k], ord=2)) ** 2, decimals=0)
        logger.info(f"component: {k} right hhi: {hhi}")
    if (n_mmis > 0) & (nmf_fix_user_bhe == 0):
        mb *= denom_block
    # TODO (pcotte): mt and mw can be not yet referenced: fix that
    return np.array([]), mt, mw, mb, diff, cancel_pressed
def ntf_update(
    n_blocks,
    mpart,
    id_blockp,
    p,
    mb,
    k,
    mt,
    n,
    mw,
    n_mmis,
    mmis,
    mres,
    nmf_fix_user_lhe,
    denomt,
    mw2,
    denom_cutoff,
    alpha,
    ntf_unimodal,
    ntf_left_components,
    ntf_smooth,
    a,
    nmf_fix_user_rhe,
    denomw,
    mt2,
    ntf_right_components,
    b,
    nmf_fix_user_bhe,
    mt_mw,
    nxp,
    denom_block,
    ntf_block_components,
    c,
    mfit,
    nmf_priors,
):
    """Perform one update of component `k` of the NTF factors, in place.

    Updates column k of mt (left factor), mw (right factor) and mb (per-block
    weights), optionally enforcing sparsity (via `alpha` / sparse_opt),
    unimodality and smoothing, then refreshes the fitted part `mfit` and the
    residual `mres`. All array arguments are mutated in place and returned in
    the same order as the parameter list so the caller can tuple-unpack them.
    """
    if len(nmf_priors) > 0:
        n_nmf_priors, nc = nmf_priors.shape
    else:
        n_nmf_priors = 0
    # Compute kth-part
    if n_blocks > 1:
        for i_block in range(0, n_blocks):
            mpart[:, id_blockp[i_block]: id_blockp[i_block] + p] = (
                mb[i_block, k] * np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))
            )
    else:
        mpart[:, id_blockp[0]: id_blockp[0] + p] = np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))
    if n_mmis > 0:
        mpart *= mmis
    # mpart now holds component k's contribution plus the residual.
    mpart += mres
    # Decide which factors get re-normalized, depending on which are fixed.
    if nmf_fix_user_bhe > 0:
        norm_bhe = True
        if nmf_fix_user_rhe == 0:
            norm_lhe = True
            norm_rhe = False
        else:
            norm_lhe = False
            norm_rhe = True
    else:
        norm_bhe = False
        norm_lhe = True
        norm_rhe = True
    if (nmf_fix_user_lhe > 0) & norm_lhe:
        norm = np.linalg.norm(mt[:, k])
        if norm > 0:
            mt[:, k] /= norm
    if (nmf_fix_user_rhe > 0) & norm_rhe:
        norm = np.linalg.norm(mw[:, k])
        if norm > 0:
            mw[:, k] /= norm
    if (nmf_fix_user_bhe > 0) & norm_bhe & (n_blocks > 1):
        norm = np.linalg.norm(mb[:, k])
        if norm > 0:
            mb[:, k] /= norm
    if nmf_fix_user_lhe == 0:
        # Update Mt
        mt[:, k] = 0
        if n_blocks > 1:
            for i_block in range(0, n_blocks):
                mt[:, k] += mb[i_block, k] * mpart[:, id_blockp[i_block]: id_blockp[i_block] + p] @ mw[:, k]
        else:
            mt[:, k] += mpart[:, id_blockp[0]: id_blockp[0] + p] @ mw[:, k]
        if n_mmis > 0:
            denomt[:] = 0
            mw2[:] = mw[:, k] ** 2
            if n_blocks > 1:
                for i_block in range(0, n_blocks):
                    # Broadcast missing cells into Mw to calculate Mw.T * Mw
                    denomt += mb[i_block, k] ** 2 * mmis[:, id_blockp[i_block]: id_blockp[i_block] + p] @ mw2
            else:
                denomt += mmis[:, id_blockp[0]: id_blockp[0] + p] @ mw2
            # Clip the normalized denominator away from zero to avoid blow-ups.
            denomt /= np.max(denomt)
            denomt[denomt < denom_cutoff] = denom_cutoff
            mt[:, k] /= denomt
        # Project onto the non-negative orthant.
        mt[mt[:, k] < 0, k] = 0
        if alpha[0] < 0:
            # Negative alpha: sparsify the left factor.
            if alpha[0] <= -1:
                if (alpha[0] == -1) & (np.max(mt[:, k]) > 0):
                    # Keep only the hhi largest entries (HHI-based hard threshold).
                    t_threshold = mt[:, k]
                    hhi = int(
                        np.round(
                            (np.linalg.norm(t_threshold, ord=1) / (np.linalg.norm(t_threshold, ord=2) + EPSILON)) ** 2,
                            decimals=0,
                        )
                    )
                    t_rank = np.argsort(t_threshold)
                    t_threshold[t_rank[0: n - hhi]] = 0
                else:
                    mt[:, k] = sparse_opt(mt[:, k], -alpha[k] - 1, False)
            else:
                mt[:, k] = sparse_opt(mt[:, k], -alpha[0], False)
        if (ntf_unimodal > 0) & (ntf_left_components > 0):
            # Enforce unimodal distribution
            tmax = np.argmax(mt[:, k])
            for i in range(tmax + 1, n):
                mt[i, k] = min(mt[i - 1, k], mt[i, k])
            for i in range(tmax - 1, -1, -1):
                mt[i, k] = min(mt[i + 1, k], mt[i, k])
        if (ntf_smooth > 0) & (ntf_left_components > 0):
            # Smooth distribution
            a[0] = 0.75 * mt[0, k] + 0.25 * mt[1, k]
            a[n - 1] = 0.25 * mt[n - 2, k] + 0.75 * mt[n - 1, k]
            for i in range(1, n - 1):
                a[i] = 0.25 * mt[i - 1, k] + 0.5 * mt[i, k] + 0.25 * mt[i + 1, k]
            mt[:, k] = a
        if norm_lhe:
            norm = np.linalg.norm(mt[:, k])
            if norm > 0:
                mt[:, k] /= norm
    if nmf_fix_user_rhe == 0:
        # Update Mw
        mw[:, k] = 0
        if n_blocks > 1:
            for i_block in range(0, n_blocks):
                mw[:, k] += mpart[:, id_blockp[i_block]: id_blockp[i_block] + p].T @ mt[:, k] * mb[i_block, k]
        else:
            mw[:, k] += mpart[:, id_blockp[0]: id_blockp[0] + p].T @ mt[:, k]
        if n_mmis > 0:
            denomw[:] = 0
            mt2[:] = mt[:, k] ** 2
            if n_blocks > 1:
                for i_block in range(0, n_blocks):
                    # Broadcast missing cells into Mw to calculate Mt.T * Mt
                    denomw += mb[i_block, k] ** 2 * mmis[:, id_blockp[i_block]: id_blockp[i_block] + p].T @ mt2
            else:
                denomw += mmis[:, id_blockp[0]: id_blockp[0] + p].T @ mt2
            denomw /= np.max(denomw)
            denomw[denomw < denom_cutoff] = denom_cutoff
            mw[:, k] /= denomw
        mw[mw[:, k] < 0, k] = 0
        if alpha[0] > 0:
            # Positive alpha: sparsify the right factor (mirror of the mt case).
            if alpha[0] >= 1:
                if (alpha[0] == 1) & (np.max(mw[:, k]) > 0):
                    w_threshold = mw[:, k]
                    hhi = int(
                        np.round(
                            (np.linalg.norm(w_threshold, ord=1) / (np.linalg.norm(w_threshold, ord=2) + EPSILON)) ** 2,
                            decimals=0,
                        )
                    )
                    w_rank = np.argsort(w_threshold)
                    w_threshold[w_rank[0: p - hhi]] = 0
                else:
                    mw[:, k] = sparse_opt(mw[:, k], alpha[k] - 1, False)
            else:
                mw[:, k] = sparse_opt(mw[:, k], alpha[0], False)
        if (ntf_unimodal > 0) & (ntf_right_components > 0):
            # Enforce unimodal distribution
            wmax = np.argmax(mw[:, k])
            for j in range(wmax + 1, p):
                mw[j, k] = min(mw[j - 1, k], mw[j, k])
            for j in range(wmax - 1, -1, -1):
                mw[j, k] = min(mw[j + 1, k], mw[j, k])
        if (ntf_smooth > 0) & (ntf_right_components > 0):
            # Smooth distribution
            b[0] = 0.75 * mw[0, k] + 0.25 * mw[1, k]
            b[p - 1] = 0.25 * mw[p - 2, k] + 0.75 * mw[p - 1, k]
            for j in range(1, p - 1):
                b[j] = 0.25 * mw[j - 1, k] + 0.5 * mw[j, k] + 0.25 * mw[j + 1, k]
            mw[:, k] = b
        if n_nmf_priors > 0:
            # Mask the right factor with the binarized priors.
            mw[:, k] = mw[:, k] * nmf_priors[:, k]
        if norm_rhe:
            norm = np.linalg.norm(mw[:, k])
            if norm > 0:
                mw[:, k] /= norm
    if nmf_fix_user_bhe == 0:
        # Update Mb
        mb[:, k] = 0
        mt_mw[:] = np.reshape((np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))), nxp)
        for i_block in range(0, n_blocks):
            mb[i_block, k] = np.reshape(mpart[:, id_blockp[i_block]: id_blockp[i_block] + p], nxp).T @ mt_mw
        if n_mmis > 0:
            mt_mw[:] = mt_mw[:] ** 2
            for i_block in range(0, n_blocks):
                # Broadcast missing cells into Mb to calculate Mb.T * Mb
                denom_block[i_block, k] = (
                    np.reshape(mmis[:, id_blockp[i_block]: id_blockp[i_block] + p], (1, nxp)) @ mt_mw
                )
            maxdenom_block = np.max(denom_block[:, k])
            denom_block[denom_block[:, k] < denom_cutoff * maxdenom_block] = denom_cutoff * maxdenom_block
            mb[:, k] /= denom_block[:, k]
        mb[mb[:, k] < 0, k] = 0
        if (ntf_unimodal > 0) & (ntf_block_components > 0):
            # Enforce unimodal distribution
            bmax = np.argmax(mb[:, k])
            for i_block in range(bmax + 1, n_blocks):
                mb[i_block, k] = min(mb[i_block - 1, k], mb[i_block, k])
            for i_block in range(bmax - 1, -1, -1):
                mb[i_block, k] = min(mb[i_block + 1, k], mb[i_block, k])
        if (ntf_smooth > 0) & (ntf_block_components > 0):
            # Smooth distribution
            c[0] = 0.75 * mb[0, k] + 0.25 * mb[1, k]
            c[n_blocks - 1] = 0.25 * mb[n_blocks - 2, k] + 0.75 * mb[n_blocks - 1, k]
            for i_block in range(1, n_blocks - 1):
                c[i_block] = 0.25 * mb[i_block - 1, k] + 0.5 * mb[i_block, k] + 0.25 * mb[i_block + 1, k]
            mb[:, k] = c
        if norm_bhe:
            norm = np.linalg.norm(mb[:, k])
            if norm > 0:
                mb[:, k] /= norm
    # Update residual tensor
    mfit[:, :] = 0
    if n_blocks > 1:
        for i_block in range(0, n_blocks):
            mfit[:, id_blockp[i_block]: id_blockp[i_block] + p] += (
                mb[i_block, k] * np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))
            )
    else:
        mfit[:, id_blockp[0]: id_blockp[0] + p] += np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))
    if n_mmis > 0:
        mres[:, :] = (mpart - mfit) * mmis
    else:
        mres[:, :] = mpart - mfit
    # Hand every (mutated) argument back in parameter order for tuple-unpacking.
    return (
        n_blocks,
        mpart,
        id_blockp,
        p,
        mb,
        k,
        mt,
        n,
        mw,
        n_mmis,
        mmis,
        mres,
        nmf_fix_user_lhe,
        denomt,
        mw2,
        denom_cutoff,
        alpha,
        ntf_unimodal,
        ntf_left_components,
        ntf_smooth,
        a,
        nmf_fix_user_rhe,
        denomw,
        mt2,
        ntf_right_components,
        b,
        nmf_fix_user_bhe,
        mt_mw,
        nxp,
        denom_block,
        ntf_block_components,
        c,
        mfit,
        nmf_priors,
    )
| true | true |
f71c1d13e2b4281f5e462dfbd3127b8ee3404a3a | 6,928 | py | Python | actions.py | prateekralhan/Chatbot-Development-using-RASA-Framework-and-Zomato-API---PGD-IIITB-Assignment | 5e33586df2bbe146d950f33e018e9e990031e347 | [
"Apache-2.0"
] | 1 | 2020-05-21T10:20:03.000Z | 2020-05-21T10:20:03.000Z | actions.py | prateekralhan/Chatbot-Development-using-RASA-Framework-and-Zomato-API---PGD-IIITB-Assignment | 5e33586df2bbe146d950f33e018e9e990031e347 | [
"Apache-2.0"
] | null | null | null | actions.py | prateekralhan/Chatbot-Development-using-RASA-Framework-and-Zomato-API---PGD-IIITB-Assignment | 5e33586df2bbe146d950f33e018e9e990031e347 | [
"Apache-2.0"
] | 1 | 2021-12-06T03:10:37.000Z | 2021-12-06T03:10:37.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# importing the necessary libraries
# from rasa_core.actions.action import Action
from rasa_sdk import Action
#from rasa_sdk.forms import ( BooleanFormField, EntityFormField, FormAction, FreeTextFormField )
from rasa_sdk.events import SlotSet
# from rasa_core.events import SlotSet
import zomatopy
import json
# Zomato API configuration: user_key must be replaced with a valid API key.
zomato_config={ "user_key":"<Enter your Zomato API key here >"}
# Holds the formatted text of (up to 10) results from the most recent
# restaurant search, so ActionSendEmail can mail them later.
result_of_last_query = ""
class ActionSearchRestaurants(Action):
    """Search Zomato for restaurants matching the location, cuisine and budget slots."""

    def name(self):
        return 'action_restaurant'

    def filterRestaurantBasedOnBudget(self, userbudget, allRestaurants):
        """Filter Zomato results by the user's budget (average cost for two).

        `userbudget` is either a band index ("1"/"2"/"3") or a raw amount;
        anything non-numeric falls back to the mid band (300-699).
        Side effect: rewrites the module-level `result_of_last_query` with up
        to 10 formatted results so that ActionSendEmail can mail them later.
        Returns a display string with up to 5 results.
        """
        rangeMin = 0
        rangeMax = 100000
        # Map the budget input onto a [rangeMin, rangeMax] cost-for-two window.
        if userbudget.isdigit():
            price = int(userbudget)
            if price == 1:
                rangeMax = 299
            elif price == 2:
                rangeMin = 300
                rangeMax = 699
            elif price == 3:
                rangeMin = 700
            elif price < 300:
                rangeMax = 299
            elif price < 700 and price >= 300:
                rangeMin = 300
                rangeMax = 699
            else:
                rangeMin = 700
        else:
            # default budget
            rangeMin = 300
            rangeMax = 699
        index = 0
        count = 0
        response = ""
        global result_of_last_query
        result_of_last_query = ""
        for restaurant in allRestaurants:
            # BUGFIX: the original used `++count`, which is a no-op in Python
            # (double unary plus), so the reported result count was always 0.
            count += 1
            res = "[" + restaurant['restaurant']['user_rating']['aggregate_rating'] + "/5] " + restaurant['restaurant']['name'] + " in " + restaurant['restaurant']['location']['address']
            avg_c_2 = restaurant['restaurant']['average_cost_for_two']
            if avg_c_2 <= rangeMax and avg_c_2 >= rangeMin:
                res = restaurant['restaurant']['currency'] + str(restaurant['restaurant']['average_cost_for_two']) + " " + res + "\n"
                if index < 5:
                    response = response + res
                if index < 10:
                    result_of_last_query = result_of_last_query + res
                index = index + 1
        # Shape the final message depending on how many matches were found.
        if index == 0:
            response = "Oops! no restaurant found for this query. " + " search results = " + str(count)
        elif index < 5:
            response = response + "\n \nFor more results please search in higher budget range...\n \n"
        elif index < 10:
            result_of_last_query = result_of_last_query + "\n \nFor more results please search in higher budget range...\n \n"
        return response

    def run(self, dispatcher, tracker, domain):
        """Look up restaurants through the Zomato API and utter the filtered list."""
        loc = tracker.get_slot('location')
        cuisine = tracker.get_slot('cuisine')
        budget = tracker.get_slot('budget')
        zomato = zomatopy.initialize_app(zomato_config)
        location_detail = zomato.get_location(loc, 1)
        d1 = json.loads(location_detail)
        lat = d1["location_suggestions"][0]["latitude"]
        lon = d1["location_suggestions"][0]["longitude"]
        # Cuisines and their respective Zomato cuisine IDs.
        cuisines_dict = {
            'american': 1,
            'mexican': 73,
            'italian': 55,
            'chinese': 25,
            'north indian': 50,
            'south indian': 85
        }
        results = zomato.restaurant_search("", lat, lon, str(cuisines_dict.get(cuisine)), 50)
        d = json.loads(results)
        response = ""
        if d['results_found'] == 0:
            response = "Sorry, we didn't find any results for this query."
        else:
            response = self.filterRestaurantBasedOnBudget(budget, d['restaurants'])
        dispatcher.utter_message(str(response))
        return [SlotSet('location', loc)]
# list of TIER1 and TIER2 cities
t1_t2_cities = ["Ahmedabad", "Bangalore", "Chennai", "Delhi", "Hyderabad", "Kolkata", "Mumbai", "Pune",
                "Agra", "Ajmer", "Aligarh", "Amravati", "Amritsar", "Asansol", "Aurangabad",
                "Bareilly", "Belgaum", "Bhavnagar", "Bhiwandi", "Bhopal", "Bhubaneswar",
                "Bikaner", "Bokaro Steel City", "Chandigarh", "Coimbatore", "Cuttack", "Dehradun",
                "Dhanbad", "Bhilai", "Durgapur", "Erode", "Faridabad", "Firozabad", "Ghaziabad",
                "Gorakhpur", "Gulbarga", "Guntur", "Gurgaon", "Guwahati", "Hamirpur",
                "Gwalior", "Hubli-Dharwad", "Indore", "Jabalpur", "Jaipur", "Jalandhar", "Jammu", "Jamnagar", "Jamshedpur", "Jhansi", "Jodhpur",
                "Kannur", "Kanpur", "Kakinada", "Kochi", "Kottayam", "Kolhapur", "Kollam", "Kota", "Kozhikode", "Kurnool", "Lucknow", "Ludhiana",
                "Madurai", "Malappuram", "Mathura", "Goa", "Mangalore", "Meerut",
                # BUGFIX: "Purulia" and "Prayagraj" were fused into one entry
                # ("Purulia Prayagraj"), so neither city could ever match.
                "Moradabad", "Mysore", "Nagpur", "Nanded", "Nashik", "Nellore", "Noida", "Patna", "Pondicherry", "Purulia", "Prayagraj", "Raipur", "Rajkot",
                # BUGFIX: a missing comma after "Shimla" made Python concatenate
                # it with "Solapur" into the bogus entry "ShimlaSolapur".
                "Rajahmundry", "Ranchi", "Rourkela", "Salem", "Sangli", "Siliguri", "Shimla",
                "Solapur", "Srinagar", "Surat", "Thiruvananthapuram", "Thrissur", "Tiruchirappalli", "Tiruppur",
                "Ujjain", "Bijapur", "Vadodara", "Varanasi",
                "Vasai-Virar City", "Vijayawada", "Visakhapatnam", "Vellore", "Warangal"]
t1_t2_cities_list = [x.lower() for x in t1_t2_cities]
# Check if the location exists. using zomato api.if found then save it, else utter not found.
class ActionValidateLocation(Action):
    """Validate the `location` slot: accept known tier-1/2 cities, otherwise ask Zomato."""

    def name(self):
        return 'action_check_location'

    def run(self, dispatcher, tracker, domain):
        """Set the `location_match` slot to "one" if the city is known, "zero" otherwise."""
        loc = tracker.get_slot('location')
        city = str(loc)
        if city.lower() in t1_t2_cities_list:
            return [SlotSet('location_match', "one")]
        else:
            zomato = zomatopy.initialize_app(zomato_config)
            try:
                # get_city_ID raises when Zomato does not know the city.
                zomato.get_city_ID(city)
                return [SlotSet('location_match', "one")]
            except Exception:
                # BUGFIX: narrowed from a bare `except:` so that SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                return [SlotSet('location_match', "zero")]
# Send email the list of 10 restaurants
class ActionSendEmail(Action):
    """E-mail the (up to 10) restaurants collected by the last search to the user."""

    def name(self):
        return 'action_send_email'

    def run(self, dispatcher, tracker, domain):
        """Send `result_of_last_query` to the e-mail slot's address, then reset all slots."""
        email = tracker.get_slot('email')
        # Slack wraps addresses as "<mailto:a@b|a@b>"; keep the part after "|".
        if len(email.split("|")) == 2:
            email = email.split("|")[1]
        import smtplib
        s = smtplib.SMTP('smtp.gmail.com', 587)
        s.starttls()
        # SECURITY: credentials should come from configuration or environment
        # variables, not be hard-coded in source.
        s.login("<Enter login E-mail ID here>", "<Enter the password here>")
        SUBJECT = "Foodbot - Be Foodoholic in Nature !!"
        TEXT = "Hi Foodoholic :) !!\nThe details of all the restaurants you inquried \n \n"
        message = 'Subject: {}\n\n{}'.format(SUBJECT, TEXT)
        global result_of_last_query
        message = message + result_of_last_query
        try:
            s.sendmail("<Enter login E-mail ID here>", str(email), message)
        except Exception:
            # Preserve the original best-effort behavior: report the address
            # back to the user instead of crashing the conversation.
            dispatcher.utter_message(email)
        finally:
            # BUGFIX: the original only called quit() on success, leaking the
            # SMTP connection whenever sendmail raised.
            s.quit()
        result_of_last_query = ""
        return [AllSlotsReset()]
from rasa_sdk.events import AllSlotsReset
from rasa_sdk.events import Restarted
class ActionRestarted(Action):
    """Restart the whole conversation via the Restarted tracker event."""

    def name(self):
        return 'action_restart'

    def run(self, dispatcher, tracker, domain):
        events = [Restarted()]
        return events
class ActionSlotReset(Action):
    """Clear every slot without restarting the conversation."""

    def name(self):
        return 'action_slot_reset'

    def run(self, dispatcher, tracker, domain):
        events = [AllSlotsReset()]
        return events
| 32.679245 | 177 | 0.705831 | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from rasa_sdk import Action
from rasa_sdk.events import SlotSet
import zomatopy
import json
# Zomato API configuration: user_key must be replaced with a valid API key.
zomato_config={ "user_key":"<Enter your Zomato API key here >"}
# Holds the formatted text of (up to 10) results from the most recent search.
result_of_last_query = ""
class ActionSearchRestaurants(Action):
    """Search Zomato for restaurants matching the location, cuisine and budget slots."""

    def name(self):
        return 'action_restaurant'

    def filterRestaurantBasedOnBudget(self, userbudget, allRestaurants):
        """Filter Zomato results by the user's budget (average cost for two).

        `userbudget` is either a band index ("1"/"2"/"3") or a raw amount;
        anything non-numeric falls back to the mid band (300-699).
        Side effect: rewrites the module-level `result_of_last_query` with up
        to 10 formatted results so that ActionSendEmail can mail them later.
        Returns a display string with up to 5 results.
        """
        rangeMin = 0
        rangeMax = 100000
        # Map the budget input onto a [rangeMin, rangeMax] cost-for-two window.
        if userbudget.isdigit():
            price = int(userbudget)
            if price == 1:
                rangeMax = 299
            elif price == 2:
                rangeMin = 300
                rangeMax = 699
            elif price == 3:
                rangeMin = 700
            elif price < 300:
                rangeMax = 299
            elif price < 700 and price >= 300:
                rangeMin = 300
                rangeMax = 699
            else:
                rangeMin = 700
        else:
            # default budget
            rangeMin = 300
            rangeMax = 699
        index = 0
        count = 0
        response = ""
        global result_of_last_query
        result_of_last_query = ""
        for restaurant in allRestaurants:
            # BUGFIX: the original used `++count`, which is a no-op in Python
            # (double unary plus), so the reported result count was always 0.
            count += 1
            res = "[" + restaurant['restaurant']['user_rating']['aggregate_rating'] + "/5] " + restaurant['restaurant']['name'] + " in " + restaurant['restaurant']['location']['address']
            avg_c_2 = restaurant['restaurant']['average_cost_for_two']
            if avg_c_2 <= rangeMax and avg_c_2 >= rangeMin:
                res = restaurant['restaurant']['currency'] + str(restaurant['restaurant']['average_cost_for_two']) + " " + res + "\n"
                if index < 5:
                    response = response + res
                if index < 10:
                    result_of_last_query = result_of_last_query + res
                index = index + 1
        # Shape the final message depending on how many matches were found.
        if index == 0:
            response = "Oops! no restaurant found for this query. " + " search results = " + str(count)
        elif index < 5:
            response = response + "\n \nFor more results please search in higher budget range...\n \n"
        elif index < 10:
            result_of_last_query = result_of_last_query + "\n \nFor more results please search in higher budget range...\n \n"
        return response

    def run(self, dispatcher, tracker, domain):
        """Look up restaurants through the Zomato API and utter the filtered list."""
        loc = tracker.get_slot('location')
        cuisine = tracker.get_slot('cuisine')
        budget = tracker.get_slot('budget')
        zomato = zomatopy.initialize_app(zomato_config)
        location_detail = zomato.get_location(loc, 1)
        d1 = json.loads(location_detail)
        lat = d1["location_suggestions"][0]["latitude"]
        lon = d1["location_suggestions"][0]["longitude"]
        # Cuisines and their respective Zomato cuisine IDs.
        cuisines_dict = {
            'american': 1,
            'mexican': 73,
            'italian': 55,
            'chinese': 25,
            'north indian': 50,
            'south indian': 85
        }
        results = zomato.restaurant_search("", lat, lon, str(cuisines_dict.get(cuisine)), 50)
        d = json.loads(results)
        response = ""
        if d['results_found'] == 0:
            response = "Sorry, we didn't find any results for this query."
        else:
            # dispatcher.utter_message(str(d))
            response = self.filterRestaurantBasedOnBudget(budget, d['restaurants'])
        dispatcher.utter_message(str(response))
        return [SlotSet('location', loc)]
# list of TIER1 and TIER2 cities
t1_t2_cities = ["Ahmedabad", "Bangalore", "Chennai", "Delhi", "Hyderabad", "Kolkata", "Mumbai", "Pune",
                "Agra", "Ajmer", "Aligarh", "Amravati", "Amritsar", "Asansol", "Aurangabad",
                "Bareilly", "Belgaum", "Bhavnagar", "Bhiwandi", "Bhopal", "Bhubaneswar",
                "Bikaner", "Bokaro Steel City", "Chandigarh", "Coimbatore", "Cuttack", "Dehradun",
                "Dhanbad", "Bhilai", "Durgapur", "Erode", "Faridabad", "Firozabad", "Ghaziabad",
                "Gorakhpur", "Gulbarga", "Guntur", "Gurgaon", "Guwahati", "Hamirpur",
                "Gwalior", "Hubli-Dharwad", "Indore", "Jabalpur", "Jaipur", "Jalandhar", "Jammu", "Jamnagar", "Jamshedpur", "Jhansi", "Jodhpur",
                "Kannur", "Kanpur", "Kakinada", "Kochi", "Kottayam", "Kolhapur", "Kollam", "Kota", "Kozhikode", "Kurnool", "Lucknow", "Ludhiana",
                "Madurai", "Malappuram", "Mathura", "Goa", "Mangalore", "Meerut",
                # BUGFIX: "Purulia" and "Prayagraj" were fused into one entry
                # ("Purulia Prayagraj"), so neither city could ever match.
                "Moradabad", "Mysore", "Nagpur", "Nanded", "Nashik", "Nellore", "Noida", "Patna", "Pondicherry", "Purulia", "Prayagraj", "Raipur", "Rajkot",
                # BUGFIX: a missing comma after "Shimla" made Python concatenate
                # it with "Solapur" into the bogus entry "ShimlaSolapur".
                "Rajahmundry", "Ranchi", "Rourkela", "Salem", "Sangli", "Siliguri", "Shimla",
                "Solapur", "Srinagar", "Surat", "Thiruvananthapuram", "Thrissur", "Tiruchirappalli", "Tiruppur",
                "Ujjain", "Bijapur", "Vadodara", "Varanasi",
                "Vasai-Virar City", "Vijayawada", "Visakhapatnam", "Vellore", "Warangal"]
t1_t2_cities_list = [x.lower() for x in t1_t2_cities]
# Check if the location exists. using zomato api.if found then save it, else utter not found.
class ActionValidateLocation(Action):
    """Validate the `location` slot: accept known tier-1/2 cities, otherwise ask Zomato."""

    def name(self):
        return 'action_check_location'

    def run(self, dispatcher, tracker, domain):
        """Set the `location_match` slot to "one" if the city is known, "zero" otherwise."""
        loc = tracker.get_slot('location')
        city = str(loc)
        if city.lower() in t1_t2_cities_list:
            return [SlotSet('location_match', "one")]
        else:
            zomato = zomatopy.initialize_app(zomato_config)
            try:
                # get_city_ID raises when Zomato does not know the city.
                zomato.get_city_ID(city)
                return [SlotSet('location_match', "one")]
            except Exception:
                # BUGFIX: narrowed from a bare `except:` so that SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                return [SlotSet('location_match', "zero")]
# Send email the list of 10 restaurants
class ActionSendEmail(Action):
    """E-mail the (up to 10) restaurants collected by the last search to the user."""

    def name(self):
        return 'action_send_email'

    def run(self, dispatcher, tracker, domain):
        """Send `result_of_last_query` to the e-mail slot's address, then reset all slots."""
        email = tracker.get_slot('email')
        # Slack wraps addresses as "<mailto:a@b|a@b>"; keep the part after "|".
        if len(email.split("|")) == 2:
            email = email.split("|")[1]
        import smtplib
        s = smtplib.SMTP('smtp.gmail.com', 587)
        s.starttls()
        # SECURITY: credentials should come from configuration or environment
        # variables, not be hard-coded in source.
        s.login("<Enter login E-mail ID here>", "<Enter the password here>")
        SUBJECT = "Foodbot - Be Foodoholic in Nature !!"
        TEXT = "Hi Foodoholic :) !!\nThe details of all the restaurants you inquried \n \n"
        message = 'Subject: {}\n\n{}'.format(SUBJECT, TEXT)
        global result_of_last_query
        message = message + result_of_last_query
        try:
            s.sendmail("<Enter login E-mail ID here>", str(email), message)
        except Exception:
            # Preserve the original best-effort behavior: report the address
            # back to the user instead of crashing the conversation.
            dispatcher.utter_message(email)
        finally:
            # BUGFIX: the original only called quit() on success, leaking the
            # SMTP connection whenever sendmail raised.
            s.quit()
        result_of_last_query = ""
        return [AllSlotsReset()]
from rasa_sdk.events import AllSlotsReset
from rasa_sdk.events import Restarted
class ActionRestarted(Action):
    """Restart the whole conversation via the Restarted tracker event."""

    def name(self):
        return 'action_restart'

    def run(self, dispatcher, tracker, domain):
        events = [Restarted()]
        return events
class ActionSlotReset(Action):
    """Clear every slot without restarting the conversation."""

    def name(self):
        return 'action_slot_reset'

    def run(self, dispatcher, tracker, domain):
        events = [AllSlotsReset()]
        return events
| true | true |
f71c1d88fc7cda064575b3b49230553d31ba1b56 | 9,193 | py | Python | services/ui/paths/label.py | sbworth/getnoc | a9a5647df31822062db3db7afe7ae1c005d166f7 | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | services/ui/paths/label.py | sbworth/getnoc | a9a5647df31822062db3db7afe7ae1c005d166f7 | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | services/ui/paths/label.py | sbworth/getnoc | a9a5647df31822062db3db7afe7ae1c005d166f7 | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# Label REST API
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from fastapi import APIRouter
# NOC modules
from noc.main.models.label import Label
from ..models.label import DefaultLabelItem, FormLabelItem, LabelLabelItem
from ..utils.ref import get_reference
from ..utils.rest.document import DocumentResourceAPI
from ..utils.rest.op import FilterExact, FuncFilter, FilterBool
router = APIRouter()
class LabelAPI(DocumentResourceAPI[Label]):
    # REST resource for main.Label documents, mounted under this URL prefix.
    prefix = "/api/ui/label"
    model = Label
    # Query-string filters accepted by the list endpoint: free-text search on
    # the name, lookups by name, and one boolean filter per enable_* scope flag.
    list_ops = [
        FuncFilter("query", function=lambda qs, values: qs.filter(name__icontains=values[0])),
        FuncFilter("id", function=lambda qs, values: qs.filter(name__in=values)),
        FilterExact("name"),
        FilterBool("enable_agent"),
        FilterBool("enable_service"),
        FilterBool("enable_serviceprofile"),
        FilterBool("enable_managedobject"),
        FilterBool("enable_managedobjectprofile"),
        FilterBool("enable_administrativedomain"),
        FilterBool("enable_authprofile"),
        FilterBool("enable_commandsnippet"),
        FilterBool("enable_allocationgroup"),
        FilterBool("enable_networksegment"),
        FilterBool("enable_object"),
        FilterBool("enable_objectmodel"),
        FilterBool("enable_platform"),
        FilterBool("enable_resourcegroup"),
        FilterBool("enable_sensor"),
        FilterBool("enable_sensorprofile"),
        FilterBool("enable_subscriber"),
        FilterBool("enable_subscriberprofile"),
        FilterBool("enable_supplier"),
        FilterBool("enable_supplierprofile"),
        FilterBool("enable_dnszone"),
        FilterBool("enable_dnszonerecord"),
        FilterBool("enable_division"),
        FilterBool("enable_kbentry"),
        FilterBool("enable_ipaddress"),
        FilterBool("enable_addressprofile"),
        FilterBool("enable_ipaddressrange"),
        FilterBool("enable_ipprefix"),
        FilterBool("enable_prefixprofile"),
        FilterBool("enable_vrf"),
        FilterBool("enable_vrfgroup"),
        FilterBool("enable_asn"),
        FilterBool("enable_peer"),
        FilterBool("enable_vc"),
        FilterBool("enable_vlan"),
        FilterBool("enable_vlanprofile"),
        FilterBool("enable_vpn"),
        FilterBool("enable_slaprobe"),
        FilterBool("enable_slaprofile"),
        FilterBool("enable_alarm"),
        FilterBool("enable_vpnprofile"),
    ]
@classmethod
def item_to_label(cls, item: Label) -> LabelLabelItem:
return LabelLabelItem(
id=str(item.name),
label=str(item.name),
is_protected=item.is_protected,
scope=item.name.rsplit("::", 1)[0] if item.is_scoped else "",
value=item.name.split("::")[-1],
bg_color1=f"#{item.bg_color1:06x}",
fg_color1=f"#{item.fg_color1:06x}",
bg_color2=f"#{item.bg_color2:06x}",
fg_color2=f"#{item.fg_color2:06x}",
)
    @classmethod
    def item_to_default(cls, item: Label) -> DefaultLabelItem:
        """Map a Label document onto the full DTO used by the default/detail view.

        Colors stay as raw ints here (unlike item_to_label, which formats them
        as hex strings), and every enable_* scope flag is coerced to bool.
        """
        return DefaultLabelItem(
            id=str(item.id),
            name=str(item.name),
            description=item.description,
            bg_color1=item.bg_color1,
            fg_color1=item.fg_color1,
            bg_color2=item.bg_color2,
            fg_color2=item.fg_color2,
            is_protected=item.is_protected,
            is_autogenerated=item.is_autogenerated,
            # Label scope
            enable_agent=bool(item.enable_agent),
            enable_service=bool(item.enable_service),
            enable_serviceprofile=bool(item.enable_serviceprofile),
            enable_managedobject=bool(item.enable_managedobject),
            enable_managedobjectprofile=bool(item.enable_managedobjectprofile),
            enable_administrativedomain=bool(item.enable_administrativedomain),
            enable_authprofile=bool(item.enable_authprofile),
            enable_commandsnippet=bool(item.enable_commandsnippet),
            enable_allocationgroup=bool(item.enable_allocationgroup),
            enable_networksegment=bool(item.enable_networksegment),
            enable_object=bool(item.enable_object),
            enable_objectmodel=bool(item.enable_objectmodel),
            enable_platform=bool(item.enable_platform),
            enable_resourcegroup=bool(item.enable_resourcegroup),
            enable_sensor=bool(item.enable_sensor),
            enable_sensorprofile=bool(item.enable_sensorprofile),
            enable_subscriber=bool(item.enable_subscriber),
            enable_subscriberprofile=bool(item.enable_subscriberprofile),
            enable_supplier=bool(item.enable_supplier),
            enable_supplierprofile=bool(item.enable_supplierprofile),
            enable_dnszone=bool(item.enable_dnszone),
            enable_dnszonerecord=bool(item.enable_dnszonerecord),
            enable_division=bool(item.enable_division),
            enable_kbentry=bool(item.enable_kbentry),
            enable_ipaddress=bool(item.enable_ipaddress),
            enable_addressprofile=bool(item.enable_addressprofile),
            enable_ipaddressrange=bool(item.enable_ipaddressrange),
            enable_ipprefix=bool(item.enable_ipprefix),
            enable_prefixprofile=bool(item.enable_prefixprofile),
            enable_vrf=bool(item.enable_vrf),
            enable_vrfgroup=bool(item.enable_vrfgroup),
            enable_asn=bool(item.enable_asn),
            # NOTE(review): enable_assetpeer has no matching FilterBool in
            # list_ops — presumably intentional, but worth confirming.
            enable_assetpeer=bool(item.enable_assetpeer),
            enable_peer=bool(item.enable_peer),
            enable_vc=bool(item.enable_vc),
            enable_vlan=bool(item.enable_vlan),
            enable_vlanprofile=bool(item.enable_vlanprofile),
            enable_vpn=bool(item.enable_vpn),
            enable_vpnprofile=bool(item.enable_vpnprofile),
            enable_slaprobe=bool(item.enable_slaprobe),
            enable_slaprofile=bool(item.enable_slaprofile),
            enable_alarm=bool(item.enable_alarm),
            expose_metric=bool(item.expose_metric),
            expose_datastream=bool(item.expose_datastream),
            remote_system=get_reference(item.remote_system),
            remote_id=item.remote_id,
        )
    @classmethod
    def item_to_form(cls, item: Label) -> FormLabelItem:
        """Convert a Label document to the item used to pre-fill the edit form.

        Unlike the default item, flags are passed through unconverted and no
        id/remote-system fields are included.
        """
        return FormLabelItem(
            name=item.name,
            description=item.description,
            bg_color1=item.bg_color1,
            fg_color1=item.fg_color1,
            bg_color2=item.bg_color2,
            fg_color2=item.fg_color2,
            is_protected=item.is_protected,
            enable_agent=item.enable_agent,
            enable_service=item.enable_service,
            enable_serviceprofile=item.enable_serviceprofile,
            enable_managedobject=item.enable_managedobject,
            enable_managedobjectprofile=item.enable_managedobjectprofile,
            enable_administrativedomain=item.enable_administrativedomain,
            enable_authprofile=item.enable_authprofile,
            enable_commandsnippet=item.enable_commandsnippet,
            enable_allocationgroup=item.enable_allocationgroup,
            enable_networksegment=item.enable_networksegment,
            enable_object=item.enable_object,
            enable_objectmodel=item.enable_objectmodel,
            enable_platform=item.enable_platform,
            enable_resourcegroup=item.enable_resourcegroup,
            enable_sensor=item.enable_sensor,
            enable_sensorprofile=item.enable_sensorprofile,
            enable_subscriber=item.enable_subscriber,
            enable_subscriberprofile=item.enable_subscriberprofile,
            enable_supplier=item.enable_supplier,
            enable_supplierprofile=item.enable_supplierprofile,
            enable_dnszone=item.enable_dnszone,
            enable_dnszonerecord=item.enable_dnszonerecord,
            enable_division=item.enable_division,
            enable_kbentry=item.enable_kbentry,
            enable_ipaddress=item.enable_ipaddress,
            enable_addressprofile=item.enable_addressprofile,
            enable_ipaddressrange=item.enable_ipaddressrange,
            enable_ipprefix=item.enable_ipprefix,
            enable_prefixprofile=item.enable_prefixprofile,
            enable_vrf=item.enable_vrf,
            enable_vrfgroup=item.enable_vrfgroup,
            enable_asn=item.enable_asn,
            enable_assetpeer=item.enable_assetpeer,
            enable_peer=item.enable_peer,
            enable_vc=item.enable_vc,
            enable_vlan=item.enable_vlan,
            enable_vlanprofile=item.enable_vlanprofile,
            enable_vpn=item.enable_vpn,
            enable_vpnprofile=item.enable_vpnprofile,
            enable_slaprobe=item.enable_slaprobe,
            enable_slaprofile=item.enable_slaprofile,
            enable_alarm=item.enable_alarm,
            expose_metric=item.expose_metric,
            expose_datastream=item.expose_datastream,
        )
# Install endpoints
LabelAPI(router)
| 44.843902 | 94 | 0.657892 |
from fastapi import APIRouter
from noc.main.models.label import Label
from ..models.label import DefaultLabelItem, FormLabelItem, LabelLabelItem
from ..utils.ref import get_reference
from ..utils.rest.document import DocumentResourceAPI
from ..utils.rest.op import FilterExact, FuncFilter, FilterBool
router = APIRouter()
class LabelAPI(DocumentResourceAPI[Label]):
    """REST resource for Label documents, mounted under ``/api/ui/label``.

    Declares the query-string filters available on the list endpoint and the
    converters from a Label document to the various response item models.
    """

    # URL prefix for every endpoint of this resource
    prefix = "/api/ui/label"
    # Document model served by this API
    model = Label
    # Query-string operations accepted by the list endpoint
    list_ops = [
        FuncFilter("query", function=lambda qs, values: qs.filter(name__icontains=values[0])),
        FuncFilter("id", function=lambda qs, values: qs.filter(name__in=values)),
        FilterExact("name"),
        FilterBool("enable_agent"),
        FilterBool("enable_service"),
        FilterBool("enable_serviceprofile"),
        FilterBool("enable_managedobject"),
        FilterBool("enable_managedobjectprofile"),
        FilterBool("enable_administrativedomain"),
        FilterBool("enable_authprofile"),
        FilterBool("enable_commandsnippet"),
        FilterBool("enable_allocationgroup"),
        FilterBool("enable_networksegment"),
        FilterBool("enable_object"),
        FilterBool("enable_objectmodel"),
        FilterBool("enable_platform"),
        FilterBool("enable_resourcegroup"),
        FilterBool("enable_sensor"),
        FilterBool("enable_sensorprofile"),
        FilterBool("enable_subscriber"),
        FilterBool("enable_subscriberprofile"),
        FilterBool("enable_supplier"),
        FilterBool("enable_supplierprofile"),
        FilterBool("enable_dnszone"),
        FilterBool("enable_dnszonerecord"),
        FilterBool("enable_division"),
        FilterBool("enable_kbentry"),
        FilterBool("enable_ipaddress"),
        FilterBool("enable_addressprofile"),
        FilterBool("enable_ipaddressrange"),
        FilterBool("enable_ipprefix"),
        FilterBool("enable_prefixprofile"),
        FilterBool("enable_vrf"),
        FilterBool("enable_vrfgroup"),
        FilterBool("enable_asn"),
        # NOTE(review): no "enable_assetpeer" filter although the item models
        # expose that flag -- possibly intentional, confirm with maintainers
        FilterBool("enable_peer"),
        FilterBool("enable_vc"),
        FilterBool("enable_vlan"),
        FilterBool("enable_vlanprofile"),
        FilterBool("enable_vpn"),
        FilterBool("enable_slaprobe"),
        FilterBool("enable_slaprofile"),
        FilterBool("enable_alarm"),
        FilterBool("enable_vpnprofile"),
    ]

    @classmethod
    def item_to_label(cls, item: Label) -> LabelLabelItem:
        """Convert a Label to the compact item used by label pickers.

        Colors are rendered as ``#RRGGBB`` strings; ``scope``/``value`` are
        derived by splitting the ``::``-separated label name.
        """
        return LabelLabelItem(
            id=str(item.name),
            label=str(item.name),
            is_protected=item.is_protected,
            scope=item.name.rsplit("::", 1)[0] if item.is_scoped else "",
            value=item.name.split("::")[-1],
            bg_color1=f"#{item.bg_color1:06x}",
            fg_color1=f"#{item.fg_color1:06x}",
            bg_color2=f"#{item.bg_color2:06x}",
            fg_color2=f"#{item.fg_color2:06x}",
        )

    @classmethod
    def item_to_default(cls, item: Label) -> DefaultLabelItem:
        """Convert a Label to the full default REST item.

        All enable_*/expose_* flags are coerced to real booleans; the remote
        system is rendered as a reference.
        """
        return DefaultLabelItem(
            id=str(item.id),
            name=str(item.name),
            description=item.description,
            bg_color1=item.bg_color1,
            fg_color1=item.fg_color1,
            bg_color2=item.bg_color2,
            fg_color2=item.fg_color2,
            is_protected=item.is_protected,
            is_autogenerated=item.is_autogenerated,
            enable_agent=bool(item.enable_agent),
            enable_service=bool(item.enable_service),
            enable_serviceprofile=bool(item.enable_serviceprofile),
            enable_managedobject=bool(item.enable_managedobject),
            enable_managedobjectprofile=bool(item.enable_managedobjectprofile),
            enable_administrativedomain=bool(item.enable_administrativedomain),
            enable_authprofile=bool(item.enable_authprofile),
            enable_commandsnippet=bool(item.enable_commandsnippet),
            enable_allocationgroup=bool(item.enable_allocationgroup),
            enable_networksegment=bool(item.enable_networksegment),
            enable_object=bool(item.enable_object),
            enable_objectmodel=bool(item.enable_objectmodel),
            enable_platform=bool(item.enable_platform),
            enable_resourcegroup=bool(item.enable_resourcegroup),
            enable_sensor=bool(item.enable_sensor),
            enable_sensorprofile=bool(item.enable_sensorprofile),
            enable_subscriber=bool(item.enable_subscriber),
            enable_subscriberprofile=bool(item.enable_subscriberprofile),
            enable_supplier=bool(item.enable_supplier),
            enable_supplierprofile=bool(item.enable_supplierprofile),
            enable_dnszone=bool(item.enable_dnszone),
            enable_dnszonerecord=bool(item.enable_dnszonerecord),
            enable_division=bool(item.enable_division),
            enable_kbentry=bool(item.enable_kbentry),
            enable_ipaddress=bool(item.enable_ipaddress),
            enable_addressprofile=bool(item.enable_addressprofile),
            enable_ipaddressrange=bool(item.enable_ipaddressrange),
            enable_ipprefix=bool(item.enable_ipprefix),
            enable_prefixprofile=bool(item.enable_prefixprofile),
            enable_vrf=bool(item.enable_vrf),
            enable_vrfgroup=bool(item.enable_vrfgroup),
            enable_asn=bool(item.enable_asn),
            enable_assetpeer=bool(item.enable_assetpeer),
            enable_peer=bool(item.enable_peer),
            enable_vc=bool(item.enable_vc),
            enable_vlan=bool(item.enable_vlan),
            enable_vlanprofile=bool(item.enable_vlanprofile),
            enable_vpn=bool(item.enable_vpn),
            enable_vpnprofile=bool(item.enable_vpnprofile),
            enable_slaprobe=bool(item.enable_slaprobe),
            enable_slaprofile=bool(item.enable_slaprofile),
            enable_alarm=bool(item.enable_alarm),
            expose_metric=bool(item.expose_metric),
            expose_datastream=bool(item.expose_datastream),
            remote_system=get_reference(item.remote_system),
            remote_id=item.remote_id,
        )

    @classmethod
    def item_to_form(cls, item: Label) -> FormLabelItem:
        """Convert a Label document to the item used to pre-fill the edit form.

        Unlike the default item, flags are passed through unconverted and no
        id/remote-system fields are included.
        """
        return FormLabelItem(
            name=item.name,
            description=item.description,
            bg_color1=item.bg_color1,
            fg_color1=item.fg_color1,
            bg_color2=item.bg_color2,
            fg_color2=item.fg_color2,
            is_protected=item.is_protected,
            enable_agent=item.enable_agent,
            enable_service=item.enable_service,
            enable_serviceprofile=item.enable_serviceprofile,
            enable_managedobject=item.enable_managedobject,
            enable_managedobjectprofile=item.enable_managedobjectprofile,
            enable_administrativedomain=item.enable_administrativedomain,
            enable_authprofile=item.enable_authprofile,
            enable_commandsnippet=item.enable_commandsnippet,
            enable_allocationgroup=item.enable_allocationgroup,
            enable_networksegment=item.enable_networksegment,
            enable_object=item.enable_object,
            enable_objectmodel=item.enable_objectmodel,
            enable_platform=item.enable_platform,
            enable_resourcegroup=item.enable_resourcegroup,
            enable_sensor=item.enable_sensor,
            enable_sensorprofile=item.enable_sensorprofile,
            enable_subscriber=item.enable_subscriber,
            enable_subscriberprofile=item.enable_subscriberprofile,
            enable_supplier=item.enable_supplier,
            enable_supplierprofile=item.enable_supplierprofile,
            enable_dnszone=item.enable_dnszone,
            enable_dnszonerecord=item.enable_dnszonerecord,
            enable_division=item.enable_division,
            enable_kbentry=item.enable_kbentry,
            enable_ipaddress=item.enable_ipaddress,
            enable_addressprofile=item.enable_addressprofile,
            enable_ipaddressrange=item.enable_ipaddressrange,
            enable_ipprefix=item.enable_ipprefix,
            enable_prefixprofile=item.enable_prefixprofile,
            enable_vrf=item.enable_vrf,
            enable_vrfgroup=item.enable_vrfgroup,
            enable_asn=item.enable_asn,
            enable_assetpeer=item.enable_assetpeer,
            enable_peer=item.enable_peer,
            enable_vc=item.enable_vc,
            enable_vlan=item.enable_vlan,
            enable_vlanprofile=item.enable_vlanprofile,
            enable_vpn=item.enable_vpn,
            enable_vpnprofile=item.enable_vpnprofile,
            enable_slaprobe=item.enable_slaprobe,
            enable_slaprofile=item.enable_slaprofile,
            enable_alarm=item.enable_alarm,
            expose_metric=item.expose_metric,
            expose_datastream=item.expose_datastream,
        )
LabelAPI(router)
| true | true |
f71c1e7d575a6e860d1fb2e06dd08a3844470ff2 | 51,171 | py | Python | dojo/engagement/views.py | pabloalexsandroalmeida83/django-DefectDojo | 094fbbf0d994c2f9b5c7ff071a28d4d5ed1f3a1c | [
"BSD-3-Clause"
] | null | null | null | dojo/engagement/views.py | pabloalexsandroalmeida83/django-DefectDojo | 094fbbf0d994c2f9b5c7ff071a28d4d5ed1f3a1c | [
"BSD-3-Clause"
] | null | null | null | dojo/engagement/views.py | pabloalexsandroalmeida83/django-DefectDojo | 094fbbf0d994c2f9b5c7ff071a28d4d5ed1f3a1c | [
"BSD-3-Clause"
] | null | null | null | import logging
import csv
import re
from openpyxl import Workbook
from openpyxl.styles import Font
from tempfile import NamedTemporaryFile
from datetime import datetime
import operator
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ValidationError, PermissionDenied
from django.urls import reverse
from django.db.models import Q, Count
from django.http import HttpResponseRedirect, StreamingHttpResponse, HttpResponse, FileResponse, QueryDict
from django.shortcuts import render, get_object_or_404
from django.views.decorators.cache import cache_page
from django.utils import timezone
from time import strftime
from django.contrib.admin.utils import NestedObjects
from django.db import DEFAULT_DB_ALIAS
from dojo.engagement.services import close_engagement, reopen_engagement
from dojo.filters import EngagementFilter, EngagementDirectFilter, EngagementTestFilter
from dojo.forms import CheckForm, \
UploadThreatForm, RiskAcceptanceForm, NoteForm, DoneForm, \
EngForm, TestForm, ReplaceRiskAcceptanceProofForm, AddFindingsRiskAcceptanceForm, DeleteEngagementForm, ImportScanForm, \
CredMappingForm, JIRAEngagementForm, JIRAImportScanForm, TypedNoteForm, JIRAProjectForm, \
EditRiskAcceptanceForm
from dojo.models import Finding, Product, Engagement, Test, \
Check_List, Test_Import, Notes, \
Risk_Acceptance, Development_Environment, Endpoint, \
Cred_Mapping, Dojo_User, System_Settings, Note_Type, Product_API_Scan_Configuration
from dojo.tools.factory import get_scan_types_sorted
from dojo.utils import add_error_message_to_response, add_success_message_to_response, get_page_items, add_breadcrumb, handle_uploaded_threat, \
FileIterWrapper, get_cal_event, Product_Tab, is_scan_file_too_large, \
get_system_setting, redirect_to_return_url_or_else, get_return_url
from dojo.notifications.helper import create_notification
from dojo.finding.views import find_available_notetypes
from functools import reduce
from django.db.models.query import Prefetch, QuerySet
import dojo.jira_link.helper as jira_helper
import dojo.risk_acceptance.helper as ra_helper
from dojo.risk_acceptance.helper import prefetch_for_expiration
from dojo.finding.helper import NOT_ACCEPTED_FINDINGS_QUERY
from django.views.decorators.vary import vary_on_cookie
from dojo.authorization.authorization import user_has_permission_or_403
from dojo.authorization.roles_permissions import Permissions
from dojo.product.queries import get_authorized_products
from dojo.engagement.queries import get_authorized_engagements
from dojo.authorization.authorization_decorators import user_is_authorized
from dojo.importers.importer.importer import DojoDefaultImporter as Importer
import dojo.notifications.helper as notifications_helper
from dojo.endpoint.utils import save_endpoints_to_add
logger = logging.getLogger(__name__)
@cache_page(60 * 5)  # cache for 5 minutes
@vary_on_cookie
def engagement_calendar(request):
    """Render the calendar view of engagements, optionally filtered by lead.

    Query param ``lead`` may appear multiple times; '0' means "all leads" and
    '-1' selects engagements without a lead.
    """
    selected_leads = request.GET.getlist('lead', '')
    if 'lead' not in request.GET or '0' in selected_leads:
        engagements = get_authorized_engagements(Permissions.Engagement_View)
    else:
        include_unassigned = '-1' in selected_leads
        if include_unassigned:
            selected_leads.remove('-1')
        condition = Q(lead__in=selected_leads)
        if include_unassigned:
            # same OR combination the old reduce(operator.or_, ...) produced
            condition = Q(lead__isnull=True) | condition
        engagements = get_authorized_engagements(Permissions.Engagement_View).filter(condition)

    engagements = engagements.select_related('lead').prefetch_related('product')

    add_breadcrumb(
        title="Engagement Calendar", top_level=True, request=request)
    return render(
        request, 'dojo/calendar.html', {
            'caltype': 'engagements',
            'leads': request.GET.getlist('lead', ''),
            'engagements': engagements,
            'users': Dojo_User.objects.all()
        })
def get_filtered_engagements(request, view):
    """Return an EngagementDirectFilter over the engagements the user may view.

    ``view`` must be 'all' or 'active'; 'active' restricts to active
    engagements. Related objects needed by the list template are pre-selected.
    """
    if view != 'all' and view != 'active':
        raise ValidationError(f'View {view} is not allowed')

    qs = get_authorized_engagements(Permissions.Engagement_View).order_by('-target_start')
    if view == 'active':
        qs = qs.filter(active=True)

    qs = qs.select_related('product', 'product__prod_type')
    qs = qs.prefetch_related('lead', 'tags', 'product__tags')
    if System_Settings.objects.get().enable_jira:
        qs = qs.prefetch_related(
            'jira_project__jira_instance',
            'product__jira_project_set__jira_instance',
        )

    return EngagementDirectFilter(request.GET, queryset=qs)
def get_test_counts(engagements):
    """Return a mapping of engagement id -> number of tests.

    Done as a separate grouped query, which is much faster than annotating
    the engagements queryset itself.
    """
    counts = {}
    rows = Test.objects.filter(
        engagement__in=engagements
    ).values(
        'engagement'
    ).annotate(
        test_count=Count('engagement')
    )
    for row in rows:
        counts[row['engagement']] = row['test_count']
    return counts
def engagements(request, view):
    """List engagements for the given view ('active' by default, or 'all')."""
    view = view or 'active'

    filtered = get_filtered_engagements(request, view)
    paged_engagements = get_page_items(request, filtered.qs, 25)

    product_names = sorted(
        get_authorized_products(Permissions.Product_View).values_list('name', flat=True))
    engagement_names = sorted(
        get_authorized_engagements(Permissions.Engagement_View).values_list('name', flat=True).distinct())

    add_breadcrumb(
        title=f"{view.capitalize()} Engagements",
        top_level=not len(request.GET),
        request=request)
    return render(
        request, 'dojo/engagement.html', {
            'engagements': paged_engagements,
            'engagement_test_counts': get_test_counts(filtered.qs),
            'filter_form': filtered.form,
            'product_name_words': product_names,
            'engagement_name_words': engagement_names,
            'view': view.capitalize(),
        })
def engagements_all(request):
    """List every product that has at least one engagement, with engagements inlined."""
    products_with_engagements = get_authorized_products(Permissions.Engagement_View)
    products_with_engagements = products_with_engagements.filter(~Q(engagement=None)).distinct()

    # Count tests through a prefetch annotation instead of walking
    # `engagement__test_set`, which would load every test into memory.
    qs = products_with_engagements.prefetch_related(
        Prefetch('engagement_set', queryset=Engagement.objects.all().annotate(test_count=Count('test__id'))),
        'engagement_set__tags',
        'prod_type',
        'engagement_set__lead',
        'tags',
    )
    if System_Settings.objects.get().enable_jira:
        qs = qs.prefetch_related(
            'engagement_set__jira_project__jira_instance',
            'jira_project_set__jira_instance',
        )

    filtered = EngagementFilter(request.GET, queryset=qs)
    paged_products = get_page_items(request, filtered.qs, 25)

    name_words = products_with_engagements.values_list('name', flat=True)
    eng_words = get_authorized_engagements(Permissions.Engagement_View).values_list('name', flat=True).distinct()

    add_breadcrumb(
        title="All Engagements",
        top_level=not len(request.GET),
        request=request)
    return render(
        request, 'dojo/engagements_all.html', {
            'products': paged_products,
            'filter_form': filtered.form,
            'name_words': sorted(set(name_words)),
            'eng_words': sorted(set(eng_words)),
        })
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def edit_engagement(request, eid):
    """Edit an engagement (interactive or CI/CD) and its JIRA project/epic links.

    On a valid POST the engagement is saved, activity is derived from the new
    status, and the JIRA project/epic sub-forms are processed; on success the
    user is redirected to the tests page or back to the engagement view.
    """
    engagement = Engagement.objects.get(pk=eid)
    is_ci_cd = engagement.engagement_type == "CI/CD"
    jira_project_form = None
    jira_epic_form = None
    jira_project = None

    if request.method == 'POST':
        form = EngForm(request.POST, instance=engagement, cicd=is_ci_cd, product=engagement.product, user=request.user)
        jira_project = jira_helper.get_jira_project(engagement, use_inheritance=False)

        if form.is_valid():
            # first save engagement details
            new_status = form.cleaned_data.get('status')
            engagement = form.save(commit=False)
            if new_status in ("Cancelled", "Completed"):
                engagement.active = False
                # fixed: the old call ended with a stray trailing comma which
                # wrapped the (discarded) return value in a 1-tuple
                create_notification(event='close_engagement',
                                    title='Closure of %s' % engagement.name,
                                    description='The engagement "%s" was closed' % (engagement.name),
                                    engagement=engagement, url=reverse('engagement_all_findings', args=(engagement.id, )))
            else:
                engagement.active = True
            engagement.save()
            form.save_m2m()

            messages.add_message(
                request,
                messages.SUCCESS,
                'Engagement updated successfully.',
                extra_tags='alert-success')

            success, jira_project_form = jira_helper.process_jira_project_form(request, instance=jira_project, target='engagement', engagement=engagement, product=engagement.product)
            error = not success

            success, jira_epic_form = jira_helper.process_jira_epic_form(request, engagement=engagement)
            error = error or not success

            if not error:
                if '_Add Tests' in request.POST:
                    return HttpResponseRedirect(
                        reverse('add_tests', args=(engagement.id, )))
                else:
                    return HttpResponseRedirect(
                        reverse('view_engagement', args=(engagement.id, )))
        else:
            logger.debug(form.errors)
    else:
        form = EngForm(initial={'product': engagement.product}, instance=engagement, cicd=is_ci_cd, product=engagement.product, user=request.user)

        jira_epic_form = None
        if get_system_setting('enable_jira'):
            jira_project = jira_helper.get_jira_project(engagement, use_inheritance=False)
            jira_project_form = JIRAProjectForm(instance=jira_project, target='engagement', product=engagement.product)
            logger.debug('showing jira-epic-form')
            jira_epic_form = JIRAEngagementForm(instance=engagement)

    title = 'Edit CI/CD Engagement' if is_ci_cd else 'Edit Interactive Engagement'
    product_tab = Product_Tab(engagement.product.id, title=title, tab="engagements")
    product_tab.setEngagement(engagement)
    return render(request, 'dojo/new_eng.html', {
        'product_tab': product_tab,
        'title': title,
        'form': form,
        'edit': True,
        'jira_epic_form': jira_epic_form,
        'jira_project_form': jira_project_form,
        'engagement': engagement,
    })
@user_is_authorized(Engagement, Permissions.Engagement_Delete, 'eid')
def delete_engagement(request, eid):
    """Confirm and perform deletion of an engagement.

    GET renders a confirmation page listing everything that would be deleted;
    POST (with a matching id) deletes, notifies, and redirects to the product's
    engagement list.
    """
    engagement = get_object_or_404(Engagement, pk=eid)
    product = engagement.product
    form = DeleteEngagementForm(instance=engagement)

    # only act when the hidden confirmation id matches the engagement
    if request.method == 'POST' and 'id' in request.POST and str(engagement.id) == request.POST['id']:
        form = DeleteEngagementForm(request.POST, instance=engagement)
        if form.is_valid():
            product = engagement.product
            engagement.delete()
            messages.add_message(
                request,
                messages.SUCCESS,
                'Engagement and relationships removed.',
                extra_tags='alert-success')
            create_notification(event='other',
                                title='Deletion of %s' % engagement.name,
                                product=product,
                                description='The engagement "%s" was deleted by %s' % (engagement.name, request.user),
                                url=request.build_absolute_uri(reverse('view_engagements', args=(product.id, ))),
                                recipients=[engagement.lead],
                                icon="exclamation-triangle")
            return HttpResponseRedirect(reverse("view_engagements", args=(product.id, )))

    collector = NestedObjects(using=DEFAULT_DB_ALIAS)
    collector.collect([engagement])
    related = collector.nested()

    product_tab = Product_Tab(product.id, title="Delete Engagement", tab="engagements")
    product_tab.setEngagement(engagement)
    return render(request, 'dojo/delete_engagement.html', {
        'product_tab': product_tab,
        'engagement': engagement,
        'form': form,
        'rels': related,
    })
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def view_engagement(request, eid):
    """Show an engagement with its tests, notes, files, risk acceptances and
    JIRA/credential details; POST adds a note.

    Fixes: narrowed the bare ``except:`` around the checklist lookup to
    ``Check_List.DoesNotExist``; removed the unused ``url``/``title`` locals
    and dead ``pass`` statements.
    """
    eng = get_object_or_404(Engagement, id=eid)
    tests = eng.test_set.all().order_by('test_type__name', '-updated')
    default_page_num = 10

    tests_filter = EngagementTestFilter(request.GET, queryset=tests, engagement=eng)
    paged_tests = get_page_items(request, tests_filter.qs, default_page_num)
    # prefetch only after creating the filters to avoid https://code.djangoproject.com/ticket/23771 and https://code.djangoproject.com/ticket/25375
    paged_tests.object_list = prefetch_for_view_tests(paged_tests.object_list)

    prod = eng.product
    risks_accepted = eng.risk_acceptance.all().select_related('owner').annotate(accepted_findings_count=Count('accepted_findings__id'))
    preset_test_type = None
    network = None
    if eng.preset:
        preset_test_type = eng.preset.test_type.all()
        network = eng.preset.network_locations.all()
    system_settings = System_Settings.objects.get()

    jissue = jira_helper.get_jira_issue(eng)
    jira_project = jira_helper.get_jira_project(eng)

    try:
        check = Check_List.objects.get(engagement=eng)
    except Check_List.DoesNotExist:
        # no checklist has been filled in for this engagement yet
        check = None

    notes = eng.notes.all()
    note_type_activation = Note_Type.objects.filter(is_active=True).count()
    if note_type_activation:
        available_note_types = find_available_notetypes(notes)
    form = DoneForm()
    files = eng.files.all()
    if request.method == 'POST':
        user_has_permission_or_403(request.user, eng, Permissions.Note_Add)
        eng.progress = 'check_list'
        eng.save()

        if note_type_activation:
            form = TypedNoteForm(request.POST, available_note_types=available_note_types)
        else:
            form = NoteForm(request.POST)
        if form.is_valid():
            new_note = form.save(commit=False)
            new_note.author = request.user
            new_note.date = timezone.now()
            new_note.save()
            eng.notes.add(new_note)
            # reset to an empty form after a successful save
            if note_type_activation:
                form = TypedNoteForm(available_note_types=available_note_types)
            else:
                form = NoteForm()
            messages.add_message(request,
                                 messages.SUCCESS,
                                 'Note added successfully.',
                                 extra_tags='alert-success')
    else:
        if note_type_activation:
            form = TypedNoteForm(available_note_types=available_note_types)
        else:
            form = NoteForm()

    creds = Cred_Mapping.objects.filter(
        product=eng.product).select_related('cred_id').order_by('cred_id')
    cred_eng = Cred_Mapping.objects.filter(
        engagement=eng.id).select_related('cred_id').order_by('cred_id')

    add_breadcrumb(parent=eng, top_level=False, request=request)

    title = ""
    if eng.engagement_type == "CI/CD":
        title = " CI/CD"
    product_tab = Product_Tab(prod.id, title="View" + title + " Engagement", tab="engagements")
    product_tab.setEngagement(eng)
    return render(
        request, 'dojo/view_eng.html', {
            'eng': eng,
            'product_tab': product_tab,
            'system_settings': system_settings,
            'tests': paged_tests,
            'filter': tests_filter,
            'check': check,
            'threat': eng.tmodel_path,
            'form': form,
            'notes': notes,
            'files': files,
            'risks_accepted': risks_accepted,
            'jissue': jissue,
            'jira_project': jira_project,
            'creds': creds,
            'cred_eng': cred_eng,
            'network': network,
            'preset_test_type': preset_test_type
        })
def prefetch_for_view_tests(tests):
    """Attach related objects and per-test counters needed by the engagement view.

    Annotates finding counts (all / active / active+verified / mitigated /
    duplicates) and the number of reimports. Only applied to a real QuerySet;
    an already-executed list is returned unchanged with a warning.
    """
    prefetched = tests
    if isinstance(tests,
                  QuerySet):  # old code can arrive here with prods being a list because the query was already executed
        prefetched = prefetched.select_related('lead')
        prefetched = prefetched.prefetch_related('tags', 'test_type', 'notes')
        prefetched = prefetched.annotate(count_findings_test_all=Count('finding__id', distinct=True))
        prefetched = prefetched.annotate(count_findings_test_active=Count('finding__id', filter=Q(finding__active=True), distinct=True))
        prefetched = prefetched.annotate(count_findings_test_active_verified=Count('finding__id', filter=Q(finding__active=True) & Q(finding__verified=True), distinct=True))
        prefetched = prefetched.annotate(count_findings_test_mitigated=Count('finding__id', filter=Q(finding__is_mitigated=True), distinct=True))
        prefetched = prefetched.annotate(count_findings_test_dups=Count('finding__id', filter=Q(finding__duplicate=True), distinct=True))
        prefetched = prefetched.annotate(total_reimport_count=Count('test_import__id', filter=Q(test_import__type=Test_Import.REIMPORT_TYPE), distinct=True))
    else:
        # logger.warn() has been deprecated since Python 3.3 -- use warning()
        logger.warning('unable to prefetch because query was already executed')
    return prefetched
@user_is_authorized(Engagement, Permissions.Test_Add, 'eid')
def add_tests(request, eid):
    """Add one or more tests to an engagement, optionally mapping a credential.

    Fixes: narrowed the bare ``except:`` around the lead lookup (it used to
    swallow even KeyboardInterrupt/SystemExit) and removed dead ``pass``
    statements.
    """
    eng = Engagement.objects.get(id=eid)
    cred_form = CredMappingForm()
    cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
        engagement=eng).order_by('cred_id')

    if request.method == 'POST':
        form = TestForm(request.POST, engagement=eng)
        cred_form = CredMappingForm(request.POST)
        cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
            engagement=eng).order_by('cred_id')
        if form.is_valid():
            new_test = form.save(commit=False)
            # set default scan_type as it's used in reimport
            new_test.scan_type = new_test.test_type.name
            new_test.engagement = eng
            try:
                new_test.lead = User.objects.get(id=form['lead'].value())
            except Exception:
                # missing/invalid lead id or user gone: leave the test unassigned
                new_test.lead = None

            # Set status to in progress if a test is added
            if eng.status != "In Progress" and eng.active is True:
                eng.status = "In Progress"
                eng.save()

            new_test.save()

            # Save the credential to the test
            if cred_form.is_valid():
                if cred_form.cleaned_data['cred_user']:
                    # Select the credential mapping object from the selected list and only allow if the credential is associated with the product
                    cred_user = Cred_Mapping.objects.filter(
                        pk=cred_form.cleaned_data['cred_user'].id,
                        engagement=eid).first()

                    new_f = cred_form.save(commit=False)
                    new_f.test = new_test
                    new_f.cred_id = cred_user.cred_id
                    new_f.save()

            messages.add_message(
                request,
                messages.SUCCESS,
                'Test added successfully.',
                extra_tags='alert-success')

            notifications_helper.notify_test_created(new_test)

            if '_Add Another Test' in request.POST:
                return HttpResponseRedirect(
                    reverse('add_tests', args=(eng.id, )))
            elif '_Add Findings' in request.POST:
                return HttpResponseRedirect(
                    reverse('add_findings', args=(new_test.id, )))
            elif '_Finished' in request.POST:
                return HttpResponseRedirect(
                    reverse('view_engagement', args=(eng.id, )))
    else:
        form = TestForm(engagement=eng)
        form.initial['target_start'] = eng.target_start
        form.initial['target_end'] = eng.target_end
        form.initial['lead'] = request.user
    add_breadcrumb(
        parent=eng, title="Add Tests", top_level=False, request=request)
    product_tab = Product_Tab(eng.product.id, title="Add Tests", tab="engagements")
    product_tab.setEngagement(eng)
    return render(request, 'dojo/add_tests.html', {
        'product_tab': product_tab,
        'form': form,
        'cred_form': cred_form,
        'eid': eid,
        'eng': eng
    })
# Cant use the easy decorator because of the potential for either eid/pid being used
def import_scan_results(request, eid=None, pid=None):
    """Import a scan report into an engagement (``eid``) or a product (``pid``).

    With only a product id, an ad-hoc engagement is created on the fly.
    Handles the JIRA push sub-form, credential mapping, oversized-file
    rejection, and delegates the actual parsing to the default Importer.
    """
    engagement = None
    form = ImportScanForm()
    cred_form = CredMappingForm()
    finding_count = 0
    jform = None
    user = request.user

    # resolve the target of the import and check permission on it
    if eid:
        engagement = get_object_or_404(Engagement, id=eid)
        engagement_or_product = engagement
        cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(engagement=engagement).order_by('cred_id')
    elif pid:
        product = get_object_or_404(Product, id=pid)
        engagement_or_product = product
    elif not user.is_staff:
        raise PermissionDenied

    user_has_permission_or_403(user, engagement_or_product, Permissions.Import_Scan_Result)

    push_all_jira_issues = jira_helper.is_push_all_issues(engagement_or_product)

    if request.method == "POST":
        form = ImportScanForm(request.POST, request.FILES)
        cred_form = CredMappingForm(request.POST)
        cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
            engagement=engagement).order_by('cred_id')
        # the JIRA sub-form only exists when a JIRA project is configured
        if jira_helper.get_jira_project(engagement_or_product):
            jform = JIRAImportScanForm(request.POST, push_all=push_all_jira_issues, prefix='jiraform')
            logger.debug('jform valid: %s', jform.is_valid())
            logger.debug('jform errors: %s', jform.errors)

        if form.is_valid() and (jform is None or jform.is_valid()):
            scan = request.FILES.get('file', None)
            scan_date = form.cleaned_data['scan_date']
            minimum_severity = form.cleaned_data['minimum_severity']
            active = form.cleaned_data['active']
            verified = form.cleaned_data['verified']
            scan_type = request.POST['scan_type']
            tags = form.cleaned_data['tags']
            version = form.cleaned_data['version']
            branch_tag = form.cleaned_data.get('branch_tag', None)
            build_id = form.cleaned_data.get('build_id', None)
            commit_hash = form.cleaned_data.get('commit_hash', None)
            api_scan_configuration = form.cleaned_data.get('api_scan_configuration', None)
            service = form.cleaned_data.get('service', None)
            close_old_findings = form.cleaned_data.get('close_old_findings', None)
            # Will save in the provided environment or in the `Development` one if absent
            environment_id = request.POST.get('environment', 'Development')
            environment = Development_Environment.objects.get(id=environment_id)

            group_by = form.cleaned_data.get('group_by', None)

            # TODO move to form validation?
            if scan and is_scan_file_too_large(scan):
                messages.add_message(request,
                                     messages.ERROR,
                                     "Report file is too large. Maximum supported size is {} MB".format(settings.SCAN_FILE_MAX_SIZE),
                                     extra_tags='alert-danger')
                return HttpResponseRedirect(reverse('import_scan_results', args=(engagement,)))

            # Allows for a test to be imported with an engagement created on the fly
            if engagement is None:
                engagement = Engagement()
                engagement.name = "AdHoc Import - " + strftime("%a, %d %b %Y %X", timezone.now().timetuple())
                engagement.threat_model = False
                engagement.api_test = False
                engagement.pen_test = False
                engagement.check_list = False
                engagement.target_start = timezone.now().date()
                engagement.target_end = timezone.now().date()
                engagement.product = product
                engagement.active = True
                engagement.status = 'In Progress'
                engagement.version = version
                engagement.branch_tag = branch_tag
                engagement.build_id = build_id
                engagement.commit_hash = commit_hash
                engagement.save()

            # can't use helper as when push_all_jira_issues is True, the checkbox gets disabled and is always false
            # push_to_jira = jira_helper.is_push_to_jira(new_finding, jform.cleaned_data.get('push_to_jira'))
            push_to_jira = push_all_jira_issues or (jform and jform.cleaned_data.get('push_to_jira'))
            error = False

            # Save newly added endpoints
            added_endpoints = save_endpoints_to_add(form.endpoints_to_add_list, engagement.product)

            try:
                importer = Importer()
                test, finding_count, closed_finding_count = importer.import_scan(scan, scan_type, engagement, user, environment, active=active, verified=verified, tags=tags,
                                                                                 minimum_severity=minimum_severity, endpoints_to_add=list(form.cleaned_data['endpoints']) + added_endpoints, scan_date=scan_date,
                                                                                 version=version, branch_tag=branch_tag, build_id=build_id, commit_hash=commit_hash, push_to_jira=push_to_jira,
                                                                                 close_old_findings=close_old_findings, group_by=group_by, api_scan_configuration=api_scan_configuration, service=service)

                message = f'{scan_type} processed a total of {finding_count} findings'

                if close_old_findings:
                    message = message + ' and closed %d findings' % (closed_finding_count)

                message = message + "."

                add_success_message_to_response(message)

            except Exception as e:
                # import is best-effort: report the failure but keep the page usable
                logger.exception(e)
                add_error_message_to_response('An exception error occurred during the report import:%s' % str(e))
                error = True

            # Save the credential to the test
            if cred_form.is_valid():
                if cred_form.cleaned_data['cred_user']:
                    # Select the credential mapping object from the selected list and only allow if the credential is associated with the product
                    cred_user = Cred_Mapping.objects.filter(
                        pk=cred_form.cleaned_data['cred_user'].id,
                        engagement=eid).first()

                    new_f = cred_form.save(commit=False)
                    new_f.test = test
                    new_f.cred_id = cred_user.cred_id
                    new_f.save()

            if not error:
                return HttpResponseRedirect(
                    reverse('product_open_findings', args=(pid, )))

    # GET (or failed POST): render the import form
    prod_id = None
    custom_breadcrumb = None
    title = "Import Scan Results"
    if engagement:
        prod_id = engagement.product.id
        product_tab = Product_Tab(prod_id, title=title, tab="engagements")
        product_tab.setEngagement(engagement)
    else:
        prod_id = pid
        custom_breadcrumb = {"", ""}
        product_tab = Product_Tab(prod_id, title=title, tab="findings")

    if jira_helper.get_jira_project(engagement_or_product):
        jform = JIRAImportScanForm(push_all=push_all_jira_issues, prefix='jiraform')

    # restrict selectable endpoints/API configs to the current product
    form.fields['endpoints'].queryset = Endpoint.objects.filter(product__id=product_tab.product.id)
    form.fields['api_scan_configuration'].queryset = Product_API_Scan_Configuration.objects.filter(product__id=product_tab.product.id)
    return render(request,
                  'dojo/import_scan_results.html',
                  {'form': form,
                   'product_tab': product_tab,
                   'engagement_or_product': engagement_or_product,
                   'custom_breadcrumb': custom_breadcrumb,
                   'title': title,
                   'cred_form': cred_form,
                   'jform': jform,
                   'scan_types': get_scan_types_sorted(),
                   })
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def close_eng(request, eid):
    """Close an engagement, notify subscribers, and redirect to the product's engagement list.

    :param request: current HttpRequest
    :param eid: primary key of the Engagement to close
    """
    eng = Engagement.objects.get(id=eid)
    close_engagement(eng)
    messages.add_message(
        request,
        messages.SUCCESS,
        'Engagement closed successfully.',
        extra_tags='alert-success')
    # Fixed: the original statement ended with a stray trailing comma, which
    # wrapped the call's (None) result in a throwaway 1-tuple.
    create_notification(event='close_engagement',
                        title='Closure of %s' % eng.name,
                        description='The engagement "%s" was closed' % (eng.name),
                        engagement=eng, url=reverse('engagement_all_findings', args=(eng.id, )))
    return HttpResponseRedirect(reverse("view_engagements", args=(eng.product.id, )))
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def reopen_eng(request, eid):
    """Reopen a previously closed engagement, notify subscribers, and redirect
    back to the product's engagement list.

    :param request: current HttpRequest
    :param eid: primary key of the Engagement to reopen
    """
    eng = Engagement.objects.get(id=eid)
    reopen_engagement(eng)
    messages.add_message(
        request,
        messages.SUCCESS,
        'Engagement reopened successfully.',
        extra_tags='alert-success')
    # Fixed: the original statement ended with a stray trailing comma, which
    # wrapped the call's (None) result in a throwaway 1-tuple.
    create_notification(event='other',
                        title='Reopening of %s' % eng.name,
                        engagement=eng,
                        description='The engagement "%s" was reopened' % (eng.name),
                        url=reverse('view_engagement', args=(eng.id, )))
    return HttpResponseRedirect(reverse("view_engagements", args=(eng.product.id, )))
"""
Greg:
status: in production
method to complete checklists from the engagement view
"""
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def complete_checklist(request, eid):
    """Create or update the engagement's checklist (Check_List).

    GET renders the checklist form pre-populated with the existing checklist
    (if any) and the engagement's findings; POST saves it, reusing the
    existing Check_List row when one already exists.
    """
    eng = get_object_or_404(Engagement, id=eid)
    try:
        checklist = Check_List.objects.get(engagement=eng)
    except Check_List.DoesNotExist:
        # No checklist yet for this engagement; the form will create one.
        # (Narrowed from a bare `except:` which could hide unrelated errors.)
        checklist = None
    add_breadcrumb(
        parent=eng,
        title="Complete checklist",
        top_level=False,
        request=request)
    if request.method == 'POST':
        tests = Test.objects.filter(engagement=eng)
        findings = Finding.objects.filter(test__in=tests).all()
        form = CheckForm(request.POST, instance=checklist, findings=findings)
        if form.is_valid():
            cl = form.save(commit=False)
            try:
                # Reuse the id of an existing checklist so we update in place.
                check_l = Check_List.objects.get(engagement=eng)
                cl.id = check_l.id
                cl.save()
                form.save_m2m()
            except Check_List.DoesNotExist:
                # First checklist for this engagement: attach and create it.
                cl.engagement = eng
                cl.save()
                form.save_m2m()
            messages.add_message(
                request,
                messages.SUCCESS,
                'Checklist saved.',
                extra_tags='alert-success')
            return HttpResponseRedirect(
                reverse('view_engagement', args=(eid, )))
    else:
        tests = Test.objects.filter(engagement=eng)
        findings = Finding.objects.filter(test__in=tests).all()
        form = CheckForm(instance=checklist, findings=findings)
    product_tab = Product_Tab(eng.product.id, title="Checklist", tab="engagements")
    product_tab.setEngagement(eng)
    return render(request, 'dojo/checklist.html', {
        'form': form,
        'product_tab': product_tab,
        'eid': eng.id,
        'findings': findings,
    })
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def add_risk_acceptance(request, eid, fid=None):
    """Create a risk acceptance on an engagement, optionally pre-selecting one finding.

    Requires the product's "full risk acceptance" switch to be enabled,
    otherwise a PermissionDenied is raised.

    :param request: current HttpRequest
    :param eid: primary key of the Engagement
    :param fid: optional Finding id to pre-select in the form
    """
    eng = get_object_or_404(Engagement, id=eid)
    finding = None
    if fid:
        finding = get_object_or_404(Finding, id=fid)
    if not eng.product.enable_full_risk_acceptance:
        raise PermissionDenied()
    if request.method == 'POST':
        form = RiskAcceptanceForm(request.POST, request.FILES)
        if form.is_valid():
            # first capture notes param as it cannot be saved directly as m2m
            notes = None
            if form.cleaned_data['notes']:
                notes = Notes(
                    entry=form.cleaned_data['notes'],
                    author=request.user,
                    date=timezone.now())
                notes.save()
            # remove the notes key so form.save() does not try to persist it
            del form.cleaned_data['notes']
            try:
                # we sometimes see a weird exception here, but are unable to reproduce.
                # we add some logging in case it happens
                risk_acceptance = form.save()
            except Exception as e:
                logger.debug(vars(request.POST))
                logger.error(vars(form))
                logger.exception(e)
                raise
            # attach note to risk acceptance object now in database
            if notes:
                risk_acceptance.notes.add(notes)
            eng.risk_acceptance.add(risk_acceptance)
            findings = form.cleaned_data['accepted_findings']
            # helper marks the findings as accepted and links them to the RA
            risk_acceptance = ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings)
            messages.add_message(
                request,
                messages.SUCCESS,
                'Risk acceptance saved.',
                extra_tags='alert-success')
            return redirect_to_return_url_or_else(request, reverse('view_engagement', args=(eid, )))
    else:
        # GET: suggest a title based on the (possibly None) pre-selected finding
        risk_acceptance_title_suggestion = 'Accept: %s' % finding
        form = RiskAcceptanceForm(initial={'owner': request.user, 'name': risk_acceptance_title_suggestion})
    # only non-duplicate findings of this engagement that are not yet accepted
    finding_choices = Finding.objects.filter(duplicate=False, test__engagement=eng).filter(NOT_ACCEPTED_FINDINGS_QUERY).order_by('title')
    form.fields['accepted_findings'].queryset = finding_choices
    if fid:
        form.fields['accepted_findings'].initial = {fid}
    product_tab = Product_Tab(eng.product.id, title="Risk Acceptance", tab="engagements")
    product_tab.setEngagement(eng)
    return render(request, 'dojo/add_risk_acceptance.html', {
                  'eng': eng,
                  'product_tab': product_tab,
                  'form': form
                  })
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def view_risk_acceptance(request, eid, raid):
    """Read-only display of a risk acceptance; delegates to the shared view/edit handler."""
    return view_edit_risk_acceptance(request, eid, raid, False)
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def edit_risk_acceptance(request, eid, raid):
    """Editable display of a risk acceptance; delegates to the shared view/edit handler."""
    return view_edit_risk_acceptance(request, eid, raid, True)
# will only be called by view_risk_acceptance and edit_risk_acceptance
def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
    """Shared handler behind view_risk_acceptance / edit_risk_acceptance.

    Renders the risk acceptance detail page and, on POST, dispatches on the
    submitted button name: 'decision' (edit the RA itself), 'entry' (add
    note), 'delete_note', 'remove_finding', 'replace_file', 'add_findings'.
    Editing requires the product's "full risk acceptance" switch.

    :param edit_mode: when True, render the edit form and allow changes
    """
    risk_acceptance = get_object_or_404(Risk_Acceptance, pk=raid)
    eng = get_object_or_404(Engagement, pk=eid)
    if edit_mode and not eng.product.enable_full_risk_acceptance:
        raise PermissionDenied()
    risk_acceptance_form = None
    errors = False
    if request.method == 'POST':
        # deleting before instantiating the form otherwise django messes up and we end up with an empty path value
        if len(request.FILES) > 0:
            logger.debug('new proof uploaded')
            risk_acceptance.path.delete()
        # --- 'decision': save changes to the risk acceptance itself ---
        if 'decision' in request.POST:
            old_expiration_date = risk_acceptance.expiration_date
            risk_acceptance_form = EditRiskAcceptanceForm(request.POST, request.FILES, instance=risk_acceptance)
            errors = errors or not risk_acceptance_form.is_valid()
            if not errors:
                logger.debug('path: %s', risk_acceptance_form.cleaned_data['path'])
                risk_acceptance_form.save()
                if risk_acceptance.expiration_date != old_expiration_date:
                    # risk acceptance was changed, check if risk acceptance needs to be reinstated and findings made accepted again
                    ra_helper.reinstate(risk_acceptance, old_expiration_date)
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    'Risk Acceptance saved successfully.',
                    extra_tags='alert-success')
        # --- 'entry': add a note to the risk acceptance ---
        if 'entry' in request.POST:
            note_form = NoteForm(request.POST)
            errors = errors or not note_form.is_valid()
            if not errors:
                new_note = note_form.save(commit=False)
                new_note.author = request.user
                new_note.date = timezone.now()
                new_note.save()
                risk_acceptance.notes.add(new_note)
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    'Note added successfully.',
                    extra_tags='alert-success')
        # --- 'delete_note': only the note's author may delete it ---
        if 'delete_note' in request.POST:
            note = get_object_or_404(Notes, pk=request.POST['delete_note_id'])
            if note.author.username == request.user.username:
                risk_acceptance.notes.remove(note)
                note.delete()
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    'Note deleted successfully.',
                    extra_tags='alert-success')
            else:
                messages.add_message(
                    request,
                    messages.ERROR,
                    "Since you are not the note's author, it was not deleted.",
                    extra_tags='alert-danger')
        # --- 'remove_finding': detach a finding from the risk acceptance ---
        if 'remove_finding' in request.POST:
            finding = get_object_or_404(
                Finding, pk=request.POST['remove_finding_id'])
            ra_helper.remove_finding_from_risk_acceptance(risk_acceptance, finding)
            messages.add_message(
                request,
                messages.SUCCESS,
                'Finding removed successfully from risk acceptance.',
                extra_tags='alert-success')
        # --- 'replace_file': swap the proof document ---
        if 'replace_file' in request.POST:
            replace_form = ReplaceRiskAcceptanceProofForm(
                request.POST, request.FILES, instance=risk_acceptance)
            errors = errors or not replace_form.is_valid()
            if not errors:
                replace_form.save()
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    'New Proof uploaded successfully.',
                    extra_tags='alert-success')
            else:
                logger.error(replace_form.errors)
        # --- 'add_findings': accept additional findings under this RA ---
        if 'add_findings' in request.POST:
            add_findings_form = AddFindingsRiskAcceptanceForm(
                request.POST, request.FILES, instance=risk_acceptance)
            errors = errors or not add_findings_form.is_valid()
            if not errors:
                findings = add_findings_form.cleaned_data['accepted_findings']
                ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings)
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    'Finding%s added successfully.' % ('s' if len(findings) > 1
                                                      else ''),
                    extra_tags='alert-success')
        if not errors:
            logger.debug('redirecting to return_url')
            return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
        else:
            logger.error('errors found')
    else:
        if edit_mode:
            risk_acceptance_form = EditRiskAcceptanceForm(instance=risk_acceptance)
    # Fresh (unbound) forms for the render; also reached when a POST had errors.
    note_form = NoteForm()
    replace_form = ReplaceRiskAcceptanceProofForm(instance=risk_acceptance)
    add_findings_form = AddFindingsRiskAcceptanceForm(instance=risk_acceptance)
    accepted_findings = risk_acceptance.accepted_findings.order_by('numerical_severity')
    fpage = get_page_items(request, accepted_findings, 15)
    unaccepted_findings = Finding.objects.filter(test__in=eng.test_set.all()) \
        .exclude(id__in=accepted_findings).order_by("title")
    add_fpage = get_page_items(request, unaccepted_findings, 10, 'apage')
    # on this page we need to add unaccepted findings as possible findings to add as accepted
    add_findings_form.fields[
        "accepted_findings"].queryset = add_fpage.object_list
    product_tab = Product_Tab(eng.product.id, title="Risk Acceptance", tab="engagements")
    product_tab.setEngagement(eng)
    return render(
        request, 'dojo/view_risk_acceptance.html', {
            'risk_acceptance': risk_acceptance,
            'engagement': eng,
            'product_tab': product_tab,
            'accepted_findings': fpage,
            'notes': risk_acceptance.notes.all(),
            'eng': eng,
            'edit_mode': edit_mode,
            'risk_acceptance_form': risk_acceptance_form,
            'note_form': note_form,
            'replace_form': replace_form,
            'add_findings_form': add_findings_form,
            # 'show_add_findings_form': len(unaccepted_findings),
            'request': request,
            'add_findings': add_fpage,
            'return_url': get_return_url(request),
        })
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def expire_risk_acceptance(request, eid, raid):
    """Immediately expire a risk acceptance, then return to its detail page."""
    # Both lookups 404 when the id does not exist; the engagement lookup is a
    # pure existence check.
    ra = get_object_or_404(prefetch_for_expiration(Risk_Acceptance.objects.all()), pk=raid)
    engagement = get_object_or_404(Engagement, pk=eid)
    ra_helper.expire_now(ra)
    return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def reinstate_risk_acceptance(request, eid, raid):
    """Re-activate an expired risk acceptance and re-accept its findings."""
    ra = get_object_or_404(prefetch_for_expiration(Risk_Acceptance.objects.all()), pk=raid)
    engagement = get_object_or_404(Engagement, pk=eid)
    # Reinstating is part of "full" risk acceptance, which is a per-product switch.
    if not engagement.product.enable_full_risk_acceptance:
        raise PermissionDenied()
    ra_helper.reinstate(ra, ra.expiration_date)
    return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def delete_risk_acceptance(request, eid, raid):
    """Delete a risk acceptance and redirect back to its engagement."""
    ra = get_object_or_404(Risk_Acceptance, pk=raid)
    engagement = get_object_or_404(Engagement, pk=eid)
    # Helper also un-accepts the findings that were covered by this RA.
    ra_helper.delete(engagement, ra)
    messages.add_message(
        request,
        messages.SUCCESS,
        'Risk acceptance deleted successfully.',
        extra_tags='alert-success')
    return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id, )))
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def download_risk_acceptance(request, eid, raid):
    """Stream the proof file attached to a risk acceptance as a download.

    Falls back to ``application/octet-stream`` when the file's type cannot be
    guessed, so the response never carries a ``None`` Content-Type (the
    original bug).
    """
    import mimetypes
    import os
    mimetypes.init()
    risk_acceptance = get_object_or_404(Risk_Acceptance, pk=raid)
    # os.path.join instead of string concatenation; handles MEDIA_ROOT with or
    # without a trailing separator.
    file_path = os.path.join(settings.MEDIA_ROOT, risk_acceptance.path.name)
    response = StreamingHttpResponse(
        FileIterWrapper(open(file_path, mode='rb')))
    response['Content-Disposition'] = 'attachment; filename="%s"' \
                                      % risk_acceptance.filename()
    mimetype, encoding = mimetypes.guess_type(risk_acceptance.path.name)
    # guess_type returns None for unknown extensions.
    response['Content-Type'] = mimetype if mimetype else 'application/octet-stream'
    return response
"""
Greg
status: in production
Upload a threat model at the engagement level. Threat models are stored
under media folder
"""
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def upload_threatmodel(request, eid):
    """Upload a threat-model document for an engagement.

    On a valid POST the file is saved via handle_uploaded_threat, the
    engagement's progress is set to 'other' and its threat_model flag is set.
    """
    eng = Engagement.objects.get(id=eid)
    add_breadcrumb(
        parent=eng,
        title="Upload a threat model",
        top_level=False,
        request=request)
    if request.method == 'POST':
        form = UploadThreatForm(request.POST, request.FILES)
        if form.is_valid():
            # persists the file under MEDIA and records its path on the engagement
            handle_uploaded_threat(request.FILES['file'], eng)
            eng.progress = 'other'
            eng.threat_model = True
            eng.save()
            messages.add_message(
                request,
                messages.SUCCESS,
                'Threat model saved.',
                extra_tags='alert-success')
            return HttpResponseRedirect(
                reverse('view_engagement', args=(eid, )))
    else:
        form = UploadThreatForm()
    product_tab = Product_Tab(eng.product.id, title="Upload Threat Model", tab="engagements")
    return render(request, 'dojo/up_threat.html', {
        'form': form,
        'product_tab': product_tab,
        'eng': eng,
    })
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def view_threatmodel(request, eid):
    """Serve the engagement's uploaded threat-model file."""
    engagement = get_object_or_404(Engagement, pk=eid)
    # FileResponse closes the file handle when the response is consumed.
    return FileResponse(open(engagement.tmodel_path, 'rb'))
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def engagement_ics(request, eid):
    """Export an engagement's time window as a downloadable iCalendar (.ics) event."""
    eng = get_object_or_404(Engagement, id=eid)
    # Expand the target dates to cover the whole first and last day.
    start_date = datetime.combine(eng.target_start, datetime.min.time())
    end_date = datetime.combine(eng.target_end, datetime.max.time())
    # Stable per-engagement UID so re-imports update rather than duplicate.
    uid = "dojo_eng_%d_%d" % (eng.id, eng.product.id)
    cal = get_cal_event(
        start_date, end_date,
        "Engagement: %s (%s)" % (eng.name, eng.product.name),
        "Set aside for engagement %s, on product %s. Additional detail can be found at %s"
        % (eng.name, eng.product.name,
           request.build_absolute_uri(
               (reverse("view_engagement", args=(eng.id, ))))), uid)
    output = cal.serialize()
    response = HttpResponse(content=output)
    response['Content-Type'] = 'text/calendar'
    response['Content-Disposition'] = 'attachment; filename=%s.ics' % eng.name
    return response
def get_list_index(list, index):
    """Return ``list[index]``, or ``None`` when the lookup fails.

    Keeps callers free of bounds checks: out-of-range indices, ``None``
    sequences and similar lookup errors all yield ``None``. Narrowed from a
    blanket ``except Exception`` to the errors indexing can actually raise.
    (The parameter name shadows the ``list`` builtin; kept unchanged for
    backward compatibility with keyword callers.)
    """
    try:
        return list[index]
    except (IndexError, TypeError, KeyError):
        return None
def get_engagements(request):
    """Resolve an exported engagement-view URL back into a queryset.

    The export buttons pass the originating page URL via the query string;
    this parses the path, re-applies the same filters and returns
    ``(engagements, test_counts)``.

    :raises ValidationError: when no URL was passed or it is not an
        engagement view.
    """
    url = request.META.get('QUERY_STRING')
    if not url:
        raise ValidationError('Please use the export button when exporting engagements')
    else:
        if url.startswith('url='):
            url = url[4:]
    # Raw string fixes the invalid '\?' escape the original needed a noqa for.
    path_items = list(filter(None, re.split(r'/|\?', url)))
    if not path_items or path_items[0] != 'engagement':
        raise ValidationError('URL is not an engagement view')
    view = query = None
    if get_list_index(path_items, 1) in ['active', 'all']:
        view = get_list_index(path_items, 1)
        query = get_list_index(path_items, 2)
    else:
        # no explicit view segment: default to 'active'
        view = 'active'
        query = get_list_index(path_items, 1)
    # Re-apply the original page's filters by swapping in its query string.
    request.GET = QueryDict(query)
    engagements = get_filtered_engagements(request, view).qs
    test_counts = get_test_counts(engagements)
    return engagements, test_counts
def get_excludes():
    """Engagement attributes that the CSV/Excel exports must skip."""
    excluded_attributes = [
        'is_ci_cd',
        'jira_issue',
        'jira_project',
        'objects',
        'unaccepted_open_findings',
    ]
    return excluded_attributes
def get_foreign_keys():
    """Engagement foreign-key attributes the exports render via str()."""
    fk_attributes = [
        'build_server',
        'lead',
        'orchestration_engine',
        'preset',
        'product',
        'report_type',
        'requester',
        'source_code_management_server',
    ]
    return fk_attributes
def csv_export(request):
    """Export the engagements selected by the request's filters as CSV.

    The header row is derived from the first engagement's public, non-callable
    attributes (minus get_excludes()); foreign keys are rendered via str().
    """
    engagements, test_counts = get_engagements(request)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=engagements.csv'
    writer = csv.writer(response)
    first_row = True
    for engagement in engagements:
        if first_row:
            # header row: attribute names plus a trailing 'tests' count column
            fields = []
            for key in dir(engagement):
                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
                    fields.append(key)
            fields.append('tests')
            writer.writerow(fields)
            first_row = False
        if not first_row:
            fields = []
            for key in dir(engagement):
                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
                    value = engagement.__dict__.get(key)
                    if key in get_foreign_keys() and getattr(engagement, key):
                        value = str(getattr(engagement, key))
                    if value and isinstance(value, str):
                        # keep one physical CSV line per engagement
                        value = value.replace('\n', ' NEWLINE ').replace('\r', '')
                    fields.append(value)
            fields.append(test_counts.get(engagement.id, 0))
            writer.writerow(fields)
    return response
def excel_export(request):
    """Export the engagements selected by the request's filters as an .xlsx workbook.

    Mirrors csv_export: header row from the first engagement's public
    attributes, one row per engagement, trailing 'tests' count column.
    """
    engagements, test_counts = get_engagements(request)
    workbook = Workbook()
    workbook.iso_dates = True
    worksheet = workbook.active
    worksheet.title = 'Engagements'
    font_bold = Font(bold=True)
    row_num = 1
    for engagement in engagements:
        if row_num == 1:
            # bold header row derived from the first engagement's attributes
            col_num = 1
            for key in dir(engagement):
                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
                    cell = worksheet.cell(row=row_num, column=col_num, value=key)
                    cell.font = font_bold
                    col_num += 1
            cell = worksheet.cell(row=row_num, column=col_num, value='tests')
            cell.font = font_bold
            row_num = 2
        if row_num > 1:
            col_num = 1
            for key in dir(engagement):
                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
                    value = engagement.__dict__.get(key)
                    if key in get_foreign_keys() and getattr(engagement, key):
                        value = str(getattr(engagement, key))
                    if value and isinstance(value, datetime):
                        # openpyxl cannot store timezone-aware datetimes
                        value = value.replace(tzinfo=None)
                    worksheet.cell(row=row_num, column=col_num, value=value)
                    col_num += 1
            worksheet.cell(row=row_num, column=col_num, value=test_counts.get(engagement.id, 0))
        row_num += 1
    with NamedTemporaryFile() as tmp:
        workbook.save(tmp.name)
        tmp.seek(0)
        stream = tmp.read()
    response = HttpResponse(
        content=stream,
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
    response['Content-Disposition'] = 'attachment; filename=engagements.xlsx'
    return response
| 40.547544 | 182 | 0.643431 | import logging
import csv
import re
from openpyxl import Workbook
from openpyxl.styles import Font
from tempfile import NamedTemporaryFile
from datetime import datetime
import operator
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ValidationError, PermissionDenied
from django.urls import reverse
from django.db.models import Q, Count
from django.http import HttpResponseRedirect, StreamingHttpResponse, HttpResponse, FileResponse, QueryDict
from django.shortcuts import render, get_object_or_404
from django.views.decorators.cache import cache_page
from django.utils import timezone
from time import strftime
from django.contrib.admin.utils import NestedObjects
from django.db import DEFAULT_DB_ALIAS
from dojo.engagement.services import close_engagement, reopen_engagement
from dojo.filters import EngagementFilter, EngagementDirectFilter, EngagementTestFilter
from dojo.forms import CheckForm, \
UploadThreatForm, RiskAcceptanceForm, NoteForm, DoneForm, \
EngForm, TestForm, ReplaceRiskAcceptanceProofForm, AddFindingsRiskAcceptanceForm, DeleteEngagementForm, ImportScanForm, \
CredMappingForm, JIRAEngagementForm, JIRAImportScanForm, TypedNoteForm, JIRAProjectForm, \
EditRiskAcceptanceForm
from dojo.models import Finding, Product, Engagement, Test, \
Check_List, Test_Import, Notes, \
Risk_Acceptance, Development_Environment, Endpoint, \
Cred_Mapping, Dojo_User, System_Settings, Note_Type, Product_API_Scan_Configuration
from dojo.tools.factory import get_scan_types_sorted
from dojo.utils import add_error_message_to_response, add_success_message_to_response, get_page_items, add_breadcrumb, handle_uploaded_threat, \
FileIterWrapper, get_cal_event, Product_Tab, is_scan_file_too_large, \
get_system_setting, redirect_to_return_url_or_else, get_return_url
from dojo.notifications.helper import create_notification
from dojo.finding.views import find_available_notetypes
from functools import reduce
from django.db.models.query import Prefetch, QuerySet
import dojo.jira_link.helper as jira_helper
import dojo.risk_acceptance.helper as ra_helper
from dojo.risk_acceptance.helper import prefetch_for_expiration
from dojo.finding.helper import NOT_ACCEPTED_FINDINGS_QUERY
from django.views.decorators.vary import vary_on_cookie
from dojo.authorization.authorization import user_has_permission_or_403
from dojo.authorization.roles_permissions import Permissions
from dojo.product.queries import get_authorized_products
from dojo.engagement.queries import get_authorized_engagements
from dojo.authorization.authorization_decorators import user_is_authorized
from dojo.importers.importer.importer import DojoDefaultImporter as Importer
import dojo.notifications.helper as notifications_helper
from dojo.endpoint.utils import save_endpoints_to_add
logger = logging.getLogger(__name__)
@cache_page(60 * 5)  # cache the rendered calendar for 5 minutes
@vary_on_cookie
def engagement_calendar(request):
    """Render the engagement calendar, optionally filtered by lead.

    'lead' GET values: absent or '0' means all leads; '-1' selects
    engagements with no lead; any other ids select those leads.
    """
    if 'lead' not in request.GET or '0' in request.GET.getlist('lead'):
        engagements = get_authorized_engagements(Permissions.Engagement_View)
    else:
        filters = []
        leads = request.GET.getlist('lead', '')
        if '-1' in request.GET.getlist('lead'):
            leads.remove('-1')
            filters.append(Q(lead__isnull=True))
        filters.append(Q(lead__in=leads))
        # OR all lead filters together
        engagements = get_authorized_engagements(Permissions.Engagement_View).filter(reduce(operator.or_, filters))
    engagements = engagements.select_related('lead')
    engagements = engagements.prefetch_related('product')
    add_breadcrumb(
        title="Engagement Calendar", top_level=True, request=request)
    return render(
        request, 'dojo/calendar.html', {
            'caltype': 'engagements',
            'leads': request.GET.getlist('lead', ''),
            'engagements': engagements,
            'users': Dojo_User.objects.all()
        })
def get_filtered_engagements(request, view):
    """Build the filtered engagement queryset for the 'all' or 'active' view.

    Returns an EngagementDirectFilter wrapping the authorized engagements,
    with related objects prefetched for the list page.

    :raises ValidationError: for any view name other than 'all'/'active'
    """
    if view not in ['all', 'active']:
        raise ValidationError(f'View {view} is not allowed')
    engagements = get_authorized_engagements(Permissions.Engagement_View).order_by('-target_start')
    if view == 'active':
        engagements = engagements.filter(active=True)
    engagements = engagements.select_related('product', 'product__prod_type') \
        .prefetch_related('lead', 'tags', 'product__tags')
    if System_Settings.objects.get().enable_jira:
        # only pay the JIRA-join cost when JIRA is enabled
        engagements = engagements.prefetch_related(
            'jira_project__jira_instance',
            'product__jira_project_set__jira_instance'
        )
    engagements = EngagementDirectFilter(request.GET, queryset=engagements)
    return engagements
def get_test_counts(engagements):
    """Return a mapping of engagement id -> number of tests, for the given engagements."""
    counts_by_engagement = {}
    # One grouped query instead of a count per engagement.
    rows = Test.objects.filter(
        engagement__in=engagements
    ).values(
        'engagement'
    ).annotate(
        test_count=Count('engagement')
    )
    for row in rows:
        counts_by_engagement[row['engagement']] = row['test_count']
    return counts_by_engagement
def engagements(request, view):
    """List engagements for the given view ('active' or 'all'; defaults to 'active')."""
    if not view:
        view = 'active'
    filtered_engagements = get_filtered_engagements(request, view)
    engs = get_page_items(request, filtered_engagements.qs, 25)
    # word lists feed the filter form's autocomplete widgets
    product_name_words = sorted(get_authorized_products(Permissions.Product_View).values_list('name', flat=True))
    engagement_name_words = sorted(get_authorized_engagements(Permissions.Engagement_View).values_list('name', flat=True).distinct())
    add_breadcrumb(
        title=f"{view.capitalize()} Engagements",
        top_level=not len(request.GET),
        request=request)
    return render(
        request, 'dojo/engagement.html', {
            'engagements': engs,
            'engagement_test_counts': get_test_counts(filtered_engagements.qs),
            'filter_form': filtered_engagements.form,
            'product_name_words': product_name_words,
            'engagement_name_words': engagement_name_words,
            'view': view.capitalize(),
        })
def engagements_all(request):
    """List all products that have engagements, with their engagements nested."""
    products_with_engagements = get_authorized_products(Permissions.Engagement_View)
    products_with_engagements = products_with_engagements.filter(~Q(engagement=None)).distinct()
    # annotate each engagement with its test count while prefetching
    filter_qs = products_with_engagements.prefetch_related(
        Prefetch('engagement_set', queryset=Engagement.objects.all().annotate(test_count=Count('test__id')))
    )
    filter_qs = filter_qs.prefetch_related(
        'engagement_set__tags',
        'prod_type',
        'engagement_set__lead',
        'tags',
    )
    if System_Settings.objects.get().enable_jira:
        # only pay the JIRA-join cost when JIRA is enabled
        filter_qs = filter_qs.prefetch_related(
            'engagement_set__jira_project__jira_instance',
            'jira_project_set__jira_instance'
        )
    filtered = EngagementFilter(
        request.GET,
        queryset=filter_qs
    )
    prods = get_page_items(request, filtered.qs, 25)
    # word lists feed the filter form's autocomplete widgets
    name_words = products_with_engagements.values_list('name', flat=True)
    eng_words = get_authorized_engagements(Permissions.Engagement_View).values_list('name', flat=True).distinct()
    add_breadcrumb(
        title="All Engagements",
        top_level=not len(request.GET),
        request=request)
    return render(
        request, 'dojo/engagements_all.html', {
            'products': prods,
            'filter_form': filtered.form,
            'name_words': sorted(set(name_words)),
            'eng_words': sorted(set(eng_words)),
        })
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def edit_engagement(request, eid):
    """Edit an engagement, including its JIRA project and epic settings.

    Setting the status to Cancelled/Completed deactivates the engagement and
    sends a close notification; any other status (re)activates it.
    """
    engagement = Engagement.objects.get(pk=eid)
    is_ci_cd = engagement.engagement_type == "CI/CD"
    jira_project_form = None
    jira_epic_form = None
    jira_project = None
    jira_error = False
    if request.method == 'POST':
        form = EngForm(request.POST, instance=engagement, cicd=is_ci_cd, product=engagement.product, user=request.user)
        jira_project = jira_helper.get_jira_project(engagement, use_inheritance=False)
        if form.is_valid():
            # first save engagement details
            new_status = form.cleaned_data.get('status')
            engagement = form.save(commit=False)
            if (new_status == "Cancelled" or new_status == "Completed"):
                engagement.active = False
                # NOTE(review): trailing comma below makes this statement a
                # throwaway 1-tuple; the notification is still sent.
                create_notification(event='close_engagement',
                                    title='Closure of %s' % engagement.name,
                                    description='The engagement "%s" was closed' % (engagement.name),
                                    engagement=engagement, url=reverse('engagement_all_findings', args=(engagement.id, ))),
            else:
                engagement.active = True
            engagement.save()
            form.save_m2m()
            messages.add_message(
                request,
                messages.SUCCESS,
                'Engagement updated successfully.',
                extra_tags='alert-success')
            # JIRA project/epic forms are processed after the engagement save
            success, jira_project_form = jira_helper.process_jira_project_form(request, instance=jira_project, target='engagement', engagement=engagement, product=engagement.product)
            error = not success
            success, jira_epic_form = jira_helper.process_jira_epic_form(request, engagement=engagement)
            error = error or not success
            if not error:
                if '_Add Tests' in request.POST:
                    return HttpResponseRedirect(
                        reverse('add_tests', args=(engagement.id, )))
                else:
                    return HttpResponseRedirect(
                        reverse('view_engagement', args=(engagement.id, )))
        else:
            logger.debug(form.errors)
    else:
        form = EngForm(initial={'product': engagement.product}, instance=engagement, cicd=is_ci_cd, product=engagement.product, user=request.user)
        jira_epic_form = None
        if get_system_setting('enable_jira'):
            jira_project = jira_helper.get_jira_project(engagement, use_inheritance=False)
            jira_project_form = JIRAProjectForm(instance=jira_project, target='engagement', product=engagement.product)
            logger.debug('showing jira-epic-form')
            jira_epic_form = JIRAEngagementForm(instance=engagement)
    if is_ci_cd:
        title = 'Edit CI/CD Engagement'
    else:
        title = 'Edit Interactive Engagement'
    product_tab = Product_Tab(engagement.product.id, title=title, tab="engagements")
    product_tab.setEngagement(engagement)
    return render(request, 'dojo/new_eng.html', {
        'product_tab': product_tab,
        'title': title,
        'form': form,
        'edit': True,
        'jira_epic_form': jira_epic_form,
        'jira_project_form': jira_project_form,
        'engagement': engagement,
    })
@user_is_authorized(Engagement, Permissions.Engagement_Delete, 'eid')
def delete_engagement(request, eid):
    """Confirm-and-delete an engagement.

    GET shows the confirmation page with the tree of related objects that
    would also be removed; POST (with a matching 'id' field as a safety
    check) performs the deletion and notifies the lead.
    """
    engagement = get_object_or_404(Engagement, pk=eid)
    product = engagement.product
    form = DeleteEngagementForm(instance=engagement)
    if request.method == 'POST':
        # the hidden 'id' must match the URL's engagement to prevent accidents
        if 'id' in request.POST and str(engagement.id) == request.POST['id']:
            form = DeleteEngagementForm(request.POST, instance=engagement)
            if form.is_valid():
                product = engagement.product
                engagement.delete()
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    'Engagement and relationships removed.',
                    extra_tags='alert-success')
                create_notification(event='other',
                                    title='Deletion of %s' % engagement.name,
                                    product=product,
                                    description='The engagement "%s" was deleted by %s' % (engagement.name, request.user),
                                    url=request.build_absolute_uri(reverse('view_engagements', args=(product.id, ))),
                                    recipients=[engagement.lead],
                                    icon="exclamation-triangle")
                return HttpResponseRedirect(reverse("view_engagements", args=(product.id, )))
    # collect the cascade of related objects to show on the confirmation page
    collector = NestedObjects(using=DEFAULT_DB_ALIAS)
    collector.collect([engagement])
    rels = collector.nested()
    product_tab = Product_Tab(product.id, title="Delete Engagement", tab="engagements")
    product_tab.setEngagement(engagement)
    return render(request, 'dojo/delete_engagement.html', {
        'product_tab': product_tab,
        'engagement': engagement,
        'form': form,
        'rels': rels,
    })
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def view_engagement(request, eid):
    """Engagement detail page: tests, notes, files, checklist, risk acceptances, JIRA info.

    POST adds a note (typed notes when Note_Type is active) and also bumps the
    engagement's progress to 'check_list'.
    """
    eng = get_object_or_404(Engagement, id=eid)
    tests = eng.test_set.all().order_by('test_type__name', '-updated')
    default_page_num = 10
    tests_filter = EngagementTestFilter(request.GET, queryset=tests, engagement=eng)
    paged_tests = get_page_items(request, tests_filter.qs, default_page_num)
    # prefetch only after creating the filters to avoid https://code.djangoproject.com/ticket/23771 and https://code.djangoproject.com/ticket/25375
    paged_tests.object_list = prefetch_for_view_tests(paged_tests.object_list)
    prod = eng.product
    risks_accepted = eng.risk_acceptance.all().select_related('owner').annotate(accepted_findings_count=Count('accepted_findings__id'))
    preset_test_type = None
    network = None
    if eng.preset:
        preset_test_type = eng.preset.test_type.all()
        network = eng.preset.network_locations.all()
    system_settings = System_Settings.objects.get()
    jissue = jira_helper.get_jira_issue(eng)
    jira_project = jira_helper.get_jira_project(eng)
    try:
        check = Check_List.objects.get(engagement=eng)
    except:
        # no checklist yet for this engagement
        check = None
        pass
    notes = eng.notes.all()
    note_type_activation = Note_Type.objects.filter(is_active=True).count()
    if note_type_activation:
        available_note_types = find_available_notetypes(notes)
    form = DoneForm()
    files = eng.files.all()
    if request.method == 'POST':
        user_has_permission_or_403(request.user, eng, Permissions.Note_Add)
        eng.progress = 'check_list'
        eng.save()
        if note_type_activation:
            form = TypedNoteForm(request.POST, available_note_types=available_note_types)
        else:
            form = NoteForm(request.POST)
        if form.is_valid():
            new_note = form.save(commit=False)
            new_note.author = request.user
            new_note.date = timezone.now()
            new_note.save()
            eng.notes.add(new_note)
            # reset to an empty form after a successful note submission
            if note_type_activation:
                form = TypedNoteForm(available_note_types=available_note_types)
            else:
                form = NoteForm()
            # NOTE(review): url and title below are assigned but never used
            url = request.build_absolute_uri(reverse("view_engagement", args=(eng.id,)))
            title = "Engagement: %s on %s" % (eng.name, eng.product.name)
            messages.add_message(request,
                                 messages.SUCCESS,
                                 'Note added successfully.',
                                 extra_tags='alert-success')
    else:
        if note_type_activation:
            form = TypedNoteForm(available_note_types=available_note_types)
        else:
            form = NoteForm()
    creds = Cred_Mapping.objects.filter(
        product=eng.product).select_related('cred_id').order_by('cred_id')
    cred_eng = Cred_Mapping.objects.filter(
        engagement=eng.id).select_related('cred_id').order_by('cred_id')
    add_breadcrumb(parent=eng, top_level=False, request=request)
    title = ""
    if eng.engagement_type == "CI/CD":
        title = " CI/CD"
    product_tab = Product_Tab(prod.id, title="View" + title + " Engagement", tab="engagements")
    product_tab.setEngagement(eng)
    return render(
        request, 'dojo/view_eng.html', {
            'eng': eng,
            'product_tab': product_tab,
            'system_settings': system_settings,
            'tests': paged_tests,
            'filter': tests_filter,
            'check': check,
            'threat': eng.tmodel_path,
            'form': form,
            'notes': notes,
            'files': files,
            'risks_accepted': risks_accepted,
            'jissue': jissue,
            'jira_project': jira_project,
            'creds': creds,
            'cred_eng': cred_eng,
            'network': network,
            'preset_test_type': preset_test_type
        })
def prefetch_for_view_tests(tests):
    """Attach prefetches and per-test finding-count annotations for the engagement page.

    Safe to call with an already-executed list (returns it unchanged with a
    warning); a QuerySet gains select/prefetch-related relations plus
    count_findings_test_* and total_reimport_count annotations.
    """
    prefetched = tests
    if isinstance(tests,
                  QuerySet):  # old code can arrive here with prods being a list because the query was already executed
        prefetched = prefetched.select_related('lead')
        prefetched = prefetched.prefetch_related('tags', 'test_type', 'notes')
        prefetched = prefetched.annotate(count_findings_test_all=Count('finding__id', distinct=True))
        prefetched = prefetched.annotate(count_findings_test_active=Count('finding__id', filter=Q(finding__active=True), distinct=True))
        prefetched = prefetched.annotate(count_findings_test_active_verified=Count('finding__id', filter=Q(finding__active=True) & Q(finding__verified=True), distinct=True))
        prefetched = prefetched.annotate(count_findings_test_mitigated=Count('finding__id', filter=Q(finding__is_mitigated=True), distinct=True))
        prefetched = prefetched.annotate(count_findings_test_dups=Count('finding__id', filter=Q(finding__duplicate=True), distinct=True))
        prefetched = prefetched.annotate(total_reimport_count=Count('test_import__id', filter=Q(test_import__type=Test_Import.REIMPORT_TYPE), distinct=True))
    else:
        # logger.warn() is a deprecated alias of logger.warning()
        logger.warning('unable to prefetch because query was already executed')
    return prefetched
@user_is_authorized(Engagement, Permissions.Test_Add, 'eid')
def add_tests(request, eid):
    """Add one or more tests to an engagement, optionally mapping a
    credential that is already associated with the engagement."""
    eng = Engagement.objects.get(id=eid)
    cred_form = CredMappingForm()
    cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
        engagement=eng).order_by('cred_id')
    if request.method == 'POST':
        form = TestForm(request.POST, engagement=eng)
        cred_form = CredMappingForm(request.POST)
        cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
            engagement=eng).order_by('cred_id')
        if form.is_valid():
            new_test = form.save(commit=False)
            # set default scan_type as it's used in reimport
            new_test.scan_type = new_test.test_type.name
            new_test.engagement = eng
            try:
                new_test.lead = User.objects.get(id=form['lead'].value())
            except Exception:
                # No (valid) lead selected; a test without a lead is fine.
                # Was a bare "except:", which would also swallow SystemExit
                # and KeyboardInterrupt.
                new_test.lead = None
            if eng.status != "In Progress" and eng.active is True:
                eng.status = "In Progress"
                eng.save()
            new_test.save()
            if cred_form.is_valid():
                if cred_form.cleaned_data['cred_user']:
                    # Only allow credentials already mapped to this engagement.
                    cred_user = Cred_Mapping.objects.filter(
                        pk=cred_form.cleaned_data['cred_user'].id,
                        engagement=eid).first()
                    new_f = cred_form.save(commit=False)
                    new_f.test = new_test
                    new_f.cred_id = cred_user.cred_id
                    new_f.save()
            messages.add_message(
                request,
                messages.SUCCESS,
                'Test added successfully.',
                extra_tags='alert-success')
            notifications_helper.notify_test_created(new_test)
            # The submit button pressed decides where we go next.
            if '_Add Another Test' in request.POST:
                return HttpResponseRedirect(
                    reverse('add_tests', args=(eng.id, )))
            elif '_Add Findings' in request.POST:
                return HttpResponseRedirect(
                    reverse('add_findings', args=(new_test.id, )))
            elif '_Finished' in request.POST:
                return HttpResponseRedirect(
                    reverse('view_engagement', args=(eng.id, )))
    else:
        form = TestForm(engagement=eng)
        form.initial['target_start'] = eng.target_start
        form.initial['target_end'] = eng.target_end
        form.initial['lead'] = request.user
    add_breadcrumb(
        parent=eng, title="Add Tests", top_level=False, request=request)
    product_tab = Product_Tab(eng.product.id, title="Add Tests", tab="engagements")
    product_tab.setEngagement(eng)
    return render(request, 'dojo/add_tests.html', {
        'product_tab': product_tab,
        'form': form,
        'cred_form': cred_form,
        'eid': eid,
        'eng': eng
    })
def import_scan_results(request, eid=None, pid=None):
    """Import a scan report into an engagement (``eid``) or, for ad-hoc
    imports, directly into a product (``pid``).

    GET renders the import form; POST runs the Importer, optionally pushes
    findings to JIRA and maps a selected credential onto the created test.
    """
    engagement = None
    form = ImportScanForm()
    cred_form = CredMappingForm()
    finding_count = 0
    jform = None
    user = request.user
    if eid:
        engagement = get_object_or_404(Engagement, id=eid)
        engagement_or_product = engagement
        cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(engagement=engagement).order_by('cred_id')
    elif pid:
        product = get_object_or_404(Product, id=pid)
        engagement_or_product = product
    elif not user.is_staff:
        raise PermissionDenied
    # NOTE(review): if neither eid nor pid is supplied and the user IS staff,
    # engagement_or_product is never bound and the next line raises NameError
    # -- confirm that route is unreachable via the URL configuration.
    user_has_permission_or_403(user, engagement_or_product, Permissions.Import_Scan_Result)
    push_all_jira_issues = jira_helper.is_push_all_issues(engagement_or_product)
    if request.method == "POST":
        form = ImportScanForm(request.POST, request.FILES)
        cred_form = CredMappingForm(request.POST)
        cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
            engagement=engagement).order_by('cred_id')
        if jira_helper.get_jira_project(engagement_or_product):
            jform = JIRAImportScanForm(request.POST, push_all=push_all_jira_issues, prefix='jiraform')
            logger.debug('jform valid: %s', jform.is_valid())
            logger.debug('jform errors: %s', jform.errors)
        if form.is_valid() and (jform is None or jform.is_valid()):
            scan = request.FILES.get('file', None)
            scan_date = form.cleaned_data['scan_date']
            minimum_severity = form.cleaned_data['minimum_severity']
            active = form.cleaned_data['active']
            verified = form.cleaned_data['verified']
            scan_type = request.POST['scan_type']
            tags = form.cleaned_data['tags']
            version = form.cleaned_data['version']
            branch_tag = form.cleaned_data.get('branch_tag', None)
            build_id = form.cleaned_data.get('build_id', None)
            commit_hash = form.cleaned_data.get('commit_hash', None)
            api_scan_configuration = form.cleaned_data.get('api_scan_configuration', None)
            service = form.cleaned_data.get('service', None)
            close_old_findings = form.cleaned_data.get('close_old_findings', None)
            environment_id = request.POST.get('environment', 'Development')
            environment = Development_Environment.objects.get(id=environment_id)
            group_by = form.cleaned_data.get('group_by', None)
            if scan and is_scan_file_too_large(scan):
                messages.add_message(request,
                                     messages.ERROR,
                                     "Report file is too large. Maximum supported size is {} MB".format(settings.SCAN_FILE_MAX_SIZE),
                                     extra_tags='alert-danger')
                # NOTE(review): passes the engagement object, not its id, to
                # reverse() -- confirm the URL pattern accepts this.
                return HttpResponseRedirect(reverse('import_scan_results', args=(engagement,)))
            if engagement is None:
                # Ad-hoc import into a product: create a throwaway engagement
                # spanning today only.
                engagement = Engagement()
                engagement.name = "AdHoc Import - " + strftime("%a, %d %b %Y %X", timezone.now().timetuple())
                engagement.threat_model = False
                engagement.api_test = False
                engagement.pen_test = False
                engagement.check_list = False
                engagement.target_start = timezone.now().date()
                engagement.target_end = timezone.now().date()
                engagement.product = product
                engagement.active = True
                engagement.status = 'In Progress'
                engagement.version = version
                engagement.branch_tag = branch_tag
                engagement.build_id = build_id
                engagement.commit_hash = commit_hash
                engagement.save()
            # push_to_jira = jira_helper.is_push_to_jira(new_finding, jform.cleaned_data.get('push_to_jira'))
            push_to_jira = push_all_jira_issues or (jform and jform.cleaned_data.get('push_to_jira'))
            error = False
            # Save newly added endpoints
            added_endpoints = save_endpoints_to_add(form.endpoints_to_add_list, engagement.product)
            try:
                importer = Importer()
                test, finding_count, closed_finding_count = importer.import_scan(scan, scan_type, engagement, user, environment, active=active, verified=verified, tags=tags,
                                                                                 minimum_severity=minimum_severity, endpoints_to_add=list(form.cleaned_data['endpoints']) + added_endpoints, scan_date=scan_date,
                                                                                 version=version, branch_tag=branch_tag, build_id=build_id, commit_hash=commit_hash, push_to_jira=push_to_jira,
                                                                                 close_old_findings=close_old_findings, group_by=group_by, api_scan_configuration=api_scan_configuration, service=service)
                message = f'{scan_type} processed a total of {finding_count} findings'
                if close_old_findings:
                    message = message + ' and closed %d findings' % (closed_finding_count)
                message = message + "."
                add_success_message_to_response(message)
            except Exception as e:
                logger.exception(e)
                add_error_message_to_response('An exception error occurred during the report import:%s' % str(e))
                error = True
            # Save the credential to the test
            if cred_form.is_valid():
                if cred_form.cleaned_data['cred_user']:
                    # Select the credential mapping object from the selected list and only allow if the credential is associated with the product
                    cred_user = Cred_Mapping.objects.filter(
                        pk=cred_form.cleaned_data['cred_user'].id,
                        engagement=eid).first()
                    new_f = cred_form.save(commit=False)
                    new_f.test = test
                    new_f.cred_id = cred_user.cred_id
                    new_f.save()
            if not error:
                # NOTE(review): pid is None when the eid route was used --
                # verify this redirect is only reached on the product route.
                return HttpResponseRedirect(
                    reverse('product_open_findings', args=(pid, )))
    # GET, or POST with validation/import errors: (re-)render the form.
    prod_id = None
    custom_breadcrumb = None
    title = "Import Scan Results"
    if engagement:
        prod_id = engagement.product.id
        product_tab = Product_Tab(prod_id, title=title, tab="engagements")
        product_tab.setEngagement(engagement)
    else:
        prod_id = pid
        custom_breadcrumb = {"", ""}
        product_tab = Product_Tab(prod_id, title=title, tab="findings")
    if jira_helper.get_jira_project(engagement_or_product):
        jform = JIRAImportScanForm(push_all=push_all_jira_issues, prefix='jiraform')
    form.fields['endpoints'].queryset = Endpoint.objects.filter(product__id=product_tab.product.id)
    form.fields['api_scan_configuration'].queryset = Product_API_Scan_Configuration.objects.filter(product__id=product_tab.product.id)
    return render(request,
        'dojo/import_scan_results.html',
        {'form': form,
         'product_tab': product_tab,
         'engagement_or_product': engagement_or_product,
         'custom_breadcrumb': custom_breadcrumb,
         'title': title,
         'cred_form': cred_form,
         'jform': jform,
         'scan_types': get_scan_types_sorted(),
         })
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def close_eng(request, eid):
    """Close an engagement, notify subscribers, and return to the product's
    engagement list."""
    eng = Engagement.objects.get(id=eid)
    close_engagement(eng)
    messages.add_message(
        request,
        messages.SUCCESS,
        'Engagement closed successfully.',
        extra_tags='alert-success')
    # Fixed: the original statement ended with a stray trailing comma, which
    # wrapped the call result in a discarded one-element tuple.
    create_notification(event='close_engagement',
                        title='Closure of %s' % eng.name,
                        description='The engagement "%s" was closed' % (eng.name),
                        engagement=eng, url=reverse('engagement_all_findings', args=(eng.id, )))
    return HttpResponseRedirect(reverse("view_engagements", args=(eng.product.id, )))
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def reopen_eng(request, eid):
    """Reopen a closed engagement, notify subscribers, and return to the
    product's engagement list."""
    eng = Engagement.objects.get(id=eid)
    reopen_engagement(eng)
    messages.add_message(
        request,
        messages.SUCCESS,
        'Engagement reopened successfully.',
        extra_tags='alert-success')
    # Fixed: the original statement ended with a stray trailing comma, which
    # wrapped the call result in a discarded one-element tuple.
    create_notification(event='other',
                        title='Reopening of %s' % eng.name,
                        engagement=eng,
                        description='The engagement "%s" was reopened' % (eng.name),
                        url=reverse('view_engagement', args=(eng.id, )))
    return HttpResponseRedirect(reverse("view_engagements", args=(eng.product.id, )))
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def complete_checklist(request, eid):
    """Create or update the checklist attached to an engagement.

    A single Check_List row exists per engagement; a POST either updates it
    in place (reusing its primary key) or creates it.
    """
    eng = get_object_or_404(Engagement, id=eid)
    try:
        checklist = Check_List.objects.get(engagement=eng)
    except Check_List.DoesNotExist:
        # No checklist yet for this engagement; the form starts empty.
        # (Was a bare "except:" with a dead "pass".)
        checklist = None
    add_breadcrumb(
        parent=eng,
        title="Complete checklist",
        top_level=False,
        request=request)
    if request.method == 'POST':
        tests = Test.objects.filter(engagement=eng)
        findings = Finding.objects.filter(test__in=tests).all()
        form = CheckForm(request.POST, instance=checklist, findings=findings)
        if form.is_valid():
            cl = form.save(commit=False)
            try:
                # Reuse the existing row's primary key so save() performs an
                # UPDATE; keep the try body minimal so only the lookup is
                # guarded (the original wrapped save() too).
                cl.id = Check_List.objects.get(engagement=eng).id
            except Check_List.DoesNotExist:
                cl.engagement = eng
            cl.save()
            form.save_m2m()
            messages.add_message(
                request,
                messages.SUCCESS,
                'Checklist saved.',
                extra_tags='alert-success')
            return HttpResponseRedirect(
                reverse('view_engagement', args=(eid, )))
    else:
        tests = Test.objects.filter(engagement=eng)
        findings = Finding.objects.filter(test__in=tests).all()
        form = CheckForm(instance=checklist, findings=findings)
    product_tab = Product_Tab(eng.product.id, title="Checklist", tab="engagements")
    product_tab.setEngagement(eng)
    return render(request, 'dojo/checklist.html', {
        'form': form,
        'product_tab': product_tab,
        'eid': eng.id,
        'findings': findings,
    })
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def add_risk_acceptance(request, eid, fid=None):
    """Create a risk acceptance on an engagement, optionally preselecting
    finding ``fid``; requires full risk acceptance on the product."""
    eng = get_object_or_404(Engagement, id=eid)
    finding = None
    if fid:
        finding = get_object_or_404(Finding, id=fid)
    if not eng.product.enable_full_risk_acceptance:
        raise PermissionDenied()
    if request.method == 'POST':
        form = RiskAcceptanceForm(request.POST, request.FILES)
        if form.is_valid():
            # first capture notes param as it cannot be saved directly as m2m
            notes = None
            if form.cleaned_data['notes']:
                notes = Notes(
                    entry=form.cleaned_data['notes'],
                    author=request.user,
                    date=timezone.now())
                notes.save()
            del form.cleaned_data['notes']
            try:
                # we sometimes see a weird exception here, but are unable to reproduce.
                # we add some logging in case it happens
                risk_acceptance = form.save()
            except Exception as e:
                logger.debug(vars(request.POST))
                logger.error(vars(form))
                logger.exception(e)
                raise
            # attach note to risk acceptance object now in database
            if notes:
                risk_acceptance.notes.add(notes)
            eng.risk_acceptance.add(risk_acceptance)
            findings = form.cleaned_data['accepted_findings']
            risk_acceptance = ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings)
            messages.add_message(
                request,
                messages.SUCCESS,
                'Risk acceptance saved.',
                extra_tags='alert-success')
            return redirect_to_return_url_or_else(request, reverse('view_engagement', args=(eid, )))
    else:
        # With no fid this yields the title "Accept: None".
        risk_acceptance_title_suggestion = 'Accept: %s' % finding
        form = RiskAcceptanceForm(initial={'owner': request.user, 'name': risk_acceptance_title_suggestion})
    # Offer only non-duplicate findings of this engagement that are not
    # already accepted.
    finding_choices = Finding.objects.filter(duplicate=False, test__engagement=eng).filter(NOT_ACCEPTED_FINDINGS_QUERY).order_by('title')
    form.fields['accepted_findings'].queryset = finding_choices
    if fid:
        form.fields['accepted_findings'].initial = {fid}
    product_tab = Product_Tab(eng.product.id, title="Risk Acceptance", tab="engagements")
    product_tab.setEngagement(eng)
    return render(request, 'dojo/add_risk_acceptance.html', {
        'eng': eng,
        'product_tab': product_tab,
        'form': form
    })
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def view_risk_acceptance(request, eid, raid):
    # Read-only variant; delegates to the shared view/edit implementation.
    return view_edit_risk_acceptance(request, eid=eid, raid=raid, edit_mode=False)
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def edit_risk_acceptance(request, eid, raid):
    # Editable variant; delegates to the shared view/edit implementation.
    return view_edit_risk_acceptance(request, eid=eid, raid=raid, edit_mode=True)
# will only be called by view_risk_acceptance and edit_risk_acceptance
def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
    """Render a risk acceptance and, in edit mode, process its POST
    sub-forms (decision, notes, proof replacement, finding add/remove).

    Several independent sub-forms share this endpoint; each POST branch is
    keyed off a distinctive field name present in request.POST.
    """
    risk_acceptance = get_object_or_404(Risk_Acceptance, pk=raid)
    eng = get_object_or_404(Engagement, pk=eid)
    if edit_mode and not eng.product.enable_full_risk_acceptance:
        raise PermissionDenied()
    risk_acceptance_form = None
    errors = False
    if request.method == 'POST':
        # deleting before instantiating the form otherwise django messes up and we end up with an empty path value
        if len(request.FILES) > 0:
            logger.debug('new proof uploaded')
            risk_acceptance.path.delete()
        if 'decision' in request.POST:
            old_expiration_date = risk_acceptance.expiration_date
            risk_acceptance_form = EditRiskAcceptanceForm(request.POST, request.FILES, instance=risk_acceptance)
            errors = errors or not risk_acceptance_form.is_valid()
            if not errors:
                logger.debug('path: %s', risk_acceptance_form.cleaned_data['path'])
                risk_acceptance_form.save()
                if risk_acceptance.expiration_date != old_expiration_date:
                    # risk acceptance was changed, check if risk acceptance needs to be reinstated and findings made accepted again
                    ra_helper.reinstate(risk_acceptance, old_expiration_date)
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    'Risk Acceptance saved successfully.',
                    extra_tags='alert-success')
        if 'entry' in request.POST:
            note_form = NoteForm(request.POST)
            errors = errors or not note_form.is_valid()
            if not errors:
                new_note = note_form.save(commit=False)
                new_note.author = request.user
                new_note.date = timezone.now()
                new_note.save()
                risk_acceptance.notes.add(new_note)
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    'Note added successfully.',
                    extra_tags='alert-success')
        if 'delete_note' in request.POST:
            note = get_object_or_404(Notes, pk=request.POST['delete_note_id'])
            # Only the note's author may delete it.
            if note.author.username == request.user.username:
                risk_acceptance.notes.remove(note)
                note.delete()
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    'Note deleted successfully.',
                    extra_tags='alert-success')
            else:
                messages.add_message(
                    request,
                    messages.ERROR,
                    "Since you are not the note's author, it was not deleted.",
                    extra_tags='alert-danger')
        if 'remove_finding' in request.POST:
            finding = get_object_or_404(
                Finding, pk=request.POST['remove_finding_id'])
            ra_helper.remove_finding_from_risk_acceptance(risk_acceptance, finding)
            messages.add_message(
                request,
                messages.SUCCESS,
                'Finding removed successfully from risk acceptance.',
                extra_tags='alert-success')
        if 'replace_file' in request.POST:
            replace_form = ReplaceRiskAcceptanceProofForm(
                request.POST, request.FILES, instance=risk_acceptance)
            errors = errors or not replace_form.is_valid()
            if not errors:
                replace_form.save()
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    'New Proof uploaded successfully.',
                    extra_tags='alert-success')
            else:
                logger.error(replace_form.errors)
        if 'add_findings' in request.POST:
            add_findings_form = AddFindingsRiskAcceptanceForm(
                request.POST, request.FILES, instance=risk_acceptance)
            errors = errors or not add_findings_form.is_valid()
            if not errors:
                findings = add_findings_form.cleaned_data['accepted_findings']
                ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings)
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    'Finding%s added successfully.' % ('s' if len(findings) > 1
                                                      else ''),
                    extra_tags='alert-success')
        if not errors:
            logger.debug('redirecting to return_url')
            return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
        else:
            # Fall through and re-render the page with the bound form(s).
            logger.error('errors found')
    else:
        if edit_mode:
            risk_acceptance_form = EditRiskAcceptanceForm(instance=risk_acceptance)
    note_form = NoteForm()
    replace_form = ReplaceRiskAcceptanceProofForm(instance=risk_acceptance)
    add_findings_form = AddFindingsRiskAcceptanceForm(instance=risk_acceptance)
    accepted_findings = risk_acceptance.accepted_findings.order_by('numerical_severity')
    fpage = get_page_items(request, accepted_findings, 15)
    unaccepted_findings = Finding.objects.filter(test__in=eng.test_set.all()) \
        .exclude(id__in=accepted_findings).order_by("title")
    add_fpage = get_page_items(request, unaccepted_findings, 10, 'apage')
    # Restrict the "add findings" choices to the current page only.
    add_findings_form.fields[
        "accepted_findings"].queryset = add_fpage.object_list
    product_tab = Product_Tab(eng.product.id, title="Risk Acceptance", tab="engagements")
    product_tab.setEngagement(eng)
    return render(
        request, 'dojo/view_risk_acceptance.html', {
            'risk_acceptance': risk_acceptance,
            'engagement': eng,
            'product_tab': product_tab,
            'accepted_findings': fpage,
            'notes': risk_acceptance.notes.all(),
            'eng': eng,
            'edit_mode': edit_mode,
            'risk_acceptance_form': risk_acceptance_form,
            'note_form': note_form,
            'replace_form': replace_form,
            'add_findings_form': add_findings_form,
            'request': request,
            'add_findings': add_fpage,
            'return_url': get_return_url(request),
        })
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def expire_risk_acceptance(request, eid, raid):
    """Expire a risk acceptance immediately and return to its detail view."""
    risk_acceptance = get_object_or_404(prefetch_for_expiration(Risk_Acceptance.objects.all()), pk=raid)
    # 404 if the engagement does not exist; the object itself was bound to an
    # unused local before, so the binding is dropped but the check kept.
    get_object_or_404(Engagement, pk=eid)
    ra_helper.expire_now(risk_acceptance)
    return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def reinstate_risk_acceptance(request, eid, raid):
    """Reinstate an expired risk acceptance; only allowed when the product
    has full risk acceptance enabled."""
    acceptance = get_object_or_404(prefetch_for_expiration(Risk_Acceptance.objects.all()), pk=raid)
    engagement = get_object_or_404(Engagement, pk=eid)
    if not engagement.product.enable_full_risk_acceptance:
        raise PermissionDenied()
    ra_helper.reinstate(acceptance, acceptance.expiration_date)
    return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def delete_risk_acceptance(request, eid, raid):
    """Delete a risk acceptance and return to its engagement page."""
    acceptance = get_object_or_404(Risk_Acceptance, pk=raid)
    engagement = get_object_or_404(Engagement, pk=eid)
    ra_helper.delete(engagement, acceptance)
    messages.add_message(
        request,
        messages.SUCCESS,
        'Risk acceptance deleted successfully.',
        extra_tags='alert-success')
    return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id, )))
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def download_risk_acceptance(request, eid, raid):
    """Stream the proof document attached to a risk acceptance as a
    file download; the file handle is closed by the streaming wrapper."""
    import mimetypes
    mimetypes.init()
    risk_acceptance = get_object_or_404(Risk_Acceptance, pk=raid)
    response = StreamingHttpResponse(
        FileIterWrapper(
            open(settings.MEDIA_ROOT + "/" + risk_acceptance.path.name, mode='rb')))
    response['Content-Disposition'] = 'attachment; filename="%s"' \
                                      % risk_acceptance.filename()
    mimetype, encoding = mimetypes.guess_type(risk_acceptance.path.name)
    if mimetype is None:
        # guess_type() returns None for unknown extensions; fall back to a
        # generic binary type instead of emitting "Content-Type: None".
        mimetype = 'application/octet-stream'
    response['Content-Type'] = mimetype
    return response
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def upload_threatmodel(request, eid):
    """Upload a threat model document for an engagement."""
    engagement = Engagement.objects.get(id=eid)
    add_breadcrumb(
        parent=engagement,
        title="Upload a threat model",
        top_level=False,
        request=request)
    form = UploadThreatForm()
    if request.method == 'POST':
        # Rebind the form to the submitted data; an invalid submission falls
        # through and re-renders with its errors.
        form = UploadThreatForm(request.POST, request.FILES)
        if form.is_valid():
            handle_uploaded_threat(request.FILES['file'], engagement)
            engagement.progress = 'other'
            engagement.threat_model = True
            engagement.save()
            messages.add_message(
                request,
                messages.SUCCESS,
                'Threat model saved.',
                extra_tags='alert-success')
            return HttpResponseRedirect(
                reverse('view_engagement', args=(eid, )))
    product_tab = Product_Tab(engagement.product.id, title="Upload Threat Model", tab="engagements")
    return render(request, 'dojo/up_threat.html', {
        'form': form,
        'product_tab': product_tab,
        'eng': engagement,
    })
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def view_threatmodel(request, eid):
    """Serve the engagement's threat model document."""
    engagement = get_object_or_404(Engagement, pk=eid)
    # FileResponse takes ownership of the file handle and closes it.
    return FileResponse(open(engagement.tmodel_path, 'rb'))
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def engagement_ics(request, eid):
    """Serve the engagement schedule as a downloadable iCalendar (.ics) event."""
    engagement = get_object_or_404(Engagement, id=eid)
    # Event spans the whole of the first and last day of the engagement.
    event_start = datetime.combine(engagement.target_start, datetime.min.time())
    event_end = datetime.combine(engagement.target_end, datetime.max.time())
    uid = "dojo_eng_%d_%d" % (engagement.id, engagement.product.id)
    summary = "Engagement: %s (%s)" % (engagement.name, engagement.product.name)
    description = (
        "Set aside for engagement %s, on product %s. Additional detail can be found at %s"
        % (engagement.name, engagement.product.name,
           request.build_absolute_uri(
               (reverse("view_engagement", args=(engagement.id, ))))))
    cal = get_cal_event(event_start, event_end, summary, description, uid)
    response = HttpResponse(content=cal.serialize())
    response['Content-Type'] = 'text/calendar'
    response['Content-Disposition'] = 'attachment; filename=%s.ics' % engagement.name
    return response
def get_list_index(list, index):
    """Return ``list[index]``, or None when the index is missing/invalid.

    The parameter name shadows the builtin ``list`` but is kept for
    backward compatibility with existing callers.
    """
    try:
        return list[index]
    except (IndexError, KeyError, TypeError):
        # Was "except Exception as e" with the binding unused; narrow to the
        # subscription failures we actually expect.
        return None
def get_engagements(request):
    """Resolve the engagement list referenced by an exported view URL.

    The query string carries the (optionally ``url=``-prefixed) path of an
    engagement listing. Returns the matching queryset together with the
    per-engagement test counts.

    Raises ValidationError when the query string is missing or does not
    point at an engagement view.
    """
    url = request.META.get('QUERY_STRING')
    if not url:
        raise ValidationError('Please use the export button when exporting engagements')
    if url.startswith('url='):
        url = url[4:]
    # Raw string: '\?' inside a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, SyntaxError in future versions).
    path_items = list(filter(None, re.split(r'/|\?', url)))
    if not path_items or path_items[0] != 'engagement':
        raise ValidationError('URL is not an engagement view')
    view = query = None
    if get_list_index(path_items, 1) in ['active', 'all']:
        view = get_list_index(path_items, 1)
        query = get_list_index(path_items, 2)
    else:
        # No explicit view segment: default to the active listing.
        view = 'active'
        query = get_list_index(path_items, 1)
    request.GET = QueryDict(query)
    engagements = get_filtered_engagements(request, view).qs
    test_counts = get_test_counts(engagements)
    return engagements, test_counts
def get_excludes():
    """Engagement attributes that are omitted from the CSV/Excel exports."""
    excluded_attributes = [
        'is_ci_cd',
        'jira_issue',
        'jira_project',
        'objects',
        'unaccepted_open_findings',
    ]
    return excluded_attributes
def get_foreign_keys():
    """Engagement attributes exported via str() because they are related objects."""
    related_attributes = [
        'build_server',
        'lead',
        'orchestration_engine',
        'preset',
        'product',
        'report_type',
        'requester',
        'source_code_management_server',
    ]
    return related_attributes
def csv_export(request):
    """Export the filtered engagements (plus per-engagement test counts) as CSV."""
    engagements, test_counts = get_engagements(request)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=engagements.csv'
    writer = csv.writer(response)
    first_row = True
    for engagement in engagements:
        if first_row:
            # Header row: every public, non-callable attribute name except
            # the excludes, followed by a synthetic 'tests' column.
            fields = []
            for key in dir(engagement):
                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
                    fields.append(key)
            fields.append('tests')
            writer.writerow(fields)
            first_row = False
        if not first_row:
            fields = []
            for key in dir(engagement):
                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
                    value = engagement.__dict__.get(key)
                    # Related objects are exported by their str() form.
                    if key in get_foreign_keys() and getattr(engagement, key):
                        value = str(getattr(engagement, key))
                    if value and isinstance(value, str):
                        # Keep each engagement on a single physical CSV line.
                        value = value.replace('\n', ' NEWLINE ').replace('\r', '')
                    fields.append(value)
            fields.append(test_counts.get(engagement.id, 0))
            writer.writerow(fields)
    return response
def excel_export(request):
    """Export the filtered engagements (plus per-engagement test counts) as
    an .xlsx workbook."""
    engagements, test_counts = get_engagements(request)
    workbook = Workbook()
    workbook.iso_dates = True
    worksheet = workbook.active
    worksheet.title = 'Engagements'
    font_bold = Font(bold=True)
    row_num = 1
    for engagement in engagements:
        if row_num == 1:
            # Header row: bold attribute names plus a synthetic 'tests' column.
            col_num = 1
            for key in dir(engagement):
                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
                    cell = worksheet.cell(row=row_num, column=col_num, value=key)
                    cell.font = font_bold
                    col_num += 1
            cell = worksheet.cell(row=row_num, column=col_num, value='tests')
            cell.font = font_bold
            row_num = 2
        if row_num > 1:
            col_num = 1
            for key in dir(engagement):
                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
                    value = engagement.__dict__.get(key)
                    # Related objects are exported by their str() form.
                    if key in get_foreign_keys() and getattr(engagement, key):
                        value = str(getattr(engagement, key))
                    if value and isinstance(value, datetime):
                        # Strip the tzinfo: openpyxl rejects aware datetimes.
                        value = value.replace(tzinfo=None)
                    worksheet.cell(row=row_num, column=col_num, value=value)
                    col_num += 1
            worksheet.cell(row=row_num, column=col_num, value=test_counts.get(engagement.id, 0))
            row_num += 1
    # Write the workbook to a temp file and hand its bytes to the response.
    with NamedTemporaryFile() as tmp:
        workbook.save(tmp.name)
        tmp.seek(0)
        stream = tmp.read()
    response = HttpResponse(
        content=stream,
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
    response['Content-Disposition'] = 'attachment; filename=engagements.xlsx'
    return response
| true | true |
f71c1eaf10e717ab23c28074e182c01dfdc1b475 | 5,366 | py | Python | distributed/distributed/db.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 71 | 2016-11-13T03:26:45.000Z | 2022-02-22T08:13:04.000Z | distributed/distributed/db.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 3 | 2021-07-01T08:09:05.000Z | 2022-01-28T03:38:36.000Z | distributed/distributed/db.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 36 | 2016-12-13T11:37:56.000Z | 2021-11-11T12:20:10.000Z | # Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import json
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.inspection import inspect
db = SQLAlchemy(session_options=dict(autoflush=True))
ALEMBIC_VERSION = "4b86bc0d40aa"
class Serializer(object):
    """Mixin that serializes a query result object to a plain dict."""
    def to_dict(self):
        """Map every inspected attribute name to its current value."""
        return {
            attr: getattr(self, attr)
            for attr in inspect(self).attrs.keys()
        }
class StringList(db.TypeDecorator):
    """Store a Python list of strings as one comma-separated text column."""
    impl = db.Text
    def process_bind_param(self, value, dialect):
        # Python list -> "a, b, c" on the way into the database.
        separator = ", "
        return separator.join(value)
    def process_result_value(self, value, dialect):
        # "a, b, c" -> Python list on the way out of the database.
        separator = ", "
        return value.split(separator)
class JsonType(db.TypeDecorator):
    """Store an arbitrary JSON-serializable value as a text column."""
    impl = db.Text
    def process_bind_param(self, value, dialect):
        # Serialize to JSON text on the way into the database.
        encoded = json.dumps(value)
        return encoded
    def process_result_value(self, value, dialect):
        # Parse the JSON text on the way out of the database.
        decoded = json.loads(value)
        return decoded
class Node(db.Model):
    """Cuckoo node database model."""
    id = db.Column(db.Integer, primary_key=True)
    # Unique, human-readable identifier for the node.
    name = db.Column(db.Text, nullable=False, unique=True)
    url = db.Column(db.Text, nullable=False)
    mode = db.Column(db.Text, nullable=False)
    # Disabled nodes are kept in the database but marked inactive.
    enabled = db.Column(db.Boolean, nullable=False)
    # Machines registered on this node (lazy query, not loaded eagerly).
    machines = db.relationship("Machine", backref="node", lazy="dynamic")
    def __init__(self, name, url, mode, enabled=True):
        self.name = name
        self.url = url
        self.mode = mode
        self.enabled = enabled
class Machine(db.Model):
    """Machine database model related to a Cuckoo node."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False)
    platform = db.Column(db.Text, nullable=False)
    # Stored as one comma-separated text column via the StringList decorator.
    tags = db.Column(StringList)
    node_id = db.Column(db.Integer, db.ForeignKey("node.id"))
    def __init__(self, name, platform, tags):
        self.name = name
        self.platform = platform
        self.tags = tags
class Task(db.Model, Serializer):
    """Analysis task database model."""
    # Task lifecycle states (values of the task_status enum below).
    PENDING = "pending"
    ASSIGNED = "assigned"
    PROCESSING = "processing"
    FINISHED = "finished"
    DELETED = "deleted"
    task_status = db.Enum(PENDING, ASSIGNED, PROCESSING, FINISHED, DELETED,
                          name="task_status_type")
    id = db.Column(db.Integer, primary_key=True)
    path = db.Column(db.Text)
    filename = db.Column(db.Text)
    package = db.Column(db.Text)
    timeout = db.Column(db.Integer)
    priority = db.Column(db.Integer)
    options = db.Column(db.Text)
    machine = db.Column(db.Text)
    platform = db.Column(db.Text)
    tags = db.Column(db.Text)
    custom = db.Column(db.Text)
    owner = db.Column(db.Text)
    memory = db.Column(db.Text)
    clock = db.Column(db.Integer)
    enforce_timeout = db.Column(db.Text)
    # Cuckoo node and Task ID this has been submitted to.
    node_id = db.Column(db.Integer, db.ForeignKey("node.id"))
    task_id = db.Column(db.Integer)
    status = db.Column(task_status, nullable=False)
    # Timestamps for this task. When it was submitted, when it was delegated
    # to a Cuckoo node, when the analysis started, and when we retrieved
    # the report.
    submitted = db.Column(db.DateTime(timezone=False), default=datetime.now)
    delegated = db.Column(db.DateTime(timezone=False), nullable=True)
    started = db.Column(db.DateTime(timezone=False), nullable=True)
    completed = db.Column(db.DateTime(timezone=False), nullable=True)
    # The trailing comma is intentional: __table_args__ must be a tuple.
    __table_args__ = db.Index("ix_node_task", node_id, task_id),
    def __init__(self, path=None, filename=None, package=None, timeout=None,
                 priority=None, options=None, machine=None, platform=None,
                 tags=None, custom=None, owner=None, memory=None, clock=None,
                 enforce_timeout=None, node_id=None, task_id=None,
                 status=PENDING):
        self.path = path
        self.filename = filename
        self.package = package
        self.timeout = timeout
        self.priority = priority
        self.options = options
        self.machine = machine
        self.platform = platform
        self.tags = tags
        self.custom = custom
        self.owner = owner
        self.memory = memory
        self.clock = clock
        self.enforce_timeout = enforce_timeout
        self.node_id = node_id
        self.task_id = task_id
        self.status = status
class NodeStatus(db.Model, Serializer):
    """Node status monitoring database model."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False)
    # Indexed (index=True); presumably status samples are queried by time.
    timestamp = db.Column(db.DateTime(timezone=False), nullable=False,
                          index=True)
    # Arbitrary status payload, serialized to JSON text via JsonType.
    status = db.Column(JsonType, nullable=False)
    def __init__(self, name, timestamp, status):
        self.name = name
        self.timestamp = timestamp
        self.status = status
class AlembicVersion(db.Model):
    """Support model for keeping track of the alembic revision identifier."""
    # Expected revision for this code base (module-level ALEMBIC_VERSION).
    VERSION = ALEMBIC_VERSION
    version_num = db.Column(db.Text, nullable=False, primary_key=True)
    def __init__(self, version_num):
        self.version_num = version_num
| 34.619355 | 77 | 0.660268 |
import json
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.inspection import inspect
db = SQLAlchemy(session_options=dict(autoflush=True))
ALEMBIC_VERSION = "4b86bc0d40aa"
class Serializer(object):
    """Mixin that serializes a query result object to a plain dict."""
    def to_dict(self):
        # Collect every inspected attribute name and its current value.
        ret = {}
        for key in inspect(self).attrs.keys():
            ret[key] = getattr(self, key)
        return ret
class StringList(db.TypeDecorator):
    """Store a Python list of strings as one comma-separated text column."""
    impl = db.Text
    def process_bind_param(self, value, dialect):
        # Python list -> "a, b, c" on the way into the database.
        return ", ".join(value)
    def process_result_value(self, value, dialect):
        # "a, b, c" -> Python list on the way out of the database.
        return value.split(", ")
class JsonType(db.TypeDecorator):
    """Store an arbitrary JSON-serializable value as a text column."""
    impl = db.Text
    def process_bind_param(self, value, dialect):
        # Serialize to JSON text on the way into the database.
        return json.dumps(value)
    def process_result_value(self, value, dialect):
        # Parse the JSON text on the way out of the database.
        return json.loads(value)
class Node(db.Model):
    """Cuckoo node database model."""
    id = db.Column(db.Integer, primary_key=True)
    # Unique, human-readable identifier for the node.
    name = db.Column(db.Text, nullable=False, unique=True)
    url = db.Column(db.Text, nullable=False)
    mode = db.Column(db.Text, nullable=False)
    # Disabled nodes are kept in the database but marked inactive.
    enabled = db.Column(db.Boolean, nullable=False)
    # Machines registered on this node (lazy query, not loaded eagerly).
    machines = db.relationship("Machine", backref="node", lazy="dynamic")
    def __init__(self, name, url, mode, enabled=True):
        self.name = name
        self.url = url
        self.mode = mode
        self.enabled = enabled
class Machine(db.Model):
    """Machine database model related to a Cuckoo node."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False)
    platform = db.Column(db.Text, nullable=False)
    # Stored as one comma-separated text column via the StringList decorator.
    tags = db.Column(StringList)
    node_id = db.Column(db.Integer, db.ForeignKey("node.id"))
    def __init__(self, name, platform, tags):
        self.name = name
        self.platform = platform
        self.tags = tags
class Task(db.Model, Serializer):
    """Analysis task database model."""
    # Task lifecycle states (values of the task_status enum below).
    PENDING = "pending"
    ASSIGNED = "assigned"
    PROCESSING = "processing"
    FINISHED = "finished"
    DELETED = "deleted"
    task_status = db.Enum(PENDING, ASSIGNED, PROCESSING, FINISHED, DELETED,
                          name="task_status_type")
    id = db.Column(db.Integer, primary_key=True)
    path = db.Column(db.Text)
    filename = db.Column(db.Text)
    package = db.Column(db.Text)
    timeout = db.Column(db.Integer)
    priority = db.Column(db.Integer)
    options = db.Column(db.Text)
    machine = db.Column(db.Text)
    platform = db.Column(db.Text)
    tags = db.Column(db.Text)
    custom = db.Column(db.Text)
    owner = db.Column(db.Text)
    memory = db.Column(db.Text)
    clock = db.Column(db.Integer)
    enforce_timeout = db.Column(db.Text)
    # Cuckoo node and task id this task has been submitted to.
    node_id = db.Column(db.Integer, db.ForeignKey("node.id"))
    task_id = db.Column(db.Integer)
    status = db.Column(task_status, nullable=False)
    # Timestamps: submitted, delegated to a node, analysis started,
    # and report retrieved.
    submitted = db.Column(db.DateTime(timezone=False), default=datetime.now)
    delegated = db.Column(db.DateTime(timezone=False), nullable=True)
    started = db.Column(db.DateTime(timezone=False), nullable=True)
    completed = db.Column(db.DateTime(timezone=False), nullable=True)
    # The trailing comma is intentional: __table_args__ must be a tuple.
    __table_args__ = db.Index("ix_node_task", node_id, task_id),
    def __init__(self, path=None, filename=None, package=None, timeout=None,
                 priority=None, options=None, machine=None, platform=None,
                 tags=None, custom=None, owner=None, memory=None, clock=None,
                 enforce_timeout=None, node_id=None, task_id=None,
                 status=PENDING):
        self.path = path
        self.filename = filename
        self.package = package
        self.timeout = timeout
        self.priority = priority
        self.options = options
        self.machine = machine
        self.platform = platform
        self.tags = tags
        self.custom = custom
        self.owner = owner
        self.memory = memory
        self.clock = clock
        self.enforce_timeout = enforce_timeout
        self.node_id = node_id
        self.task_id = task_id
        self.status = status
class NodeStatus(db.Model, Serializer):
    """A point-in-time status snapshot reported by a node."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False)
    timestamp = db.Column(db.DateTime(timezone=False), nullable=False,
                          index=True)
    # Arbitrary status payload, stored as JSON text (see JsonType).
    status = db.Column(JsonType, nullable=False)
    def __init__(self, name, timestamp, status):
        self.name = name
        self.timestamp = timestamp
        self.status = status
class AlembicVersion(db.Model):
    """Mirror of Alembic's version table; tracks the applied migration."""
    # Expected revision for this code base (compared against the stored row).
    VERSION = ALEMBIC_VERSION
    version_num = db.Column(db.Text, nullable=False, primary_key=True)
    def __init__(self, version_num):
        self.version_num = version_num
| true | true |
f71c1f3f5d963f1a9f0cad8f49f08a8a8952fb0f | 7,941 | py | Python | CTFd/config.py | mayoneko/CTFd | 825190ba3aef10f9cdc736f4d6f8ea2a5c8590ac | [
"Apache-2.0"
] | 2 | 2019-06-19T07:11:28.000Z | 2019-06-21T05:30:07.000Z | CTFd/config.py | mayoneko/CTFd | 825190ba3aef10f9cdc736f4d6f8ea2a5c8590ac | [
"Apache-2.0"
] | null | null | null | CTFd/config.py | mayoneko/CTFd | 825190ba3aef10f9cdc736f4d6f8ea2a5c8590ac | [
"Apache-2.0"
] | null | null | null | import os
''' GENERATE SECRET KEY '''
if not os.getenv('SECRET_KEY'):
    # NOTE(review): when SECRET_KEY *is* set in the environment this whole
    # block is skipped and the module-level name ``key`` is never defined;
    # Config relies on ``os.getenv('SECRET_KEY') or key`` short-circuiting
    # so the undefined name is never evaluated in that case.
    # Attempt to read the secret from the secret file
    # This will fail if the secret has not been written
    try:
        with open('.ctfd_secret_key', 'rb') as secret:
            key = secret.read()
    except (OSError, IOError):
        key = None
    if not key:
        # No persisted secret: generate 64 random bytes and try to persist
        # them so the key survives restarts.
        key = os.urandom(64)
        # Attempt to write the secret file
        # This will fail if the filesystem is read-only
        try:
            with open('.ctfd_secret_key', 'wb') as secret:
                secret.write(key)
                secret.flush()
        except (OSError, IOError):
            pass
''' SERVER SETTINGS '''
class Config(object):
    """
    CTFd Configuration Object
    """

    '''
    === REQUIRED SETTINGS ===

    SECRET_KEY:
        The secret value used to creation sessions and sign strings. This should be set to a random string. In the
        interest of ease, CTFd will automatically create a secret key file for you. If you wish to add this secret key
        to your instance you should hard code this value to a random static value.

        You can also remove .ctfd_secret_key from the .gitignore file and commit this file into whatever repository
        you are using.

        http://flask.pocoo.org/docs/latest/quickstart/#sessions

    SQLALCHEMY_DATABASE_URI:
        The URI that specifies the username, password, hostname, port, and database of the server
        used to hold the CTFd database.

        e.g. mysql+pymysql://root:<YOUR_PASSWORD_HERE>@localhost/ctfd

    CACHE_TYPE:
        Specifies how CTFd should cache configuration values. If CACHE_TYPE is set to 'redis', CTFd will make use
        of the REDIS_URL specified in environment variables. You can also choose to hardcode the REDIS_URL here.

        It is important that you specify some sort of cache as CTFd uses it to store values received from the database. If
        no cache is specified, CTFd will default to a simple per-worker cache. The simple cache cannot be effectively used
        with multiple workers.

        REDIS_URL is the URL to connect to a Redis server.

        e.g. redis://user:password@localhost:6379

        http://pythonhosted.org/Flask-Caching/#configuring-flask-caching
    '''
    SECRET_KEY = os.getenv('SECRET_KEY') or key
    DATABASE_URL = os.getenv('DATABASE_URL') or 'sqlite:///{}/ctfd.db'.format(os.path.dirname(os.path.abspath(__file__)))
    REDIS_URL = os.getenv('REDIS_URL')

    SQLALCHEMY_DATABASE_URI = DATABASE_URL
    CACHE_REDIS_URL = REDIS_URL
    if CACHE_REDIS_URL:
        CACHE_TYPE = 'redis'
    else:
        # Fall back to an on-disk cache so multiple workers share one store.
        CACHE_TYPE = 'filesystem'
        CACHE_DIR = os.path.join(os.path.dirname(__file__), os.pardir, '.data', 'filesystem_cache')

    '''
    === SECURITY ===

    SESSION_COOKIE_HTTPONLY:
        Controls if cookies should be set with the HttpOnly flag.

    PERMANENT_SESSION_LIFETIME:
        The lifetime of a session. The default is 604800 seconds.

    TRUSTED_PROXIES:
        Defines a set of regular expressions used for finding a user's IP address if the CTFd instance
        is behind a proxy. If you are running a CTF and users are on the same network as you, you may choose to remove
        some proxies from the list.

        CTFd only uses IP addresses for cursory tracking purposes. It is ill-advised to do anything complicated based
        solely on IP addresses unless you know what you are doing.
    '''
    # NOTE(review): the ``not os.getenv(...)`` pattern means that setting the
    # env var to ANY non-empty string -- including "false" or "0" -- disables
    # the feature. Verify this truthiness quirk is intentional.
    SESSION_COOKIE_HTTPONLY = (not os.getenv("SESSION_COOKIE_HTTPONLY"))  # Defaults True
    PERMANENT_SESSION_LIFETIME = int(os.getenv("PERMANENT_SESSION_LIFETIME") or 604800)  # 7 days in seconds
    TRUSTED_PROXIES = [
        r'^127\.0\.0\.1$',
        # Remove the following proxies if you do not trust the local network
        # For example if you are running a CTF on your laptop and the teams are
        # all on the same network
        r'^::1$',
        r'^fc00:',
        r'^10\.',
        r'^172\.(1[6-9]|2[0-9]|3[0-1])\.',
        r'^192\.168\.'
    ]

    '''
    === EMAIL ===

    MAILFROM_ADDR:
        The email address that emails are sent from if not overridden in the configuration panel.

    MAIL_SERVER:
        The mail server that emails are sent from if not overriden in the configuration panel.

    MAIL_PORT:
        The mail port that emails are sent from if not overriden in the configuration panel.
    '''
    MAILFROM_ADDR = os.getenv("MAILFROM_ADDR") or "noreply@ctfd.io"
    MAIL_SERVER = os.getenv("MAIL_SERVER") or None
    MAIL_PORT = os.getenv("MAIL_PORT")
    MAIL_USERNAME = os.getenv("MAIL_USERNAME")
    MAIL_PASSWORD = os.getenv("MAIL_PASSWORD")
    # NOTE(review): env values are strings, so e.g. MAIL_TLS="False" is truthy
    # and enables TLS. Only an unset/empty variable yields False here.
    MAIL_TLS = os.getenv("MAIL_TLS") or False
    MAIL_SSL = os.getenv("MAIL_SSL") or False
    MAILGUN_API_KEY = os.getenv("MAILGUN_API_KEY")
    MAILGUN_BASE_URL = os.getenv("MAILGUN_BASE_URL")

    '''
    === LOGS ===

    LOG_FOLDER:
        The location where logs are written. These are the logs for CTFd key submissions, registrations, and logins.
        The default location is the CTFd/logs folder.
    '''
    LOG_FOLDER = os.getenv('LOG_FOLDER') or os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs')

    '''
    === UPLOADS ===

    UPLOAD_PROVIDER:
        Specifies the service that CTFd should use to store files.

    UPLOAD_FOLDER:
        The location where files are uploaded. The default destination is the CTFd/uploads folder.

    AWS_ACCESS_KEY_ID:
        AWS access token used to authenticate to the S3 bucket.

    AWS_SECRET_ACCESS_KEY:
        AWS secret token used to authenticate to the S3 bucket.

    AWS_S3_BUCKET:
        The unique identifier for your S3 bucket.

    AWS_S3_ENDPOINT_URL:
        A URL pointing to a custom S3 implementation.
    '''
    UPLOAD_PROVIDER = os.getenv('UPLOAD_PROVIDER') or 'filesystem'
    UPLOAD_FOLDER = os.getenv('UPLOAD_FOLDER') or os.path.join(os.path.dirname(os.path.abspath(__file__)), 'uploads')
    if UPLOAD_PROVIDER == 's3':
        AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
        AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
        AWS_S3_BUCKET = os.getenv('AWS_S3_BUCKET')
        AWS_S3_ENDPOINT_URL = os.getenv('AWS_S3_ENDPOINT_URL')

    '''
    === OPTIONAL ===

    REVERSE_PROXY:
        Specifies whether CTFd is behind a reverse proxy or not. Set to True if using a reverse proxy like nginx.

    TEMPLATES_AUTO_RELOAD:
        Specifies whether Flask should check for modifications to templates and reload them automatically.

    SQLALCHEMY_TRACK_MODIFICATIONS:
        Automatically disabled to suppress warnings and save memory. You should only enable this if you need it.

    UPDATE_CHECK:
        Specifies whether or not CTFd will check whether or not there is a new version of CTFd

    APPLICATION_ROOT:
        Specifies what path CTFd is mounted under. It can be used to run CTFd in a subdirectory.
        Example: /ctfd
    '''
    # NOTE(review): same truthiness quirks as above -- REVERSE_PROXY="false"
    # is truthy, and setting any value for the three "(not os.getenv(...))"
    # flags below turns them off.
    REVERSE_PROXY = os.getenv("REVERSE_PROXY") or False
    TEMPLATES_AUTO_RELOAD = (not os.getenv("TEMPLATES_AUTO_RELOAD"))  # Defaults True
    SQLALCHEMY_TRACK_MODIFICATIONS = (not os.getenv("SQLALCHEMY_TRACK_MODIFICATIONS"))  # Defaults True
    UPDATE_CHECK = (not os.getenv("UPDATE_CHECK"))  # Defaults True
    APPLICATION_ROOT = os.getenv('APPLICATION_ROOT') or '/'

    '''
    === OAUTH ===

    MajorLeagueCyber Integration
        Register an event at https://majorleaguecyber.org/ and use the Client ID and Client Secret here
    '''
    OAUTH_CLIENT_ID = os.getenv("OAUTH_CLIENT_ID")
    OAUTH_CLIENT_SECRET = os.getenv("OAUTH_CLIENT_SECRET")
class TestingConfig(Config):
    """Overrides for the test suite: in-memory DB, fixed key, no update check."""
    SECRET_KEY = 'AAAAAAAAAAAAAAAAAAAA'
    PRESERVE_CONTEXT_ON_EXCEPTION = False
    TESTING = True
    DEBUG = True
    # 'sqlite://' is an in-memory SQLite database.
    SQLALCHEMY_DATABASE_URI = os.getenv('TESTING_DATABASE_URL') or 'sqlite://'
    SERVER_NAME = 'localhost'
    UPDATE_CHECK = False
    REDIS_URL = None
    CACHE_TYPE = 'simple'
    SAFE_MODE = True
| 37.107477 | 122 | 0.679008 | import os
if not os.getenv('SECRET_KEY'):
try:
with open('.ctfd_secret_key', 'rb') as secret:
key = secret.read()
except (OSError, IOError):
key = None
if not key:
key = os.urandom(64)
try:
with open('.ctfd_secret_key', 'wb') as secret:
secret.write(key)
secret.flush()
except (OSError, IOError):
pass
class Config(object):
SECRET_KEY = os.getenv('SECRET_KEY') or key
DATABASE_URL = os.getenv('DATABASE_URL') or 'sqlite:///{}/ctfd.db'.format(os.path.dirname(os.path.abspath(__file__)))
REDIS_URL = os.getenv('REDIS_URL')
SQLALCHEMY_DATABASE_URI = DATABASE_URL
CACHE_REDIS_URL = REDIS_URL
if CACHE_REDIS_URL:
CACHE_TYPE = 'redis'
else:
CACHE_TYPE = 'filesystem'
CACHE_DIR = os.path.join(os.path.dirname(__file__), os.pardir, '.data', 'filesystem_cache')
SESSION_COOKIE_HTTPONLY = (not os.getenv("SESSION_COOKIE_HTTPONLY"))
PERMANENT_SESSION_LIFETIME = int(os.getenv("PERMANENT_SESSION_LIFETIME") or 604800)
TRUSTED_PROXIES = [
r'^127\.0\.0\.1$',
r'^::1$',
r'^fc00:',
r'^10\.',
r'^172\.(1[6-9]|2[0-9]|3[0-1])\.',
r'^192\.168\.'
]
MAILFROM_ADDR = os.getenv("MAILFROM_ADDR") or "noreply@ctfd.io"
MAIL_SERVER = os.getenv("MAIL_SERVER") or None
MAIL_PORT = os.getenv("MAIL_PORT")
MAIL_USERNAME = os.getenv("MAIL_USERNAME")
MAIL_PASSWORD = os.getenv("MAIL_PASSWORD")
MAIL_TLS = os.getenv("MAIL_TLS") or False
MAIL_SSL = os.getenv("MAIL_SSL") or False
MAILGUN_API_KEY = os.getenv("MAILGUN_API_KEY")
MAILGUN_BASE_URL = os.getenv("MAILGUN_BASE_URL")
LOG_FOLDER = os.getenv('LOG_FOLDER') or os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs')
UPLOAD_PROVIDER = os.getenv('UPLOAD_PROVIDER') or 'filesystem'
UPLOAD_FOLDER = os.getenv('UPLOAD_FOLDER') or os.path.join(os.path.dirname(os.path.abspath(__file__)), 'uploads')
if UPLOAD_PROVIDER == 's3':
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_S3_BUCKET = os.getenv('AWS_S3_BUCKET')
AWS_S3_ENDPOINT_URL = os.getenv('AWS_S3_ENDPOINT_URL')
REVERSE_PROXY = os.getenv("REVERSE_PROXY") or False
TEMPLATES_AUTO_RELOAD = (not os.getenv("TEMPLATES_AUTO_RELOAD"))
SQLALCHEMY_TRACK_MODIFICATIONS = (not os.getenv("SQLALCHEMY_TRACK_MODIFICATIONS"))
UPDATE_CHECK = (not os.getenv("UPDATE_CHECK"))
APPLICATION_ROOT = os.getenv('APPLICATION_ROOT') or '/'
OAUTH_CLIENT_ID = os.getenv("OAUTH_CLIENT_ID")
OAUTH_CLIENT_SECRET = os.getenv("OAUTH_CLIENT_SECRET")
class TestingConfig(Config):
SECRET_KEY = 'AAAAAAAAAAAAAAAAAAAA'
PRESERVE_CONTEXT_ON_EXCEPTION = False
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.getenv('TESTING_DATABASE_URL') or 'sqlite://'
SERVER_NAME = 'localhost'
UPDATE_CHECK = False
REDIS_URL = None
CACHE_TYPE = 'simple'
SAFE_MODE = True
| true | true |
f71c20b62ca77df8f80354e21bf08002ca39890b | 984 | py | Python | src/jsm/api/mixins/infos.py | charbonnierg/jetstream.py | 4d8dc56fc6953d0a28d207b9b162c6f8d0080d37 | [
"Apache-2.0"
] | 8 | 2021-07-26T10:54:10.000Z | 2021-12-06T08:41:02.000Z | src/jsm/api/mixins/infos.py | charbonnierg/jetstream.py | 4d8dc56fc6953d0a28d207b9b162c6f8d0080d37 | [
"Apache-2.0"
] | 3 | 2021-08-09T10:25:39.000Z | 2021-12-06T08:40:41.000Z | src/jsm/api/mixins/infos.py | charbonnierg/jetstream.py | 4d8dc56fc6953d0a28d207b9b162c6f8d0080d37 | [
"Apache-2.0"
] | 3 | 2021-08-22T01:55:11.000Z | 2021-09-13T13:51:42.000Z | # Copyright 2021 - Guillaume Charbonnier
# Licensed under the Apache License, Version 2.0 (the "License");
# http://www.apache.org/licenses/LICENSE-2.0
from __future__ import annotations
from typing import Optional, Union
from jsm.models.account_info import IoNatsJetstreamApiV1AccountInfoResponse
from jsm.models.errors import IoNatsJetstreamApiV1ErrorResponse
from .request_reply import BaseJetStreamRequestReplyMixin, JetStreamResponse
class AccountInfosMixin(BaseJetStreamRequestReplyMixin):
    """Mixin exposing the JetStream account ``INFO`` API request."""

    async def account_info(
        self,
        timeout: Optional[float] = None,
        raise_on_error: Optional[bool] = None,
    ) -> Union[
        IoNatsJetstreamApiV1AccountInfoResponse,
        IoNatsJetstreamApiV1ErrorResponse,
    ]:
        """Request JetStream account information.

        Args:
            timeout: Seconds to wait for the reply before giving up.
            raise_on_error: If truthy, raise on an error response instead of
                returning it.

        Returns:
            The account info response, or an error response when the request
            fails and ``raise_on_error`` is falsy.
        """
        return await self._jetstream_request(
            "INFO",
            None,
            JetStreamResponse[IoNatsJetstreamApiV1AccountInfoResponse],
            raise_on_error=raise_on_error,
            timeout=timeout,
        )
| 32.8 | 76 | 0.729675 |
from __future__ import annotations
from typing import Optional, Union
from jsm.models.account_info import IoNatsJetstreamApiV1AccountInfoResponse
from jsm.models.errors import IoNatsJetstreamApiV1ErrorResponse
from .request_reply import BaseJetStreamRequestReplyMixin, JetStreamResponse
class AccountInfosMixin(BaseJetStreamRequestReplyMixin):
async def account_info(
self,
timeout: Optional[float] = None,
raise_on_error: Optional[bool] = None,
) -> Union[
IoNatsJetstreamApiV1AccountInfoResponse,
IoNatsJetstreamApiV1ErrorResponse,
]:
return await self._jetstream_request(
"INFO",
None,
JetStreamResponse[IoNatsJetstreamApiV1AccountInfoResponse],
raise_on_error=raise_on_error,
timeout=timeout,
)
| true | true |
f71c21777d66133ec1da30715a6556ddee5fd447 | 467 | py | Python | integration/keeper_secrets_manager_cli/keeper_secrets_manager_cli/exception.py | Keeper-Security/secrets-manager | 0044dec7f323ae2e531f52ef2435bd7205949fe9 | [
"MIT"
] | 9 | 2022-01-10T18:39:45.000Z | 2022-03-06T03:51:41.000Z | integration/keeper_secrets_manager_cli/keeper_secrets_manager_cli/exception.py | Keeper-Security/secrets-manager | 0044dec7f323ae2e531f52ef2435bd7205949fe9 | [
"MIT"
] | 10 | 2022-01-27T00:51:05.000Z | 2022-03-30T08:42:01.000Z | integration/keeper_secrets_manager_cli/keeper_secrets_manager_cli/exception.py | Keeper-Security/secrets-manager | 0044dec7f323ae2e531f52ef2435bd7205949fe9 | [
"MIT"
] | 6 | 2021-12-17T18:59:26.000Z | 2022-03-28T16:47:28.000Z | import click
from colorama import Fore, Style
class KsmCliException(click.ClickException):
in_a_shell = False
def colorize(self):
if KsmCliException.in_a_shell is False:
return str(self.message)
else:
return Fore.RED + str(self.message) + Style.RESET_ALL
def format_message(self):
return self.colorize()
def __str__(self):
return self.colorize()
class KsmRecordSyntaxException:
pass
| 19.458333 | 65 | 0.663812 | import click
from colorama import Fore, Style
class KsmCliException(click.ClickException):
in_a_shell = False
def colorize(self):
if KsmCliException.in_a_shell is False:
return str(self.message)
else:
return Fore.RED + str(self.message) + Style.RESET_ALL
def format_message(self):
return self.colorize()
def __str__(self):
return self.colorize()
class KsmRecordSyntaxException:
pass
| true | true |
f71c235a81782aee7af143ed9a0e3681bf47b496 | 5,934 | py | Python | simple_distillation_mgr/VLE_data_ethanol_water.py | ykholod/kettlebell-simple-distillation-mgr | 172900253be790f9fe7712ba5f20bcbb12a2a635 | [
"MIT"
] | null | null | null | simple_distillation_mgr/VLE_data_ethanol_water.py | ykholod/kettlebell-simple-distillation-mgr | 172900253be790f9fe7712ba5f20bcbb12a2a635 | [
"MIT"
] | null | null | null | simple_distillation_mgr/VLE_data_ethanol_water.py | ykholod/kettlebell-simple-distillation-mgr | 172900253be790f9fe7712ba5f20bcbb12a2a635 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
""" VLE data for ethanol-water mixture, isobaric, 1.01 bar """
__author__ = "Yaroslav Kholod"
__copyright__ = "Copyright 2019, The Kettlebell project"
__credits__ = "Yaroslav Kholod"
__license__ = "MIT"
__version__ = "0.1.0"
__maintainer__ = "Yaroslav Kholod"
__email__ = "pretorian.yaroslav@gmail.com"
__status__ = "Development"
# VLE data map mol fract to temperature
vle_data_dew = {}
# Ethanol dew point data
# Ethanol mol fract          # Temperature, C
# NOTE(review): the sequence is not monotonic around 96..90 mol% (78.12 at
# 96, up to 78.23 at 95, down to 78.05 at 90). This is the azeotrope region
# of ethanol-water (minimum ~78.05 C here at 90 mol%), but the jumps at
# [95] and [91] look like possible transcription noise -- verify against
# the original VLE source table.
vle_data_dew[100] = 78.21
vle_data_dew[99] = 78.18
vle_data_dew[98] = 78.16
vle_data_dew[97] = 78.14
vle_data_dew[96] = 78.12
vle_data_dew[95] = 78.23
vle_data_dew[94] = 78.22
vle_data_dew[93] = 78.21
vle_data_dew[92] = 78.21
vle_data_dew[91] = 78.06
vle_data_dew[90] = 78.05
vle_data_dew[89] = 78.06
vle_data_dew[88] = 78.07
vle_data_dew[87] = 78.08
vle_data_dew[86] = 78.09
vle_data_dew[85] = 78.11
vle_data_dew[84] = 78.13
vle_data_dew[83] = 78.15
vle_data_dew[82] = 78.17
vle_data_dew[81] = 78.22
vle_data_dew[80] = 78.27
vle_data_dew[79] = 78.32
vle_data_dew[78] = 78.44
vle_data_dew[77] = 78.49
vle_data_dew[76] = 78.52
vle_data_dew[75] = 78.60
vle_data_dew[74] = 78.68
vle_data_dew[73] = 78.77
vle_data_dew[72] = 78.87
vle_data_dew[71] = 78.95
vle_data_dew[70] = 79.05
vle_data_dew[69] = 79.20
vle_data_dew[68] = 79.37
vle_data_dew[67] = 79.50
vle_data_dew[66] = 79.72
vle_data_dew[65] = 79.92
vle_data_dew[64] = 80.10
vle_data_dew[63] = 80.31
vle_data_dew[62] = 80.52
vle_data_dew[61] = 80.76
vle_data_dew[60] = 81.00
vle_data_dew[59] = 81.26
vle_data_dew[58] = 81.53
vle_data_dew[57] = 81.80
vle_data_dew[56] = 82.18
vle_data_dew[55] = 82.55
vle_data_dew[54] = 82.80
vle_data_dew[53] = 83.04
vle_data_dew[52] = 83.44
vle_data_dew[51] = 83.88
vle_data_dew[50] = 84.27
vle_data_dew[49] = 84.73
vle_data_dew[48] = 85.02
vle_data_dew[47] = 85.34
vle_data_dew[46] = 85.69
vle_data_dew[45] = 86.08
vle_data_dew[44] = 86.31
vle_data_dew[43] = 86.99
vle_data_dew[42] = 87.30
vle_data_dew[41] = 87.55
vle_data_dew[40] = 87.85
vle_data_dew[39] = 88.15
vle_data_dew[38] = 88.50
vle_data_dew[37] = 88.87
vle_data_dew[36] = 89.20
vle_data_dew[35] = 89.70
vle_data_dew[34] = 90.02
vle_data_dew[33] = 90.34
vle_data_dew[32] = 90.67
vle_data_dew[31] = 91.00
vle_data_dew[30] = 91.50
vle_data_dew[29] = 91.67
vle_data_dew[28] = 91.84
vle_data_dew[27] = 92.25
vle_data_dew[26] = 92.60
vle_data_dew[25] = 93.01
vle_data_dew[24] = 93.25
vle_data_dew[23] = 93.78
vle_data_dew[22] = 94.01
vle_data_dew[21] = 94.34
vle_data_dew[20] = 94.50
vle_data_dew[19] = 94.89
vle_data_dew[18] = 94.99
vle_data_dew[17] = 95.27
vle_data_dew[16] = 95.55
vle_data_dew[15] = 95.82
vle_data_dew[14] = 96.10
vle_data_dew[13] = 96.38
vle_data_dew[12] = 96.66
vle_data_dew[11] = 97.25
vle_data_dew[10] = 97.50
vle_data_dew[9] = 97.80
vle_data_dew[8] = 98.00
vle_data_dew[7] = 98.39
vle_data_dew[6] = 98.79
vle_data_dew[5] = 99.00
vle_data_dew[4] = 99.33
vle_data_dew[3] = 99.66
vle_data_dew[2] = 99.80
vle_data_dew[1] = 100.00
# VLE data map mol fract to temperature
vle_data_bubble = {}
# Ethanol bubble point data
# Ethanol mol fract          # Temperature, C
# NOTE(review): the entries at [25], [20], [15] and [10] below break the
# otherwise monotonically increasing trend, and [25]/[20]/[15] exactly equal
# vle_data_dew[56]/[53]/[50] -- these look like transcription errors copied
# from the dew-point column. Verify against the original VLE source table.
vle_data_bubble[100] = 78.21
vle_data_bubble[99] = 78.19
vle_data_bubble[98] = 78.16
vle_data_bubble[97] = 78.14
vle_data_bubble[96] = 78.12
vle_data_bubble[95] = 78.14
vle_data_bubble[94] = 78.16
vle_data_bubble[93] = 78.17
vle_data_bubble[92] = 78.18
vle_data_bubble[91] = 78.19
vle_data_bubble[90] = 78.20
vle_data_bubble[89] = 78.21
vle_data_bubble[88] = 78.22
vle_data_bubble[87] = 78.23
vle_data_bubble[86] = 78.24
vle_data_bubble[85] = 78.26
vle_data_bubble[84] = 78.28
vle_data_bubble[83] = 78.30
vle_data_bubble[82] = 78.32
vle_data_bubble[81] = 78.32
vle_data_bubble[80] = 78.37
vle_data_bubble[79] = 78.40
vle_data_bubble[78] = 78.44
vle_data_bubble[77] = 78.47
vle_data_bubble[76] = 78.51
vle_data_bubble[75] = 78.54
vle_data_bubble[74] = 78.58
vle_data_bubble[73] = 78.63
vle_data_bubble[72] = 78.67
vle_data_bubble[71] = 78.72
vle_data_bubble[70] = 78.76
vle_data_bubble[69] = 78.81
vle_data_bubble[68] = 78.86
vle_data_bubble[67] = 78.92
vle_data_bubble[66] = 78.97
vle_data_bubble[65] = 79.03
vle_data_bubble[64] = 79.09
vle_data_bubble[63] = 79.15
vle_data_bubble[62] = 79.21
vle_data_bubble[61] = 79.27
vle_data_bubble[60] = 79.34
vle_data_bubble[59] = 79.40
vle_data_bubble[58] = 79.47
vle_data_bubble[57] = 79.54
vle_data_bubble[56] = 79.61
vle_data_bubble[55] = 79.68
vle_data_bubble[54] = 79.76
vle_data_bubble[53] = 79.83
vle_data_bubble[52] = 79.91
vle_data_bubble[51] = 79.99
vle_data_bubble[50] = 80.07
vle_data_bubble[49] = 80.15
vle_data_bubble[48] = 80.24
vle_data_bubble[47] = 80.32
vle_data_bubble[46] = 80.41
vle_data_bubble[45] = 80.50
vle_data_bubble[44] = 80.59
vle_data_bubble[43] = 80.68
vle_data_bubble[42] = 80.78
vle_data_bubble[41] = 80.88
vle_data_bubble[40] = 80.98
vle_data_bubble[39] = 81.08
vle_data_bubble[38] = 81.18
vle_data_bubble[37] = 81.29
vle_data_bubble[36] = 81.40
vle_data_bubble[35] = 81.51
vle_data_bubble[34] = 81.62
vle_data_bubble[33] = 81.74
vle_data_bubble[32] = 81.86
vle_data_bubble[31] = 81.99
vle_data_bubble[30] = 82.12
vle_data_bubble[29] = 82.26
vle_data_bubble[28] = 82.39
vle_data_bubble[27] = 82.54
vle_data_bubble[26] = 82.69
vle_data_bubble[25] = 82.18  # NOTE(review): out of order (neighbors 82.69 / 83.01)
vle_data_bubble[24] = 83.01
vle_data_bubble[23] = 83.19
vle_data_bubble[22] = 83.37
vle_data_bubble[21] = 83.56
vle_data_bubble[20] = 83.04  # NOTE(review): out of order (neighbors 83.56 / 83.98)
vle_data_bubble[19] = 83.98
vle_data_bubble[18] = 84.21
vle_data_bubble[17] = 84.46
vle_data_bubble[16] = 84.73
vle_data_bubble[15] = 84.27  # NOTE(review): out of order (neighbors 84.73 / 85.34)
vle_data_bubble[14] = 85.34
vle_data_bubble[13] = 85.69
vle_data_bubble[12] = 86.08
vle_data_bubble[11] = 86.51
vle_data_bubble[10] = 86.30  # NOTE(review): out of order (neighbors 86.51 / 87.53)
vle_data_bubble[9] = 87.53
vle_data_bubble[8] = 88.15
vle_data_bubble[7] = 88.87
vle_data_bubble[6] = 89.20
vle_data_bubble[5] = 90.67
vle_data_bubble[4] = 91.50
vle_data_bubble[3] = 93.01
vle_data_bubble[2] = 94.89
vle_data_bubble[1] = 97.25
| 26.373333 | 62 | 0.731378 |
__author__ = "Yaroslav Kholod"
__copyright__ = "Copyright 2019, The Kettlebell project"
__credits__ = "Yaroslav Kholod"
__license__ = "MIT"
__version__ = "0.1.0"
__maintainer__ = "Yaroslav Kholod"
__email__ = "pretorian.yaroslav@gmail.com"
__status__ = "Development"
vle_data_dew = {}
0] = 78.21
vle_data_dew[99] = 78.18
vle_data_dew[98] = 78.16
vle_data_dew[97] = 78.14
vle_data_dew[96] = 78.12
vle_data_dew[95] = 78.23
vle_data_dew[94] = 78.22
vle_data_dew[93] = 78.21
vle_data_dew[92] = 78.21
vle_data_dew[91] = 78.06
vle_data_dew[90] = 78.05
vle_data_dew[89] = 78.06
vle_data_dew[88] = 78.07
vle_data_dew[87] = 78.08
vle_data_dew[86] = 78.09
vle_data_dew[85] = 78.11
vle_data_dew[84] = 78.13
vle_data_dew[83] = 78.15
vle_data_dew[82] = 78.17
vle_data_dew[81] = 78.22
vle_data_dew[80] = 78.27
vle_data_dew[79] = 78.32
vle_data_dew[78] = 78.44
vle_data_dew[77] = 78.49
vle_data_dew[76] = 78.52
vle_data_dew[75] = 78.60
vle_data_dew[74] = 78.68
vle_data_dew[73] = 78.77
vle_data_dew[72] = 78.87
vle_data_dew[71] = 78.95
vle_data_dew[70] = 79.05
vle_data_dew[69] = 79.20
vle_data_dew[68] = 79.37
vle_data_dew[67] = 79.50
vle_data_dew[66] = 79.72
vle_data_dew[65] = 79.92
vle_data_dew[64] = 80.10
vle_data_dew[63] = 80.31
vle_data_dew[62] = 80.52
vle_data_dew[61] = 80.76
vle_data_dew[60] = 81.00
vle_data_dew[59] = 81.26
vle_data_dew[58] = 81.53
vle_data_dew[57] = 81.80
vle_data_dew[56] = 82.18
vle_data_dew[55] = 82.55
vle_data_dew[54] = 82.80
vle_data_dew[53] = 83.04
vle_data_dew[52] = 83.44
vle_data_dew[51] = 83.88
vle_data_dew[50] = 84.27
vle_data_dew[49] = 84.73
vle_data_dew[48] = 85.02
vle_data_dew[47] = 85.34
vle_data_dew[46] = 85.69
vle_data_dew[45] = 86.08
vle_data_dew[44] = 86.31
vle_data_dew[43] = 86.99
vle_data_dew[42] = 87.30
vle_data_dew[41] = 87.55
vle_data_dew[40] = 87.85
vle_data_dew[39] = 88.15
vle_data_dew[38] = 88.50
vle_data_dew[37] = 88.87
vle_data_dew[36] = 89.20
vle_data_dew[35] = 89.70
vle_data_dew[34] = 90.02
vle_data_dew[33] = 90.34
vle_data_dew[32] = 90.67
vle_data_dew[31] = 91.00
vle_data_dew[30] = 91.50
vle_data_dew[29] = 91.67
vle_data_dew[28] = 91.84
vle_data_dew[27] = 92.25
vle_data_dew[26] = 92.60
vle_data_dew[25] = 93.01
vle_data_dew[24] = 93.25
vle_data_dew[23] = 93.78
vle_data_dew[22] = 94.01
vle_data_dew[21] = 94.34
vle_data_dew[20] = 94.50
vle_data_dew[19] = 94.89
vle_data_dew[18] = 94.99
vle_data_dew[17] = 95.27
vle_data_dew[16] = 95.55
vle_data_dew[15] = 95.82
vle_data_dew[14] = 96.10
vle_data_dew[13] = 96.38
vle_data_dew[12] = 96.66
vle_data_dew[11] = 97.25
vle_data_dew[10] = 97.50
vle_data_dew[9] = 97.80
vle_data_dew[8] = 98.00
vle_data_dew[7] = 98.39
vle_data_dew[6] = 98.79
vle_data_dew[5] = 99.00
vle_data_dew[4] = 99.33
vle_data_dew[3] = 99.66
vle_data_dew[2] = 99.80
vle_data_dew[1] = 100.00
vle_data_bubble = {}
[100] = 78.21
vle_data_bubble[99] = 78.19
vle_data_bubble[98] = 78.16
vle_data_bubble[97] = 78.14
vle_data_bubble[96] = 78.12
vle_data_bubble[95] = 78.14
vle_data_bubble[94] = 78.16
vle_data_bubble[93] = 78.17
vle_data_bubble[92] = 78.18
vle_data_bubble[91] = 78.19
vle_data_bubble[90] = 78.20
vle_data_bubble[89] = 78.21
vle_data_bubble[88] = 78.22
vle_data_bubble[87] = 78.23
vle_data_bubble[86] = 78.24
vle_data_bubble[85] = 78.26
vle_data_bubble[84] = 78.28
vle_data_bubble[83] = 78.30
vle_data_bubble[82] = 78.32
vle_data_bubble[81] = 78.32
vle_data_bubble[80] = 78.37
vle_data_bubble[79] = 78.40
vle_data_bubble[78] = 78.44
vle_data_bubble[77] = 78.47
vle_data_bubble[76] = 78.51
vle_data_bubble[75] = 78.54
vle_data_bubble[74] = 78.58
vle_data_bubble[73] = 78.63
vle_data_bubble[72] = 78.67
vle_data_bubble[71] = 78.72
vle_data_bubble[70] = 78.76
vle_data_bubble[69] = 78.81
vle_data_bubble[68] = 78.86
vle_data_bubble[67] = 78.92
vle_data_bubble[66] = 78.97
vle_data_bubble[65] = 79.03
vle_data_bubble[64] = 79.09
vle_data_bubble[63] = 79.15
vle_data_bubble[62] = 79.21
vle_data_bubble[61] = 79.27
vle_data_bubble[60] = 79.34
vle_data_bubble[59] = 79.40
vle_data_bubble[58] = 79.47
vle_data_bubble[57] = 79.54
vle_data_bubble[56] = 79.61
vle_data_bubble[55] = 79.68
vle_data_bubble[54] = 79.76
vle_data_bubble[53] = 79.83
vle_data_bubble[52] = 79.91
vle_data_bubble[51] = 79.99
vle_data_bubble[50] = 80.07
vle_data_bubble[49] = 80.15
vle_data_bubble[48] = 80.24
vle_data_bubble[47] = 80.32
vle_data_bubble[46] = 80.41
vle_data_bubble[45] = 80.50
vle_data_bubble[44] = 80.59
vle_data_bubble[43] = 80.68
vle_data_bubble[42] = 80.78
vle_data_bubble[41] = 80.88
vle_data_bubble[40] = 80.98
vle_data_bubble[39] = 81.08
vle_data_bubble[38] = 81.18
vle_data_bubble[37] = 81.29
vle_data_bubble[36] = 81.40
vle_data_bubble[35] = 81.51
vle_data_bubble[34] = 81.62
vle_data_bubble[33] = 81.74
vle_data_bubble[32] = 81.86
vle_data_bubble[31] = 81.99
vle_data_bubble[30] = 82.12
vle_data_bubble[29] = 82.26
vle_data_bubble[28] = 82.39
vle_data_bubble[27] = 82.54
vle_data_bubble[26] = 82.69
vle_data_bubble[25] = 82.18
vle_data_bubble[24] = 83.01
vle_data_bubble[23] = 83.19
vle_data_bubble[22] = 83.37
vle_data_bubble[21] = 83.56
vle_data_bubble[20] = 83.04
vle_data_bubble[19] = 83.98
vle_data_bubble[18] = 84.21
vle_data_bubble[17] = 84.46
vle_data_bubble[16] = 84.73
vle_data_bubble[15] = 84.27
vle_data_bubble[14] = 85.34
vle_data_bubble[13] = 85.69
vle_data_bubble[12] = 86.08
vle_data_bubble[11] = 86.51
vle_data_bubble[10] = 86.30
vle_data_bubble[9] = 87.53
vle_data_bubble[8] = 88.15
vle_data_bubble[7] = 88.87
vle_data_bubble[6] = 89.20
vle_data_bubble[5] = 90.67
vle_data_bubble[4] = 91.50
vle_data_bubble[3] = 93.01
vle_data_bubble[2] = 94.89
vle_data_bubble[1] = 97.25
| true | true |
f71c249886014075a96d3e57f520b3963e70bc00 | 675 | py | Python | pytest/testSigning.py | RomanValov/ArmoryDB | 625eff9712161676ad83deb03616e6edb48283ca | [
"MIT"
] | 505 | 2016-02-04T15:54:46.000Z | 2022-03-27T18:43:01.000Z | pytest/testSigning.py | jimmysong/BitcoinArmory | 1c7190176897a2e0f3e4e198ab2f199059bb2402 | [
"MIT"
] | 528 | 2016-02-06T19:50:12.000Z | 2022-01-15T10:21:16.000Z | pytest/testSigning.py | jimmysong/BitcoinArmory | 1c7190176897a2e0f3e4e198ab2f199059bb2402 | [
"MIT"
] | 208 | 2015-01-02T10:31:40.000Z | 2021-12-14T07:37:36.000Z | import sys
sys.path.append('..')
import unittest
import random
from armoryengine.ALL import *
class SigningTester(unittest.TestCase):
    """Checks that produced ECDSA signatures are canonical ("low-S")."""

    def testLowSig(self):
        """Sign random messages and verify every signature has s <= N // 2.

        A "low-S" signature (s at most half the secp256k1 group order) is
        required for non-malleable Bitcoin signatures.
        """
        sbdPrivKey = SecureBinaryData(b'\x01'*32)
        # Sanity check: public-key derivation must not raise.
        pub = CryptoECDSA().ComputePublicKey(sbdPrivKey).toBinStr()
        for i in range(100):
            msg = "some random msg %s" % random.random()
            sbdSig = CryptoECDSA().SignData(SecureBinaryData(msg), sbdPrivKey, False)
            binSig = sbdSig.toBinStr()
            # Raw signature is r || s, 32 bytes each; round-trip through DER.
            derSig = createDERSigFromRS(binSig[:32], binSig[32:])
            r, s = getRSFromDERSig(derSig)
            j = binary_to_int(s, BIGENDIAN)
            # Use floor division: plain "/" is true division under Python 3
            # and would coerce the 256-bit curve order to an imprecise float,
            # making the boundary comparison unreliable.
            self.assertTrue(j <= SECP256K1_ORDER // 2)
| 28.125 | 82 | 0.657778 | import sys
sys.path.append('..')
import unittest
import random
from armoryengine.ALL import *
class SigningTester(unittest.TestCase):
def testLowSig(self):
sbdPrivKey = SecureBinaryData(b'\x01'*32)
pub = CryptoECDSA().ComputePublicKey(sbdPrivKey).toBinStr()
for i in range(100):
msg = "some random msg %s" % random.random()
sbdSig = CryptoECDSA().SignData(SecureBinaryData(msg), sbdPrivKey, False)
binSig = sbdSig.toBinStr()
derSig = createDERSigFromRS(binSig[:32], binSig[32:])
r, s = getRSFromDERSig(derSig)
j = binary_to_int(s, BIGENDIAN)
self.assertTrue( j <= SECP256K1_ORDER / 2)
| true | true |
f71c26869793672d4719326ffb01b5d7f0f78eb2 | 12,414 | py | Python | texar/torch/modules/decoders/rnn_decoders_test.py | wwt17/texar-pytorch | 9fb3ae8f7b541da5c808357033a93fba1817bfbd | [
"Apache-2.0"
] | 19 | 2020-07-29T15:25:45.000Z | 2022-01-19T17:49:42.000Z | texar/torch/modules/decoders/rnn_decoders_test.py | wwt17/texar-pytorch | 9fb3ae8f7b541da5c808357033a93fba1817bfbd | [
"Apache-2.0"
] | 3 | 2021-02-16T10:26:23.000Z | 2021-06-08T16:50:40.000Z | texar/torch/modules/decoders/rnn_decoders_test.py | wwt17/texar-pytorch | 9fb3ae8f7b541da5c808357033a93fba1817bfbd | [
"Apache-2.0"
] | 1 | 2019-10-06T07:54:30.000Z | 2019-10-06T07:54:30.000Z | """
Unit tests for RNN decoders.
"""
import unittest
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from texar.torch.hyperparams import HParams
from texar.torch.modules.decoders.decoder_helpers import get_helper
from texar.torch.modules.decoders.rnn_decoders import (
AttentionRNNDecoder, AttentionRNNDecoderOutput, BasicRNNDecoder,
BasicRNNDecoderOutput)
from texar.torch.modules.embedders.embedders import WordEmbedder
from texar.torch.utils.utils import map_structure
class BasicRNNDecoderTest(unittest.TestCase):
r"""Tests :class:`~texar.torch.modules.decoders.rnn_decoders.BasicRNNDecoder`.
"""
    def setUp(self):
        # Small synthetic fixture: vocabulary of 4 tokens, 16 sequences per
        # batch, 8 decoding steps, 20-dimensional embeddings.
        self._vocab_size = 4
        self._max_time = 8
        self._batch_size = 16
        self._emb_dim = 20
        # Random token ids of shape (batch, time).
        self._inputs = torch.randint(
            self._vocab_size, size=(self._batch_size, self._max_time))
        # Random embedding table wrapped in a WordEmbedder.
        embedding = torch.rand(
            self._vocab_size, self._emb_dim, dtype=torch.float)
        self._embedder = WordEmbedder(init_value=embedding)
        self._hparams = HParams(None, BasicRNNDecoder.default_hparams())
    def _test_outputs(self, decoder, outputs, final_state, sequence_lengths,
                      test_mode=False):
        r"""Check output types/shapes shared by all decoding tests.

        In training mode every sequence must run the full ``max_time`` steps;
        in test (inference) mode sequences may stop early, so only the
        longest decoded length bounds the logits' time dimension.
        """
        hidden_size = decoder.hparams.rnn_cell.kwargs.num_units

        self.assertIsInstance(outputs, BasicRNNDecoderOutput)
        max_time = (self._max_time if not test_mode
                    else max(sequence_lengths).item())
        self.assertEqual(
            outputs.logits.shape,
            (self._batch_size, max_time, self._vocab_size))
        if not test_mode:
            np.testing.assert_array_equal(
                sequence_lengths, [max_time] * self._batch_size)
        self.assertEqual(final_state[0].shape, (self._batch_size, hidden_size))
def test_decode_train(self):
r"""Tests decoding in training mode.
"""
decoder = BasicRNNDecoder(
token_embedder=self._embedder, input_size=self._emb_dim,
vocab_size=self._vocab_size, hparams=self._hparams)
sequence_length = torch.tensor([self._max_time] * self._batch_size)
# Helper by default HParams
helper_train = decoder.create_helper()
outputs, final_state, sequence_lengths = decoder(
helper=helper_train, inputs=self._inputs,
sequence_length=sequence_length)
self._test_outputs(decoder, outputs, final_state, sequence_lengths)
# Helper by decoding strategy
helper_train = decoder.create_helper(decoding_strategy='train_greedy')
outputs, final_state, sequence_lengths = decoder(
helper=helper_train, inputs=self._inputs,
sequence_length=sequence_length)
self._test_outputs(decoder, outputs, final_state, sequence_lengths)
# Implicit helper
outputs, final_state, sequence_lengths = decoder(
inputs=self._inputs, sequence_length=sequence_length)
self._test_outputs(decoder, outputs, final_state, sequence_lengths)
# Eval helper through forward args
outputs, final_state, sequence_lengths = decoder(
embedding=self._embedder,
start_tokens=torch.tensor([1] * self._batch_size),
end_token=2, infer_mode=True)
self._test_outputs(
decoder, outputs, final_state, sequence_lengths, test_mode=True)
@staticmethod
def _assert_tensor_equal(a: torch.Tensor, b: torch.Tensor) -> bool:
if torch.is_tensor(a):
a = a.detach().numpy()
if torch.is_tensor(b):
b = b.detach().numpy()
if any(np.issubdtype(array.dtype, np.floating) for array in [a, b]):
return np.testing.assert_allclose(a, b, rtol=1e-5, atol=1e-8)
return np.testing.assert_array_equal(a, b)
def test_decode_train_with_torch(self):
r"""Compares decoding results with PyTorch built-in decoder.
"""
decoder = BasicRNNDecoder(
token_embedder=self._embedder, input_size=self._emb_dim,
vocab_size=self._vocab_size, hparams=self._hparams)
input_size = self._emb_dim
hidden_size = decoder.hparams.rnn_cell.kwargs.num_units
num_layers = decoder.hparams.rnn_cell.num_layers
torch_lstm = nn.LSTM(input_size, hidden_size, num_layers,
batch_first=True)
# match parameters
for name in ['weight_ih', 'weight_hh', 'bias_ih', 'bias_hh']:
setattr(torch_lstm, f'{name}_l0',
getattr(decoder._cell._cell, name))
torch_lstm.flatten_parameters()
output_layer = decoder._output_layer
input_lengths = torch.tensor([self._max_time] * self._batch_size)
inputs = torch.randint(
self._vocab_size, size=(self._batch_size, self._max_time))
# decoder outputs
helper_train = decoder.create_helper()
outputs, final_state, sequence_lengths = decoder(
inputs=inputs,
sequence_length=input_lengths,
helper=helper_train)
# torch LSTM outputs
lstm_inputs = F.embedding(inputs, self._embedder.embedding)
torch_outputs, torch_states = torch_lstm(lstm_inputs)
torch_outputs = output_layer(torch_outputs)
torch_sample_id = torch.argmax(torch_outputs, dim=-1)
self.assertEqual(final_state[0].shape,
(self._batch_size, hidden_size))
self._assert_tensor_equal(outputs.logits, torch_outputs)
self._assert_tensor_equal(outputs.sample_id, torch_sample_id)
self._assert_tensor_equal(final_state[0], torch_states[0].squeeze(0))
self._assert_tensor_equal(final_state[1], torch_states[1].squeeze(0))
self._assert_tensor_equal(sequence_lengths, input_lengths)
def test_decode_infer(self):
r"""Tests decoding in inference mode."""
decoder = BasicRNNDecoder(
token_embedder=self._embedder, input_size=self._emb_dim,
vocab_size=self._vocab_size, hparams=self._hparams)
decoder.eval()
start_tokens = torch.tensor([self._vocab_size - 2] * self._batch_size)
helpers = []
for strategy in ['infer_greedy', 'infer_sample']:
helper = decoder.create_helper(
decoding_strategy=strategy,
start_tokens=start_tokens,
end_token=self._vocab_size - 1)
helpers.append(helper)
for klass in ['TopKSampleEmbeddingHelper', 'SoftmaxEmbeddingHelper',
'GumbelSoftmaxEmbeddingHelper']:
helper = get_helper(
klass, start_tokens=start_tokens,
end_token=self._vocab_size - 1,
top_k=self._vocab_size // 2, tau=2.0,
straight_through=True)
helpers.append(helper)
for helper in helpers:
max_length = 100
outputs, final_state, sequence_lengths = decoder(
helper=helper, max_decoding_length=max_length)
self.assertLessEqual(max(sequence_lengths), max_length)
self._test_outputs(decoder, outputs, final_state, sequence_lengths,
test_mode=True)
class AttentionRNNDecoderTest(unittest.TestCase):
    r"""Tests :class:`~texar.torch.modules.decoders.rnn_decoders.AttentionRNNDecoder`.
    """
    def setUp(self):
        # Fixture sizes shared by all tests in this class.
        self._vocab_size = 10
        self._max_time = 16
        self._batch_size = 8
        self._emb_dim = 20
        self._attention_dim = 256
        # Random token ids of shape (batch_size, max_time).
        self._inputs = torch.randint(
            self._vocab_size, size=(self._batch_size, self._max_time))
        embedding = torch.rand(
            self._vocab_size, self._emb_dim, dtype=torch.float)
        self._embedder = WordEmbedder(init_value=embedding)
        # Fake encoder memory with feature size 64 (matches the
        # encoder_output_size passed to the decoders below).
        self._encoder_output = torch.rand(
            self._batch_size, self._max_time, 64)
        self._test_hparams = {}  # (cell_type, is_multi) -> hparams
        # One single-layer configuration per supported cell type.
        for cell_type in ["RNNCell", "LSTMCell", "GRUCell"]:
            hparams = {
                "rnn_cell": {
                    'type': cell_type,
                    'kwargs': {
                        'num_units': 256,
                    },
                },
                "attention": {
                    "kwargs": {
                        "num_units": self._attention_dim
                    },
                }
            }
            self._test_hparams[(cell_type, False)] = HParams(
                hparams, AttentionRNNDecoder.default_hparams())
        # One additional multi-layer (3-layer LSTM) configuration.
        hparams = {
            "rnn_cell": {
                'type': 'LSTMCell',
                'kwargs': {
                    'num_units': 256,
                },
                'num_layers': 3,
            },
            "attention": {
                "kwargs": {
                    "num_units": self._attention_dim
                },
            }
        }
        self._test_hparams[("LSTMCell", True)] = HParams(
            hparams, AttentionRNNDecoder.default_hparams())
    def _test_outputs(self, decoder, outputs, final_state, sequence_lengths,
                      test_mode=False):
        r"""Checks shared invariants of attention-decoder outputs.

        Verifies output type, logits shape, decoded lengths, and that the
        final cell state has the expected nesting for the configured cell
        type (list for multi-layer, tuple for LSTM) with tensors of shape
        ``(batch_size, hidden_size)``.
        """
        hidden_size = decoder.hparams.rnn_cell.kwargs.num_units
        cell_type = decoder.hparams.rnn_cell.type
        is_multi = decoder.hparams.rnn_cell.num_layers > 1
        self.assertIsInstance(outputs, AttentionRNNDecoderOutput)
        # In inference mode the longest decoded sequence defines max_time.
        max_time = (self._max_time if not test_mode
                    else max(sequence_lengths).item())
        self.assertEqual(
            outputs.logits.shape,
            (self._batch_size, max_time, self._vocab_size))
        if not test_mode:
            # Teacher-forced decoding always runs the full max_time steps.
            np.testing.assert_array_equal(
                sequence_lengths, [max_time] * self._batch_size)
        # Every leaf tensor in the (possibly nested) cell state must have
        # shape (batch_size, hidden_size).
        map_structure(
            lambda t: self.assertEqual(
                t.size(), (self._batch_size, hidden_size)),
            final_state.cell_state)
        # Peel the nesting layer by layer: list of layers (multi-layer),
        # then (h, c) tuple (LSTM), down to a plain tensor.
        state = final_state.cell_state
        if is_multi:
            self.assertIsInstance(state, list)
            state = state[0]
        if cell_type == "LSTMCell":
            self.assertIsInstance(state, tuple)
            state = state[0]
        self.assertIsInstance(state, torch.Tensor)
    def test_decode_infer(self):
        r"""Tests decoding in inference mode.
        """
        # Random valid memory lengths in [1, max_time].
        seq_length = np.random.randint(
            self._max_time, size=[self._batch_size]) + 1
        encoder_values_length = torch.tensor(seq_length)
        for (cell_type, is_multi), hparams in self._test_hparams.items():
            decoder = AttentionRNNDecoder(
                encoder_output_size=64,
                token_embedder=self._embedder,
                vocab_size=self._vocab_size,
                input_size=self._emb_dim,
                hparams=hparams)
            decoder.eval()
            helper_infer = decoder.create_helper(
                start_tokens=torch.tensor([1] * self._batch_size), end_token=2)
            outputs, final_state, sequence_lengths = decoder(
                memory=self._encoder_output,
                memory_sequence_length=encoder_values_length,
                helper=helper_infer)
            self._test_outputs(decoder, outputs, final_state, sequence_lengths,
                               test_mode=True)
    def test_decode_train(self):
        r"""Tests decoding in training mode.
        """
        # Random valid memory lengths in [1, max_time].
        seq_length = np.random.randint(
            self._max_time, size=[self._batch_size]) + 1
        encoder_values_length = torch.tensor(seq_length)
        for (cell_type, is_multi), hparams in self._test_hparams.items():
            decoder = AttentionRNNDecoder(
                encoder_output_size=64,
                token_embedder=self._embedder,
                vocab_size=self._vocab_size,
                input_size=self._emb_dim,
                hparams=hparams)
            sequence_length = torch.tensor([self._max_time] * self._batch_size)
            helper_train = decoder.create_helper()
            outputs, final_state, sequence_lengths = decoder(
                memory=self._encoder_output,
                memory_sequence_length=encoder_values_length,
                helper=helper_train,
                inputs=self._inputs,
                sequence_length=sequence_length)
            self._test_outputs(decoder, outputs, final_state, sequence_lengths)
# Run the test suite when this file is executed as a script.
if __name__ == "__main__":
    unittest.main()
| 39.160883 | 86 | 0.617931 |
import unittest
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from texar.torch.hyperparams import HParams
from texar.torch.modules.decoders.decoder_helpers import get_helper
from texar.torch.modules.decoders.rnn_decoders import (
AttentionRNNDecoder, AttentionRNNDecoderOutput, BasicRNNDecoder,
BasicRNNDecoderOutput)
from texar.torch.modules.embedders.embedders import WordEmbedder
from texar.torch.utils.utils import map_structure
class BasicRNNDecoderTest(unittest.TestCase):
    r"""Tests the texar ``BasicRNNDecoder`` module."""
    def setUp(self):
        # Small fixed-size fixture shared by every test in this class.
        self._vocab_size = 4
        self._max_time = 8
        self._batch_size = 16
        self._emb_dim = 20
        # Random token ids of shape (batch_size, max_time).
        self._inputs = torch.randint(
            self._vocab_size, size=(self._batch_size, self._max_time))
        embedding = torch.rand(
            self._vocab_size, self._emb_dim, dtype=torch.float)
        self._embedder = WordEmbedder(init_value=embedding)
        # Default decoder hyper-parameters, no overrides.
        self._hparams = HParams(None, BasicRNNDecoder.default_hparams())
    def _test_outputs(self, decoder, outputs, final_state, sequence_lengths,
                      test_mode=False):
        r"""Checks shared invariants of decoder outputs.

        In training mode (``test_mode=False``) every example must decode
        exactly ``max_time`` steps; in inference mode the longest decoded
        sequence defines ``max_time``.
        """
        hidden_size = decoder.hparams.rnn_cell.kwargs.num_units
        self.assertIsInstance(outputs, BasicRNNDecoderOutput)
        max_time = (self._max_time if not test_mode
                    else max(sequence_lengths).item())
        self.assertEqual(
            outputs.logits.shape,
            (self._batch_size, max_time, self._vocab_size))
        if not test_mode:
            # Teacher-forced decoding always runs the full max_time steps.
            np.testing.assert_array_equal(
                sequence_lengths, [max_time] * self._batch_size)
        self.assertEqual(final_state[0].shape, (self._batch_size, hidden_size))
    def test_decode_train(self):
        r"""Tests decoding in training mode via four equivalent APIs."""
        decoder = BasicRNNDecoder(
            token_embedder=self._embedder, input_size=self._emb_dim,
            vocab_size=self._vocab_size, hparams=self._hparams)
        sequence_length = torch.tensor([self._max_time] * self._batch_size)
        # Helper built from default HParams.
        helper_train = decoder.create_helper()
        outputs, final_state, sequence_lengths = decoder(
            helper=helper_train, inputs=self._inputs,
            sequence_length=sequence_length)
        self._test_outputs(decoder, outputs, final_state, sequence_lengths)
        # Helper built from a decoding-strategy name.
        helper_train = decoder.create_helper(decoding_strategy='train_greedy')
        outputs, final_state, sequence_lengths = decoder(
            helper=helper_train, inputs=self._inputs,
            sequence_length=sequence_length)
        self._test_outputs(decoder, outputs, final_state, sequence_lengths)
        # No explicit helper at all.
        outputs, final_state, sequence_lengths = decoder(
            inputs=self._inputs, sequence_length=sequence_length)
        self._test_outputs(decoder, outputs, final_state, sequence_lengths)
        # Inference-mode arguments passed straight to forward().
        outputs, final_state, sequence_lengths = decoder(
            embedding=self._embedder,
            start_tokens=torch.tensor([1] * self._batch_size),
            end_token=2, infer_mode=True)
        self._test_outputs(
            decoder, outputs, final_state, sequence_lengths, test_mode=True)
    @staticmethod
    def _assert_tensor_equal(a: torch.Tensor, b: torch.Tensor) -> bool:
        r"""Asserts two tensors/arrays are equal (tolerant for floats)."""
        if torch.is_tensor(a):
            a = a.detach().numpy()
        if torch.is_tensor(b):
            b = b.detach().numpy()
        if any(np.issubdtype(array.dtype, np.floating) for array in [a, b]):
            return np.testing.assert_allclose(a, b, rtol=1e-5, atol=1e-8)
        return np.testing.assert_array_equal(a, b)
    def test_decode_train_with_torch(self):
        r"""Compares decoding results with PyTorch's built-in nn.LSTM."""
        decoder = BasicRNNDecoder(
            token_embedder=self._embedder, input_size=self._emb_dim,
            vocab_size=self._vocab_size, hparams=self._hparams)
        input_size = self._emb_dim
        hidden_size = decoder.hparams.rnn_cell.kwargs.num_units
        num_layers = decoder.hparams.rnn_cell.num_layers
        torch_lstm = nn.LSTM(input_size, hidden_size, num_layers,
                             batch_first=True)
        # Alias nn.LSTM's layer-0 parameters to the decoder's inner cell
        # so both models share identical weights.
        for name in ['weight_ih', 'weight_hh', 'bias_ih', 'bias_hh']:
            setattr(torch_lstm, f'{name}_l0',
                    getattr(decoder._cell._cell, name))
        torch_lstm.flatten_parameters()
        output_layer = decoder._output_layer
        input_lengths = torch.tensor([self._max_time] * self._batch_size)
        inputs = torch.randint(
            self._vocab_size, size=(self._batch_size, self._max_time))
        # Texar decoder outputs.
        helper_train = decoder.create_helper()
        outputs, final_state, sequence_lengths = decoder(
            inputs=inputs,
            sequence_length=input_lengths,
            helper=helper_train)
        # Reference outputs: embed the same ids, run nn.LSTM, and project
        # through the decoder's own output layer.
        lstm_inputs = F.embedding(inputs, self._embedder.embedding)
        torch_outputs, torch_states = torch_lstm(lstm_inputs)
        torch_outputs = output_layer(torch_outputs)
        torch_sample_id = torch.argmax(torch_outputs, dim=-1)
        self.assertEqual(final_state[0].shape,
                         (self._batch_size, hidden_size))
        self._assert_tensor_equal(outputs.logits, torch_outputs)
        self._assert_tensor_equal(outputs.sample_id, torch_sample_id)
        # nn.LSTM states carry a leading num_layers dimension; squeeze it.
        self._assert_tensor_equal(final_state[0], torch_states[0].squeeze(0))
        self._assert_tensor_equal(final_state[1], torch_states[1].squeeze(0))
        self._assert_tensor_equal(sequence_lengths, input_lengths)
    def test_decode_infer(self):
        r"""Tests decoding in inference mode with several helper kinds."""
        decoder = BasicRNNDecoder(
            token_embedder=self._embedder, input_size=self._emb_dim,
            vocab_size=self._vocab_size, hparams=self._hparams)
        decoder.eval()
        start_tokens = torch.tensor([self._vocab_size - 2] * self._batch_size)
        helpers = []
        # Helpers created by decoding-strategy name.
        for strategy in ['infer_greedy', 'infer_sample']:
            helper = decoder.create_helper(
                decoding_strategy=strategy,
                start_tokens=start_tokens,
                end_token=self._vocab_size - 1)
            helpers.append(helper)
        # Helpers created by explicit class name via get_helper.
        for klass in ['TopKSampleEmbeddingHelper', 'SoftmaxEmbeddingHelper',
                      'GumbelSoftmaxEmbeddingHelper']:
            helper = get_helper(
                klass, start_tokens=start_tokens,
                end_token=self._vocab_size - 1,
                top_k=self._vocab_size // 2, tau=2.0,
                straight_through=True)
            helpers.append(helper)
        for helper in helpers:
            max_length = 100
            outputs, final_state, sequence_lengths = decoder(
                helper=helper, max_decoding_length=max_length)
            # Decoding must stop at or before the requested cap.
            self.assertLessEqual(max(sequence_lengths), max_length)
            self._test_outputs(decoder, outputs, final_state, sequence_lengths,
                               test_mode=True)
class AttentionRNNDecoderTest(unittest.TestCase):
    r"""Tests the texar ``AttentionRNNDecoder`` module."""
    def setUp(self):
        # Fixture sizes shared by all tests in this class.
        self._vocab_size = 10
        self._max_time = 16
        self._batch_size = 8
        self._emb_dim = 20
        self._attention_dim = 256
        # Random token ids of shape (batch_size, max_time).
        self._inputs = torch.randint(
            self._vocab_size, size=(self._batch_size, self._max_time))
        embedding = torch.rand(
            self._vocab_size, self._emb_dim, dtype=torch.float)
        self._embedder = WordEmbedder(init_value=embedding)
        # Fake encoder memory with feature size 64 (matches the
        # encoder_output_size passed to the decoders below).
        self._encoder_output = torch.rand(
            self._batch_size, self._max_time, 64)
        # Maps (cell_type, is_multi_layer) -> HParams configuration.
        self._test_hparams = {}
        # One single-layer configuration per supported cell type.
        for cell_type in ["RNNCell", "LSTMCell", "GRUCell"]:
            hparams = {
                "rnn_cell": {
                    'type': cell_type,
                    'kwargs': {
                        'num_units': 256,
                    },
                },
                "attention": {
                    "kwargs": {
                        "num_units": self._attention_dim
                    },
                }
            }
            self._test_hparams[(cell_type, False)] = HParams(
                hparams, AttentionRNNDecoder.default_hparams())
        # One additional multi-layer (3-layer LSTM) configuration.
        hparams = {
            "rnn_cell": {
                'type': 'LSTMCell',
                'kwargs': {
                    'num_units': 256,
                },
                'num_layers': 3,
            },
            "attention": {
                "kwargs": {
                    "num_units": self._attention_dim
                },
            }
        }
        self._test_hparams[("LSTMCell", True)] = HParams(
            hparams, AttentionRNNDecoder.default_hparams())
    def _test_outputs(self, decoder, outputs, final_state, sequence_lengths,
                      test_mode=False):
        r"""Checks shared invariants of attention-decoder outputs.

        Verifies output type, logits shape, decoded lengths, and the
        nesting of the final cell state (list for multi-layer, (h, c)
        tuple for LSTM, tensor leaf).
        """
        hidden_size = decoder.hparams.rnn_cell.kwargs.num_units
        cell_type = decoder.hparams.rnn_cell.type
        is_multi = decoder.hparams.rnn_cell.num_layers > 1
        self.assertIsInstance(outputs, AttentionRNNDecoderOutput)
        # In inference mode the longest decoded sequence defines max_time.
        max_time = (self._max_time if not test_mode
                    else max(sequence_lengths).item())
        self.assertEqual(
            outputs.logits.shape,
            (self._batch_size, max_time, self._vocab_size))
        if not test_mode:
            # Teacher-forced decoding always runs the full max_time steps.
            np.testing.assert_array_equal(
                sequence_lengths, [max_time] * self._batch_size)
        # Every leaf tensor in the (possibly nested) cell state must have
        # shape (batch_size, hidden_size).
        map_structure(
            lambda t: self.assertEqual(
                t.size(), (self._batch_size, hidden_size)),
            final_state.cell_state)
        # Peel the nesting layer by layer down to a plain tensor.
        state = final_state.cell_state
        if is_multi:
            self.assertIsInstance(state, list)
            state = state[0]
        if cell_type == "LSTMCell":
            self.assertIsInstance(state, tuple)
            state = state[0]
        self.assertIsInstance(state, torch.Tensor)
    def test_decode_infer(self):
        r"""Tests decoding in inference mode for every configuration."""
        # Random valid memory lengths in [1, max_time].
        seq_length = np.random.randint(
            self._max_time, size=[self._batch_size]) + 1
        encoder_values_length = torch.tensor(seq_length)
        for (cell_type, is_multi), hparams in self._test_hparams.items():
            decoder = AttentionRNNDecoder(
                encoder_output_size=64,
                token_embedder=self._embedder,
                vocab_size=self._vocab_size,
                input_size=self._emb_dim,
                hparams=hparams)
            decoder.eval()
            helper_infer = decoder.create_helper(
                start_tokens=torch.tensor([1] * self._batch_size), end_token=2)
            outputs, final_state, sequence_lengths = decoder(
                memory=self._encoder_output,
                memory_sequence_length=encoder_values_length,
                helper=helper_infer)
            self._test_outputs(decoder, outputs, final_state, sequence_lengths,
                               test_mode=True)
    def test_decode_train(self):
        r"""Tests decoding in training mode for every configuration."""
        # Random valid memory lengths in [1, max_time].
        seq_length = np.random.randint(
            self._max_time, size=[self._batch_size]) + 1
        encoder_values_length = torch.tensor(seq_length)
        for (cell_type, is_multi), hparams in self._test_hparams.items():
            decoder = AttentionRNNDecoder(
                encoder_output_size=64,
                token_embedder=self._embedder,
                vocab_size=self._vocab_size,
                input_size=self._emb_dim,
                hparams=hparams)
            sequence_length = torch.tensor([self._max_time] * self._batch_size)
            helper_train = decoder.create_helper()
            outputs, final_state, sequence_lengths = decoder(
                memory=self._encoder_output,
                memory_sequence_length=encoder_values_length,
                helper=helper_train,
                inputs=self._inputs,
                sequence_length=sequence_length)
            self._test_outputs(decoder, outputs, final_state, sequence_lengths)
# Run the test suite when this file is executed as a script.
if __name__ == "__main__":
    unittest.main()
| true | true |
f71c2776956637e7ed8b7da0a4acf3481ed4e4c7 | 3,001 | py | Python | huaweicloud-sdk-vss/huaweicloudsdkvss/v3/model/delete_domains_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-vss/huaweicloudsdkvss/v3/model/delete_domains_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-vss/huaweicloudsdkvss/v3/model/delete_domains_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteDomainsRequest:
    """Request model for the delete-domain API.

    Attributes:
        openapi_types (dict): Maps each attribute name to its declared type.
        attribute_map (dict): Maps each attribute name to its JSON key.
    """

    sensitive_list = []

    openapi_types = {
        'domain_name': 'str'
    }

    attribute_map = {
        'domain_name': 'domain_name'
    }

    def __init__(self, domain_name=None):
        """DeleteDomainsRequest - a model defined in huaweicloud sdk"""
        self._domain_name = None
        self.discriminator = None
        self.domain_name = domain_name

    @property
    def domain_name(self):
        """Gets the domain_name of this DeleteDomainsRequest.

        The domain name.

        :return: The domain_name of this DeleteDomainsRequest.
        :rtype: str
        """
        return self._domain_name

    @domain_name.setter
    def domain_name(self, domain_name):
        """Sets the domain_name of this DeleteDomainsRequest.

        The domain name.

        :param domain_name: The domain_name of this DeleteDomainsRequest.
        :type: str
        """
        self._domain_name = domain_name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for name in six.iterkeys(self.openapi_types):
            val = getattr(self, name)
            if isinstance(val, list):
                # Recursively serialize nested model elements.
                result[name] = [
                    elem.to_dict() if hasattr(elem, "to_dict") else elem
                    for elem in val
                ]
            elif hasattr(val, "to_dict"):
                result[name] = val.to_dict()
            elif isinstance(val, dict):
                # Recursively serialize nested model values.
                result[name] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in val.items()
                }
            else:
                # Mask attributes marked sensitive (e.g. credentials).
                result[name] = ("****" if name in self.sensitive_list
                                else val)
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Force UTF-8 default encoding for non-ASCII output on Python 2.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, DeleteDomainsRequest)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.324561 | 79 | 0.553149 |
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteDomainsRequest:
    """Request model for the delete-domain API.

    Attributes:
        openapi_types (dict): Maps each attribute name to its declared type.
        attribute_map (dict): Maps each attribute name to its JSON key.
    """
    # Attribute names whose values are masked in to_dict() output.
    sensitive_list = []
    openapi_types = {
        'domain_name': 'str'
    }
    attribute_map = {
        'domain_name': 'domain_name'
    }
    def __init__(self, domain_name=None):
        """DeleteDomainsRequest - a model defined in huaweicloud sdk

        :param domain_name: The domain name.
        :type: str
        """
        self._domain_name = None
        self.discriminator = None
        self.domain_name = domain_name
    @property
    def domain_name(self):
        """Gets the domain_name of this DeleteDomainsRequest.

        :return: The domain_name of this DeleteDomainsRequest.
        :rtype: str
        """
        return self._domain_name
    @domain_name.setter
    def domain_name(self, domain_name):
        """Sets the domain_name of this DeleteDomainsRequest.

        :param domain_name: The domain_name of this DeleteDomainsRequest.
        :type: str
        """
        self._domain_name = domain_name
    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize nested model elements.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize nested model values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # Mask attributes marked sensitive (e.g. credentials).
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            # Force UTF-8 default encoding for non-ASCII output on Python 2.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, DeleteDomainsRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.