body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
35a7abf8b86fc712dbb9e278d0cd7327529043205f45d42d0a25a77d02537848
def test_loss_function_processing(): 'Test loss function processing' targets = np.random.default_rng(0).integers(3, size=100) with pytest.raises(ValueError): ModelAnalyzer(None, None, targets)
Test loss function processing
tests/test_unit.py
test_loss_function_processing
cloudbopper/anamod
1
python
def test_loss_function_processing(): targets = np.random.default_rng(0).integers(3, size=100) with pytest.raises(ValueError): ModelAnalyzer(None, None, targets)
def test_loss_function_processing(): targets = np.random.default_rng(0).integers(3, size=100) with pytest.raises(ValueError): ModelAnalyzer(None, None, targets)<|docstring|>Test loss function processing<|endoftext|>
8492e6c52251553e681d28f6fd2e0d1b722062f67d1cc45eedce83968676e05f
def find_lib_path(): 'Find NNNet dynamic library files.\n\n Returns\n -------\n lib_path : list(string)\n List of all found path to the libraries\n ' if hasattr(__builtin__, 'NNVM_BASE_PATH'): base_path = __builtin__.NNVM_BASE_PATH else: base_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) if hasattr(__builtin__, 'NNVM_LIBRARY_NAME'): lib_name = __builtin__.NNVM_LIBRARY_NAME else: lib_name = ('nnvm_compiler' if sys.platform.startswith('win32') else 'libnnvm_compiler') api_path = os.path.join(base_path, '..', '..', 'lib') cmake_build_path_win = os.path.join(base_path, '..', '..', '..', 'build', 'Release') cmake_build_path = os.path.join(base_path, '..', '..', '..', 'build') install_path = os.path.join(base_path, '..', '..', '..') dll_path = [base_path, api_path, cmake_build_path_win, cmake_build_path, install_path] if (sys.platform.startswith('linux') and os.environ.get('LD_LIBRARY_PATH', None)): dll_path.extend([p.strip() for p in os.environ['LD_LIBRARY_PATH'].split(':')]) elif (sys.platform.startswith('darwin') and os.environ.get('DYLD_LIBRARY_PATH', None)): dll_path.extend([p.strip() for p in os.environ['DYLD_LIBRARY_PATH'].split(':')]) elif (sys.platform.startswith('win32') and os.environ.get('PATH', None)): dll_path.extend([p.strip() for p in os.environ['PATH'].split(';')]) if sys.platform.startswith('win32'): vs_configuration = 'Release' if (platform.architecture()[0] == '64bit'): dll_path.append(os.path.join(base_path, '..', '..', '..', 'build', vs_configuration)) dll_path.append(os.path.join(base_path, '..', '..', '..', 'windows', 'x64', vs_configuration)) else: dll_path.append(os.path.join(base_path, '..', '..', '..', 'build', vs_configuration)) dll_path.append(os.path.join(base_path, '..', '..', '..', 'windows', vs_configuration)) dll_path = [os.path.join(p, ('%s.dll' % lib_name)) for p in dll_path] elif sys.platform.startswith('darwin'): dll_path = [os.path.join(p, ('%s.dylib' % lib_name)) for p in dll_path] else: dll_path = 
[os.path.join(p, ('%s.so' % lib_name)) for p in dll_path] lib_path = [p for p in dll_path if (os.path.exists(p) and os.path.isfile(p))] if (not lib_path): raise RuntimeError((('Cannot find the files.\n' + 'List of candidates:\n') + str('\n'.join(dll_path)))) return lib_path
Find NNNet dynamic library files. Returns ------- lib_path : list(string) List of all found path to the libraries
nnvm/python/nnvm/libinfo.py
find_lib_path
Checkmate50/tvm
286
python
def find_lib_path(): 'Find NNNet dynamic library files.\n\n Returns\n -------\n lib_path : list(string)\n List of all found path to the libraries\n ' if hasattr(__builtin__, 'NNVM_BASE_PATH'): base_path = __builtin__.NNVM_BASE_PATH else: base_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) if hasattr(__builtin__, 'NNVM_LIBRARY_NAME'): lib_name = __builtin__.NNVM_LIBRARY_NAME else: lib_name = ('nnvm_compiler' if sys.platform.startswith('win32') else 'libnnvm_compiler') api_path = os.path.join(base_path, '..', '..', 'lib') cmake_build_path_win = os.path.join(base_path, '..', '..', '..', 'build', 'Release') cmake_build_path = os.path.join(base_path, '..', '..', '..', 'build') install_path = os.path.join(base_path, '..', '..', '..') dll_path = [base_path, api_path, cmake_build_path_win, cmake_build_path, install_path] if (sys.platform.startswith('linux') and os.environ.get('LD_LIBRARY_PATH', None)): dll_path.extend([p.strip() for p in os.environ['LD_LIBRARY_PATH'].split(':')]) elif (sys.platform.startswith('darwin') and os.environ.get('DYLD_LIBRARY_PATH', None)): dll_path.extend([p.strip() for p in os.environ['DYLD_LIBRARY_PATH'].split(':')]) elif (sys.platform.startswith('win32') and os.environ.get('PATH', None)): dll_path.extend([p.strip() for p in os.environ['PATH'].split(';')]) if sys.platform.startswith('win32'): vs_configuration = 'Release' if (platform.architecture()[0] == '64bit'): dll_path.append(os.path.join(base_path, '..', '..', '..', 'build', vs_configuration)) dll_path.append(os.path.join(base_path, '..', '..', '..', 'windows', 'x64', vs_configuration)) else: dll_path.append(os.path.join(base_path, '..', '..', '..', 'build', vs_configuration)) dll_path.append(os.path.join(base_path, '..', '..', '..', 'windows', vs_configuration)) dll_path = [os.path.join(p, ('%s.dll' % lib_name)) for p in dll_path] elif sys.platform.startswith('darwin'): dll_path = [os.path.join(p, ('%s.dylib' % lib_name)) for p in dll_path] else: dll_path = 
[os.path.join(p, ('%s.so' % lib_name)) for p in dll_path] lib_path = [p for p in dll_path if (os.path.exists(p) and os.path.isfile(p))] if (not lib_path): raise RuntimeError((('Cannot find the files.\n' + 'List of candidates:\n') + str('\n'.join(dll_path)))) return lib_path
def find_lib_path(): 'Find NNNet dynamic library files.\n\n Returns\n -------\n lib_path : list(string)\n List of all found path to the libraries\n ' if hasattr(__builtin__, 'NNVM_BASE_PATH'): base_path = __builtin__.NNVM_BASE_PATH else: base_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) if hasattr(__builtin__, 'NNVM_LIBRARY_NAME'): lib_name = __builtin__.NNVM_LIBRARY_NAME else: lib_name = ('nnvm_compiler' if sys.platform.startswith('win32') else 'libnnvm_compiler') api_path = os.path.join(base_path, '..', '..', 'lib') cmake_build_path_win = os.path.join(base_path, '..', '..', '..', 'build', 'Release') cmake_build_path = os.path.join(base_path, '..', '..', '..', 'build') install_path = os.path.join(base_path, '..', '..', '..') dll_path = [base_path, api_path, cmake_build_path_win, cmake_build_path, install_path] if (sys.platform.startswith('linux') and os.environ.get('LD_LIBRARY_PATH', None)): dll_path.extend([p.strip() for p in os.environ['LD_LIBRARY_PATH'].split(':')]) elif (sys.platform.startswith('darwin') and os.environ.get('DYLD_LIBRARY_PATH', None)): dll_path.extend([p.strip() for p in os.environ['DYLD_LIBRARY_PATH'].split(':')]) elif (sys.platform.startswith('win32') and os.environ.get('PATH', None)): dll_path.extend([p.strip() for p in os.environ['PATH'].split(';')]) if sys.platform.startswith('win32'): vs_configuration = 'Release' if (platform.architecture()[0] == '64bit'): dll_path.append(os.path.join(base_path, '..', '..', '..', 'build', vs_configuration)) dll_path.append(os.path.join(base_path, '..', '..', '..', 'windows', 'x64', vs_configuration)) else: dll_path.append(os.path.join(base_path, '..', '..', '..', 'build', vs_configuration)) dll_path.append(os.path.join(base_path, '..', '..', '..', 'windows', vs_configuration)) dll_path = [os.path.join(p, ('%s.dll' % lib_name)) for p in dll_path] elif sys.platform.startswith('darwin'): dll_path = [os.path.join(p, ('%s.dylib' % lib_name)) for p in dll_path] else: dll_path = 
[os.path.join(p, ('%s.so' % lib_name)) for p in dll_path] lib_path = [p for p in dll_path if (os.path.exists(p) and os.path.isfile(p))] if (not lib_path): raise RuntimeError((('Cannot find the files.\n' + 'List of candidates:\n') + str('\n'.join(dll_path)))) return lib_path<|docstring|>Find NNNet dynamic library files. Returns ------- lib_path : list(string) List of all found path to the libraries<|endoftext|>
6b0a1854e5886e80ed70c2c4a127f89b35775bc79079bb60371842ea42eb76a3
def lpsd(x, fs=1.0, window='hann', fmin=None, fmax=None, Jdes=1000, Kdes=100, Kmin=1, xi=0.5, scaling='density'): '\n Compute the LPSD power spectrum estimation with a logarithmic frequency axis.\n\n Parameters\n ----------\n x : array_like\n time series to be transformed. "We assume to have a long stream x(n),\n n=0, ..., N-1 of equally spaced input data sampled with frequency fs. Typical\n values for N range from 10^4 to >10^6" [1]\n\n fs : float\n Sampling frequency of the `x` time series. Defaults to 1.0.\n\n window : str\n Desired window to use. If `window` is a string or tuple, it is passed to\n `scipy.signal.get_window` to generate the window values, which are DFT-even by\n default. See `scipy.signal.get_window` for a list of windows and required\n parameters. Defaults to a Hann window. "Choose a window function w(j, l) to\n reduce spectral leakage within the estimate. ... The computations of the window\n function will be performed when the segment lengths L(j) have been determined."\n [1]\n\n fmin, fmax : float, optional\n Lowest and highest frequency to estimate. Defaults to `fs / len(x)` and the\n Nyquist frequency `fs / 2`, respectively. "... we propose not to use the first\n few frequency bins. The first frequency bin that yields unbiased spectral\n estimates depends on the window function used. The bin is given by the effective\n half-width of the window transfer function." [1].\n\n Jdes : int, optional\n Desired number of Fourier frequencies. Defaults to 1000. "A typical value for J\n is 1000" [1]\n\n Kdes : int, optional\n Desired number of averages. Defaults to 100.\n\n Kmin : int, optional\n Minimum number of averages. Defaults to 1.\n\n xi : float, optional\n Fractional overlap between segments (0 <= xi < 1). Defaults to 0.5. "The\n amount of overlap is a trade-off between computational effort and flatness of\n the data weighting." [1]. 
See Figures 5 and 6 [1].\n\n scaling : {\'density\', \'spectrum\'}, optional\n Selects between computing the power spectral density (\'density\') where `Pxx` has\n units of V**2/Hz and computing the power spectrum (\'spectrum\') where `Pxx` has\n units of V**2, if `x` is measured in V and `fs` is measured in Hz. Defaults to\n \'density\'.\n\n Returns\n -------\n f : 1-d array\n Vector of frequencies corresponding to Pxx\n Pxx : 1d-array\n Vector of (uncalibrated) power spectrum estimates\n\n Notes\n -----\n The implementation follows references [1] and [2] quite closely; in particular, the\n variable names used in the program generally correspond to the variables in the\n paper; and the corresponding equation numbers are indicated in the comments.\n\n References\n ----------\n [1] Michael Tröbs and Gerhard Heinzel, "Improved spectrum estimation from\n digitized time series on a logarithmic frequency axis" in Measurement, vol 39\n (2006), pp 120-129.\n * http://dx.doi.org/10.1016/j.measurement.2005.10.010\n * http://pubman.mpdl.mpg.de/pubman/item/escidoc:150688:1\n\n [2] Michael Tröbs and Gerhard Heinzel, Corrigendum to "Improved spectrum\n estimation from digitized time series on a logarithmic frequency axis."\n ' assert (scaling in ['density', 'spectrum']) N = len(x) jj = np.arange(Jdes, dtype=int) if (not fmin): fmin = (fs / N) if (not fmax): fmax = (fs / 2) g = (np.log(fmax) - np.log(fmin)) f = (fmin * np.exp(((jj * g) / (Jdes - 1)))) rp = ((fmin * np.exp(((jj * g) / (Jdes - 1)))) * (np.exp((g / (Jdes - 1))) - 1)) ravg = ((fs / N) * (1 + ((1 - xi) * (Kdes - 1)))) rmin = ((fs / N) * (1 + ((1 - xi) * (Kmin - 1)))) case1 = (rp >= ravg) case2 = np.logical_and((rp < ravg), (np.sqrt((ravg * rp)) > rmin)) case3 = np.logical_not(np.logical_or(case1, case2)) rpp = np.zeros(Jdes) rpp[case1] = rp[case1] rpp[case2] = np.sqrt((ravg * rp[case2])) rpp[case3] = rmin L = np.around((fs / rpp)).astype(int) r = (fs / L) m = (f / r) Pxx = np.empty(Jdes) S1 = np.empty(Jdes) S2 = 
np.empty(Jdes) for jj in range(len(f)): D = int(np.around(((1 - xi) * L[jj]))) K = int(np.floor((((N - L[jj]) / D) + 1))) a = np.arange(L[jj]) b = (D * np.arange(K)) ii = (a[(:, np.newaxis)] + b) data = x[ii] data -= np.mean(data, axis=0) w = get_window(window, L[jj]) sinusoid = np.exp((((((- 2j) * np.pi) * np.arange(L[jj])[(:, np.newaxis)]) * m[jj]) / L[jj])) data = (data * (sinusoid * w[(:, np.newaxis)])) Pxx[jj] = np.mean((np.abs(np.sum(data, axis=0)) ** 2)) S1[jj] = np.sum(w) S2[jj] = np.sum((w ** 2)) if (scaling == 'spectrum'): C = (2.0 / (S1 ** 2)) Pxx = (Pxx * C) elif (scaling == 'density'): C = (2.0 / (fs * S2)) Pxx = (Pxx * C) return (f, Pxx)
Compute the LPSD power spectrum estimation with a logarithmic frequency axis. Parameters ---------- x : array_like time series to be transformed. "We assume to have a long stream x(n), n=0, ..., N-1 of equally spaced input data sampled with frequency fs. Typical values for N range from 10^4 to >10^6" [1] fs : float Sampling frequency of the `x` time series. Defaults to 1.0. window : str Desired window to use. If `window` is a string or tuple, it is passed to `scipy.signal.get_window` to generate the window values, which are DFT-even by default. See `scipy.signal.get_window` for a list of windows and required parameters. Defaults to a Hann window. "Choose a window function w(j, l) to reduce spectral leakage within the estimate. ... The computations of the window function will be performed when the segment lengths L(j) have been determined." [1] fmin, fmax : float, optional Lowest and highest frequency to estimate. Defaults to `fs / len(x)` and the Nyquist frequency `fs / 2`, respectively. "... we propose not to use the first few frequency bins. The first frequency bin that yields unbiased spectral estimates depends on the window function used. The bin is given by the effective half-width of the window transfer function." [1]. Jdes : int, optional Desired number of Fourier frequencies. Defaults to 1000. "A typical value for J is 1000" [1] Kdes : int, optional Desired number of averages. Defaults to 100. Kmin : int, optional Minimum number of averages. Defaults to 1. xi : float, optional Fractional overlap between segments (0 <= xi < 1). Defaults to 0.5. "The amount of overlap is a trade-off between computational effort and flatness of the data weighting." [1]. See Figures 5 and 6 [1]. scaling : {'density', 'spectrum'}, optional Selects between computing the power spectral density ('density') where `Pxx` has units of V**2/Hz and computing the power spectrum ('spectrum') where `Pxx` has units of V**2, if `x` is measured in V and `fs` is measured in Hz. 
Defaults to 'density'. Returns ------- f : 1-d array Vector of frequencies corresponding to Pxx Pxx : 1d-array Vector of (uncalibrated) power spectrum estimates Notes ----- The implementation follows references [1] and [2] quite closely; in particular, the variable names used in the program generally correspond to the variables in the paper; and the corresponding equation numbers are indicated in the comments. References ---------- [1] Michael Tröbs and Gerhard Heinzel, "Improved spectrum estimation from digitized time series on a logarithmic frequency axis" in Measurement, vol 39 (2006), pp 120-129. * http://dx.doi.org/10.1016/j.measurement.2005.10.010 * http://pubman.mpdl.mpg.de/pubman/item/escidoc:150688:1 [2] Michael Tröbs and Gerhard Heinzel, Corrigendum to "Improved spectrum estimation from digitized time series on a logarithmic frequency axis."
freqtools/time_data.py
lpsd
bleykauf/freqtools
0
python
def lpsd(x, fs=1.0, window='hann', fmin=None, fmax=None, Jdes=1000, Kdes=100, Kmin=1, xi=0.5, scaling='density'): '\n Compute the LPSD power spectrum estimation with a logarithmic frequency axis.\n\n Parameters\n ----------\n x : array_like\n time series to be transformed. "We assume to have a long stream x(n),\n n=0, ..., N-1 of equally spaced input data sampled with frequency fs. Typical\n values for N range from 10^4 to >10^6" [1]\n\n fs : float\n Sampling frequency of the `x` time series. Defaults to 1.0.\n\n window : str\n Desired window to use. If `window` is a string or tuple, it is passed to\n `scipy.signal.get_window` to generate the window values, which are DFT-even by\n default. See `scipy.signal.get_window` for a list of windows and required\n parameters. Defaults to a Hann window. "Choose a window function w(j, l) to\n reduce spectral leakage within the estimate. ... The computations of the window\n function will be performed when the segment lengths L(j) have been determined."\n [1]\n\n fmin, fmax : float, optional\n Lowest and highest frequency to estimate. Defaults to `fs / len(x)` and the\n Nyquist frequency `fs / 2`, respectively. "... we propose not to use the first\n few frequency bins. The first frequency bin that yields unbiased spectral\n estimates depends on the window function used. The bin is given by the effective\n half-width of the window transfer function." [1].\n\n Jdes : int, optional\n Desired number of Fourier frequencies. Defaults to 1000. "A typical value for J\n is 1000" [1]\n\n Kdes : int, optional\n Desired number of averages. Defaults to 100.\n\n Kmin : int, optional\n Minimum number of averages. Defaults to 1.\n\n xi : float, optional\n Fractional overlap between segments (0 <= xi < 1). Defaults to 0.5. "The\n amount of overlap is a trade-off between computational effort and flatness of\n the data weighting." [1]. 
See Figures 5 and 6 [1].\n\n scaling : {\'density\', \'spectrum\'}, optional\n Selects between computing the power spectral density (\'density\') where `Pxx` has\n units of V**2/Hz and computing the power spectrum (\'spectrum\') where `Pxx` has\n units of V**2, if `x` is measured in V and `fs` is measured in Hz. Defaults to\n \'density\'.\n\n Returns\n -------\n f : 1-d array\n Vector of frequencies corresponding to Pxx\n Pxx : 1d-array\n Vector of (uncalibrated) power spectrum estimates\n\n Notes\n -----\n The implementation follows references [1] and [2] quite closely; in particular, the\n variable names used in the program generally correspond to the variables in the\n paper; and the corresponding equation numbers are indicated in the comments.\n\n References\n ----------\n [1] Michael Tröbs and Gerhard Heinzel, "Improved spectrum estimation from\n digitized time series on a logarithmic frequency axis" in Measurement, vol 39\n (2006), pp 120-129.\n * http://dx.doi.org/10.1016/j.measurement.2005.10.010\n * http://pubman.mpdl.mpg.de/pubman/item/escidoc:150688:1\n\n [2] Michael Tröbs and Gerhard Heinzel, Corrigendum to "Improved spectrum\n estimation from digitized time series on a logarithmic frequency axis."\n ' assert (scaling in ['density', 'spectrum']) N = len(x) jj = np.arange(Jdes, dtype=int) if (not fmin): fmin = (fs / N) if (not fmax): fmax = (fs / 2) g = (np.log(fmax) - np.log(fmin)) f = (fmin * np.exp(((jj * g) / (Jdes - 1)))) rp = ((fmin * np.exp(((jj * g) / (Jdes - 1)))) * (np.exp((g / (Jdes - 1))) - 1)) ravg = ((fs / N) * (1 + ((1 - xi) * (Kdes - 1)))) rmin = ((fs / N) * (1 + ((1 - xi) * (Kmin - 1)))) case1 = (rp >= ravg) case2 = np.logical_and((rp < ravg), (np.sqrt((ravg * rp)) > rmin)) case3 = np.logical_not(np.logical_or(case1, case2)) rpp = np.zeros(Jdes) rpp[case1] = rp[case1] rpp[case2] = np.sqrt((ravg * rp[case2])) rpp[case3] = rmin L = np.around((fs / rpp)).astype(int) r = (fs / L) m = (f / r) Pxx = np.empty(Jdes) S1 = np.empty(Jdes) S2 = 
np.empty(Jdes) for jj in range(len(f)): D = int(np.around(((1 - xi) * L[jj]))) K = int(np.floor((((N - L[jj]) / D) + 1))) a = np.arange(L[jj]) b = (D * np.arange(K)) ii = (a[(:, np.newaxis)] + b) data = x[ii] data -= np.mean(data, axis=0) w = get_window(window, L[jj]) sinusoid = np.exp((((((- 2j) * np.pi) * np.arange(L[jj])[(:, np.newaxis)]) * m[jj]) / L[jj])) data = (data * (sinusoid * w[(:, np.newaxis)])) Pxx[jj] = np.mean((np.abs(np.sum(data, axis=0)) ** 2)) S1[jj] = np.sum(w) S2[jj] = np.sum((w ** 2)) if (scaling == 'spectrum'): C = (2.0 / (S1 ** 2)) Pxx = (Pxx * C) elif (scaling == 'density'): C = (2.0 / (fs * S2)) Pxx = (Pxx * C) return (f, Pxx)
def lpsd(x, fs=1.0, window='hann', fmin=None, fmax=None, Jdes=1000, Kdes=100, Kmin=1, xi=0.5, scaling='density'): '\n Compute the LPSD power spectrum estimation with a logarithmic frequency axis.\n\n Parameters\n ----------\n x : array_like\n time series to be transformed. "We assume to have a long stream x(n),\n n=0, ..., N-1 of equally spaced input data sampled with frequency fs. Typical\n values for N range from 10^4 to >10^6" [1]\n\n fs : float\n Sampling frequency of the `x` time series. Defaults to 1.0.\n\n window : str\n Desired window to use. If `window` is a string or tuple, it is passed to\n `scipy.signal.get_window` to generate the window values, which are DFT-even by\n default. See `scipy.signal.get_window` for a list of windows and required\n parameters. Defaults to a Hann window. "Choose a window function w(j, l) to\n reduce spectral leakage within the estimate. ... The computations of the window\n function will be performed when the segment lengths L(j) have been determined."\n [1]\n\n fmin, fmax : float, optional\n Lowest and highest frequency to estimate. Defaults to `fs / len(x)` and the\n Nyquist frequency `fs / 2`, respectively. "... we propose not to use the first\n few frequency bins. The first frequency bin that yields unbiased spectral\n estimates depends on the window function used. The bin is given by the effective\n half-width of the window transfer function." [1].\n\n Jdes : int, optional\n Desired number of Fourier frequencies. Defaults to 1000. "A typical value for J\n is 1000" [1]\n\n Kdes : int, optional\n Desired number of averages. Defaults to 100.\n\n Kmin : int, optional\n Minimum number of averages. Defaults to 1.\n\n xi : float, optional\n Fractional overlap between segments (0 <= xi < 1). Defaults to 0.5. "The\n amount of overlap is a trade-off between computational effort and flatness of\n the data weighting." [1]. 
See Figures 5 and 6 [1].\n\n scaling : {\'density\', \'spectrum\'}, optional\n Selects between computing the power spectral density (\'density\') where `Pxx` has\n units of V**2/Hz and computing the power spectrum (\'spectrum\') where `Pxx` has\n units of V**2, if `x` is measured in V and `fs` is measured in Hz. Defaults to\n \'density\'.\n\n Returns\n -------\n f : 1-d array\n Vector of frequencies corresponding to Pxx\n Pxx : 1d-array\n Vector of (uncalibrated) power spectrum estimates\n\n Notes\n -----\n The implementation follows references [1] and [2] quite closely; in particular, the\n variable names used in the program generally correspond to the variables in the\n paper; and the corresponding equation numbers are indicated in the comments.\n\n References\n ----------\n [1] Michael Tröbs and Gerhard Heinzel, "Improved spectrum estimation from\n digitized time series on a logarithmic frequency axis" in Measurement, vol 39\n (2006), pp 120-129.\n * http://dx.doi.org/10.1016/j.measurement.2005.10.010\n * http://pubman.mpdl.mpg.de/pubman/item/escidoc:150688:1\n\n [2] Michael Tröbs and Gerhard Heinzel, Corrigendum to "Improved spectrum\n estimation from digitized time series on a logarithmic frequency axis."\n ' assert (scaling in ['density', 'spectrum']) N = len(x) jj = np.arange(Jdes, dtype=int) if (not fmin): fmin = (fs / N) if (not fmax): fmax = (fs / 2) g = (np.log(fmax) - np.log(fmin)) f = (fmin * np.exp(((jj * g) / (Jdes - 1)))) rp = ((fmin * np.exp(((jj * g) / (Jdes - 1)))) * (np.exp((g / (Jdes - 1))) - 1)) ravg = ((fs / N) * (1 + ((1 - xi) * (Kdes - 1)))) rmin = ((fs / N) * (1 + ((1 - xi) * (Kmin - 1)))) case1 = (rp >= ravg) case2 = np.logical_and((rp < ravg), (np.sqrt((ravg * rp)) > rmin)) case3 = np.logical_not(np.logical_or(case1, case2)) rpp = np.zeros(Jdes) rpp[case1] = rp[case1] rpp[case2] = np.sqrt((ravg * rp[case2])) rpp[case3] = rmin L = np.around((fs / rpp)).astype(int) r = (fs / L) m = (f / r) Pxx = np.empty(Jdes) S1 = np.empty(Jdes) S2 = 
np.empty(Jdes) for jj in range(len(f)): D = int(np.around(((1 - xi) * L[jj]))) K = int(np.floor((((N - L[jj]) / D) + 1))) a = np.arange(L[jj]) b = (D * np.arange(K)) ii = (a[(:, np.newaxis)] + b) data = x[ii] data -= np.mean(data, axis=0) w = get_window(window, L[jj]) sinusoid = np.exp((((((- 2j) * np.pi) * np.arange(L[jj])[(:, np.newaxis)]) * m[jj]) / L[jj])) data = (data * (sinusoid * w[(:, np.newaxis)])) Pxx[jj] = np.mean((np.abs(np.sum(data, axis=0)) ** 2)) S1[jj] = np.sum(w) S2[jj] = np.sum((w ** 2)) if (scaling == 'spectrum'): C = (2.0 / (S1 ** 2)) Pxx = (Pxx * C) elif (scaling == 'density'): C = (2.0 / (fs * S2)) Pxx = (Pxx * C) return (f, Pxx)<|docstring|>Compute the LPSD power spectrum estimation with a logarithmic frequency axis. Parameters ---------- x : array_like time series to be transformed. "We assume to have a long stream x(n), n=0, ..., N-1 of equally spaced input data sampled with frequency fs. Typical values for N range from 10^4 to >10^6" [1] fs : float Sampling frequency of the `x` time series. Defaults to 1.0. window : str Desired window to use. If `window` is a string or tuple, it is passed to `scipy.signal.get_window` to generate the window values, which are DFT-even by default. See `scipy.signal.get_window` for a list of windows and required parameters. Defaults to a Hann window. "Choose a window function w(j, l) to reduce spectral leakage within the estimate. ... The computations of the window function will be performed when the segment lengths L(j) have been determined." [1] fmin, fmax : float, optional Lowest and highest frequency to estimate. Defaults to `fs / len(x)` and the Nyquist frequency `fs / 2`, respectively. "... we propose not to use the first few frequency bins. The first frequency bin that yields unbiased spectral estimates depends on the window function used. The bin is given by the effective half-width of the window transfer function." [1]. Jdes : int, optional Desired number of Fourier frequencies. Defaults to 1000. 
"A typical value for J is 1000" [1] Kdes : int, optional Desired number of averages. Defaults to 100. Kmin : int, optional Minimum number of averages. Defaults to 1. xi : float, optional Fractional overlap between segments (0 <= xi < 1). Defaults to 0.5. "The amount of overlap is a trade-off between computational effort and flatness of the data weighting." [1]. See Figures 5 and 6 [1]. scaling : {'density', 'spectrum'}, optional Selects between computing the power spectral density ('density') where `Pxx` has units of V**2/Hz and computing the power spectrum ('spectrum') where `Pxx` has units of V**2, if `x` is measured in V and `fs` is measured in Hz. Defaults to 'density'. Returns ------- f : 1-d array Vector of frequencies corresponding to Pxx Pxx : 1d-array Vector of (uncalibrated) power spectrum estimates Notes ----- The implementation follows references [1] and [2] quite closely; in particular, the variable names used in the program generally correspond to the variables in the paper; and the corresponding equation numbers are indicated in the comments. References ---------- [1] Michael Tröbs and Gerhard Heinzel, "Improved spectrum estimation from digitized time series on a logarithmic frequency axis" in Measurement, vol 39 (2006), pp 120-129. * http://dx.doi.org/10.1016/j.measurement.2005.10.010 * http://pubman.mpdl.mpg.de/pubman/item/escidoc:150688:1 [2] Michael Tröbs and Gerhard Heinzel, Corrigendum to "Improved spectrum estimation from digitized time series on a logarithmic frequency axis."<|endoftext|>
66aa909cd4952f294547d330e133c2461be6b7daba0251ce56ee2cabe6abfc8e
def to_oscillator_noise(self, method='welch', window='hann', **kwargs): '\n Create a OscillatorNoise object using the Welch method.\n\n Parameters\n ----------\n method : {"welch", "lpsd"}, optional\n The method used for calculating the oscillator noise. Defaults to Welch\n method.\n window : str or tuple or array_like, optional\n Desired window to use. If `window` is a string or tuple, it is passed to\n `scipy.signal.get_window` to generate the window values, which are DFT-even\n by default. See `scipy.signal.get_window` for a list of windows and required\n parameters. If `window` is array_like it will be used directly as the window\n and its length must be nperseg. Defaults to a Hann window.\n **kwargs :\n Arguments will be passed to the function used for calculating the oscillator\n noise. Note that `scaling` and `return_onesided` are always set\n automatically for consistency.\n\n Returns\n -------\n OscillatorNoise\n ' assert (method in ['welch', 'lpsd']) if (method == 'welch'): (f, Pxx) = welch(self.freqs, self.sample_rate, window=window, return_onesided=True, scaling='density', **kwargs) elif (method == 'lpsd'): (f, Pxx) = lpsd(self.freqs, self.sample_rate, window=window, scaling='density', **kwargs) return OscillatorNoise(f, Pxx, representation='psd_freq', n_sided=1, divide_by=self.divide_by)
Create a OscillatorNoise object using the Welch method. Parameters ---------- method : {"welch", "lpsd"}, optional The method used for calculating the oscillator noise. Defaults to Welch method. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `scipy.signal.get_window` to generate the window values, which are DFT-even by default. See `scipy.signal.get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window. **kwargs : Arguments will be passed to the function used for calculating the oscillator noise. Note that `scaling` and `return_onesided` are always set automatically for consistency. Returns ------- OscillatorNoise
freqtools/time_data.py
to_oscillator_noise
bleykauf/freqtools
0
python
def to_oscillator_noise(self, method='welch', window='hann', **kwargs): '\n Create a OscillatorNoise object using the Welch method.\n\n Parameters\n ----------\n method : {"welch", "lpsd"}, optional\n The method used for calculating the oscillator noise. Defaults to Welch\n method.\n window : str or tuple or array_like, optional\n Desired window to use. If `window` is a string or tuple, it is passed to\n `scipy.signal.get_window` to generate the window values, which are DFT-even\n by default. See `scipy.signal.get_window` for a list of windows and required\n parameters. If `window` is array_like it will be used directly as the window\n and its length must be nperseg. Defaults to a Hann window.\n **kwargs :\n Arguments will be passed to the function used for calculating the oscillator\n noise. Note that `scaling` and `return_onesided` are always set\n automatically for consistency.\n\n Returns\n -------\n OscillatorNoise\n ' assert (method in ['welch', 'lpsd']) if (method == 'welch'): (f, Pxx) = welch(self.freqs, self.sample_rate, window=window, return_onesided=True, scaling='density', **kwargs) elif (method == 'lpsd'): (f, Pxx) = lpsd(self.freqs, self.sample_rate, window=window, scaling='density', **kwargs) return OscillatorNoise(f, Pxx, representation='psd_freq', n_sided=1, divide_by=self.divide_by)
def to_oscillator_noise(self, method='welch', window='hann', **kwargs): '\n Create a OscillatorNoise object using the Welch method.\n\n Parameters\n ----------\n method : {"welch", "lpsd"}, optional\n The method used for calculating the oscillator noise. Defaults to Welch\n method.\n window : str or tuple or array_like, optional\n Desired window to use. If `window` is a string or tuple, it is passed to\n `scipy.signal.get_window` to generate the window values, which are DFT-even\n by default. See `scipy.signal.get_window` for a list of windows and required\n parameters. If `window` is array_like it will be used directly as the window\n and its length must be nperseg. Defaults to a Hann window.\n **kwargs :\n Arguments will be passed to the function used for calculating the oscillator\n noise. Note that `scaling` and `return_onesided` are always set\n automatically for consistency.\n\n Returns\n -------\n OscillatorNoise\n ' assert (method in ['welch', 'lpsd']) if (method == 'welch'): (f, Pxx) = welch(self.freqs, self.sample_rate, window=window, return_onesided=True, scaling='density', **kwargs) elif (method == 'lpsd'): (f, Pxx) = lpsd(self.freqs, self.sample_rate, window=window, scaling='density', **kwargs) return OscillatorNoise(f, Pxx, representation='psd_freq', n_sided=1, divide_by=self.divide_by)<|docstring|>Create a OscillatorNoise object using the Welch method. Parameters ---------- method : {"welch", "lpsd"}, optional The method used for calculating the oscillator noise. Defaults to Welch method. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `scipy.signal.get_window` to generate the window values, which are DFT-even by default. See `scipy.signal.get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window. 
**kwargs : Arguments will be passed to the function used for calculating the oscillator noise. Note that `scaling` and `return_onesided` are always set automatically for consistency. Returns ------- OscillatorNoise<|endoftext|>
67e184d218e42d534c978689871f7a359a9062d1890cbb126138d534ffd482da
def adev(self, scaling=1): '\n Calculates the Allan deviation of the data.\n\n Parameters\n ----------\n scaling : float (optional)\n normalization factor, i.e. the oscillator frequency ν_0\n\n Returns\n -------\n taus, adev, adeverror : list\n The taus for which the Allan deviation has been calculated, the adev at\n these taus and their statistical error.\n ' freqs = (np.array(self.freqs) * scaling) tau_max = np.log10(len(self.freqs)) taus = (np.logspace(0, tau_max) / self.sample_rate) (taus, adev, adeverror, _) = allantools.adev(freqs, data_type='freq', rate=self.sample_rate, taus=taus) return (taus, adev, adeverror)
Calculates the Allan deviation of the data. Parameters ---------- scaling : float (optional) normalization factor, i.e. the oscillator frequency ν_0 Returns ------- taus, adev, adeverror : list The taus for which the Allan deviation has been calculated, the adev at these taus and their statistical error.
freqtools/time_data.py
adev
bleykauf/freqtools
0
python
def adev(self, scaling=1): '\n Calculates the Allan deviation of the data.\n\n Parameters\n ----------\n scaling : float (optional)\n normalization factor, i.e. the oscillator frequency ν_0\n\n Returns\n -------\n taus, adev, adeverror : list\n The taus for which the Allan deviation has been calculated, the adev at\n these taus and their statistical error.\n ' freqs = (np.array(self.freqs) * scaling) tau_max = np.log10(len(self.freqs)) taus = (np.logspace(0, tau_max) / self.sample_rate) (taus, adev, adeverror, _) = allantools.adev(freqs, data_type='freq', rate=self.sample_rate, taus=taus) return (taus, adev, adeverror)
def adev(self, scaling=1): '\n Calculates the Allan deviation of the data.\n\n Parameters\n ----------\n scaling : float (optional)\n normalization factor, i.e. the oscillator frequency ν_0\n\n Returns\n -------\n taus, adev, adeverror : list\n The taus for which the Allan deviation has been calculated, the adev at\n these taus and their statistical error.\n ' freqs = (np.array(self.freqs) * scaling) tau_max = np.log10(len(self.freqs)) taus = (np.logspace(0, tau_max) / self.sample_rate) (taus, adev, adeverror, _) = allantools.adev(freqs, data_type='freq', rate=self.sample_rate, taus=taus) return (taus, adev, adeverror)<|docstring|>Calculates the Allan deviation of the data. Parameters ---------- scaling : float (optional) normalization factor, i.e. the oscillator frequency ν_0 Returns ------- taus, adev, adeverror : list The taus for which the Allan deviation has been calculated, the adev at these taus and their statistical error.<|endoftext|>
7e630dd71610026ee27bf64e1764b4a12a1754f8487d5c50c4ec7747e9056fca
def plot_time_record(self, ax=None): '\n Plots the time record of the data.\n\n Parameters\n ----------\n ax : Axis (optional)\n If axis is provided, they will be used for the plot. if not provided, a new\n plot will automatically be created.\n\n Returns\n -------\n fig, ax : Figure and Axis\n The Figure and Axis handles of the plot that was used.\n ' t = np.linspace(0, self.duration, num=self.n_samples) if (ax is None): (fig, ax) = plt.subplots() else: fig = ax.figure ax.plot(t, self.freqs, label='Mean frequency: ({:3f}+/-{:3f}) MHz'.format((self.mean_frequency * 1e-06), (np.std(self.freqs) * 1e-06))) ax.set_xlabel('time t (s)') ax.set_ylabel('frequency deviation (Hz)') ax.legend() plt.grid(b='on', which='minor', axis='both') plt.box(on='on') return (fig, ax)
Plots the time record of the data. Parameters ---------- ax : Axis (optional) If axis is provided, they will be used for the plot. if not provided, a new plot will automatically be created. Returns ------- fig, ax : Figure and Axis The Figure and Axis handles of the plot that was used.
freqtools/time_data.py
plot_time_record
bleykauf/freqtools
0
python
def plot_time_record(self, ax=None): '\n Plots the time record of the data.\n\n Parameters\n ----------\n ax : Axis (optional)\n If axis is provided, they will be used for the plot. if not provided, a new\n plot will automatically be created.\n\n Returns\n -------\n fig, ax : Figure and Axis\n The Figure and Axis handles of the plot that was used.\n ' t = np.linspace(0, self.duration, num=self.n_samples) if (ax is None): (fig, ax) = plt.subplots() else: fig = ax.figure ax.plot(t, self.freqs, label='Mean frequency: ({:3f}+/-{:3f}) MHz'.format((self.mean_frequency * 1e-06), (np.std(self.freqs) * 1e-06))) ax.set_xlabel('time t (s)') ax.set_ylabel('frequency deviation (Hz)') ax.legend() plt.grid(b='on', which='minor', axis='both') plt.box(on='on') return (fig, ax)
def plot_time_record(self, ax=None): '\n Plots the time record of the data.\n\n Parameters\n ----------\n ax : Axis (optional)\n If axis is provided, they will be used for the plot. if not provided, a new\n plot will automatically be created.\n\n Returns\n -------\n fig, ax : Figure and Axis\n The Figure and Axis handles of the plot that was used.\n ' t = np.linspace(0, self.duration, num=self.n_samples) if (ax is None): (fig, ax) = plt.subplots() else: fig = ax.figure ax.plot(t, self.freqs, label='Mean frequency: ({:3f}+/-{:3f}) MHz'.format((self.mean_frequency * 1e-06), (np.std(self.freqs) * 1e-06))) ax.set_xlabel('time t (s)') ax.set_ylabel('frequency deviation (Hz)') ax.legend() plt.grid(b='on', which='minor', axis='both') plt.box(on='on') return (fig, ax)<|docstring|>Plots the time record of the data. Parameters ---------- ax : Axis (optional) If axis is provided, they will be used for the plot. if not provided, a new plot will automatically be created. Returns ------- fig, ax : Figure and Axis The Figure and Axis handles of the plot that was used.<|endoftext|>
a6676f0ab287cf8c2386e049fa4d1cd01b5ee7bf1445aeb4e9da8e170adadaf8
def plot_adev(self, ax=None, **kwargs): '\n Plots the Allan deviation of the data.\n\n Parameters\n ----------\n ax : Axis (optional)\n If axis is provided, they will be used for the plot. if not provided, a new\n plot will automatically be created.\n **kwargs:\n keyworded arguments passed to `adev()`.\n\n Returns\n -------\n fig, ax : Figure and Axis\n The Figure and Axis handles of the plot that was used.\n ' (taus, adev, adeverror) = self.adev(**kwargs) if (ax is None): (fig, ax) = plt.subplots() else: fig = ax.figure ax.set_yscale('log') ax.set_xscale('log') ax.errorbar(taus, adev, yerr=adeverror) ax.set_xlabel('Averaging time t (s)') ax.set_ylabel('Allan deviation $\\sigma_y(t)$') plt.grid(b='on', which='minor', axis='both') plt.box(on='on') return (fig, ax)
Plots the Allan deviation of the data. Parameters ---------- ax : Axis (optional) If axis is provided, they will be used for the plot. if not provided, a new plot will automatically be created. **kwargs: keyworded arguments passed to `adev()`. Returns ------- fig, ax : Figure and Axis The Figure and Axis handles of the plot that was used.
freqtools/time_data.py
plot_adev
bleykauf/freqtools
0
python
def plot_adev(self, ax=None, **kwargs): '\n Plots the Allan deviation of the data.\n\n Parameters\n ----------\n ax : Axis (optional)\n If axis is provided, they will be used for the plot. if not provided, a new\n plot will automatically be created.\n **kwargs:\n keyworded arguments passed to `adev()`.\n\n Returns\n -------\n fig, ax : Figure and Axis\n The Figure and Axis handles of the plot that was used.\n ' (taus, adev, adeverror) = self.adev(**kwargs) if (ax is None): (fig, ax) = plt.subplots() else: fig = ax.figure ax.set_yscale('log') ax.set_xscale('log') ax.errorbar(taus, adev, yerr=adeverror) ax.set_xlabel('Averaging time t (s)') ax.set_ylabel('Allan deviation $\\sigma_y(t)$') plt.grid(b='on', which='minor', axis='both') plt.box(on='on') return (fig, ax)
def plot_adev(self, ax=None, **kwargs): '\n Plots the Allan deviation of the data.\n\n Parameters\n ----------\n ax : Axis (optional)\n If axis is provided, they will be used for the plot. if not provided, a new\n plot will automatically be created.\n **kwargs:\n keyworded arguments passed to `adev()`.\n\n Returns\n -------\n fig, ax : Figure and Axis\n The Figure and Axis handles of the plot that was used.\n ' (taus, adev, adeverror) = self.adev(**kwargs) if (ax is None): (fig, ax) = plt.subplots() else: fig = ax.figure ax.set_yscale('log') ax.set_xscale('log') ax.errorbar(taus, adev, yerr=adeverror) ax.set_xlabel('Averaging time t (s)') ax.set_ylabel('Allan deviation $\\sigma_y(t)$') plt.grid(b='on', which='minor', axis='both') plt.box(on='on') return (fig, ax)<|docstring|>Plots the Allan deviation of the data. Parameters ---------- ax : Axis (optional) If axis is provided, they will be used for the plot. if not provided, a new plot will automatically be created. **kwargs: keyworded arguments passed to `adev()`. Returns ------- fig, ax : Figure and Axis The Figure and Axis handles of the plot that was used.<|endoftext|>
cae98b213a493a07b2696c0523f87dbbdc388122834b279a998f42a45304d747
def save_file_to_fe(all_info, spec_fp=settings.JSON_COLORS_DATA_DUMP_FOR_FRONTEND_CHINESE): '\n 保存信息到前端目录\n :param all_info:\n :param is_chinese:bool,\n :return:\n ' save_fp = get_frontend_path(spec_fp=spec_fp) bak_fp = '.'.join([save_fp, 'bak']) print(bak_fp, save_fp) with open(save_fp, 'w') as jf: json.dump(all_info, jf, ensure_ascii=False, indent=2) return 0
保存信息到前端目录 :param all_info: :param is_chinese:bool, :return:
main.py
save_file_to_fe
imoyao/Traditional-Chinese-Colors
2
python
def save_file_to_fe(all_info, spec_fp=settings.JSON_COLORS_DATA_DUMP_FOR_FRONTEND_CHINESE): '\n 保存信息到前端目录\n :param all_info:\n :param is_chinese:bool,\n :return:\n ' save_fp = get_frontend_path(spec_fp=spec_fp) bak_fp = '.'.join([save_fp, 'bak']) print(bak_fp, save_fp) with open(save_fp, 'w') as jf: json.dump(all_info, jf, ensure_ascii=False, indent=2) return 0
def save_file_to_fe(all_info, spec_fp=settings.JSON_COLORS_DATA_DUMP_FOR_FRONTEND_CHINESE): '\n 保存信息到前端目录\n :param all_info:\n :param is_chinese:bool,\n :return:\n ' save_fp = get_frontend_path(spec_fp=spec_fp) bak_fp = '.'.join([save_fp, 'bak']) print(bak_fp, save_fp) with open(save_fp, 'w') as jf: json.dump(all_info, jf, ensure_ascii=False, indent=2) return 0<|docstring|>保存信息到前端目录 :param all_info: :param is_chinese:bool, :return:<|endoftext|>
4f1b18b9bb0790119fa450d5909825c8a8d2a844904b0d729902d9872caa8f05
def testStatement(self): 'Test Statement' pass
Test Statement
test/test_statement.py
testStatement
Factern/factern-client-python
0
python
def testStatement(self): pass
def testStatement(self): pass<|docstring|>Test Statement<|endoftext|>
af1bbeffc761af0e7a956725daa214c51a0f539b751aba2a896e0c15f5a18a3b
def pose_dict_to_numpy(pose): '\n Conert pandaset pose dict to a numpy vector in order to pass it through the network\n ' pose_np = [pose['position']['x'], pose['position']['y'], pose['position']['z'], pose['heading']['w'], pose['heading']['x'], pose['heading']['y'], pose['heading']['z']] return pose_np
Conert pandaset pose dict to a numpy vector in order to pass it through the network
pcdet/datasets/pandaset/pandaset_dataset.py
pose_dict_to_numpy
CSL-KU/OpenPCDet
1,984
python
def pose_dict_to_numpy(pose): '\n \n ' pose_np = [pose['position']['x'], pose['position']['y'], pose['position']['z'], pose['heading']['w'], pose['heading']['x'], pose['heading']['y'], pose['heading']['z']] return pose_np
def pose_dict_to_numpy(pose): '\n \n ' pose_np = [pose['position']['x'], pose['position']['y'], pose['position']['z'], pose['heading']['w'], pose['heading']['x'], pose['heading']['y'], pose['heading']['z']] return pose_np<|docstring|>Conert pandaset pose dict to a numpy vector in order to pass it through the network<|endoftext|>
066e2a6591576beb51fe1c992281984a9927f92863202fb40c54f7cf3c9e54a6
def pose_numpy_to_dict(pose): '\n Conert pandaset pose dict to a numpy vector in order to pass it through the network\n ' pose_dict = {'position': {'x': pose[0], 'y': pose[1], 'z': pose[2]}, 'heading': {'w': pose[3], 'x': pose[4], 'y': pose[5], 'z': pose[6]}} return pose_dict
Conert pandaset pose dict to a numpy vector in order to pass it through the network
pcdet/datasets/pandaset/pandaset_dataset.py
pose_numpy_to_dict
CSL-KU/OpenPCDet
1,984
python
def pose_numpy_to_dict(pose): '\n \n ' pose_dict = {'position': {'x': pose[0], 'y': pose[1], 'z': pose[2]}, 'heading': {'w': pose[3], 'x': pose[4], 'y': pose[5], 'z': pose[6]}} return pose_dict
def pose_numpy_to_dict(pose): '\n \n ' pose_dict = {'position': {'x': pose[0], 'y': pose[1], 'z': pose[2]}, 'heading': {'w': pose[3], 'x': pose[4], 'y': pose[5], 'z': pose[6]}} return pose_dict<|docstring|>Conert pandaset pose dict to a numpy vector in order to pass it through the network<|endoftext|>
255a90d7cdbbaf23010eb4df37faebd5f782e5d2c7265b562a41222a1c3932c4
def create_pandaset_infos(dataset_cfg, class_names, data_path, save_path): '\n Create dataset_infos files in order not to have it in a preprocessed pickle\n file with the info for each sample\n See PandasetDataset.get_infos for further details.\n ' dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False) for split in ['train', 'val', 'test']: print('---------------- Start to generate {} data infos ---------------'.format(split)) dataset.set_split(split) infos = dataset.get_infos() file_path = os.path.join(save_path, 'pandaset_infos_{}.pkl'.format(split)) with open(file_path, 'wb') as f: pickle.dump(infos, f) print('Pandaset info {} file is saved to {}'.format(split, file_path)) print('------------Start create groundtruth database for data augmentation-----------') dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False) dataset.set_split('train') dataset.create_groundtruth_database(os.path.join(save_path, 'pandaset_infos_train.pkl'), split='train') print('---------------Data preparation Done---------------')
Create dataset_infos files in order not to have it in a preprocessed pickle file with the info for each sample See PandasetDataset.get_infos for further details.
pcdet/datasets/pandaset/pandaset_dataset.py
create_pandaset_infos
CSL-KU/OpenPCDet
1,984
python
def create_pandaset_infos(dataset_cfg, class_names, data_path, save_path): '\n Create dataset_infos files in order not to have it in a preprocessed pickle\n file with the info for each sample\n See PandasetDataset.get_infos for further details.\n ' dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False) for split in ['train', 'val', 'test']: print('---------------- Start to generate {} data infos ---------------'.format(split)) dataset.set_split(split) infos = dataset.get_infos() file_path = os.path.join(save_path, 'pandaset_infos_{}.pkl'.format(split)) with open(file_path, 'wb') as f: pickle.dump(infos, f) print('Pandaset info {} file is saved to {}'.format(split, file_path)) print('------------Start create groundtruth database for data augmentation-----------') dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False) dataset.set_split('train') dataset.create_groundtruth_database(os.path.join(save_path, 'pandaset_infos_train.pkl'), split='train') print('---------------Data preparation Done---------------')
def create_pandaset_infos(dataset_cfg, class_names, data_path, save_path): '\n Create dataset_infos files in order not to have it in a preprocessed pickle\n file with the info for each sample\n See PandasetDataset.get_infos for further details.\n ' dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False) for split in ['train', 'val', 'test']: print('---------------- Start to generate {} data infos ---------------'.format(split)) dataset.set_split(split) infos = dataset.get_infos() file_path = os.path.join(save_path, 'pandaset_infos_{}.pkl'.format(split)) with open(file_path, 'wb') as f: pickle.dump(infos, f) print('Pandaset info {} file is saved to {}'.format(split, file_path)) print('------------Start create groundtruth database for data augmentation-----------') dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False) dataset.set_split('train') dataset.create_groundtruth_database(os.path.join(save_path, 'pandaset_infos_train.pkl'), split='train') print('---------------Data preparation Done---------------')<|docstring|>Create dataset_infos files in order not to have it in a preprocessed pickle file with the info for each sample See PandasetDataset.get_infos for further details.<|endoftext|>
d1a31f9037c15c6290be1dff7e025adb533f6c7e9de3db73c483f591e487c21a
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None): '\n Args:\n root_path:\n dataset_cfg:\n class_names:\n training:\n logger:\n ' super().__init__(dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger) if (root_path is None): root_path = self.dataset_cfg.DATA_PATH self.dataset = ps.DataSet(os.path.join(root_path, 'dataset')) self.split = self.dataset_cfg.DATA_SPLIT[self.mode] self.pandaset_infos = [] self.include_pandaset_infos(self.mode)
Args: root_path: dataset_cfg: class_names: training: logger:
pcdet/datasets/pandaset/pandaset_dataset.py
__init__
CSL-KU/OpenPCDet
1,984
python
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None): '\n Args:\n root_path:\n dataset_cfg:\n class_names:\n training:\n logger:\n ' super().__init__(dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger) if (root_path is None): root_path = self.dataset_cfg.DATA_PATH self.dataset = ps.DataSet(os.path.join(root_path, 'dataset')) self.split = self.dataset_cfg.DATA_SPLIT[self.mode] self.pandaset_infos = [] self.include_pandaset_infos(self.mode)
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None): '\n Args:\n root_path:\n dataset_cfg:\n class_names:\n training:\n logger:\n ' super().__init__(dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger) if (root_path is None): root_path = self.dataset_cfg.DATA_PATH self.dataset = ps.DataSet(os.path.join(root_path, 'dataset')) self.split = self.dataset_cfg.DATA_SPLIT[self.mode] self.pandaset_infos = [] self.include_pandaset_infos(self.mode)<|docstring|>Args: root_path: dataset_cfg: class_names: training: logger:<|endoftext|>
be14a4692c977762fb69e4900402d91de35bc26676158156bc04b8edff36da71
def __getitem__(self, index): '\n To support a custom dataset, implement this function to load the raw data (and labels), then transform them to\n the unified normative coordinate (x pointing forward, z pointing upwards) and call the function self.prepare_data() to process the data and send them\n to the model.\n\n Args:\n index:\n\n Returns:\n\n ' info = self.pandaset_infos[index] seq_idx = info['sequence'] pose = self._get_pose(info) points = self._get_lidar_points(info, pose) (boxes, labels, zrot_world_to_ego) = self._get_annotations(info, pose) pose_np = pose_dict_to_numpy(pose) input_dict = {'points': points, 'gt_boxes': boxes, 'gt_names': labels, 'sequence': int(seq_idx), 'frame_idx': info['frame_idx'], 'zrot_world_to_ego': zrot_world_to_ego, 'pose': pose_dict_to_numpy(pose)} data_dict = self.prepare_data(data_dict=input_dict) return data_dict
To support a custom dataset, implement this function to load the raw data (and labels), then transform them to the unified normative coordinate (x pointing forward, z pointing upwards) and call the function self.prepare_data() to process the data and send them to the model. Args: index: Returns:
pcdet/datasets/pandaset/pandaset_dataset.py
__getitem__
CSL-KU/OpenPCDet
1,984
python
def __getitem__(self, index): '\n To support a custom dataset, implement this function to load the raw data (and labels), then transform them to\n the unified normative coordinate (x pointing forward, z pointing upwards) and call the function self.prepare_data() to process the data and send them\n to the model.\n\n Args:\n index:\n\n Returns:\n\n ' info = self.pandaset_infos[index] seq_idx = info['sequence'] pose = self._get_pose(info) points = self._get_lidar_points(info, pose) (boxes, labels, zrot_world_to_ego) = self._get_annotations(info, pose) pose_np = pose_dict_to_numpy(pose) input_dict = {'points': points, 'gt_boxes': boxes, 'gt_names': labels, 'sequence': int(seq_idx), 'frame_idx': info['frame_idx'], 'zrot_world_to_ego': zrot_world_to_ego, 'pose': pose_dict_to_numpy(pose)} data_dict = self.prepare_data(data_dict=input_dict) return data_dict
def __getitem__(self, index): '\n To support a custom dataset, implement this function to load the raw data (and labels), then transform them to\n the unified normative coordinate (x pointing forward, z pointing upwards) and call the function self.prepare_data() to process the data and send them\n to the model.\n\n Args:\n index:\n\n Returns:\n\n ' info = self.pandaset_infos[index] seq_idx = info['sequence'] pose = self._get_pose(info) points = self._get_lidar_points(info, pose) (boxes, labels, zrot_world_to_ego) = self._get_annotations(info, pose) pose_np = pose_dict_to_numpy(pose) input_dict = {'points': points, 'gt_boxes': boxes, 'gt_names': labels, 'sequence': int(seq_idx), 'frame_idx': info['frame_idx'], 'zrot_world_to_ego': zrot_world_to_ego, 'pose': pose_dict_to_numpy(pose)} data_dict = self.prepare_data(data_dict=input_dict) return data_dict<|docstring|>To support a custom dataset, implement this function to load the raw data (and labels), then transform them to the unified normative coordinate (x pointing forward, z pointing upwards) and call the function self.prepare_data() to process the data and send them to the model. Args: index: Returns:<|endoftext|>
d07aabe767bc70ab5c24e298ed3e1baa675f8cc8c4a65934d3bfa158b22ae910
def _get_lidar_points(self, info, pose): '\n Get lidar in the unified normative coordinate system for a given frame\n The intensity is normalized to fit [0-1] range (pandaset intensity is in [0-255] range)\n ' lidar_frame = pd.read_pickle(info['lidar_path']) device = self.dataset_cfg.get('LIDAR_DEVICE', 0) if (device != (- 1)): lidar_frame = lidar_frame[(lidar_frame.d == device)] world_points = lidar_frame.to_numpy() del lidar_frame points_loc = world_points[(:, :3)] points_int = world_points[(:, 3)] points_int = (points_int / 255) ego_points = ps.geometry.lidar_points_to_ego(points_loc, pose) ego_points = ego_points[(:, [1, 0, 2])] ego_points[(:, 1)] = (- ego_points[(:, 1)]) return np.append(ego_points, np.expand_dims(points_int, axis=1), axis=1).astype(np.float32)
Get lidar in the unified normative coordinate system for a given frame The intensity is normalized to fit [0-1] range (pandaset intensity is in [0-255] range)
pcdet/datasets/pandaset/pandaset_dataset.py
_get_lidar_points
CSL-KU/OpenPCDet
1,984
python
def _get_lidar_points(self, info, pose): '\n Get lidar in the unified normative coordinate system for a given frame\n The intensity is normalized to fit [0-1] range (pandaset intensity is in [0-255] range)\n ' lidar_frame = pd.read_pickle(info['lidar_path']) device = self.dataset_cfg.get('LIDAR_DEVICE', 0) if (device != (- 1)): lidar_frame = lidar_frame[(lidar_frame.d == device)] world_points = lidar_frame.to_numpy() del lidar_frame points_loc = world_points[(:, :3)] points_int = world_points[(:, 3)] points_int = (points_int / 255) ego_points = ps.geometry.lidar_points_to_ego(points_loc, pose) ego_points = ego_points[(:, [1, 0, 2])] ego_points[(:, 1)] = (- ego_points[(:, 1)]) return np.append(ego_points, np.expand_dims(points_int, axis=1), axis=1).astype(np.float32)
def _get_lidar_points(self, info, pose): '\n Get lidar in the unified normative coordinate system for a given frame\n The intensity is normalized to fit [0-1] range (pandaset intensity is in [0-255] range)\n ' lidar_frame = pd.read_pickle(info['lidar_path']) device = self.dataset_cfg.get('LIDAR_DEVICE', 0) if (device != (- 1)): lidar_frame = lidar_frame[(lidar_frame.d == device)] world_points = lidar_frame.to_numpy() del lidar_frame points_loc = world_points[(:, :3)] points_int = world_points[(:, 3)] points_int = (points_int / 255) ego_points = ps.geometry.lidar_points_to_ego(points_loc, pose) ego_points = ego_points[(:, [1, 0, 2])] ego_points[(:, 1)] = (- ego_points[(:, 1)]) return np.append(ego_points, np.expand_dims(points_int, axis=1), axis=1).astype(np.float32)<|docstring|>Get lidar in the unified normative coordinate system for a given frame The intensity is normalized to fit [0-1] range (pandaset intensity is in [0-255] range)<|endoftext|>
c52b9a167fd34559bc2a9aed29d8b12e120e5aadb9bc97a2d6566c0d605ffa1c
def _get_annotations(self, info, pose): '\n Get box informations in the unified normative coordinate system for a given frame\n ' cuboids = pd.read_pickle(info['cuboids_path']) device = self.dataset_cfg.get('LIDAR_DEVICE', 0) if (device != (- 1)): cuboids = cuboids[(cuboids['cuboids.sensor_id'] != (1 - device))] xs = cuboids['position.x'].to_numpy() ys = cuboids['position.y'].to_numpy() zs = cuboids['position.z'].to_numpy() dxs = cuboids['dimensions.x'].to_numpy() dys = cuboids['dimensions.y'].to_numpy() dzs = cuboids['dimensions.z'].to_numpy() yaws = cuboids['yaw'].to_numpy() labels = cuboids['label'].to_numpy() del cuboids labels = np.array([self.dataset_cfg.TRAINING_CATEGORIES.get(lab, lab) for lab in labels]) centers = np.vstack([xs, ys, zs]).T ego_centers = ps.geometry.lidar_points_to_ego(centers, pose) yaxis_points_from_pose = ps.geometry.lidar_points_to_ego(np.array([[0, 0, 0], [0, 1.0, 0]]), pose) yaxis_from_pose = (yaxis_points_from_pose[(1, :)] - yaxis_points_from_pose[(0, :)]) if (yaxis_from_pose[(- 1)] >= (10 ** (- 1))): if (self.logger is not None): self.logger.warning(("The car's pitch is supposed to be negligible " + 'sin(pitch) is >= 10**-1 ({})'.format(yaxis_from_pose[(- 1)]))) zrot_world_to_ego = np.arctan2((- yaxis_from_pose[0]), yaxis_from_pose[1]) ego_yaws = (yaws + zrot_world_to_ego) ego_xs = ego_centers[(:, 1)] ego_ys = (- ego_centers[(:, 0)]) ego_zs = ego_centers[(:, 2)] ego_dxs = dys ego_dys = dxs ego_dzs = dzs ego_boxes = np.vstack([ego_xs, ego_ys, ego_zs, ego_dxs, ego_dys, ego_dzs, ego_yaws]).T return (ego_boxes.astype(np.float32), labels, zrot_world_to_ego)
Get box informations in the unified normative coordinate system for a given frame
pcdet/datasets/pandaset/pandaset_dataset.py
_get_annotations
CSL-KU/OpenPCDet
1,984
python
def _get_annotations(self, info, pose): '\n \n ' cuboids = pd.read_pickle(info['cuboids_path']) device = self.dataset_cfg.get('LIDAR_DEVICE', 0) if (device != (- 1)): cuboids = cuboids[(cuboids['cuboids.sensor_id'] != (1 - device))] xs = cuboids['position.x'].to_numpy() ys = cuboids['position.y'].to_numpy() zs = cuboids['position.z'].to_numpy() dxs = cuboids['dimensions.x'].to_numpy() dys = cuboids['dimensions.y'].to_numpy() dzs = cuboids['dimensions.z'].to_numpy() yaws = cuboids['yaw'].to_numpy() labels = cuboids['label'].to_numpy() del cuboids labels = np.array([self.dataset_cfg.TRAINING_CATEGORIES.get(lab, lab) for lab in labels]) centers = np.vstack([xs, ys, zs]).T ego_centers = ps.geometry.lidar_points_to_ego(centers, pose) yaxis_points_from_pose = ps.geometry.lidar_points_to_ego(np.array([[0, 0, 0], [0, 1.0, 0]]), pose) yaxis_from_pose = (yaxis_points_from_pose[(1, :)] - yaxis_points_from_pose[(0, :)]) if (yaxis_from_pose[(- 1)] >= (10 ** (- 1))): if (self.logger is not None): self.logger.warning(("The car's pitch is supposed to be negligible " + 'sin(pitch) is >= 10**-1 ({})'.format(yaxis_from_pose[(- 1)]))) zrot_world_to_ego = np.arctan2((- yaxis_from_pose[0]), yaxis_from_pose[1]) ego_yaws = (yaws + zrot_world_to_ego) ego_xs = ego_centers[(:, 1)] ego_ys = (- ego_centers[(:, 0)]) ego_zs = ego_centers[(:, 2)] ego_dxs = dys ego_dys = dxs ego_dzs = dzs ego_boxes = np.vstack([ego_xs, ego_ys, ego_zs, ego_dxs, ego_dys, ego_dzs, ego_yaws]).T return (ego_boxes.astype(np.float32), labels, zrot_world_to_ego)
def _get_annotations(self, info, pose): '\n \n ' cuboids = pd.read_pickle(info['cuboids_path']) device = self.dataset_cfg.get('LIDAR_DEVICE', 0) if (device != (- 1)): cuboids = cuboids[(cuboids['cuboids.sensor_id'] != (1 - device))] xs = cuboids['position.x'].to_numpy() ys = cuboids['position.y'].to_numpy() zs = cuboids['position.z'].to_numpy() dxs = cuboids['dimensions.x'].to_numpy() dys = cuboids['dimensions.y'].to_numpy() dzs = cuboids['dimensions.z'].to_numpy() yaws = cuboids['yaw'].to_numpy() labels = cuboids['label'].to_numpy() del cuboids labels = np.array([self.dataset_cfg.TRAINING_CATEGORIES.get(lab, lab) for lab in labels]) centers = np.vstack([xs, ys, zs]).T ego_centers = ps.geometry.lidar_points_to_ego(centers, pose) yaxis_points_from_pose = ps.geometry.lidar_points_to_ego(np.array([[0, 0, 0], [0, 1.0, 0]]), pose) yaxis_from_pose = (yaxis_points_from_pose[(1, :)] - yaxis_points_from_pose[(0, :)]) if (yaxis_from_pose[(- 1)] >= (10 ** (- 1))): if (self.logger is not None): self.logger.warning(("The car's pitch is supposed to be negligible " + 'sin(pitch) is >= 10**-1 ({})'.format(yaxis_from_pose[(- 1)]))) zrot_world_to_ego = np.arctan2((- yaxis_from_pose[0]), yaxis_from_pose[1]) ego_yaws = (yaws + zrot_world_to_ego) ego_xs = ego_centers[(:, 1)] ego_ys = (- ego_centers[(:, 0)]) ego_zs = ego_centers[(:, 2)] ego_dxs = dys ego_dys = dxs ego_dzs = dzs ego_boxes = np.vstack([ego_xs, ego_ys, ego_zs, ego_dxs, ego_dys, ego_dzs, ego_yaws]).T return (ego_boxes.astype(np.float32), labels, zrot_world_to_ego)<|docstring|>Get box informations in the unified normative coordinate system for a given frame<|endoftext|>
fdd9724acfa5c4423cbe677ecf28f248b4fe12bdaff3a955f353c367d9ed74b5
@staticmethod def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None): '\n To support a custom dataset, implement this function to receive the predicted results from the model, and then\n transform the unified normative coordinate to your required coordinate, and optionally save them to disk.\n\n Args:\n batch_dict: dict of original data from the dataloader\n pred_dicts: dict of predicted results from the model\n pred_boxes: (N, 7), Tensor\n pred_scores: (N), Tensor\n pred_labels: (N), Tensor\n class_names:\n output_path: if it is not None, save the results to this path\n Returns:\n\n ' def generate_single_sample_dataframe(batch_index, box_dict, zrot_world_to_ego, pose): pred_boxes = box_dict['pred_boxes'].cpu().numpy() pred_scores = box_dict['pred_scores'].cpu().numpy() pred_labels = box_dict['pred_labels'].cpu().numpy() zrot = zrot_world_to_ego.cpu().numpy() pose_dict = pose_numpy_to_dict(pose.cpu().numpy()) xs = pred_boxes[(:, 0)] ys = pred_boxes[(:, 1)] zs = pred_boxes[(:, 2)] dxs = pred_boxes[(:, 3)] dys = pred_boxes[(:, 4)] dzs = pred_boxes[(:, 5)] yaws = pred_boxes[(:, 6)] names = np.array(class_names)[(pred_labels - 1)] ego_xs = (- ys) ego_ys = xs ego_zs = zs ego_dxs = dys ego_dys = dxs ego_dzs = dzs ego_yaws = yaws world_yaws = (ego_yaws - zrot) ego_centers = np.vstack([ego_xs, ego_ys, ego_zs]).T world_centers = ps.geometry.ego_to_lidar_points(ego_centers, pose_dict) world_xs = world_centers[(:, 0)] world_ys = world_centers[(:, 1)] world_zs = world_centers[(:, 2)] data_dict = {'position.x': world_xs, 'position.y': world_ys, 'position.z': world_zs, 'dimensions.x': ego_dxs, 'dimensions.y': ego_dys, 'dimensions.z': ego_dzs, 'yaw': (world_yaws % (2 * np.pi)), 'label': names, 'score': pred_scores} return pd.DataFrame(data_dict) annos = [] for (index, box_dict) in enumerate(pred_dicts): frame_idx = batch_dict['frame_idx'][index] seq_idx = batch_dict['sequence'][index] zrot = batch_dict['zrot_world_to_ego'][index] pose = 
batch_dict['pose'][index] single_pred_df = generate_single_sample_dataframe(index, box_dict, zrot, pose) single_pred_dict = {'preds': single_pred_df, 'name': single_pred_df['label'].tolist(), 'frame_idx': frame_idx, 'sequence': str(seq_idx).zfill(3)} if (output_path is not None): frame_id = str(int(frame_idx)).zfill(2) seq_id = str(int(seq_idx)).zfill(3) cur_det_file = os.path.join(output_path, seq_id, 'predictions', 'cuboids', '{}.pkl.gz'.format(frame_id)) os.makedirs(os.path.dirname(cur_det_file), exist_ok=True) single_pred_df.to_pickle(cur_det_file) annos.append(single_pred_dict) return annos
To support a custom dataset, implement this function to receive the predicted results from the model, and then transform the unified normative coordinate to your required coordinate, and optionally save them to disk. Args: batch_dict: dict of original data from the dataloader pred_dicts: dict of predicted results from the model pred_boxes: (N, 7), Tensor pred_scores: (N), Tensor pred_labels: (N), Tensor class_names: output_path: if it is not None, save the results to this path Returns:
pcdet/datasets/pandaset/pandaset_dataset.py
generate_prediction_dicts
CSL-KU/OpenPCDet
1,984
python
@staticmethod def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None): '\n To support a custom dataset, implement this function to receive the predicted results from the model, and then\n transform the unified normative coordinate to your required coordinate, and optionally save them to disk.\n\n Args:\n batch_dict: dict of original data from the dataloader\n pred_dicts: dict of predicted results from the model\n pred_boxes: (N, 7), Tensor\n pred_scores: (N), Tensor\n pred_labels: (N), Tensor\n class_names:\n output_path: if it is not None, save the results to this path\n Returns:\n\n ' def generate_single_sample_dataframe(batch_index, box_dict, zrot_world_to_ego, pose): pred_boxes = box_dict['pred_boxes'].cpu().numpy() pred_scores = box_dict['pred_scores'].cpu().numpy() pred_labels = box_dict['pred_labels'].cpu().numpy() zrot = zrot_world_to_ego.cpu().numpy() pose_dict = pose_numpy_to_dict(pose.cpu().numpy()) xs = pred_boxes[(:, 0)] ys = pred_boxes[(:, 1)] zs = pred_boxes[(:, 2)] dxs = pred_boxes[(:, 3)] dys = pred_boxes[(:, 4)] dzs = pred_boxes[(:, 5)] yaws = pred_boxes[(:, 6)] names = np.array(class_names)[(pred_labels - 1)] ego_xs = (- ys) ego_ys = xs ego_zs = zs ego_dxs = dys ego_dys = dxs ego_dzs = dzs ego_yaws = yaws world_yaws = (ego_yaws - zrot) ego_centers = np.vstack([ego_xs, ego_ys, ego_zs]).T world_centers = ps.geometry.ego_to_lidar_points(ego_centers, pose_dict) world_xs = world_centers[(:, 0)] world_ys = world_centers[(:, 1)] world_zs = world_centers[(:, 2)] data_dict = {'position.x': world_xs, 'position.y': world_ys, 'position.z': world_zs, 'dimensions.x': ego_dxs, 'dimensions.y': ego_dys, 'dimensions.z': ego_dzs, 'yaw': (world_yaws % (2 * np.pi)), 'label': names, 'score': pred_scores} return pd.DataFrame(data_dict) annos = [] for (index, box_dict) in enumerate(pred_dicts): frame_idx = batch_dict['frame_idx'][index] seq_idx = batch_dict['sequence'][index] zrot = batch_dict['zrot_world_to_ego'][index] pose = 
batch_dict['pose'][index] single_pred_df = generate_single_sample_dataframe(index, box_dict, zrot, pose) single_pred_dict = {'preds': single_pred_df, 'name': single_pred_df['label'].tolist(), 'frame_idx': frame_idx, 'sequence': str(seq_idx).zfill(3)} if (output_path is not None): frame_id = str(int(frame_idx)).zfill(2) seq_id = str(int(seq_idx)).zfill(3) cur_det_file = os.path.join(output_path, seq_id, 'predictions', 'cuboids', '{}.pkl.gz'.format(frame_id)) os.makedirs(os.path.dirname(cur_det_file), exist_ok=True) single_pred_df.to_pickle(cur_det_file) annos.append(single_pred_dict) return annos
@staticmethod def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None): '\n To support a custom dataset, implement this function to receive the predicted results from the model, and then\n transform the unified normative coordinate to your required coordinate, and optionally save them to disk.\n\n Args:\n batch_dict: dict of original data from the dataloader\n pred_dicts: dict of predicted results from the model\n pred_boxes: (N, 7), Tensor\n pred_scores: (N), Tensor\n pred_labels: (N), Tensor\n class_names:\n output_path: if it is not None, save the results to this path\n Returns:\n\n ' def generate_single_sample_dataframe(batch_index, box_dict, zrot_world_to_ego, pose): pred_boxes = box_dict['pred_boxes'].cpu().numpy() pred_scores = box_dict['pred_scores'].cpu().numpy() pred_labels = box_dict['pred_labels'].cpu().numpy() zrot = zrot_world_to_ego.cpu().numpy() pose_dict = pose_numpy_to_dict(pose.cpu().numpy()) xs = pred_boxes[(:, 0)] ys = pred_boxes[(:, 1)] zs = pred_boxes[(:, 2)] dxs = pred_boxes[(:, 3)] dys = pred_boxes[(:, 4)] dzs = pred_boxes[(:, 5)] yaws = pred_boxes[(:, 6)] names = np.array(class_names)[(pred_labels - 1)] ego_xs = (- ys) ego_ys = xs ego_zs = zs ego_dxs = dys ego_dys = dxs ego_dzs = dzs ego_yaws = yaws world_yaws = (ego_yaws - zrot) ego_centers = np.vstack([ego_xs, ego_ys, ego_zs]).T world_centers = ps.geometry.ego_to_lidar_points(ego_centers, pose_dict) world_xs = world_centers[(:, 0)] world_ys = world_centers[(:, 1)] world_zs = world_centers[(:, 2)] data_dict = {'position.x': world_xs, 'position.y': world_ys, 'position.z': world_zs, 'dimensions.x': ego_dxs, 'dimensions.y': ego_dys, 'dimensions.z': ego_dzs, 'yaw': (world_yaws % (2 * np.pi)), 'label': names, 'score': pred_scores} return pd.DataFrame(data_dict) annos = [] for (index, box_dict) in enumerate(pred_dicts): frame_idx = batch_dict['frame_idx'][index] seq_idx = batch_dict['sequence'][index] zrot = batch_dict['zrot_world_to_ego'][index] pose = 
batch_dict['pose'][index] single_pred_df = generate_single_sample_dataframe(index, box_dict, zrot, pose) single_pred_dict = {'preds': single_pred_df, 'name': single_pred_df['label'].tolist(), 'frame_idx': frame_idx, 'sequence': str(seq_idx).zfill(3)} if (output_path is not None): frame_id = str(int(frame_idx)).zfill(2) seq_id = str(int(seq_idx)).zfill(3) cur_det_file = os.path.join(output_path, seq_id, 'predictions', 'cuboids', '{}.pkl.gz'.format(frame_id)) os.makedirs(os.path.dirname(cur_det_file), exist_ok=True) single_pred_df.to_pickle(cur_det_file) annos.append(single_pred_dict) return annos<|docstring|>To support a custom dataset, implement this function to receive the predicted results from the model, and then transform the unified normative coordinate to your required coordinate, and optionally save them to disk. Args: batch_dict: dict of original data from the dataloader pred_dicts: dict of predicted results from the model pred_boxes: (N, 7), Tensor pred_scores: (N), Tensor pred_labels: (N), Tensor class_names: output_path: if it is not None, save the results to this path Returns:<|endoftext|>
6c65009440eefde283cee95c43d9653d35c6fe6dfbf75c11a6724509f71f9213
def get_infos(self): '\n Generate the dataset infos dict for each sample of the dataset.\n For each sample, this dict contains:\n - the sequence index\n - the frame index\n - the path to the lidar data\n - the path to the bounding box annotations\n ' infos = [] for seq in self.sequences: s = self.dataset[seq] s.load_lidar() if (len(s.lidar.data) > 100): raise ValueError(('The implementation for this dataset assumes that each sequence is ' + 'no longer than 100 frames. The current sequence has {}'.format(len(s.lidar.data)))) info = [{'sequence': seq, 'frame_idx': ii, 'lidar_path': os.path.join(self.root_path, 'dataset', seq, 'lidar', '{:02d}.pkl.gz'.format(ii)), 'cuboids_path': os.path.join(self.root_path, 'dataset', seq, 'annotations', 'cuboids', '{:02d}.pkl.gz'.format(ii))} for ii in range(len(s.lidar.data))] infos.extend(info) del self.dataset._sequences[seq] return infos
Generate the dataset infos dict for each sample of the dataset. For each sample, this dict contains: - the sequence index - the frame index - the path to the lidar data - the path to the bounding box annotations
pcdet/datasets/pandaset/pandaset_dataset.py
get_infos
CSL-KU/OpenPCDet
1,984
python
def get_infos(self): '\n Generate the dataset infos dict for each sample of the dataset.\n For each sample, this dict contains:\n - the sequence index\n - the frame index\n - the path to the lidar data\n - the path to the bounding box annotations\n ' infos = [] for seq in self.sequences: s = self.dataset[seq] s.load_lidar() if (len(s.lidar.data) > 100): raise ValueError(('The implementation for this dataset assumes that each sequence is ' + 'no longer than 100 frames. The current sequence has {}'.format(len(s.lidar.data)))) info = [{'sequence': seq, 'frame_idx': ii, 'lidar_path': os.path.join(self.root_path, 'dataset', seq, 'lidar', '{:02d}.pkl.gz'.format(ii)), 'cuboids_path': os.path.join(self.root_path, 'dataset', seq, 'annotations', 'cuboids', '{:02d}.pkl.gz'.format(ii))} for ii in range(len(s.lidar.data))] infos.extend(info) del self.dataset._sequences[seq] return infos
def get_infos(self): '\n Generate the dataset infos dict for each sample of the dataset.\n For each sample, this dict contains:\n - the sequence index\n - the frame index\n - the path to the lidar data\n - the path to the bounding box annotations\n ' infos = [] for seq in self.sequences: s = self.dataset[seq] s.load_lidar() if (len(s.lidar.data) > 100): raise ValueError(('The implementation for this dataset assumes that each sequence is ' + 'no longer than 100 frames. The current sequence has {}'.format(len(s.lidar.data)))) info = [{'sequence': seq, 'frame_idx': ii, 'lidar_path': os.path.join(self.root_path, 'dataset', seq, 'lidar', '{:02d}.pkl.gz'.format(ii)), 'cuboids_path': os.path.join(self.root_path, 'dataset', seq, 'annotations', 'cuboids', '{:02d}.pkl.gz'.format(ii))} for ii in range(len(s.lidar.data))] infos.extend(info) del self.dataset._sequences[seq] return infos<|docstring|>Generate the dataset infos dict for each sample of the dataset. For each sample, this dict contains: - the sequence index - the frame index - the path to the lidar data - the path to the bounding box annotations<|endoftext|>
0dac01ff730f861de3f0dbac17a00b6d35bbcd7882e94becb9d3296625b1e259
def normal_init(m, mean, stddev, truncated=False): '\n weight initalizer: truncated normal and random normal.\n ' if truncated: m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) else: m.weight.data.normal_(mean, stddev) m.bias.data.zero_()
weight initalizer: truncated normal and random normal.
faster_rcnn/lib/model/faster_rcnn/faster_rcnn.py
normal_init
maddie157/BiDet
161
python
def normal_init(m, mean, stddev, truncated=False): '\n \n ' if truncated: m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) else: m.weight.data.normal_(mean, stddev) m.bias.data.zero_()
def normal_init(m, mean, stddev, truncated=False): '\n \n ' if truncated: m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) else: m.weight.data.normal_(mean, stddev) m.bias.data.zero_()<|docstring|>weight initalizer: truncated normal and random normal.<|endoftext|>
cea191f5bf2abede3a2ebbec8ce0bd938603371e2017cb813a4fd0cb101ae470
def __init__(self, word_length=50, char_embed_size=100, char_rnn_units=400, char_recurrent_dropout=0.33, recurrent_dropout=0.33, rnn_units=400, embedding_dropout=0.5, main_layer_dropout=0.5, **kwargs): '\n Deep learning based sequence tagger.\n Consist of:\n - Char embedding (RNN+Attention)\n - RNN\n based on postag model of:\n https://www.aclweb.org/anthology/K17-3002/\n\n :param word_length: int, maximum character length in a token,\n relevant when using cnn\n :param char_embed_size: int, the size of character level embedding,\n relevant when using cnn\n :param char_rnn_units: int, RNN units on char level\n :param char_recurrent_dropout: float, dropout rate in RNN char level\n :param recurrent_dropout: float, dropout rate inside RNN\n :param rnn_units: int, the number of rnn units\n :param embedding_dropout: float, dropout rate after embedding layer\n :param main_layer_dropout: float, dropout rate in between LSTM\n ' super(StackedRNNTagger, self).__init__(**kwargs) self.word_length = word_length self.char_embed_size = char_embed_size self.ed = embedding_dropout self.rnn_units = rnn_units self.rd = recurrent_dropout self.char_rnn_units = char_rnn_units self.char_rd = char_recurrent_dropout self.main_layer_dropout = main_layer_dropout
Deep learning based sequence tagger. Consist of: - Char embedding (RNN+Attention) - RNN based on postag model of: https://www.aclweb.org/anthology/K17-3002/ :param word_length: int, maximum character length in a token, relevant when using cnn :param char_embed_size: int, the size of character level embedding, relevant when using cnn :param char_rnn_units: int, RNN units on char level :param char_recurrent_dropout: float, dropout rate in RNN char level :param recurrent_dropout: float, dropout rate inside RNN :param rnn_units: int, the number of rnn units :param embedding_dropout: float, dropout rate after embedding layer :param main_layer_dropout: float, dropout rate in between LSTM
model/RNNText/rnn_rnn_tagger.py
__init__
ajmalkurnia/deeplearning-text-playground
0
python
def __init__(self, word_length=50, char_embed_size=100, char_rnn_units=400, char_recurrent_dropout=0.33, recurrent_dropout=0.33, rnn_units=400, embedding_dropout=0.5, main_layer_dropout=0.5, **kwargs): '\n Deep learning based sequence tagger.\n Consist of:\n - Char embedding (RNN+Attention)\n - RNN\n based on postag model of:\n https://www.aclweb.org/anthology/K17-3002/\n\n :param word_length: int, maximum character length in a token,\n relevant when using cnn\n :param char_embed_size: int, the size of character level embedding,\n relevant when using cnn\n :param char_rnn_units: int, RNN units on char level\n :param char_recurrent_dropout: float, dropout rate in RNN char level\n :param recurrent_dropout: float, dropout rate inside RNN\n :param rnn_units: int, the number of rnn units\n :param embedding_dropout: float, dropout rate after embedding layer\n :param main_layer_dropout: float, dropout rate in between LSTM\n ' super(StackedRNNTagger, self).__init__(**kwargs) self.word_length = word_length self.char_embed_size = char_embed_size self.ed = embedding_dropout self.rnn_units = rnn_units self.rd = recurrent_dropout self.char_rnn_units = char_rnn_units self.char_rd = char_recurrent_dropout self.main_layer_dropout = main_layer_dropout
def __init__(self, word_length=50, char_embed_size=100, char_rnn_units=400, char_recurrent_dropout=0.33, recurrent_dropout=0.33, rnn_units=400, embedding_dropout=0.5, main_layer_dropout=0.5, **kwargs): '\n Deep learning based sequence tagger.\n Consist of:\n - Char embedding (RNN+Attention)\n - RNN\n based on postag model of:\n https://www.aclweb.org/anthology/K17-3002/\n\n :param word_length: int, maximum character length in a token,\n relevant when using cnn\n :param char_embed_size: int, the size of character level embedding,\n relevant when using cnn\n :param char_rnn_units: int, RNN units on char level\n :param char_recurrent_dropout: float, dropout rate in RNN char level\n :param recurrent_dropout: float, dropout rate inside RNN\n :param rnn_units: int, the number of rnn units\n :param embedding_dropout: float, dropout rate after embedding layer\n :param main_layer_dropout: float, dropout rate in between LSTM\n ' super(StackedRNNTagger, self).__init__(**kwargs) self.word_length = word_length self.char_embed_size = char_embed_size self.ed = embedding_dropout self.rnn_units = rnn_units self.rd = recurrent_dropout self.char_rnn_units = char_rnn_units self.char_rd = char_recurrent_dropout self.main_layer_dropout = main_layer_dropout<|docstring|>Deep learning based sequence tagger. Consist of: - Char embedding (RNN+Attention) - RNN based on postag model of: https://www.aclweb.org/anthology/K17-3002/ :param word_length: int, maximum character length in a token, relevant when using cnn :param char_embed_size: int, the size of character level embedding, relevant when using cnn :param char_rnn_units: int, RNN units on char level :param char_recurrent_dropout: float, dropout rate in RNN char level :param recurrent_dropout: float, dropout rate inside RNN :param rnn_units: int, the number of rnn units :param embedding_dropout: float, dropout rate after embedding layer :param main_layer_dropout: float, dropout rate in between LSTM<|endoftext|>
c9de942f94686a8a23c230ae62a231b08314c800e78dff4b6c72a0b7e0f8578c
def __get_char_embedding(self): '\n Initialize character embedding\n ' word_input_layer = Input(shape=(self.word_length,)) embedding_block = Embedding((self.n_chars + 1), self.char_embed_size, input_length=self.word_length, trainable=True, mask_zero=True)(word_input_layer) rnn_output = LSTM(self.char_rnn_units, recurrent_dropout=self.char_rd, return_sequences=True, return_state=True)(embedding_block) (embedding_block, h, c) = rnn_output embedding_block = CharTagAttention(self.char_rnn_units, self.word_length)(embedding_block) embedding_block = Concatenate()([embedding_block, c]) embedding_block = Dense(self.embedding_size)(embedding_block) embedding_block = Model(inputs=word_input_layer, outputs=embedding_block) embedding_block.summary() seq_inp_layer = Input(shape=(self.seq_length, self.word_length), name='char') embedding_block = TimeDistributed(embedding_block)(seq_inp_layer) return (seq_inp_layer, embedding_block)
Initialize character embedding
model/RNNText/rnn_rnn_tagger.py
__get_char_embedding
ajmalkurnia/deeplearning-text-playground
0
python
def __get_char_embedding(self): '\n \n ' word_input_layer = Input(shape=(self.word_length,)) embedding_block = Embedding((self.n_chars + 1), self.char_embed_size, input_length=self.word_length, trainable=True, mask_zero=True)(word_input_layer) rnn_output = LSTM(self.char_rnn_units, recurrent_dropout=self.char_rd, return_sequences=True, return_state=True)(embedding_block) (embedding_block, h, c) = rnn_output embedding_block = CharTagAttention(self.char_rnn_units, self.word_length)(embedding_block) embedding_block = Concatenate()([embedding_block, c]) embedding_block = Dense(self.embedding_size)(embedding_block) embedding_block = Model(inputs=word_input_layer, outputs=embedding_block) embedding_block.summary() seq_inp_layer = Input(shape=(self.seq_length, self.word_length), name='char') embedding_block = TimeDistributed(embedding_block)(seq_inp_layer) return (seq_inp_layer, embedding_block)
def __get_char_embedding(self): '\n \n ' word_input_layer = Input(shape=(self.word_length,)) embedding_block = Embedding((self.n_chars + 1), self.char_embed_size, input_length=self.word_length, trainable=True, mask_zero=True)(word_input_layer) rnn_output = LSTM(self.char_rnn_units, recurrent_dropout=self.char_rd, return_sequences=True, return_state=True)(embedding_block) (embedding_block, h, c) = rnn_output embedding_block = CharTagAttention(self.char_rnn_units, self.word_length)(embedding_block) embedding_block = Concatenate()([embedding_block, c]) embedding_block = Dense(self.embedding_size)(embedding_block) embedding_block = Model(inputs=word_input_layer, outputs=embedding_block) embedding_block.summary() seq_inp_layer = Input(shape=(self.seq_length, self.word_length), name='char') embedding_block = TimeDistributed(embedding_block)(seq_inp_layer) return (seq_inp_layer, embedding_block)<|docstring|>Initialize character embedding<|endoftext|>
814d044695658ba319fde481a70f7cfbfcb8dd727243ca1b9949b2ff9d5accc8
def init_model(self): '\n Initialize the network model\n ' input_word_layer = Input(shape=(self.seq_length,), name='word') pre_trained_word_embed = Embedding((self.vocab_size + 1), self.embedding_size, input_length=self.seq_length, embeddings_initializer=self.embedding, mask_zero=True, trainable=False) pre_trained_word_embed = pre_trained_word_embed(input_word_layer) learnable_word_embed = Embedding((self.vocab_size + 1), self.embedding_size, input_length=self.seq_length, embeddings_initializer='glorot_uniform', mask_zero=True)(input_word_layer) (input_char_layer, char_embed_block) = self.__get_char_embedding() input_layer = [input_char_layer, input_word_layer] embed_block = Add()([char_embed_block, pre_trained_word_embed, learnable_word_embed]) if (self.ed > 0): embed_block = Dropout(self.ed)(embed_block) self.model = embed_block self.model = Bidirectional(LSTM(self.rnn_units, return_sequences=True, recurrent_dropout=self.rd))(self.model) self.model = Dropout(self.main_layer_dropout)(self.model) self.model = Bidirectional(LSTM(self.rnn_units, return_sequences=True, recurrent_dropout=self.rd))(self.model) self.model = Dense((self.n_label + 1), activation='relu')(self.model) out = Dense((self.n_label + 1))(self.model) self.model = Model(input_layer, out) self.model.summary() self.model.compile(loss=CategoricalCrossentropy(from_logits=True), optimizer=self.optimizer, metrics=[CategoricalAccuracy()])
Initialize the network model
model/RNNText/rnn_rnn_tagger.py
init_model
ajmalkurnia/deeplearning-text-playground
0
python
def init_model(self): '\n \n ' input_word_layer = Input(shape=(self.seq_length,), name='word') pre_trained_word_embed = Embedding((self.vocab_size + 1), self.embedding_size, input_length=self.seq_length, embeddings_initializer=self.embedding, mask_zero=True, trainable=False) pre_trained_word_embed = pre_trained_word_embed(input_word_layer) learnable_word_embed = Embedding((self.vocab_size + 1), self.embedding_size, input_length=self.seq_length, embeddings_initializer='glorot_uniform', mask_zero=True)(input_word_layer) (input_char_layer, char_embed_block) = self.__get_char_embedding() input_layer = [input_char_layer, input_word_layer] embed_block = Add()([char_embed_block, pre_trained_word_embed, learnable_word_embed]) if (self.ed > 0): embed_block = Dropout(self.ed)(embed_block) self.model = embed_block self.model = Bidirectional(LSTM(self.rnn_units, return_sequences=True, recurrent_dropout=self.rd))(self.model) self.model = Dropout(self.main_layer_dropout)(self.model) self.model = Bidirectional(LSTM(self.rnn_units, return_sequences=True, recurrent_dropout=self.rd))(self.model) self.model = Dense((self.n_label + 1), activation='relu')(self.model) out = Dense((self.n_label + 1))(self.model) self.model = Model(input_layer, out) self.model.summary() self.model.compile(loss=CategoricalCrossentropy(from_logits=True), optimizer=self.optimizer, metrics=[CategoricalAccuracy()])
def init_model(self): '\n \n ' input_word_layer = Input(shape=(self.seq_length,), name='word') pre_trained_word_embed = Embedding((self.vocab_size + 1), self.embedding_size, input_length=self.seq_length, embeddings_initializer=self.embedding, mask_zero=True, trainable=False) pre_trained_word_embed = pre_trained_word_embed(input_word_layer) learnable_word_embed = Embedding((self.vocab_size + 1), self.embedding_size, input_length=self.seq_length, embeddings_initializer='glorot_uniform', mask_zero=True)(input_word_layer) (input_char_layer, char_embed_block) = self.__get_char_embedding() input_layer = [input_char_layer, input_word_layer] embed_block = Add()([char_embed_block, pre_trained_word_embed, learnable_word_embed]) if (self.ed > 0): embed_block = Dropout(self.ed)(embed_block) self.model = embed_block self.model = Bidirectional(LSTM(self.rnn_units, return_sequences=True, recurrent_dropout=self.rd))(self.model) self.model = Dropout(self.main_layer_dropout)(self.model) self.model = Bidirectional(LSTM(self.rnn_units, return_sequences=True, recurrent_dropout=self.rd))(self.model) self.model = Dense((self.n_label + 1), activation='relu')(self.model) out = Dense((self.n_label + 1))(self.model) self.model = Model(input_layer, out) self.model.summary() self.model.compile(loss=CategoricalCrossentropy(from_logits=True), optimizer=self.optimizer, metrics=[CategoricalAccuracy()])<|docstring|>Initialize the network model<|endoftext|>
14b9e2b827841816a8aaf697e12ecb1cd22dbe85f85ed873c836de67ac5c8ced
def vectorize_input(self, inp_seq): '\n Prepare vector of the input data\n\n :param inp_seq: list of list of string, tokenized input corpus\n :return word_vector: Dictionary, Word and char input vector\n ' input_vector = {'word': self.get_word_vector(inp_seq), 'char': self.get_char_vector(inp_seq)} return input_vector
Prepare vector of the input data :param inp_seq: list of list of string, tokenized input corpus :return word_vector: Dictionary, Word and char input vector
model/RNNText/rnn_rnn_tagger.py
vectorize_input
ajmalkurnia/deeplearning-text-playground
0
python
def vectorize_input(self, inp_seq): '\n Prepare vector of the input data\n\n :param inp_seq: list of list of string, tokenized input corpus\n :return word_vector: Dictionary, Word and char input vector\n ' input_vector = {'word': self.get_word_vector(inp_seq), 'char': self.get_char_vector(inp_seq)} return input_vector
def vectorize_input(self, inp_seq): '\n Prepare vector of the input data\n\n :param inp_seq: list of list of string, tokenized input corpus\n :return word_vector: Dictionary, Word and char input vector\n ' input_vector = {'word': self.get_word_vector(inp_seq), 'char': self.get_char_vector(inp_seq)} return input_vector<|docstring|>Prepare vector of the input data :param inp_seq: list of list of string, tokenized input corpus :return word_vector: Dictionary, Word and char input vector<|endoftext|>
4741385255ee02807daef403e8bbf4de9f96f4a93bf9e7ae3492d88c807d0004
@staticmethod def init_from_config(class_param): '\n Load model from the saved zipfile\n\n :param filepath: path to model zip file\n :return classifier: Loaded model class\n ' constructor_param = {'seq_length': class_param['seq_length'], 'word_length': class_param['word_length']} classifier = StackedRNNTagger(**constructor_param) classifier.label2idx = class_param['label2idx'] classifier.word2idx = class_param['word2idx'] classifier.idx2label = class_param['idx2label'] classifier.n_label = len(classifier.label2idx) classifier.char2idx = class_param['char2idx'] return classifier
Load model from the saved zipfile :param filepath: path to model zip file :return classifier: Loaded model class
model/RNNText/rnn_rnn_tagger.py
init_from_config
ajmalkurnia/deeplearning-text-playground
0
python
@staticmethod def init_from_config(class_param): '\n Load model from the saved zipfile\n\n :param filepath: path to model zip file\n :return classifier: Loaded model class\n ' constructor_param = {'seq_length': class_param['seq_length'], 'word_length': class_param['word_length']} classifier = StackedRNNTagger(**constructor_param) classifier.label2idx = class_param['label2idx'] classifier.word2idx = class_param['word2idx'] classifier.idx2label = class_param['idx2label'] classifier.n_label = len(classifier.label2idx) classifier.char2idx = class_param['char2idx'] return classifier
@staticmethod def init_from_config(class_param): '\n Load model from the saved zipfile\n\n :param filepath: path to model zip file\n :return classifier: Loaded model class\n ' constructor_param = {'seq_length': class_param['seq_length'], 'word_length': class_param['word_length']} classifier = StackedRNNTagger(**constructor_param) classifier.label2idx = class_param['label2idx'] classifier.word2idx = class_param['word2idx'] classifier.idx2label = class_param['idx2label'] classifier.n_label = len(classifier.label2idx) classifier.char2idx = class_param['char2idx'] return classifier<|docstring|>Load model from the saved zipfile :param filepath: path to model zip file :return classifier: Loaded model class<|endoftext|>
aa4dbbe5d62c282aad7a67ec560d6b60e3474a58e0f618afca4adebf14419476
def step(self, action): '\n\n :param int|None action: move pos=0 ~ 63 (0=top left, 7 top right, 63 bottom right), None is resign\n :return:\n ' assert ((action is None) or (0 <= action <= 63)), f'Illegal action={action}' if (action is None): self._resigned() return (self.board, {}) (own, enemy) = self.get_own_and_enemy() flipped = calc_flip(action, own, enemy) if (bit_count(flipped) == 0): self.illegal_move_to_lose(action) return (self.board, {}) own ^= flipped own |= (1 << action) enemy ^= flipped self.set_own_and_enemy(own, enemy) self.turn += 1 if (bit_count(find_correct_moves(enemy, own)) > 0): self.change_to_next_player() elif (bit_count(find_correct_moves(own, enemy)) > 0): pass else: self._game_over() return (self.board, {})
:param int|None action: move pos=0 ~ 63 (0=top left, 7 top right, 63 bottom right), None is resign :return:
src/reversi_zero/env/reversi_env.py
step
Jinnrry/reversi-alpha-zero
699
python
def step(self, action): '\n\n :param int|None action: move pos=0 ~ 63 (0=top left, 7 top right, 63 bottom right), None is resign\n :return:\n ' assert ((action is None) or (0 <= action <= 63)), f'Illegal action={action}' if (action is None): self._resigned() return (self.board, {}) (own, enemy) = self.get_own_and_enemy() flipped = calc_flip(action, own, enemy) if (bit_count(flipped) == 0): self.illegal_move_to_lose(action) return (self.board, {}) own ^= flipped own |= (1 << action) enemy ^= flipped self.set_own_and_enemy(own, enemy) self.turn += 1 if (bit_count(find_correct_moves(enemy, own)) > 0): self.change_to_next_player() elif (bit_count(find_correct_moves(own, enemy)) > 0): pass else: self._game_over() return (self.board, {})
def step(self, action): '\n\n :param int|None action: move pos=0 ~ 63 (0=top left, 7 top right, 63 bottom right), None is resign\n :return:\n ' assert ((action is None) or (0 <= action <= 63)), f'Illegal action={action}' if (action is None): self._resigned() return (self.board, {}) (own, enemy) = self.get_own_and_enemy() flipped = calc_flip(action, own, enemy) if (bit_count(flipped) == 0): self.illegal_move_to_lose(action) return (self.board, {}) own ^= flipped own |= (1 << action) enemy ^= flipped self.set_own_and_enemy(own, enemy) self.turn += 1 if (bit_count(find_correct_moves(enemy, own)) > 0): self.change_to_next_player() elif (bit_count(find_correct_moves(own, enemy)) > 0): pass else: self._game_over() return (self.board, {})<|docstring|>:param int|None action: move pos=0 ~ 63 (0=top left, 7 top right, 63 bottom right), None is resign :return:<|endoftext|>
05598c252718242c01bcf387e264b8ebc62982405cfccaeb787828be558e2270
@property def observation(self): '\n\n :rtype: Board\n ' return self.board
:rtype: Board
src/reversi_zero/env/reversi_env.py
observation
Jinnrry/reversi-alpha-zero
699
python
@property def observation(self): '\n\n \n ' return self.board
@property def observation(self): '\n\n \n ' return self.board<|docstring|>:rtype: Board<|endoftext|>
2a5e2e43fbd5fdb87f45c43f4fc9b45002f4019d1cbd9643322d6fb01b152a96
def _keys_in_sorted(move): ' sort by picking and the responsible for the product the\n move.\n ' return (move.raw_material_production_id.id, move.product_id.responsible_id.id)
sort by picking and the responsible for the product the move.
odoo-13.0/addons/mrp/models/stock_picking.py
_keys_in_sorted
VaibhavBhujade/Blockchain-ERP-interoperability
12
python
def _keys_in_sorted(move): ' sort by picking and the responsible for the product the\n move.\n ' return (move.raw_material_production_id.id, move.product_id.responsible_id.id)
def _keys_in_sorted(move): ' sort by picking and the responsible for the product the\n move.\n ' return (move.raw_material_production_id.id, move.product_id.responsible_id.id)<|docstring|>sort by picking and the responsible for the product the move.<|endoftext|>
fb9433215a9007765350a6c5c850a850d08b4da3c371b30311933bbf73a59d5f
def _keys_in_groupby(move): ' group by picking and the responsible for the product the\n move.\n ' return (move.raw_material_production_id, move.product_id.responsible_id)
group by picking and the responsible for the product the move.
odoo-13.0/addons/mrp/models/stock_picking.py
_keys_in_groupby
VaibhavBhujade/Blockchain-ERP-interoperability
12
python
def _keys_in_groupby(move): ' group by picking and the responsible for the product the\n move.\n ' return (move.raw_material_production_id, move.product_id.responsible_id)
def _keys_in_groupby(move): ' group by picking and the responsible for the product the\n move.\n ' return (move.raw_material_production_id, move.product_id.responsible_id)<|docstring|>group by picking and the responsible for the product the move.<|endoftext|>
872c13ee8ca863fae21572d7e60b159f9a584eb7c0871084d675418061ddfca8
def __init__(self, tasker, workflow, timeout=1200, retry_delay=30, insecure=False, secret=None, expect_v2schema2=False): '\n constructor\n\n :param tasker: DockerTasker instance\n :param workflow: DockerBuildWorkflow instance\n :param timeout: int, maximum number of seconds to wait\n :param retry_delay: int, seconds between pull attempts\n :param insecure: bool, allow non-https pull if true\n :param secret: str, path to secret\n :param expect_v2schema2: bool, require Pulp to return a schema 2 digest and\n retry until it does\n ' super(PulpPullPlugin, self).__init__(tasker, workflow) self.timeout = timeout self.retry_delay = retry_delay self.insecure = insecure self.secret = secret self.expect_v2schema2 = (not get_prefer_schema1_digest(workflow, (not expect_v2schema2))) self.expect_v2schema2list = False self.expect_v2schema2list_only = False
constructor :param tasker: DockerTasker instance :param workflow: DockerBuildWorkflow instance :param timeout: int, maximum number of seconds to wait :param retry_delay: int, seconds between pull attempts :param insecure: bool, allow non-https pull if true :param secret: str, path to secret :param expect_v2schema2: bool, require Pulp to return a schema 2 digest and retry until it does
atomic_reactor/plugins/post_pulp_pull.py
__init__
jcajka/atomic-reactor
0
python
def __init__(self, tasker, workflow, timeout=1200, retry_delay=30, insecure=False, secret=None, expect_v2schema2=False): '\n constructor\n\n :param tasker: DockerTasker instance\n :param workflow: DockerBuildWorkflow instance\n :param timeout: int, maximum number of seconds to wait\n :param retry_delay: int, seconds between pull attempts\n :param insecure: bool, allow non-https pull if true\n :param secret: str, path to secret\n :param expect_v2schema2: bool, require Pulp to return a schema 2 digest and\n retry until it does\n ' super(PulpPullPlugin, self).__init__(tasker, workflow) self.timeout = timeout self.retry_delay = retry_delay self.insecure = insecure self.secret = secret self.expect_v2schema2 = (not get_prefer_schema1_digest(workflow, (not expect_v2schema2))) self.expect_v2schema2list = False self.expect_v2schema2list_only = False
def __init__(self, tasker, workflow, timeout=1200, retry_delay=30, insecure=False, secret=None, expect_v2schema2=False): '\n constructor\n\n :param tasker: DockerTasker instance\n :param workflow: DockerBuildWorkflow instance\n :param timeout: int, maximum number of seconds to wait\n :param retry_delay: int, seconds between pull attempts\n :param insecure: bool, allow non-https pull if true\n :param secret: str, path to secret\n :param expect_v2schema2: bool, require Pulp to return a schema 2 digest and\n retry until it does\n ' super(PulpPullPlugin, self).__init__(tasker, workflow) self.timeout = timeout self.retry_delay = retry_delay self.insecure = insecure self.secret = secret self.expect_v2schema2 = (not get_prefer_schema1_digest(workflow, (not expect_v2schema2))) self.expect_v2schema2list = False self.expect_v2schema2list_only = False<|docstring|>constructor :param tasker: DockerTasker instance :param workflow: DockerBuildWorkflow instance :param timeout: int, maximum number of seconds to wait :param retry_delay: int, seconds between pull attempts :param insecure: bool, allow non-https pull if true :param secret: str, path to secret :param expect_v2schema2: bool, require Pulp to return a schema 2 digest and retry until it does<|endoftext|>
f4e2c5dc6e63c03362dc210700e01fccbc76801a63063461be1cf1ebefe1b625
def generate_track_data(): '\n Converts the data from the Spotify Sequential Skip Prediction Challenge dataset to the database format.\n The data needs to be downloaded manually and put in the skip_dataset/data/ folder.\n :return:\n ' folder_name = 'skip_dataset/data/' all_files = [f for f in listdir(folder_name) if (isfile(join(folder_name, f)) and f.endswith('csv'))] df1 = pd.read_csv((folder_name + all_files[0])) df2 = pd.read_csv((folder_name + all_files[1])) df = pd.concat([df1, df2]) for (index, track) in tqdm(df.iterrows(), total=df.shape[0]): track_data = TrackData(track_id=track['track_id'], name=track['track_id'], artist='N/A', artist_id='N/A', acousticness=track['acousticness'], danceability=track['danceability'], energy=track['energy'], instrumentalness=track['instrumentalness'], liveness=track['liveness'], loudness=((track['loudness'] + 60) / 60), speechiness=track['speechiness'], valence=track['valence'], tempo=track['tempo']) track_data.save()
Converts the data from the Spotify Sequential Skip Prediction Challenge dataset to the database format. The data needs to be downloaded manually and put in the skip_dataset/data/ folder. :return:
Experiment Processing/skip_dataset/generate_track_data.py
generate_track_data
Austaon/GroupRecommendationThesis
0
python
def generate_track_data(): '\n Converts the data from the Spotify Sequential Skip Prediction Challenge dataset to the database format.\n The data needs to be downloaded manually and put in the skip_dataset/data/ folder.\n :return:\n ' folder_name = 'skip_dataset/data/' all_files = [f for f in listdir(folder_name) if (isfile(join(folder_name, f)) and f.endswith('csv'))] df1 = pd.read_csv((folder_name + all_files[0])) df2 = pd.read_csv((folder_name + all_files[1])) df = pd.concat([df1, df2]) for (index, track) in tqdm(df.iterrows(), total=df.shape[0]): track_data = TrackData(track_id=track['track_id'], name=track['track_id'], artist='N/A', artist_id='N/A', acousticness=track['acousticness'], danceability=track['danceability'], energy=track['energy'], instrumentalness=track['instrumentalness'], liveness=track['liveness'], loudness=((track['loudness'] + 60) / 60), speechiness=track['speechiness'], valence=track['valence'], tempo=track['tempo']) track_data.save()
def generate_track_data(): '\n Converts the data from the Spotify Sequential Skip Prediction Challenge dataset to the database format.\n The data needs to be downloaded manually and put in the skip_dataset/data/ folder.\n :return:\n ' folder_name = 'skip_dataset/data/' all_files = [f for f in listdir(folder_name) if (isfile(join(folder_name, f)) and f.endswith('csv'))] df1 = pd.read_csv((folder_name + all_files[0])) df2 = pd.read_csv((folder_name + all_files[1])) df = pd.concat([df1, df2]) for (index, track) in tqdm(df.iterrows(), total=df.shape[0]): track_data = TrackData(track_id=track['track_id'], name=track['track_id'], artist='N/A', artist_id='N/A', acousticness=track['acousticness'], danceability=track['danceability'], energy=track['energy'], instrumentalness=track['instrumentalness'], liveness=track['liveness'], loudness=((track['loudness'] + 60) / 60), speechiness=track['speechiness'], valence=track['valence'], tempo=track['tempo']) track_data.save()<|docstring|>Converts the data from the Spotify Sequential Skip Prediction Challenge dataset to the database format. The data needs to be downloaded manually and put in the skip_dataset/data/ folder. :return:<|endoftext|>
ce78406a062ffc41c99db082eea119819d48dfcb868e4fb93b36498e5f691b13
def __init__(self, transitions, rewards, horizon=100, init_state=None, seed=None): 'MDP initialization.\n\n Parameters\n ----------\n transitions : array-like\n Array holding transition matrix for each action. The dimension of\n the state and action spaces will be deduced from this array.\n rewards : array-like\n Array holding the reward matrix for each action. It needs to comply\n with the dimensions deduced from the transitions array.\n init_state : int\n Initial state of the process. If None, it will be set to 0.\n ' self.horizon = horizon self.transitions = transitions self.rewards = rewards self.action_space = DiscreteSpace(len(transitions)) self.state_space = DiscreteSpace(len(transitions[0])) if (init_state is None): init_state = 0 elif (not self.state_space.contains(init_state)): raise ValueError('Initial state (%d) is not a valid state.', init_state) self.init_state = init_state self.state = init_state self.random = np.random.RandomState() if (seed is not None): self.seed = seed else: self._seed = None
MDP initialization. Parameters ---------- transitions : array-like Array holding transition matrix for each action. The dimension of the state and action spaces will be deduced from this array. rewards : array-like Array holding the reward matrix for each action. It needs to comply with the dimensions deduced from the transitions array. init_state : int Initial state of the process. If None, it will be set to 0.
SafeRLBench/envs/mdp.py
__init__
befelix/Safe-RL-Benchmark
19
python
def __init__(self, transitions, rewards, horizon=100, init_state=None, seed=None): 'MDP initialization.\n\n Parameters\n ----------\n transitions : array-like\n Array holding transition matrix for each action. The dimension of\n the state and action spaces will be deduced from this array.\n rewards : array-like\n Array holding the reward matrix for each action. It needs to comply\n with the dimensions deduced from the transitions array.\n init_state : int\n Initial state of the process. If None, it will be set to 0.\n ' self.horizon = horizon self.transitions = transitions self.rewards = rewards self.action_space = DiscreteSpace(len(transitions)) self.state_space = DiscreteSpace(len(transitions[0])) if (init_state is None): init_state = 0 elif (not self.state_space.contains(init_state)): raise ValueError('Initial state (%d) is not a valid state.', init_state) self.init_state = init_state self.state = init_state self.random = np.random.RandomState() if (seed is not None): self.seed = seed else: self._seed = None
def __init__(self, transitions, rewards, horizon=100, init_state=None, seed=None): 'MDP initialization.\n\n Parameters\n ----------\n transitions : array-like\n Array holding transition matrix for each action. The dimension of\n the state and action spaces will be deduced from this array.\n rewards : array-like\n Array holding the reward matrix for each action. It needs to comply\n with the dimensions deduced from the transitions array.\n init_state : int\n Initial state of the process. If None, it will be set to 0.\n ' self.horizon = horizon self.transitions = transitions self.rewards = rewards self.action_space = DiscreteSpace(len(transitions)) self.state_space = DiscreteSpace(len(transitions[0])) if (init_state is None): init_state = 0 elif (not self.state_space.contains(init_state)): raise ValueError('Initial state (%d) is not a valid state.', init_state) self.init_state = init_state self.state = init_state self.random = np.random.RandomState() if (seed is not None): self.seed = seed else: self._seed = None<|docstring|>MDP initialization. Parameters ---------- transitions : array-like Array holding transition matrix for each action. The dimension of the state and action spaces will be deduced from this array. rewards : array-like Array holding the reward matrix for each action. It needs to comply with the dimensions deduced from the transitions array. init_state : int Initial state of the process. If None, it will be set to 0.<|endoftext|>
8f73a29b2b384b743d857d58e321f8e047eaf18fc605971554ceeb18139416a9
@property def seed(self): 'Seed.' return self._seed
Seed.
SafeRLBench/envs/mdp.py
seed
befelix/Safe-RL-Benchmark
19
python
@property def seed(self): return self._seed
@property def seed(self): return self._seed<|docstring|>Seed.<|endoftext|>
6ea81e598260403add6bca33c07a4ed68637d94d91bd6b2a62753e4677e7e949
def _query_to_responses(query: Query, func_response: Callable[([Query], str)]) -> List[search_pb2.SearchResponse]: '\n\n :param query:\n :param func_response:\n :return:\n ' return [search_pb2.SearchResponse(response=func_response(rec)) for rec in query]
:param query: :param func_response: :return:
src/tutorial/grpc/geodatas/rpc/search_servicer.py
_query_to_responses
yoyonel/2017_GRPC_Tutorial
1
python
def _query_to_responses(query: Query, func_response: Callable[([Query], str)]) -> List[search_pb2.SearchResponse]: '\n\n :param query:\n :param func_response:\n :return:\n ' return [search_pb2.SearchResponse(response=func_response(rec)) for rec in query]
def _query_to_responses(query: Query, func_response: Callable[([Query], str)]) -> List[search_pb2.SearchResponse]: '\n\n :param query:\n :param func_response:\n :return:\n ' return [search_pb2.SearchResponse(response=func_response(rec)) for rec in query]<|docstring|>:param query: :param func_response: :return:<|endoftext|>
ed6937c238b5876b5e11d3e194b1be92ff971b60641dc115c90641d230e6da9a
def _search_thing(lat: float, lng: float, srid: int=(- 1)) -> List[search_pb2.SearchResponse]: '\n\n :param lat:\n :param lng:\n :param srid:\n :return:\n ' session = session_factory() query = session.query(Thing).filter(func.ST_Contains(Thing.geom, Position(lat, lng, srid).to_wktelement)) return _query_to_responses(query, (lambda rec: rec.name))
:param lat: :param lng: :param srid: :return:
src/tutorial/grpc/geodatas/rpc/search_servicer.py
_search_thing
yoyonel/2017_GRPC_Tutorial
1
python
def _search_thing(lat: float, lng: float, srid: int=(- 1)) -> List[search_pb2.SearchResponse]: '\n\n :param lat:\n :param lng:\n :param srid:\n :return:\n ' session = session_factory() query = session.query(Thing).filter(func.ST_Contains(Thing.geom, Position(lat, lng, srid).to_wktelement)) return _query_to_responses(query, (lambda rec: rec.name))
def _search_thing(lat: float, lng: float, srid: int=(- 1)) -> List[search_pb2.SearchResponse]: '\n\n :param lat:\n :param lng:\n :param srid:\n :return:\n ' session = session_factory() query = session.query(Thing).filter(func.ST_Contains(Thing.geom, Position(lat, lng, srid).to_wktelement)) return _query_to_responses(query, (lambda rec: rec.name))<|docstring|>:param lat: :param lng: :param srid: :return:<|endoftext|>
8745696f34376870a570dbdd2cf514f86fc4b878e91eebc4df2ca7e17a47928d
def _search_commune(lat: float, lng: float, srid: int=4326) -> List[search_pb2.SearchResponse]: '\n\n :param lat:\n :param lng:\n :param srid:\n :return:\n ' session = session_factory() query = session.query(OGRGeoJSON).filter(func.ST_Contains(OGRGeoJSON.wkb_geometry, Position(lat, lng, srid).to_wkbelement)) return _query_to_responses(query, (lambda rec: '{} - {} - {}'.format(rec.insee, rec.nom, rec.wikipedia)))
:param lat: :param lng: :param srid: :return:
src/tutorial/grpc/geodatas/rpc/search_servicer.py
_search_commune
yoyonel/2017_GRPC_Tutorial
1
python
def _search_commune(lat: float, lng: float, srid: int=4326) -> List[search_pb2.SearchResponse]: '\n\n :param lat:\n :param lng:\n :param srid:\n :return:\n ' session = session_factory() query = session.query(OGRGeoJSON).filter(func.ST_Contains(OGRGeoJSON.wkb_geometry, Position(lat, lng, srid).to_wkbelement)) return _query_to_responses(query, (lambda rec: '{} - {} - {}'.format(rec.insee, rec.nom, rec.wikipedia)))
def _search_commune(lat: float, lng: float, srid: int=4326) -> List[search_pb2.SearchResponse]: '\n\n :param lat:\n :param lng:\n :param srid:\n :return:\n ' session = session_factory() query = session.query(OGRGeoJSON).filter(func.ST_Contains(OGRGeoJSON.wkb_geometry, Position(lat, lng, srid).to_wkbelement)) return _query_to_responses(query, (lambda rec: '{} - {} - {}'.format(rec.insee, rec.nom, rec.wikipedia)))<|docstring|>:param lat: :param lng: :param srid: :return:<|endoftext|>
b13d30e853fc2819faefdeb7c28a59dfc9da5d8d19f29316f0f9efb7b3a10be3
@stat.timer('search') def search(self, request: search_pb2.SearchRequest, _context) -> search_pb2.SearchResponses: '\n\n :param request:\n :param _context:\n :return:\n ' self.stat.incr('search_count') logger.info(('search (commune) request: ' + str(request))) responses = _search_commune(request.lat, request.lng) logger.info(('search responses: ' + str(responses))) return search_pb2.SearchResponses(responses=responses)
:param request: :param _context: :return:
src/tutorial/grpc/geodatas/rpc/search_servicer.py
search
yoyonel/2017_GRPC_Tutorial
1
python
@stat.timer('search') def search(self, request: search_pb2.SearchRequest, _context) -> search_pb2.SearchResponses: '\n\n :param request:\n :param _context:\n :return:\n ' self.stat.incr('search_count') logger.info(('search (commune) request: ' + str(request))) responses = _search_commune(request.lat, request.lng) logger.info(('search responses: ' + str(responses))) return search_pb2.SearchResponses(responses=responses)
@stat.timer('search') def search(self, request: search_pb2.SearchRequest, _context) -> search_pb2.SearchResponses: '\n\n :param request:\n :param _context:\n :return:\n ' self.stat.incr('search_count') logger.info(('search (commune) request: ' + str(request))) responses = _search_commune(request.lat, request.lng) logger.info(('search responses: ' + str(responses))) return search_pb2.SearchResponses(responses=responses)<|docstring|>:param request: :param _context: :return:<|endoftext|>
d953044ea21cb667d370d4f7ce78ea8089dc961a1080054875f57446a73ec11b
@stat.timer('search_thing') def search_thing(self, request: search_pb2.SearchRequest, _context) -> search_pb2.SearchResponses: '\n\n :param request:\n :param _context:\n :return:\n ' self.stat.incr('search_count') logger.info(('search (thing) request: ' + str(request))) responses = _search_thing(request.lat, request.lng) logger.info(('search responses: ' + str(responses))) return search_pb2.SearchResponses(responses=responses)
:param request: :param _context: :return:
src/tutorial/grpc/geodatas/rpc/search_servicer.py
search_thing
yoyonel/2017_GRPC_Tutorial
1
python
@stat.timer('search_thing') def search_thing(self, request: search_pb2.SearchRequest, _context) -> search_pb2.SearchResponses: '\n\n :param request:\n :param _context:\n :return:\n ' self.stat.incr('search_count') logger.info(('search (thing) request: ' + str(request))) responses = _search_thing(request.lat, request.lng) logger.info(('search responses: ' + str(responses))) return search_pb2.SearchResponses(responses=responses)
@stat.timer('search_thing') def search_thing(self, request: search_pb2.SearchRequest, _context) -> search_pb2.SearchResponses: '\n\n :param request:\n :param _context:\n :return:\n ' self.stat.incr('search_count') logger.info(('search (thing) request: ' + str(request))) responses = _search_thing(request.lat, request.lng) logger.info(('search responses: ' + str(responses))) return search_pb2.SearchResponses(responses=responses)<|docstring|>:param request: :param _context: :return:<|endoftext|>
84b6827781e36fd5c29897b8dd393d9aad30e52be116421ab3bac8b9698e89f0
@stat.timer('monitor') def monitor(self, _request, _context): '\n\n :param _request:\n :param _context:\n :return:\n ' session = session_factory() self.stat.incr('monitor_count') n_things = session.query(Thing).count() return search_pb2.MonitorResponse(n_things=n_things)
:param _request: :param _context: :return:
src/tutorial/grpc/geodatas/rpc/search_servicer.py
monitor
yoyonel/2017_GRPC_Tutorial
1
python
@stat.timer('monitor') def monitor(self, _request, _context): '\n\n :param _request:\n :param _context:\n :return:\n ' session = session_factory() self.stat.incr('monitor_count') n_things = session.query(Thing).count() return search_pb2.MonitorResponse(n_things=n_things)
@stat.timer('monitor') def monitor(self, _request, _context): '\n\n :param _request:\n :param _context:\n :return:\n ' session = session_factory() self.stat.incr('monitor_count') n_things = session.query(Thing).count() return search_pb2.MonitorResponse(n_things=n_things)<|docstring|>:param _request: :param _context: :return:<|endoftext|>
75f7aa7036495b584bdcb28522d799ebcc6c19aa5135fe9699b4a5aa9fc54934
def spherical_multiRegion_Green_Arnoldi_Mmn_Uconverge(n, k, RPlist, invchi, gridpts=10000, mpdps=60, klim=25, Taylor_tol=1e-12, Unormtol=1e-08, veclim=3, delveclim=2, maxveclim=40): "\n generates a representation of the Green's function/Umatrix over spherical region of radius R\n with sub-bases with support in shell regions with boundaries delineated by RPlist\n this is done so that the projection operator for spatial projection based constraints is explicit\n the sub-regions are 0-RPlist[0], RPlist[0]-RPlist[1], ..., RPlist[-2]-RPlist[-1]\n RPlist[-1] is the radius of the entire bounding sphere\n the first region is an inner sphere, the other regions are cocentric shells\n note here we are still using the old convention for the U matrix to be consistent with older Arnoldi code\n in the new optimizations U = V^\\dagger-1 - G^\\dagger; here we calculate Uinv, and Uinv = V^-1-G\n " mp.dps = mpdps regionnum = len(RPlist) unitRgdotRglist = np.zeros(regionnum, dtype=type((1j * mp.one))) unitRgdotOutlist = np.zeros(regionnum, dtype=type((1j * mp.one))) unitImdotOutlist = np.zeros(regionnum, dtype=type((1j * mp.one))) subGmatlist = [] vecnum = 0 subbasis_head_indlist = [] All_unitMvecs = [] rgridlist = [] for i in range(regionnum): print('M wave Region #', i) if (i == 0): subbasis_head_indlist.append(0) (rmnRgM, rnImM, unitrmnMpols, Uinv) = speedup_Green_Taylor_Arnoldi_RgMmn_Uconverge(n, k, RPlist[0], klim=klim, Taylor_tol=Taylor_tol, invchi=invchi, Unormtol=Unormtol) unitRgdotRglist[0] = mp.sqrt(rmnMnormsqr_Taylor(n, k, RPlist[0], rmnRgM)) subGmat = ((mp.eye(Uinv.rows) * invchi) - Uinv) subGmatlist.append(np.array(mpmath.fp.matrix(subGmat.tolist()).tolist())) vecnum += Uinv.rows rgrid = np.linspace(0, RPlist[0], gridpts) rgridlist.append(rgrid) for i in range((len(unitrmnMpols) - 1)): All_unitMvecs.append((((k * rgrid) ** n) * po.polyval((k * rgrid), unitrmnMpols[i].coef))) else: subbasis_head_indlist.append(vecnum) try: (rgrid, rsqrgrid, rdiffgrid, RgMgrid, ImMgrid, 
unitMvecs, Uinv, Gmat) = shell_Green_grid_Arnoldi_RgandImMmn_Uconverge(n, k, RPlist[(i - 1)], RPlist[i], invchi, gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim) OutMgrid = (RgMgrid + (1j * ImMgrid)) unitRgdotRglist[i] = mp.sqrt(rgrid_Mmn_normsqr(RgMgrid, rsqrgrid, rdiffgrid)) unitRgdotOutlist[i] = mp.mpc(rgrid_Mmn_dot(unitMvecs[0], OutMgrid, rsqrgrid, rdiffgrid)) unitImdotOutlist[i] = mp.mpc(rgrid_Mmn_dot(unitMvecs[1], OutMgrid, rsqrgrid, rdiffgrid)) except FloatingPointError: (rgrid, rsqrgrid, rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Uinv, Gmat) = shell_Green_grid_Arnoldi_RgandImMmn_Uconverge_mp(n, k, RPlist[(i - 1)], RPlist[i], invchi, gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim) OutMgrid = (RgMgrid + (1j * ImMgrid)) unitRgdotRglist[i] = mp.sqrt(rgrid_Mmn_normsqr(RgMgrid, rsqrgrid, rdiffgrid)) unitRgdotOutlist[i] = rgrid_Mmn_dot(unitMvecs[0], OutMgrid, rsqrgrid, rdiffgrid) unitImdotOutlist[i] = rgrid_Mmn_dot(unitMvecs[1], OutMgrid, rsqrgrid, rdiffgrid) Gmat = np.array(mpmath.fp.matrix(Gmat.tolist()).tolist()) subGmatlist.append(Gmat) vecnum += Gmat.shape[0] All_unitMvecs.extend(unitMvecs[:(- 2)]) rgridlist.append(rgrid) subbasis_head_indlist.append(vecnum) Gmat = np.zeros((vecnum, vecnum), dtype=np.complex) for i in range(regionnum): indstart = subbasis_head_indlist[i] indend = subbasis_head_indlist[(i + 1)] Gmat[(indstart:indend, indstart:indend)] = subGmatlist[i][(:, :)] jkcubed = (1j * (k ** 3)) for i in range(regionnum): Rgiind = subbasis_head_indlist[i] Imiind = (Rgiind + 1) for j in range(i): Rgjind = subbasis_head_indlist[j] Gmat[(Rgjind, Rgiind)] = np.complex(((jkcubed * unitRgdotRglist[j]) * unitRgdotOutlist[i])) Gmat[(Rgjind, Imiind)] = np.complex(((jkcubed * unitRgdotRglist[j]) * unitImdotOutlist[i])) for j in range((i + 1), regionnum): Rgjind = subbasis_head_indlist[j] Imjind = (Rgjind + 1) Gmat[(Rgjind, Rgiind)] = np.complex(((jkcubed * unitRgdotOutlist[j]) * unitRgdotRglist[i])) Gmat[(Imjind, Rgiind)] = np.complex(((jkcubed * 
unitImdotOutlist[j]) * unitRgdotRglist[i])) Uinv = ((invchi * np.eye(vecnum)) - Gmat) fullrgrid = rgridlist[0].copy() rboundaries = [0, gridpts] for i in range(1, len(rgridlist)): fullrgrid = np.concatenate((fullrgrid, rgridlist[i][1:])) rboundaries.append(len(fullrgrid)) All_fullr_unitMvecs = [] for i in range(len(rgridlist)): for j in range(subbasis_head_indlist[i], subbasis_head_indlist[(i + 1)]): vecgrid = np.zeros_like(fullrgrid) if (i == 0): vecgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitMvecs[j][:])) else: vecgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitMvecs[j][1:])) All_fullr_unitMvecs.append(vecgrid) return (Gmat, Uinv, unitRgdotRglist, subbasis_head_indlist, fullrgrid, All_fullr_unitMvecs)
generates a representation of the Green's function/Umatrix over spherical region of radius R with sub-bases with support in shell regions with boundaries delineated by RPlist this is done so that the projection operator for spatial projection based constraints is explicit the sub-regions are 0-RPlist[0], RPlist[0]-RPlist[1], ..., RPlist[-2]-RPlist[-1] RPlist[-1] is the radius of the entire bounding sphere the first region is an inner sphere, the other regions are cocentric shells note here we are still using the old convention for the U matrix to be consistent with older Arnoldi code in the new optimizations U = V^\dagger-1 - G^\dagger; here we calculate Uinv, and Uinv = V^-1-G
dualbound/Arnoldi/spherical_multiRegion_Green_Arnoldi.py
spherical_multiRegion_Green_Arnoldi_Mmn_Uconverge
PengningChao/emdb-sphere
0
python
def spherical_multiRegion_Green_Arnoldi_Mmn_Uconverge(n, k, RPlist, invchi, gridpts=10000, mpdps=60, klim=25, Taylor_tol=1e-12, Unormtol=1e-08, veclim=3, delveclim=2, maxveclim=40): "\n generates a representation of the Green's function/Umatrix over spherical region of radius R\n with sub-bases with support in shell regions with boundaries delineated by RPlist\n this is done so that the projection operator for spatial projection based constraints is explicit\n the sub-regions are 0-RPlist[0], RPlist[0]-RPlist[1], ..., RPlist[-2]-RPlist[-1]\n RPlist[-1] is the radius of the entire bounding sphere\n the first region is an inner sphere, the other regions are cocentric shells\n note here we are still using the old convention for the U matrix to be consistent with older Arnoldi code\n in the new optimizations U = V^\\dagger-1 - G^\\dagger; here we calculate Uinv, and Uinv = V^-1-G\n " mp.dps = mpdps regionnum = len(RPlist) unitRgdotRglist = np.zeros(regionnum, dtype=type((1j * mp.one))) unitRgdotOutlist = np.zeros(regionnum, dtype=type((1j * mp.one))) unitImdotOutlist = np.zeros(regionnum, dtype=type((1j * mp.one))) subGmatlist = [] vecnum = 0 subbasis_head_indlist = [] All_unitMvecs = [] rgridlist = [] for i in range(regionnum): print('M wave Region #', i) if (i == 0): subbasis_head_indlist.append(0) (rmnRgM, rnImM, unitrmnMpols, Uinv) = speedup_Green_Taylor_Arnoldi_RgMmn_Uconverge(n, k, RPlist[0], klim=klim, Taylor_tol=Taylor_tol, invchi=invchi, Unormtol=Unormtol) unitRgdotRglist[0] = mp.sqrt(rmnMnormsqr_Taylor(n, k, RPlist[0], rmnRgM)) subGmat = ((mp.eye(Uinv.rows) * invchi) - Uinv) subGmatlist.append(np.array(mpmath.fp.matrix(subGmat.tolist()).tolist())) vecnum += Uinv.rows rgrid = np.linspace(0, RPlist[0], gridpts) rgridlist.append(rgrid) for i in range((len(unitrmnMpols) - 1)): All_unitMvecs.append((((k * rgrid) ** n) * po.polyval((k * rgrid), unitrmnMpols[i].coef))) else: subbasis_head_indlist.append(vecnum) try: (rgrid, rsqrgrid, rdiffgrid, RgMgrid, ImMgrid, 
unitMvecs, Uinv, Gmat) = shell_Green_grid_Arnoldi_RgandImMmn_Uconverge(n, k, RPlist[(i - 1)], RPlist[i], invchi, gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim) OutMgrid = (RgMgrid + (1j * ImMgrid)) unitRgdotRglist[i] = mp.sqrt(rgrid_Mmn_normsqr(RgMgrid, rsqrgrid, rdiffgrid)) unitRgdotOutlist[i] = mp.mpc(rgrid_Mmn_dot(unitMvecs[0], OutMgrid, rsqrgrid, rdiffgrid)) unitImdotOutlist[i] = mp.mpc(rgrid_Mmn_dot(unitMvecs[1], OutMgrid, rsqrgrid, rdiffgrid)) except FloatingPointError: (rgrid, rsqrgrid, rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Uinv, Gmat) = shell_Green_grid_Arnoldi_RgandImMmn_Uconverge_mp(n, k, RPlist[(i - 1)], RPlist[i], invchi, gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim) OutMgrid = (RgMgrid + (1j * ImMgrid)) unitRgdotRglist[i] = mp.sqrt(rgrid_Mmn_normsqr(RgMgrid, rsqrgrid, rdiffgrid)) unitRgdotOutlist[i] = rgrid_Mmn_dot(unitMvecs[0], OutMgrid, rsqrgrid, rdiffgrid) unitImdotOutlist[i] = rgrid_Mmn_dot(unitMvecs[1], OutMgrid, rsqrgrid, rdiffgrid) Gmat = np.array(mpmath.fp.matrix(Gmat.tolist()).tolist()) subGmatlist.append(Gmat) vecnum += Gmat.shape[0] All_unitMvecs.extend(unitMvecs[:(- 2)]) rgridlist.append(rgrid) subbasis_head_indlist.append(vecnum) Gmat = np.zeros((vecnum, vecnum), dtype=np.complex) for i in range(regionnum): indstart = subbasis_head_indlist[i] indend = subbasis_head_indlist[(i + 1)] Gmat[(indstart:indend, indstart:indend)] = subGmatlist[i][(:, :)] jkcubed = (1j * (k ** 3)) for i in range(regionnum): Rgiind = subbasis_head_indlist[i] Imiind = (Rgiind + 1) for j in range(i): Rgjind = subbasis_head_indlist[j] Gmat[(Rgjind, Rgiind)] = np.complex(((jkcubed * unitRgdotRglist[j]) * unitRgdotOutlist[i])) Gmat[(Rgjind, Imiind)] = np.complex(((jkcubed * unitRgdotRglist[j]) * unitImdotOutlist[i])) for j in range((i + 1), regionnum): Rgjind = subbasis_head_indlist[j] Imjind = (Rgjind + 1) Gmat[(Rgjind, Rgiind)] = np.complex(((jkcubed * unitRgdotOutlist[j]) * unitRgdotRglist[i])) Gmat[(Imjind, Rgiind)] = np.complex(((jkcubed * 
unitImdotOutlist[j]) * unitRgdotRglist[i])) Uinv = ((invchi * np.eye(vecnum)) - Gmat) fullrgrid = rgridlist[0].copy() rboundaries = [0, gridpts] for i in range(1, len(rgridlist)): fullrgrid = np.concatenate((fullrgrid, rgridlist[i][1:])) rboundaries.append(len(fullrgrid)) All_fullr_unitMvecs = [] for i in range(len(rgridlist)): for j in range(subbasis_head_indlist[i], subbasis_head_indlist[(i + 1)]): vecgrid = np.zeros_like(fullrgrid) if (i == 0): vecgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitMvecs[j][:])) else: vecgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitMvecs[j][1:])) All_fullr_unitMvecs.append(vecgrid) return (Gmat, Uinv, unitRgdotRglist, subbasis_head_indlist, fullrgrid, All_fullr_unitMvecs)
def spherical_multiRegion_Green_Arnoldi_Mmn_Uconverge(n, k, RPlist, invchi, gridpts=10000, mpdps=60, klim=25, Taylor_tol=1e-12, Unormtol=1e-08, veclim=3, delveclim=2, maxveclim=40): "\n generates a representation of the Green's function/Umatrix over spherical region of radius R\n with sub-bases with support in shell regions with boundaries delineated by RPlist\n this is done so that the projection operator for spatial projection based constraints is explicit\n the sub-regions are 0-RPlist[0], RPlist[0]-RPlist[1], ..., RPlist[-2]-RPlist[-1]\n RPlist[-1] is the radius of the entire bounding sphere\n the first region is an inner sphere, the other regions are cocentric shells\n note here we are still using the old convention for the U matrix to be consistent with older Arnoldi code\n in the new optimizations U = V^\\dagger-1 - G^\\dagger; here we calculate Uinv, and Uinv = V^-1-G\n " mp.dps = mpdps regionnum = len(RPlist) unitRgdotRglist = np.zeros(regionnum, dtype=type((1j * mp.one))) unitRgdotOutlist = np.zeros(regionnum, dtype=type((1j * mp.one))) unitImdotOutlist = np.zeros(regionnum, dtype=type((1j * mp.one))) subGmatlist = [] vecnum = 0 subbasis_head_indlist = [] All_unitMvecs = [] rgridlist = [] for i in range(regionnum): print('M wave Region #', i) if (i == 0): subbasis_head_indlist.append(0) (rmnRgM, rnImM, unitrmnMpols, Uinv) = speedup_Green_Taylor_Arnoldi_RgMmn_Uconverge(n, k, RPlist[0], klim=klim, Taylor_tol=Taylor_tol, invchi=invchi, Unormtol=Unormtol) unitRgdotRglist[0] = mp.sqrt(rmnMnormsqr_Taylor(n, k, RPlist[0], rmnRgM)) subGmat = ((mp.eye(Uinv.rows) * invchi) - Uinv) subGmatlist.append(np.array(mpmath.fp.matrix(subGmat.tolist()).tolist())) vecnum += Uinv.rows rgrid = np.linspace(0, RPlist[0], gridpts) rgridlist.append(rgrid) for i in range((len(unitrmnMpols) - 1)): All_unitMvecs.append((((k * rgrid) ** n) * po.polyval((k * rgrid), unitrmnMpols[i].coef))) else: subbasis_head_indlist.append(vecnum) try: (rgrid, rsqrgrid, rdiffgrid, RgMgrid, ImMgrid, 
unitMvecs, Uinv, Gmat) = shell_Green_grid_Arnoldi_RgandImMmn_Uconverge(n, k, RPlist[(i - 1)], RPlist[i], invchi, gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim) OutMgrid = (RgMgrid + (1j * ImMgrid)) unitRgdotRglist[i] = mp.sqrt(rgrid_Mmn_normsqr(RgMgrid, rsqrgrid, rdiffgrid)) unitRgdotOutlist[i] = mp.mpc(rgrid_Mmn_dot(unitMvecs[0], OutMgrid, rsqrgrid, rdiffgrid)) unitImdotOutlist[i] = mp.mpc(rgrid_Mmn_dot(unitMvecs[1], OutMgrid, rsqrgrid, rdiffgrid)) except FloatingPointError: (rgrid, rsqrgrid, rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Uinv, Gmat) = shell_Green_grid_Arnoldi_RgandImMmn_Uconverge_mp(n, k, RPlist[(i - 1)], RPlist[i], invchi, gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim) OutMgrid = (RgMgrid + (1j * ImMgrid)) unitRgdotRglist[i] = mp.sqrt(rgrid_Mmn_normsqr(RgMgrid, rsqrgrid, rdiffgrid)) unitRgdotOutlist[i] = rgrid_Mmn_dot(unitMvecs[0], OutMgrid, rsqrgrid, rdiffgrid) unitImdotOutlist[i] = rgrid_Mmn_dot(unitMvecs[1], OutMgrid, rsqrgrid, rdiffgrid) Gmat = np.array(mpmath.fp.matrix(Gmat.tolist()).tolist()) subGmatlist.append(Gmat) vecnum += Gmat.shape[0] All_unitMvecs.extend(unitMvecs[:(- 2)]) rgridlist.append(rgrid) subbasis_head_indlist.append(vecnum) Gmat = np.zeros((vecnum, vecnum), dtype=np.complex) for i in range(regionnum): indstart = subbasis_head_indlist[i] indend = subbasis_head_indlist[(i + 1)] Gmat[(indstart:indend, indstart:indend)] = subGmatlist[i][(:, :)] jkcubed = (1j * (k ** 3)) for i in range(regionnum): Rgiind = subbasis_head_indlist[i] Imiind = (Rgiind + 1) for j in range(i): Rgjind = subbasis_head_indlist[j] Gmat[(Rgjind, Rgiind)] = np.complex(((jkcubed * unitRgdotRglist[j]) * unitRgdotOutlist[i])) Gmat[(Rgjind, Imiind)] = np.complex(((jkcubed * unitRgdotRglist[j]) * unitImdotOutlist[i])) for j in range((i + 1), regionnum): Rgjind = subbasis_head_indlist[j] Imjind = (Rgjind + 1) Gmat[(Rgjind, Rgiind)] = np.complex(((jkcubed * unitRgdotOutlist[j]) * unitRgdotRglist[i])) Gmat[(Imjind, Rgiind)] = np.complex(((jkcubed * 
unitImdotOutlist[j]) * unitRgdotRglist[i])) Uinv = ((invchi * np.eye(vecnum)) - Gmat) fullrgrid = rgridlist[0].copy() rboundaries = [0, gridpts] for i in range(1, len(rgridlist)): fullrgrid = np.concatenate((fullrgrid, rgridlist[i][1:])) rboundaries.append(len(fullrgrid)) All_fullr_unitMvecs = [] for i in range(len(rgridlist)): for j in range(subbasis_head_indlist[i], subbasis_head_indlist[(i + 1)]): vecgrid = np.zeros_like(fullrgrid) if (i == 0): vecgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitMvecs[j][:])) else: vecgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitMvecs[j][1:])) All_fullr_unitMvecs.append(vecgrid) return (Gmat, Uinv, unitRgdotRglist, subbasis_head_indlist, fullrgrid, All_fullr_unitMvecs)<|docstring|>generates a representation of the Green's function/Umatrix over spherical region of radius R with sub-bases with support in shell regions with boundaries delineated by RPlist this is done so that the projection operator for spatial projection based constraints is explicit the sub-regions are 0-RPlist[0], RPlist[0]-RPlist[1], ..., RPlist[-2]-RPlist[-1] RPlist[-1] is the radius of the entire bounding sphere the first region is an inner sphere, the other regions are cocentric shells note here we are still using the old convention for the U matrix to be consistent with older Arnoldi code in the new optimizations U = V^\dagger-1 - G^\dagger; here we calculate Uinv, and Uinv = V^-1-G<|endoftext|>
5f830cb54ada9cb2263479b29b636c0247a8f74483c3343515c10864fbaeb35d
def spherical_multiRegion_Green_Arnoldi_Nmn_Uconverge(n, k, RPlist, invchi, gridpts=10000, mpdps=60, klim=25, Taylor_tol=1e-12, Unormtol=1e-08, veclim=3, delveclim=2, maxveclim=40): "\n generates a representation of the Green's function/Umatrix over spherical region of radius R\n with sub-bases with support in shell regions with boundaries delineated by RPlist\n this is done so that the projection operator for spatial projection based constraints is explicit\n the sub-regions are 0-RPlist[0], RPlist[0]-RPlist[1], ..., RPlist[-2]-RPlist[-1]\n RPlist[-1] is the radius of the entire bounding sphere\n the first region is an inner sphere, the other regions are cocentric shells\n note here we are still using the old convention for the U matrix to be consistent with older Arnoldi code\n in the new optimizations U = V^\\dagger-1 - G^\\dagger; here we calculate Uinv, and Uinv = V^-1-G\n " mp.dps = mpdps regionnum = len(RPlist) unitRgdotRglist = np.zeros(regionnum, dtype=type((1j * mp.one))) unitRgdotOutlist = np.zeros(regionnum, dtype=type((1j * mp.one))) unitImdotOutlist = np.zeros(regionnum, dtype=type((1j * mp.one))) subGmatlist = [] vecnum = 0 subbasis_head_indlist = [] rgridlist = [] All_unitBvecs = [] All_unitPvecs = [] for i in range(regionnum): print('N wave Region #', i) if (i == 0): subbasis_head_indlist.append(0) (rmRgN_Bpol, rmRgN_Ppol, rnImN_Bpol, rnImN_Ppol, unitrmnBpols, unitrmnPpols, Uinv) = speedup_Green_Taylor_Arnoldi_RgNmn_Uconverge(n, k, RPlist[0], klim=klim, Taylor_tol=Taylor_tol, invchi=invchi, Unormtol=Unormtol) unitRgdotRglist[0] = mp.sqrt(rmnNnormsqr_Taylor(n, k, RPlist[0], rmRgN_Bpol, rmRgN_Ppol)) subGmat = ((mp.eye(Uinv.rows) * invchi) - Uinv) subGmatlist.append(np.array(mpmath.fp.matrix(subGmat.tolist()).tolist())) vecnum += Uinv.rows rgrid = np.linspace(0, RPlist[0], gridpts) rgridlist.append(rgrid) for i in range((len(unitrmnBpols) - 1)): All_unitBvecs.append((((k * rgrid) ** (n - 1)) * po.polyval((k * rgrid), unitrmnBpols[i].coef))) 
All_unitPvecs.append((((k * rgrid) ** (n - 1)) * po.polyval((k * rgrid), unitrmnPpols[i].coef))) else: subbasis_head_indlist.append(vecnum) try: (rgrid, rsqrgrid, rdiffgrid, RgBgrid, RgPgrid, ImBgrid, ImPgrid, unitBvecs, unitPvecs, Uinv, Gmat) = shell_Green_grid_Arnoldi_RgandImNmn_Uconverge(n, k, RPlist[(i - 1)], RPlist[i], invchi, gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim) OutBgrid = (RgBgrid + (1j * ImBgrid)) OutPgrid = (RgPgrid + (1j * ImPgrid)) unitRgdotRglist[i] = mp.sqrt(rgrid_Nmn_normsqr(RgBgrid, RgPgrid, rsqrgrid, rdiffgrid)) unitRgdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[0], unitPvecs[0], OutBgrid, OutPgrid, rsqrgrid, rdiffgrid)) unitImdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[1], unitPvecs[1], OutBgrid, OutPgrid, rsqrgrid, rdiffgrid)) except FloatingPointError: (rgrid, rsqrgrid, rdiffgrid, RgBgrid, RgPgrid, ImBgrid, ImPgrid, unitBvecs, unitPvecs, Uinv, Gmat) = shell_Green_grid_Arnoldi_RgandImNmn_Uconverge_mp(n, k, RPlist[(i - 1)], RPlist[i], invchi, gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim) OutBgrid = (RgBgrid + (1j * ImBgrid)) OutPgrid = (RgPgrid + (1j * ImPgrid)) unitRgdotRglist[i] = mp.sqrt(rgrid_Nmn_normsqr(RgBgrid, RgPgrid, rsqrgrid, rdiffgrid)) unitRgdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[0], unitPvecs[0], OutBgrid, OutPgrid, rsqrgrid, rdiffgrid)) unitImdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[1], unitPvecs[1], OutBgrid, OutPgrid, rsqrgrid, rdiffgrid)) Gmat = np.array(mpmath.fp.matrix(Gmat.tolist()).tolist()) subGmatlist.append(Gmat) vecnum += Gmat.shape[0] rgridlist.append(rgrid) All_unitBvecs.extend(unitBvecs[:(- 2)]) All_unitPvecs.extend(unitPvecs[:(- 2)]) subbasis_head_indlist.append(vecnum) Gmat = np.zeros((vecnum, vecnum), dtype=np.complex) for i in range(regionnum): indstart = subbasis_head_indlist[i] indend = subbasis_head_indlist[(i + 1)] Gmat[(indstart:indend, indstart:indend)] = subGmatlist[i][(:, :)] jkcubed = (1j * (k ** 3)) for i in range(regionnum): Rgiind = 
subbasis_head_indlist[i] Imiind = (Rgiind + 1) for j in range(i): Rgjind = subbasis_head_indlist[j] Gmat[(Rgjind, Rgiind)] = np.complex(((jkcubed * unitRgdotRglist[j]) * unitRgdotOutlist[i])) Gmat[(Rgjind, Imiind)] = np.complex(((jkcubed * unitRgdotRglist[j]) * unitImdotOutlist[i])) for j in range((i + 1), regionnum): Rgjind = subbasis_head_indlist[j] Imjind = (Rgjind + 1) Gmat[(Rgjind, Rgiind)] = np.complex(((jkcubed * unitRgdotOutlist[j]) * unitRgdotRglist[i])) Gmat[(Imjind, Rgiind)] = np.complex(((jkcubed * unitImdotOutlist[j]) * unitRgdotRglist[i])) Uinv = ((invchi * np.eye(vecnum)) - Gmat) fullrgrid = rgridlist[0].copy() rboundaries = [0, gridpts] for i in range(1, len(rgridlist)): fullrgrid = np.concatenate((fullrgrid, rgridlist[i][1:])) rboundaries.append(len(fullrgrid)) All_fullr_unitBvecs = [] All_fullr_unitPvecs = [] for i in range(len(rgridlist)): for j in range(subbasis_head_indlist[i], subbasis_head_indlist[(i + 1)]): vecBgrid = np.zeros_like(fullrgrid) vecPgrid = np.zeros_like(fullrgrid) if (i == 0): vecBgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitBvecs[j][:])) vecPgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitPvecs[j][:])) else: vecBgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitBvecs[j][1:])) vecPgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitPvecs[j][1:])) All_fullr_unitBvecs.append(vecBgrid) All_fullr_unitPvecs.append(vecPgrid) return (Gmat, Uinv, unitRgdotRglist, subbasis_head_indlist, fullrgrid, All_fullr_unitBvecs, All_fullr_unitPvecs)
generates a representation of the Green's function/Umatrix over spherical region of radius R with sub-bases with support in shell regions with boundaries delineated by RPlist this is done so that the projection operator for spatial projection based constraints is explicit the sub-regions are 0-RPlist[0], RPlist[0]-RPlist[1], ..., RPlist[-2]-RPlist[-1] RPlist[-1] is the radius of the entire bounding sphere the first region is an inner sphere, the other regions are cocentric shells note here we are still using the old convention for the U matrix to be consistent with older Arnoldi code in the new optimizations U = V^\dagger-1 - G^\dagger; here we calculate Uinv, and Uinv = V^-1-G
dualbound/Arnoldi/spherical_multiRegion_Green_Arnoldi.py
spherical_multiRegion_Green_Arnoldi_Nmn_Uconverge
PengningChao/emdb-sphere
0
python
def spherical_multiRegion_Green_Arnoldi_Nmn_Uconverge(n, k, RPlist, invchi, gridpts=10000, mpdps=60, klim=25, Taylor_tol=1e-12, Unormtol=1e-08, veclim=3, delveclim=2, maxveclim=40): "\n generates a representation of the Green's function/Umatrix over spherical region of radius R\n with sub-bases with support in shell regions with boundaries delineated by RPlist\n this is done so that the projection operator for spatial projection based constraints is explicit\n the sub-regions are 0-RPlist[0], RPlist[0]-RPlist[1], ..., RPlist[-2]-RPlist[-1]\n RPlist[-1] is the radius of the entire bounding sphere\n the first region is an inner sphere, the other regions are cocentric shells\n note here we are still using the old convention for the U matrix to be consistent with older Arnoldi code\n in the new optimizations U = V^\\dagger-1 - G^\\dagger; here we calculate Uinv, and Uinv = V^-1-G\n " mp.dps = mpdps regionnum = len(RPlist) unitRgdotRglist = np.zeros(regionnum, dtype=type((1j * mp.one))) unitRgdotOutlist = np.zeros(regionnum, dtype=type((1j * mp.one))) unitImdotOutlist = np.zeros(regionnum, dtype=type((1j * mp.one))) subGmatlist = [] vecnum = 0 subbasis_head_indlist = [] rgridlist = [] All_unitBvecs = [] All_unitPvecs = [] for i in range(regionnum): print('N wave Region #', i) if (i == 0): subbasis_head_indlist.append(0) (rmRgN_Bpol, rmRgN_Ppol, rnImN_Bpol, rnImN_Ppol, unitrmnBpols, unitrmnPpols, Uinv) = speedup_Green_Taylor_Arnoldi_RgNmn_Uconverge(n, k, RPlist[0], klim=klim, Taylor_tol=Taylor_tol, invchi=invchi, Unormtol=Unormtol) unitRgdotRglist[0] = mp.sqrt(rmnNnormsqr_Taylor(n, k, RPlist[0], rmRgN_Bpol, rmRgN_Ppol)) subGmat = ((mp.eye(Uinv.rows) * invchi) - Uinv) subGmatlist.append(np.array(mpmath.fp.matrix(subGmat.tolist()).tolist())) vecnum += Uinv.rows rgrid = np.linspace(0, RPlist[0], gridpts) rgridlist.append(rgrid) for i in range((len(unitrmnBpols) - 1)): All_unitBvecs.append((((k * rgrid) ** (n - 1)) * po.polyval((k * rgrid), unitrmnBpols[i].coef))) 
All_unitPvecs.append((((k * rgrid) ** (n - 1)) * po.polyval((k * rgrid), unitrmnPpols[i].coef))) else: subbasis_head_indlist.append(vecnum) try: (rgrid, rsqrgrid, rdiffgrid, RgBgrid, RgPgrid, ImBgrid, ImPgrid, unitBvecs, unitPvecs, Uinv, Gmat) = shell_Green_grid_Arnoldi_RgandImNmn_Uconverge(n, k, RPlist[(i - 1)], RPlist[i], invchi, gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim) OutBgrid = (RgBgrid + (1j * ImBgrid)) OutPgrid = (RgPgrid + (1j * ImPgrid)) unitRgdotRglist[i] = mp.sqrt(rgrid_Nmn_normsqr(RgBgrid, RgPgrid, rsqrgrid, rdiffgrid)) unitRgdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[0], unitPvecs[0], OutBgrid, OutPgrid, rsqrgrid, rdiffgrid)) unitImdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[1], unitPvecs[1], OutBgrid, OutPgrid, rsqrgrid, rdiffgrid)) except FloatingPointError: (rgrid, rsqrgrid, rdiffgrid, RgBgrid, RgPgrid, ImBgrid, ImPgrid, unitBvecs, unitPvecs, Uinv, Gmat) = shell_Green_grid_Arnoldi_RgandImNmn_Uconverge_mp(n, k, RPlist[(i - 1)], RPlist[i], invchi, gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim) OutBgrid = (RgBgrid + (1j * ImBgrid)) OutPgrid = (RgPgrid + (1j * ImPgrid)) unitRgdotRglist[i] = mp.sqrt(rgrid_Nmn_normsqr(RgBgrid, RgPgrid, rsqrgrid, rdiffgrid)) unitRgdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[0], unitPvecs[0], OutBgrid, OutPgrid, rsqrgrid, rdiffgrid)) unitImdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[1], unitPvecs[1], OutBgrid, OutPgrid, rsqrgrid, rdiffgrid)) Gmat = np.array(mpmath.fp.matrix(Gmat.tolist()).tolist()) subGmatlist.append(Gmat) vecnum += Gmat.shape[0] rgridlist.append(rgrid) All_unitBvecs.extend(unitBvecs[:(- 2)]) All_unitPvecs.extend(unitPvecs[:(- 2)]) subbasis_head_indlist.append(vecnum) Gmat = np.zeros((vecnum, vecnum), dtype=np.complex) for i in range(regionnum): indstart = subbasis_head_indlist[i] indend = subbasis_head_indlist[(i + 1)] Gmat[(indstart:indend, indstart:indend)] = subGmatlist[i][(:, :)] jkcubed = (1j * (k ** 3)) for i in range(regionnum): Rgiind = 
subbasis_head_indlist[i] Imiind = (Rgiind + 1) for j in range(i): Rgjind = subbasis_head_indlist[j] Gmat[(Rgjind, Rgiind)] = np.complex(((jkcubed * unitRgdotRglist[j]) * unitRgdotOutlist[i])) Gmat[(Rgjind, Imiind)] = np.complex(((jkcubed * unitRgdotRglist[j]) * unitImdotOutlist[i])) for j in range((i + 1), regionnum): Rgjind = subbasis_head_indlist[j] Imjind = (Rgjind + 1) Gmat[(Rgjind, Rgiind)] = np.complex(((jkcubed * unitRgdotOutlist[j]) * unitRgdotRglist[i])) Gmat[(Imjind, Rgiind)] = np.complex(((jkcubed * unitImdotOutlist[j]) * unitRgdotRglist[i])) Uinv = ((invchi * np.eye(vecnum)) - Gmat) fullrgrid = rgridlist[0].copy() rboundaries = [0, gridpts] for i in range(1, len(rgridlist)): fullrgrid = np.concatenate((fullrgrid, rgridlist[i][1:])) rboundaries.append(len(fullrgrid)) All_fullr_unitBvecs = [] All_fullr_unitPvecs = [] for i in range(len(rgridlist)): for j in range(subbasis_head_indlist[i], subbasis_head_indlist[(i + 1)]): vecBgrid = np.zeros_like(fullrgrid) vecPgrid = np.zeros_like(fullrgrid) if (i == 0): vecBgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitBvecs[j][:])) vecPgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitPvecs[j][:])) else: vecBgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitBvecs[j][1:])) vecPgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitPvecs[j][1:])) All_fullr_unitBvecs.append(vecBgrid) All_fullr_unitPvecs.append(vecPgrid) return (Gmat, Uinv, unitRgdotRglist, subbasis_head_indlist, fullrgrid, All_fullr_unitBvecs, All_fullr_unitPvecs)
def spherical_multiRegion_Green_Arnoldi_Nmn_Uconverge(n, k, RPlist, invchi, gridpts=10000, mpdps=60, klim=25, Taylor_tol=1e-12, Unormtol=1e-08, veclim=3, delveclim=2, maxveclim=40): "\n generates a representation of the Green's function/Umatrix over spherical region of radius R\n with sub-bases with support in shell regions with boundaries delineated by RPlist\n this is done so that the projection operator for spatial projection based constraints is explicit\n the sub-regions are 0-RPlist[0], RPlist[0]-RPlist[1], ..., RPlist[-2]-RPlist[-1]\n RPlist[-1] is the radius of the entire bounding sphere\n the first region is an inner sphere, the other regions are cocentric shells\n note here we are still using the old convention for the U matrix to be consistent with older Arnoldi code\n in the new optimizations U = V^\\dagger-1 - G^\\dagger; here we calculate Uinv, and Uinv = V^-1-G\n " mp.dps = mpdps regionnum = len(RPlist) unitRgdotRglist = np.zeros(regionnum, dtype=type((1j * mp.one))) unitRgdotOutlist = np.zeros(regionnum, dtype=type((1j * mp.one))) unitImdotOutlist = np.zeros(regionnum, dtype=type((1j * mp.one))) subGmatlist = [] vecnum = 0 subbasis_head_indlist = [] rgridlist = [] All_unitBvecs = [] All_unitPvecs = [] for i in range(regionnum): print('N wave Region #', i) if (i == 0): subbasis_head_indlist.append(0) (rmRgN_Bpol, rmRgN_Ppol, rnImN_Bpol, rnImN_Ppol, unitrmnBpols, unitrmnPpols, Uinv) = speedup_Green_Taylor_Arnoldi_RgNmn_Uconverge(n, k, RPlist[0], klim=klim, Taylor_tol=Taylor_tol, invchi=invchi, Unormtol=Unormtol) unitRgdotRglist[0] = mp.sqrt(rmnNnormsqr_Taylor(n, k, RPlist[0], rmRgN_Bpol, rmRgN_Ppol)) subGmat = ((mp.eye(Uinv.rows) * invchi) - Uinv) subGmatlist.append(np.array(mpmath.fp.matrix(subGmat.tolist()).tolist())) vecnum += Uinv.rows rgrid = np.linspace(0, RPlist[0], gridpts) rgridlist.append(rgrid) for i in range((len(unitrmnBpols) - 1)): All_unitBvecs.append((((k * rgrid) ** (n - 1)) * po.polyval((k * rgrid), unitrmnBpols[i].coef))) 
All_unitPvecs.append((((k * rgrid) ** (n - 1)) * po.polyval((k * rgrid), unitrmnPpols[i].coef))) else: subbasis_head_indlist.append(vecnum) try: (rgrid, rsqrgrid, rdiffgrid, RgBgrid, RgPgrid, ImBgrid, ImPgrid, unitBvecs, unitPvecs, Uinv, Gmat) = shell_Green_grid_Arnoldi_RgandImNmn_Uconverge(n, k, RPlist[(i - 1)], RPlist[i], invchi, gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim) OutBgrid = (RgBgrid + (1j * ImBgrid)) OutPgrid = (RgPgrid + (1j * ImPgrid)) unitRgdotRglist[i] = mp.sqrt(rgrid_Nmn_normsqr(RgBgrid, RgPgrid, rsqrgrid, rdiffgrid)) unitRgdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[0], unitPvecs[0], OutBgrid, OutPgrid, rsqrgrid, rdiffgrid)) unitImdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[1], unitPvecs[1], OutBgrid, OutPgrid, rsqrgrid, rdiffgrid)) except FloatingPointError: (rgrid, rsqrgrid, rdiffgrid, RgBgrid, RgPgrid, ImBgrid, ImPgrid, unitBvecs, unitPvecs, Uinv, Gmat) = shell_Green_grid_Arnoldi_RgandImNmn_Uconverge_mp(n, k, RPlist[(i - 1)], RPlist[i], invchi, gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim) OutBgrid = (RgBgrid + (1j * ImBgrid)) OutPgrid = (RgPgrid + (1j * ImPgrid)) unitRgdotRglist[i] = mp.sqrt(rgrid_Nmn_normsqr(RgBgrid, RgPgrid, rsqrgrid, rdiffgrid)) unitRgdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[0], unitPvecs[0], OutBgrid, OutPgrid, rsqrgrid, rdiffgrid)) unitImdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[1], unitPvecs[1], OutBgrid, OutPgrid, rsqrgrid, rdiffgrid)) Gmat = np.array(mpmath.fp.matrix(Gmat.tolist()).tolist()) subGmatlist.append(Gmat) vecnum += Gmat.shape[0] rgridlist.append(rgrid) All_unitBvecs.extend(unitBvecs[:(- 2)]) All_unitPvecs.extend(unitPvecs[:(- 2)]) subbasis_head_indlist.append(vecnum) Gmat = np.zeros((vecnum, vecnum), dtype=np.complex) for i in range(regionnum): indstart = subbasis_head_indlist[i] indend = subbasis_head_indlist[(i + 1)] Gmat[(indstart:indend, indstart:indend)] = subGmatlist[i][(:, :)] jkcubed = (1j * (k ** 3)) for i in range(regionnum): Rgiind = 
subbasis_head_indlist[i] Imiind = (Rgiind + 1) for j in range(i): Rgjind = subbasis_head_indlist[j] Gmat[(Rgjind, Rgiind)] = np.complex(((jkcubed * unitRgdotRglist[j]) * unitRgdotOutlist[i])) Gmat[(Rgjind, Imiind)] = np.complex(((jkcubed * unitRgdotRglist[j]) * unitImdotOutlist[i])) for j in range((i + 1), regionnum): Rgjind = subbasis_head_indlist[j] Imjind = (Rgjind + 1) Gmat[(Rgjind, Rgiind)] = np.complex(((jkcubed * unitRgdotOutlist[j]) * unitRgdotRglist[i])) Gmat[(Imjind, Rgiind)] = np.complex(((jkcubed * unitImdotOutlist[j]) * unitRgdotRglist[i])) Uinv = ((invchi * np.eye(vecnum)) - Gmat) fullrgrid = rgridlist[0].copy() rboundaries = [0, gridpts] for i in range(1, len(rgridlist)): fullrgrid = np.concatenate((fullrgrid, rgridlist[i][1:])) rboundaries.append(len(fullrgrid)) All_fullr_unitBvecs = [] All_fullr_unitPvecs = [] for i in range(len(rgridlist)): for j in range(subbasis_head_indlist[i], subbasis_head_indlist[(i + 1)]): vecBgrid = np.zeros_like(fullrgrid) vecPgrid = np.zeros_like(fullrgrid) if (i == 0): vecBgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitBvecs[j][:])) vecPgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitPvecs[j][:])) else: vecBgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitBvecs[j][1:])) vecPgrid[rboundaries[i]:rboundaries[(i + 1)]] = mparr_to_npreal(mp_re(All_unitPvecs[j][1:])) All_fullr_unitBvecs.append(vecBgrid) All_fullr_unitPvecs.append(vecPgrid) return (Gmat, Uinv, unitRgdotRglist, subbasis_head_indlist, fullrgrid, All_fullr_unitBvecs, All_fullr_unitPvecs)<|docstring|>generates a representation of the Green's function/Umatrix over spherical region of radius R with sub-bases with support in shell regions with boundaries delineated by RPlist this is done so that the projection operator for spatial projection based constraints is explicit the sub-regions are 0-RPlist[0], RPlist[0]-RPlist[1], ..., RPlist[-2]-RPlist[-1] RPlist[-1] is the radius of the entire 
bounding sphere the first region is an inner sphere, the other regions are cocentric shells note here we are still using the old convention for the U matrix to be consistent with older Arnoldi code in the new optimizations U = V^\dagger-1 - G^\dagger; here we calculate Uinv, and Uinv = V^-1-G<|endoftext|>
04f42460f5f8ca60bfdd77e93f4a098a6376ad6eb478ce9cb75956be8648f65c
def test_parse_exif_string(self): '\n Tests :func:`colour_hdri.utilities.exif.parse_exif_string` definition.\n ' exif_tag = ExifTag('EXIF', 'Make', 'Canon', '271') self.assertEqual(parse_exif_string(exif_tag), 'Canon')
Tests :func:`colour_hdri.utilities.exif.parse_exif_string` definition.
colour_hdri/utilities/tests/test_exif.py
test_parse_exif_string
colour-science/colour-hdri
92
python
def test_parse_exif_string(self): '\n \n ' exif_tag = ExifTag('EXIF', 'Make', 'Canon', '271') self.assertEqual(parse_exif_string(exif_tag), 'Canon')
def test_parse_exif_string(self): '\n \n ' exif_tag = ExifTag('EXIF', 'Make', 'Canon', '271') self.assertEqual(parse_exif_string(exif_tag), 'Canon')<|docstring|>Tests :func:`colour_hdri.utilities.exif.parse_exif_string` definition.<|endoftext|>
0766ae1195add71f482063514e55e065d07c2932c533770cd0b0aef379b367c6
def test_parse_exif_numeric(self): '\n Tests :func:`colour_hdri.utilities.exif.parse_exif_numeric` definition.\n ' exif_tag = ExifTag('EXIF', 'Focal Length', '16', '37386') self.assertEqual(parse_exif_numeric(exif_tag), 16) exif_tag = ExifTag('EXIF', 'Focal Length', '16', '37386') self.assertIsInstance(parse_exif_numeric(exif_tag, np.int_), np.int_)
Tests :func:`colour_hdri.utilities.exif.parse_exif_numeric` definition.
colour_hdri/utilities/tests/test_exif.py
test_parse_exif_numeric
colour-science/colour-hdri
92
python
def test_parse_exif_numeric(self): '\n \n ' exif_tag = ExifTag('EXIF', 'Focal Length', '16', '37386') self.assertEqual(parse_exif_numeric(exif_tag), 16) exif_tag = ExifTag('EXIF', 'Focal Length', '16', '37386') self.assertIsInstance(parse_exif_numeric(exif_tag, np.int_), np.int_)
def test_parse_exif_numeric(self): '\n \n ' exif_tag = ExifTag('EXIF', 'Focal Length', '16', '37386') self.assertEqual(parse_exif_numeric(exif_tag), 16) exif_tag = ExifTag('EXIF', 'Focal Length', '16', '37386') self.assertIsInstance(parse_exif_numeric(exif_tag, np.int_), np.int_)<|docstring|>Tests :func:`colour_hdri.utilities.exif.parse_exif_numeric` definition.<|endoftext|>
b014e78185386a8ea62391ee4c6811d3feab3cb4c4c96016436b3ecefc2b2ed8
def test_parse_exif_fraction(self): '\n Tests :func:`colour_hdri.utilities.exif.parse_exif_fraction`\n definition.\n ' exif_tag = ExifTag('EXIF', 'Exposure Time', '0.01666666667', '33434') self.assertAlmostEqual(parse_exif_fraction(exif_tag), 0.01666666, places=7) exif_tag = ExifTag('EXIF', 'Exposure Time', '10/4000', '33434') self.assertAlmostEqual(parse_exif_fraction(exif_tag), 0.0025, places=7) self.assertIsInstance(parse_exif_fraction(exif_tag, np.int_), np.int_)
Tests :func:`colour_hdri.utilities.exif.parse_exif_fraction` definition.
colour_hdri/utilities/tests/test_exif.py
test_parse_exif_fraction
colour-science/colour-hdri
92
python
def test_parse_exif_fraction(self): '\n Tests :func:`colour_hdri.utilities.exif.parse_exif_fraction`\n definition.\n ' exif_tag = ExifTag('EXIF', 'Exposure Time', '0.01666666667', '33434') self.assertAlmostEqual(parse_exif_fraction(exif_tag), 0.01666666, places=7) exif_tag = ExifTag('EXIF', 'Exposure Time', '10/4000', '33434') self.assertAlmostEqual(parse_exif_fraction(exif_tag), 0.0025, places=7) self.assertIsInstance(parse_exif_fraction(exif_tag, np.int_), np.int_)
def test_parse_exif_fraction(self): '\n Tests :func:`colour_hdri.utilities.exif.parse_exif_fraction`\n definition.\n ' exif_tag = ExifTag('EXIF', 'Exposure Time', '0.01666666667', '33434') self.assertAlmostEqual(parse_exif_fraction(exif_tag), 0.01666666, places=7) exif_tag = ExifTag('EXIF', 'Exposure Time', '10/4000', '33434') self.assertAlmostEqual(parse_exif_fraction(exif_tag), 0.0025, places=7) self.assertIsInstance(parse_exif_fraction(exif_tag, np.int_), np.int_)<|docstring|>Tests :func:`colour_hdri.utilities.exif.parse_exif_fraction` definition.<|endoftext|>
b2a6b188e19105b0ca3150706ae2cbf0b315500c18c9e7cd2d3fc13943f2d5d0
def test_parse_exif_array(self): '\n Tests :func:`colour_hdri.utilities.exif.parse_exif_array` definition.\n ' exif_tag = ExifTag('EXIF', 'Color Matrix 1', '0.5309 -0.0229 -0.0336 -0.6241 1.3265 0.3337 -0.0817 0.1215 0.6664', '50721') np.testing.assert_array_equal(parse_exif_array(exif_tag), np.array([0.5309, (- 0.0229), (- 0.0336), (- 0.6241), 1.3265, 0.3337, (- 0.0817), 0.1215, 0.6664])) np.testing.assert_array_equal(parse_exif_array(exif_tag, shape=(3, 3)), np.array([[0.5309, (- 0.0229), (- 0.0336)], [(- 0.6241), 1.3265, 0.3337], [(- 0.0817), 0.1215, 0.6664]]))
Tests :func:`colour_hdri.utilities.exif.parse_exif_array` definition.
colour_hdri/utilities/tests/test_exif.py
test_parse_exif_array
colour-science/colour-hdri
92
python
def test_parse_exif_array(self): '\n \n ' exif_tag = ExifTag('EXIF', 'Color Matrix 1', '0.5309 -0.0229 -0.0336 -0.6241 1.3265 0.3337 -0.0817 0.1215 0.6664', '50721') np.testing.assert_array_equal(parse_exif_array(exif_tag), np.array([0.5309, (- 0.0229), (- 0.0336), (- 0.6241), 1.3265, 0.3337, (- 0.0817), 0.1215, 0.6664])) np.testing.assert_array_equal(parse_exif_array(exif_tag, shape=(3, 3)), np.array([[0.5309, (- 0.0229), (- 0.0336)], [(- 0.6241), 1.3265, 0.3337], [(- 0.0817), 0.1215, 0.6664]]))
def test_parse_exif_array(self): '\n \n ' exif_tag = ExifTag('EXIF', 'Color Matrix 1', '0.5309 -0.0229 -0.0336 -0.6241 1.3265 0.3337 -0.0817 0.1215 0.6664', '50721') np.testing.assert_array_equal(parse_exif_array(exif_tag), np.array([0.5309, (- 0.0229), (- 0.0336), (- 0.6241), 1.3265, 0.3337, (- 0.0817), 0.1215, 0.6664])) np.testing.assert_array_equal(parse_exif_array(exif_tag, shape=(3, 3)), np.array([[0.5309, (- 0.0229), (- 0.0336)], [(- 0.6241), 1.3265, 0.3337], [(- 0.0817), 0.1215, 0.6664]]))<|docstring|>Tests :func:`colour_hdri.utilities.exif.parse_exif_array` definition.<|endoftext|>
01db84fe224761570bbb95231d8e12e674ad13c9eecf315ee6882e94d57b0538
def test_parse_exif_data(self): '\n Tests :func:`colour_hdri.utilities.exif.parse_exif_data` definition.\n ' self.assertListEqual(parse_exif_data('[XMP] - Description :'), ['XMP', '-', 'Description', '']) self.assertListEqual(parse_exif_data('[EXIF] 296 Resolution Unit : 2'), ['EXIF', '296', 'Resolution Unit', '2']) self.assertListEqual(parse_exif_data('[ICC_Profile] 8 Profile Version : 528'), ['ICC_Profile', '8', 'Profile Version', '528'])
Tests :func:`colour_hdri.utilities.exif.parse_exif_data` definition.
colour_hdri/utilities/tests/test_exif.py
test_parse_exif_data
colour-science/colour-hdri
92
python
def test_parse_exif_data(self): '\n \n ' self.assertListEqual(parse_exif_data('[XMP] - Description :'), ['XMP', '-', 'Description', ]) self.assertListEqual(parse_exif_data('[EXIF] 296 Resolution Unit : 2'), ['EXIF', '296', 'Resolution Unit', '2']) self.assertListEqual(parse_exif_data('[ICC_Profile] 8 Profile Version : 528'), ['ICC_Profile', '8', 'Profile Version', '528'])
def test_parse_exif_data(self): '\n \n ' self.assertListEqual(parse_exif_data('[XMP] - Description :'), ['XMP', '-', 'Description', ]) self.assertListEqual(parse_exif_data('[EXIF] 296 Resolution Unit : 2'), ['EXIF', '296', 'Resolution Unit', '2']) self.assertListEqual(parse_exif_data('[ICC_Profile] 8 Profile Version : 528'), ['ICC_Profile', '8', 'Profile Version', '528'])<|docstring|>Tests :func:`colour_hdri.utilities.exif.parse_exif_data` definition.<|endoftext|>
6ff6524531e3f1a825c131585ab57759829398c8397b0edd6112342bdc5f85e6
def test_read_exif_tags(self): '\n Tests :func:`colour_hdri.utilities.exif.read_exif_tags` definition.\n ' test_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] exif_data = vivified_to_dict(read_exif_tags(test_jpg_image)) self.assertIsInstance(exif_data, type(dict())) self.assertListEqual(sorted(exif_data.keys()), ['Composite', 'EXIF', 'ExifTool', 'File', 'ICC_Profile', 'JFIF', 'Photoshop', 'XMP']) self.assertListEqual(sorted(exif_data['EXIF'].values()), [[ExifTag('EXIF', 'Camera Model Name', 'EOS 5D Mark II', '272')], [ExifTag('EXIF', 'Create Date', '2015:09:19 03:39:20', '36868')], [ExifTag('EXIF', 'Date/Time Original', '2015:09:19 03:39:20', '36867')], [ExifTag('EXIF', 'Exif Image Height', '426', '40963')], [ExifTag('EXIF', 'Exif Image Width', '640', '40962')], [ExifTag('EXIF', 'Exposure Time', '0.125', '33434')], [ExifTag('EXIF', 'F Number', '8', '33437')], [ExifTag('EXIF', 'Focal Length', '16', '37386')], [ExifTag('EXIF', 'ISO', '100', '34855')], [ExifTag('EXIF', 'Make', 'Canon', '271')], [ExifTag('EXIF', 'Modify Date', '2015:09:19 03:39:20', '306')], [ExifTag('EXIF', 'Orientation', '1', '274')], [ExifTag('EXIF', 'Photometric Interpretation', '2', '262')], [ExifTag('EXIF', 'Resolution Unit', '2', '296')], [ExifTag('EXIF', 'Software', 'Photos 1.0.1', '305')], [ExifTag('EXIF', 'X Resolution', '72', '282')], [ExifTag('EXIF', 'Y Resolution', '72', '283')]])
Tests :func:`colour_hdri.utilities.exif.read_exif_tags` definition.
colour_hdri/utilities/tests/test_exif.py
test_read_exif_tags
colour-science/colour-hdri
92
python
def test_read_exif_tags(self): '\n \n ' test_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] exif_data = vivified_to_dict(read_exif_tags(test_jpg_image)) self.assertIsInstance(exif_data, type(dict())) self.assertListEqual(sorted(exif_data.keys()), ['Composite', 'EXIF', 'ExifTool', 'File', 'ICC_Profile', 'JFIF', 'Photoshop', 'XMP']) self.assertListEqual(sorted(exif_data['EXIF'].values()), [[ExifTag('EXIF', 'Camera Model Name', 'EOS 5D Mark II', '272')], [ExifTag('EXIF', 'Create Date', '2015:09:19 03:39:20', '36868')], [ExifTag('EXIF', 'Date/Time Original', '2015:09:19 03:39:20', '36867')], [ExifTag('EXIF', 'Exif Image Height', '426', '40963')], [ExifTag('EXIF', 'Exif Image Width', '640', '40962')], [ExifTag('EXIF', 'Exposure Time', '0.125', '33434')], [ExifTag('EXIF', 'F Number', '8', '33437')], [ExifTag('EXIF', 'Focal Length', '16', '37386')], [ExifTag('EXIF', 'ISO', '100', '34855')], [ExifTag('EXIF', 'Make', 'Canon', '271')], [ExifTag('EXIF', 'Modify Date', '2015:09:19 03:39:20', '306')], [ExifTag('EXIF', 'Orientation', '1', '274')], [ExifTag('EXIF', 'Photometric Interpretation', '2', '262')], [ExifTag('EXIF', 'Resolution Unit', '2', '296')], [ExifTag('EXIF', 'Software', 'Photos 1.0.1', '305')], [ExifTag('EXIF', 'X Resolution', '72', '282')], [ExifTag('EXIF', 'Y Resolution', '72', '283')]])
def test_read_exif_tags(self): '\n \n ' test_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] exif_data = vivified_to_dict(read_exif_tags(test_jpg_image)) self.assertIsInstance(exif_data, type(dict())) self.assertListEqual(sorted(exif_data.keys()), ['Composite', 'EXIF', 'ExifTool', 'File', 'ICC_Profile', 'JFIF', 'Photoshop', 'XMP']) self.assertListEqual(sorted(exif_data['EXIF'].values()), [[ExifTag('EXIF', 'Camera Model Name', 'EOS 5D Mark II', '272')], [ExifTag('EXIF', 'Create Date', '2015:09:19 03:39:20', '36868')], [ExifTag('EXIF', 'Date/Time Original', '2015:09:19 03:39:20', '36867')], [ExifTag('EXIF', 'Exif Image Height', '426', '40963')], [ExifTag('EXIF', 'Exif Image Width', '640', '40962')], [ExifTag('EXIF', 'Exposure Time', '0.125', '33434')], [ExifTag('EXIF', 'F Number', '8', '33437')], [ExifTag('EXIF', 'Focal Length', '16', '37386')], [ExifTag('EXIF', 'ISO', '100', '34855')], [ExifTag('EXIF', 'Make', 'Canon', '271')], [ExifTag('EXIF', 'Modify Date', '2015:09:19 03:39:20', '306')], [ExifTag('EXIF', 'Orientation', '1', '274')], [ExifTag('EXIF', 'Photometric Interpretation', '2', '262')], [ExifTag('EXIF', 'Resolution Unit', '2', '296')], [ExifTag('EXIF', 'Software', 'Photos 1.0.1', '305')], [ExifTag('EXIF', 'X Resolution', '72', '282')], [ExifTag('EXIF', 'Y Resolution', '72', '283')]])<|docstring|>Tests :func:`colour_hdri.utilities.exif.read_exif_tags` definition.<|endoftext|>
41dc023123784d4e46888719acf8e62d011c740f49eef70406c3a47d5b7eef23
def setUp(self): '\n Initialises common tests attributes.\n ' self._temporary_directory = tempfile.mkdtemp()
Initialises common tests attributes.
colour_hdri/utilities/tests/test_exif.py
setUp
colour-science/colour-hdri
92
python
def setUp(self): '\n \n ' self._temporary_directory = tempfile.mkdtemp()
def setUp(self): '\n \n ' self._temporary_directory = tempfile.mkdtemp()<|docstring|>Initialises common tests attributes.<|endoftext|>
ebcbcb9ea0b8083c9de351475f09c7771820d6cd767ff0d307c2cab1efef7028
def tearDown(self): '\n After tests actions.\n ' shutil.rmtree(self._temporary_directory)
After tests actions.
colour_hdri/utilities/tests/test_exif.py
tearDown
colour-science/colour-hdri
92
python
def tearDown(self): '\n \n ' shutil.rmtree(self._temporary_directory)
def tearDown(self): '\n \n ' shutil.rmtree(self._temporary_directory)<|docstring|>After tests actions.<|endoftext|>
a6aaa48d996dfb975a12809f6b469adca151082414e0a4d6e4c3191ee6c70962
def test_copy_exif_tags(self): '\n Tests :func:`colour_hdri.utilities.exif.copy_exif_tags` definition.\n ' reference_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] test_jpg_image = os.path.join(self._temporary_directory, os.path.basename(reference_jpg_image)) shutil.copyfile(reference_jpg_image, test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0') delete_exif_tags(test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '') copy_exif_tags(reference_jpg_image, test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0')
Tests :func:`colour_hdri.utilities.exif.copy_exif_tags` definition.
colour_hdri/utilities/tests/test_exif.py
test_copy_exif_tags
colour-science/colour-hdri
92
python
def test_copy_exif_tags(self): '\n \n ' reference_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] test_jpg_image = os.path.join(self._temporary_directory, os.path.basename(reference_jpg_image)) shutil.copyfile(reference_jpg_image, test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0') delete_exif_tags(test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), ) copy_exif_tags(reference_jpg_image, test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0')
def test_copy_exif_tags(self): '\n \n ' reference_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] test_jpg_image = os.path.join(self._temporary_directory, os.path.basename(reference_jpg_image)) shutil.copyfile(reference_jpg_image, test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0') delete_exif_tags(test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), ) copy_exif_tags(reference_jpg_image, test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0')<|docstring|>Tests :func:`colour_hdri.utilities.exif.copy_exif_tags` definition.<|endoftext|>
41dc023123784d4e46888719acf8e62d011c740f49eef70406c3a47d5b7eef23
def setUp(self): '\n Initialises common tests attributes.\n ' self._temporary_directory = tempfile.mkdtemp()
Initialises common tests attributes.
colour_hdri/utilities/tests/test_exif.py
setUp
colour-science/colour-hdri
92
python
def setUp(self): '\n \n ' self._temporary_directory = tempfile.mkdtemp()
def setUp(self): '\n \n ' self._temporary_directory = tempfile.mkdtemp()<|docstring|>Initialises common tests attributes.<|endoftext|>
ebcbcb9ea0b8083c9de351475f09c7771820d6cd767ff0d307c2cab1efef7028
def tearDown(self): '\n After tests actions.\n ' shutil.rmtree(self._temporary_directory)
After tests actions.
colour_hdri/utilities/tests/test_exif.py
tearDown
colour-science/colour-hdri
92
python
def tearDown(self): '\n \n ' shutil.rmtree(self._temporary_directory)
def tearDown(self): '\n \n ' shutil.rmtree(self._temporary_directory)<|docstring|>After tests actions.<|endoftext|>
bd9979619051a176fbca0f5e631c2a3a97748ae7b46ca94ae10afb798f19503c
def test_update_exif_tags(self): '\n Tests :func:`colour_hdri.utilities.exif.update_exif_tags` definition.\n ' reference_jpg_images = filter_files(FROBISHER_001_DIRECTORY, ('jpg',)) test_jpg_images = [] for reference_jpg_image in reference_jpg_images: test_jpg_image = os.path.join(self._temporary_directory, os.path.basename(reference_jpg_image)) shutil.copyfile(reference_jpg_image, test_jpg_image) delete_exif_tags(test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '') test_jpg_images.append(test_jpg_image) update_exif_tags(zip(reference_jpg_images, test_jpg_images)) for test_jpg_image in test_jpg_images: self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0')
Tests :func:`colour_hdri.utilities.exif.update_exif_tags` definition.
colour_hdri/utilities/tests/test_exif.py
test_update_exif_tags
colour-science/colour-hdri
92
python
def test_update_exif_tags(self): '\n \n ' reference_jpg_images = filter_files(FROBISHER_001_DIRECTORY, ('jpg',)) test_jpg_images = [] for reference_jpg_image in reference_jpg_images: test_jpg_image = os.path.join(self._temporary_directory, os.path.basename(reference_jpg_image)) shutil.copyfile(reference_jpg_image, test_jpg_image) delete_exif_tags(test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), ) test_jpg_images.append(test_jpg_image) update_exif_tags(zip(reference_jpg_images, test_jpg_images)) for test_jpg_image in test_jpg_images: self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0')
def test_update_exif_tags(self): '\n \n ' reference_jpg_images = filter_files(FROBISHER_001_DIRECTORY, ('jpg',)) test_jpg_images = [] for reference_jpg_image in reference_jpg_images: test_jpg_image = os.path.join(self._temporary_directory, os.path.basename(reference_jpg_image)) shutil.copyfile(reference_jpg_image, test_jpg_image) delete_exif_tags(test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), ) test_jpg_images.append(test_jpg_image) update_exif_tags(zip(reference_jpg_images, test_jpg_images)) for test_jpg_image in test_jpg_images: self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0')<|docstring|>Tests :func:`colour_hdri.utilities.exif.update_exif_tags` definition.<|endoftext|>
41dc023123784d4e46888719acf8e62d011c740f49eef70406c3a47d5b7eef23
def setUp(self): '\n Initialises common tests attributes.\n ' self._temporary_directory = tempfile.mkdtemp()
Initialises common tests attributes.
colour_hdri/utilities/tests/test_exif.py
setUp
colour-science/colour-hdri
92
python
def setUp(self): '\n \n ' self._temporary_directory = tempfile.mkdtemp()
def setUp(self): '\n \n ' self._temporary_directory = tempfile.mkdtemp()<|docstring|>Initialises common tests attributes.<|endoftext|>
ebcbcb9ea0b8083c9de351475f09c7771820d6cd767ff0d307c2cab1efef7028
def tearDown(self): '\n After tests actions.\n ' shutil.rmtree(self._temporary_directory)
After tests actions.
colour_hdri/utilities/tests/test_exif.py
tearDown
colour-science/colour-hdri
92
python
def tearDown(self): '\n \n ' shutil.rmtree(self._temporary_directory)
def tearDown(self): '\n \n ' shutil.rmtree(self._temporary_directory)<|docstring|>After tests actions.<|endoftext|>
06ec2cb3f0e1004946e66702e330e3db7485609f4218785a2f1d82053087330c
def test_delete_exif_tags(self): '\n Tests :func:`colour_hdri.utilities.exif.delete_exif_tags` definition.\n ' reference_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] test_jpg_image = os.path.join(self._temporary_directory, os.path.basename(reference_jpg_image)) shutil.copyfile(reference_jpg_image, test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0') delete_exif_tags(test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '')
Tests :func:`colour_hdri.utilities.exif.delete_exif_tags` definition.
colour_hdri/utilities/tests/test_exif.py
test_delete_exif_tags
colour-science/colour-hdri
92
python
def test_delete_exif_tags(self): '\n \n ' reference_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] test_jpg_image = os.path.join(self._temporary_directory, os.path.basename(reference_jpg_image)) shutil.copyfile(reference_jpg_image, test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0') delete_exif_tags(test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), )
def test_delete_exif_tags(self): '\n \n ' reference_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] test_jpg_image = os.path.join(self._temporary_directory, os.path.basename(reference_jpg_image)) shutil.copyfile(reference_jpg_image, test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0') delete_exif_tags(test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), )<|docstring|>Tests :func:`colour_hdri.utilities.exif.delete_exif_tags` definition.<|endoftext|>
e95b1c3e107f92023b1c9bb103852e3b97ae0e8f8a3c1502ff2d7418d818f826
def test_read_exif_tag(self): '\n Tests :func:`colour_hdri.utilities.exif.read_exif_tag` definition.\n ' test_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0') self.assertEqual(read_exif_tag(test_jpg_image, 'ExposureTime'), '1/8') self.assertEqual(read_exif_tag(test_jpg_image, 'ISO'), '100')
Tests :func:`colour_hdri.utilities.exif.read_exif_tag` definition.
colour_hdri/utilities/tests/test_exif.py
test_read_exif_tag
colour-science/colour-hdri
92
python
def test_read_exif_tag(self): '\n \n ' test_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0') self.assertEqual(read_exif_tag(test_jpg_image, 'ExposureTime'), '1/8') self.assertEqual(read_exif_tag(test_jpg_image, 'ISO'), '100')
def test_read_exif_tag(self): '\n \n ' test_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] self.assertEqual(read_exif_tag(test_jpg_image, 'Aperture'), '8.0') self.assertEqual(read_exif_tag(test_jpg_image, 'ExposureTime'), '1/8') self.assertEqual(read_exif_tag(test_jpg_image, 'ISO'), '100')<|docstring|>Tests :func:`colour_hdri.utilities.exif.read_exif_tag` definition.<|endoftext|>
41dc023123784d4e46888719acf8e62d011c740f49eef70406c3a47d5b7eef23
def setUp(self): '\n Initialises common tests attributes.\n ' self._temporary_directory = tempfile.mkdtemp()
Initialises common tests attributes.
colour_hdri/utilities/tests/test_exif.py
setUp
colour-science/colour-hdri
92
python
def setUp(self): '\n \n ' self._temporary_directory = tempfile.mkdtemp()
def setUp(self): '\n \n ' self._temporary_directory = tempfile.mkdtemp()<|docstring|>Initialises common tests attributes.<|endoftext|>
ebcbcb9ea0b8083c9de351475f09c7771820d6cd767ff0d307c2cab1efef7028
def tearDown(self): '\n After tests actions.\n ' shutil.rmtree(self._temporary_directory)
After tests actions.
colour_hdri/utilities/tests/test_exif.py
tearDown
colour-science/colour-hdri
92
python
def tearDown(self): '\n \n ' shutil.rmtree(self._temporary_directory)
def tearDown(self): '\n \n ' shutil.rmtree(self._temporary_directory)<|docstring|>After tests actions.<|endoftext|>
fd2c168a70af758fbbd2321d5371be8e7a1532b54395ee27ab82da6caa3be902
def test_write_exif_tag(self): '\n Tests :func:`colour_hdri.utilities.exif.write_exif_tag` definition.\n ' reference_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] test_jpg_image = os.path.join(self._temporary_directory, os.path.basename(reference_jpg_image)) shutil.copyfile(reference_jpg_image, test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'FNumber'), '8.0') write_exif_tag(test_jpg_image, 'FNumber', '16.0') self.assertEqual(read_exif_tag(test_jpg_image, 'FNumber'), '16.0')
Tests :func:`colour_hdri.utilities.exif.write_exif_tag` definition.
colour_hdri/utilities/tests/test_exif.py
test_write_exif_tag
colour-science/colour-hdri
92
python
def test_write_exif_tag(self): '\n \n ' reference_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] test_jpg_image = os.path.join(self._temporary_directory, os.path.basename(reference_jpg_image)) shutil.copyfile(reference_jpg_image, test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'FNumber'), '8.0') write_exif_tag(test_jpg_image, 'FNumber', '16.0') self.assertEqual(read_exif_tag(test_jpg_image, 'FNumber'), '16.0')
def test_write_exif_tag(self): '\n \n ' reference_jpg_image = filter_files(FROBISHER_001_DIRECTORY, ('jpg',))[0] test_jpg_image = os.path.join(self._temporary_directory, os.path.basename(reference_jpg_image)) shutil.copyfile(reference_jpg_image, test_jpg_image) self.assertEqual(read_exif_tag(test_jpg_image, 'FNumber'), '8.0') write_exif_tag(test_jpg_image, 'FNumber', '16.0') self.assertEqual(read_exif_tag(test_jpg_image, 'FNumber'), '16.0')<|docstring|>Tests :func:`colour_hdri.utilities.exif.write_exif_tag` definition.<|endoftext|>
86a9bd2a4b0e87eb76657f749300bccc7c153328ee4d9679752b2dde9cf87ba6
def __init__(self, **kwargs): '\n Initializes a new RevocationStatus object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param time_of_revocation:\n The value to assign to the time_of_revocation property of this RevocationStatus.\n :type time_of_revocation: datetime\n\n :param revocation_reason:\n The value to assign to the revocation_reason property of this RevocationStatus.\n Allowed values for this property are: "UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE", \'UNKNOWN_ENUM_VALUE\'.\n Any unrecognized values returned by a service will be mapped to \'UNKNOWN_ENUM_VALUE\'.\n :type revocation_reason: str\n\n ' self.swagger_types = {'time_of_revocation': 'datetime', 'revocation_reason': 'str'} self.attribute_map = {'time_of_revocation': 'timeOfRevocation', 'revocation_reason': 'revocationReason'} self._time_of_revocation = None self._revocation_reason = None
Initializes a new RevocationStatus object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param time_of_revocation: The value to assign to the time_of_revocation property of this RevocationStatus. :type time_of_revocation: datetime :param revocation_reason: The value to assign to the revocation_reason property of this RevocationStatus. Allowed values for this property are: "UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type revocation_reason: str
src/oci/certificates_management/models/revocation_status.py
__init__
ezequielramos/oci-python-sdk
249
python
def __init__(self, **kwargs): '\n Initializes a new RevocationStatus object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param time_of_revocation:\n The value to assign to the time_of_revocation property of this RevocationStatus.\n :type time_of_revocation: datetime\n\n :param revocation_reason:\n The value to assign to the revocation_reason property of this RevocationStatus.\n Allowed values for this property are: "UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE", \'UNKNOWN_ENUM_VALUE\'.\n Any unrecognized values returned by a service will be mapped to \'UNKNOWN_ENUM_VALUE\'.\n :type revocation_reason: str\n\n ' self.swagger_types = {'time_of_revocation': 'datetime', 'revocation_reason': 'str'} self.attribute_map = {'time_of_revocation': 'timeOfRevocation', 'revocation_reason': 'revocationReason'} self._time_of_revocation = None self._revocation_reason = None
def __init__(self, **kwargs): '\n Initializes a new RevocationStatus object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param time_of_revocation:\n The value to assign to the time_of_revocation property of this RevocationStatus.\n :type time_of_revocation: datetime\n\n :param revocation_reason:\n The value to assign to the revocation_reason property of this RevocationStatus.\n Allowed values for this property are: "UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE", \'UNKNOWN_ENUM_VALUE\'.\n Any unrecognized values returned by a service will be mapped to \'UNKNOWN_ENUM_VALUE\'.\n :type revocation_reason: str\n\n ' self.swagger_types = {'time_of_revocation': 'datetime', 'revocation_reason': 'str'} self.attribute_map = {'time_of_revocation': 'timeOfRevocation', 'revocation_reason': 'revocationReason'} self._time_of_revocation = None self._revocation_reason = None<|docstring|>Initializes a new RevocationStatus object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param time_of_revocation: The value to assign to the time_of_revocation property of this RevocationStatus. :type time_of_revocation: datetime :param revocation_reason: The value to assign to the revocation_reason property of this RevocationStatus. Allowed values for this property are: "UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type revocation_reason: str<|endoftext|>
91cf74a544e817bf256ac96e032636c209b0f9d669ba4dde482fb245be8a261b
@property def time_of_revocation(self): '\n **[Required]** Gets the time_of_revocation of this RevocationStatus.\n The time when the entity was revoked, expressed in `RFC 3339`__ timestamp format.\n Example: `2019-04-03T21:10:29.600Z`\n\n __ https://tools.ietf.org/html/rfc3339\n\n\n :return: The time_of_revocation of this RevocationStatus.\n :rtype: datetime\n ' return self._time_of_revocation
**[Required]** Gets the time_of_revocation of this RevocationStatus. The time when the entity was revoked, expressed in `RFC 3339`__ timestamp format. Example: `2019-04-03T21:10:29.600Z` __ https://tools.ietf.org/html/rfc3339 :return: The time_of_revocation of this RevocationStatus. :rtype: datetime
src/oci/certificates_management/models/revocation_status.py
time_of_revocation
ezequielramos/oci-python-sdk
249
python
@property def time_of_revocation(self): '\n **[Required]** Gets the time_of_revocation of this RevocationStatus.\n The time when the entity was revoked, expressed in `RFC 3339`__ timestamp format.\n Example: `2019-04-03T21:10:29.600Z`\n\n __ https://tools.ietf.org/html/rfc3339\n\n\n :return: The time_of_revocation of this RevocationStatus.\n :rtype: datetime\n ' return self._time_of_revocation
@property def time_of_revocation(self): '\n **[Required]** Gets the time_of_revocation of this RevocationStatus.\n The time when the entity was revoked, expressed in `RFC 3339`__ timestamp format.\n Example: `2019-04-03T21:10:29.600Z`\n\n __ https://tools.ietf.org/html/rfc3339\n\n\n :return: The time_of_revocation of this RevocationStatus.\n :rtype: datetime\n ' return self._time_of_revocation<|docstring|>**[Required]** Gets the time_of_revocation of this RevocationStatus. The time when the entity was revoked, expressed in `RFC 3339`__ timestamp format. Example: `2019-04-03T21:10:29.600Z` __ https://tools.ietf.org/html/rfc3339 :return: The time_of_revocation of this RevocationStatus. :rtype: datetime<|endoftext|>
3d0930f981f45762f2a15148e5854c43e5b59e88fc2072a15149b1980fa93351
@time_of_revocation.setter def time_of_revocation(self, time_of_revocation): '\n Sets the time_of_revocation of this RevocationStatus.\n The time when the entity was revoked, expressed in `RFC 3339`__ timestamp format.\n Example: `2019-04-03T21:10:29.600Z`\n\n __ https://tools.ietf.org/html/rfc3339\n\n\n :param time_of_revocation: The time_of_revocation of this RevocationStatus.\n :type: datetime\n ' self._time_of_revocation = time_of_revocation
Sets the time_of_revocation of this RevocationStatus. The time when the entity was revoked, expressed in `RFC 3339`__ timestamp format. Example: `2019-04-03T21:10:29.600Z` __ https://tools.ietf.org/html/rfc3339 :param time_of_revocation: The time_of_revocation of this RevocationStatus. :type: datetime
src/oci/certificates_management/models/revocation_status.py
time_of_revocation
ezequielramos/oci-python-sdk
249
python
@time_of_revocation.setter def time_of_revocation(self, time_of_revocation): '\n Sets the time_of_revocation of this RevocationStatus.\n The time when the entity was revoked, expressed in `RFC 3339`__ timestamp format.\n Example: `2019-04-03T21:10:29.600Z`\n\n __ https://tools.ietf.org/html/rfc3339\n\n\n :param time_of_revocation: The time_of_revocation of this RevocationStatus.\n :type: datetime\n ' self._time_of_revocation = time_of_revocation
@time_of_revocation.setter def time_of_revocation(self, time_of_revocation): '\n Sets the time_of_revocation of this RevocationStatus.\n The time when the entity was revoked, expressed in `RFC 3339`__ timestamp format.\n Example: `2019-04-03T21:10:29.600Z`\n\n __ https://tools.ietf.org/html/rfc3339\n\n\n :param time_of_revocation: The time_of_revocation of this RevocationStatus.\n :type: datetime\n ' self._time_of_revocation = time_of_revocation<|docstring|>Sets the time_of_revocation of this RevocationStatus. The time when the entity was revoked, expressed in `RFC 3339`__ timestamp format. Example: `2019-04-03T21:10:29.600Z` __ https://tools.ietf.org/html/rfc3339 :param time_of_revocation: The time_of_revocation of this RevocationStatus. :type: datetime<|endoftext|>
40982714f46e6e718dc391b253d5f584554fa1add8e7831c6a1e0495e67c1f6d
@property def revocation_reason(self): '\n **[Required]** Gets the revocation_reason of this RevocationStatus.\n The reason the certificate or certificate authority (CA) was revoked.\n\n Allowed values for this property are: "UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE", \'UNKNOWN_ENUM_VALUE\'.\n Any unrecognized values returned by a service will be mapped to \'UNKNOWN_ENUM_VALUE\'.\n\n\n :return: The revocation_reason of this RevocationStatus.\n :rtype: str\n ' return self._revocation_reason
**[Required]** Gets the revocation_reason of this RevocationStatus. The reason the certificate or certificate authority (CA) was revoked. Allowed values for this property are: "UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The revocation_reason of this RevocationStatus. :rtype: str
src/oci/certificates_management/models/revocation_status.py
revocation_reason
ezequielramos/oci-python-sdk
249
python
@property def revocation_reason(self): '\n **[Required]** Gets the revocation_reason of this RevocationStatus.\n The reason the certificate or certificate authority (CA) was revoked.\n\n Allowed values for this property are: "UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE", \'UNKNOWN_ENUM_VALUE\'.\n Any unrecognized values returned by a service will be mapped to \'UNKNOWN_ENUM_VALUE\'.\n\n\n :return: The revocation_reason of this RevocationStatus.\n :rtype: str\n ' return self._revocation_reason
@property def revocation_reason(self): '\n **[Required]** Gets the revocation_reason of this RevocationStatus.\n The reason the certificate or certificate authority (CA) was revoked.\n\n Allowed values for this property are: "UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE", \'UNKNOWN_ENUM_VALUE\'.\n Any unrecognized values returned by a service will be mapped to \'UNKNOWN_ENUM_VALUE\'.\n\n\n :return: The revocation_reason of this RevocationStatus.\n :rtype: str\n ' return self._revocation_reason<|docstring|>**[Required]** Gets the revocation_reason of this RevocationStatus. The reason the certificate or certificate authority (CA) was revoked. Allowed values for this property are: "UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The revocation_reason of this RevocationStatus. :rtype: str<|endoftext|>
3ee5b070dfa246189fe995dbd5e72b0a78288f4776bc0e36441a7c4c16f95efa
@revocation_reason.setter def revocation_reason(self, revocation_reason): '\n Sets the revocation_reason of this RevocationStatus.\n The reason the certificate or certificate authority (CA) was revoked.\n\n\n :param revocation_reason: The revocation_reason of this RevocationStatus.\n :type: str\n ' allowed_values = ['UNSPECIFIED', 'KEY_COMPROMISE', 'CA_COMPROMISE', 'AFFILIATION_CHANGED', 'SUPERSEDED', 'CESSATION_OF_OPERATION', 'PRIVILEGE_WITHDRAWN', 'AA_COMPROMISE'] if (not value_allowed_none_or_none_sentinel(revocation_reason, allowed_values)): revocation_reason = 'UNKNOWN_ENUM_VALUE' self._revocation_reason = revocation_reason
Sets the revocation_reason of this RevocationStatus. The reason the certificate or certificate authority (CA) was revoked. :param revocation_reason: The revocation_reason of this RevocationStatus. :type: str
src/oci/certificates_management/models/revocation_status.py
revocation_reason
ezequielramos/oci-python-sdk
249
python
@revocation_reason.setter def revocation_reason(self, revocation_reason): '\n Sets the revocation_reason of this RevocationStatus.\n The reason the certificate or certificate authority (CA) was revoked.\n\n\n :param revocation_reason: The revocation_reason of this RevocationStatus.\n :type: str\n ' allowed_values = ['UNSPECIFIED', 'KEY_COMPROMISE', 'CA_COMPROMISE', 'AFFILIATION_CHANGED', 'SUPERSEDED', 'CESSATION_OF_OPERATION', 'PRIVILEGE_WITHDRAWN', 'AA_COMPROMISE'] if (not value_allowed_none_or_none_sentinel(revocation_reason, allowed_values)): revocation_reason = 'UNKNOWN_ENUM_VALUE' self._revocation_reason = revocation_reason
@revocation_reason.setter def revocation_reason(self, revocation_reason): '\n Sets the revocation_reason of this RevocationStatus.\n The reason the certificate or certificate authority (CA) was revoked.\n\n\n :param revocation_reason: The revocation_reason of this RevocationStatus.\n :type: str\n ' allowed_values = ['UNSPECIFIED', 'KEY_COMPROMISE', 'CA_COMPROMISE', 'AFFILIATION_CHANGED', 'SUPERSEDED', 'CESSATION_OF_OPERATION', 'PRIVILEGE_WITHDRAWN', 'AA_COMPROMISE'] if (not value_allowed_none_or_none_sentinel(revocation_reason, allowed_values)): revocation_reason = 'UNKNOWN_ENUM_VALUE' self._revocation_reason = revocation_reason<|docstring|>Sets the revocation_reason of this RevocationStatus. The reason the certificate or certificate authority (CA) was revoked. :param revocation_reason: The revocation_reason of this RevocationStatus. :type: str<|endoftext|>
b777edfcd80d237c5f5044c0972cd02cba762784b8583b0f1ea52b11c8187ffe
def revision_with_match(pattern=None, prefix=False, all_refs=False, return_unmatched=False): 'Return a description of the current commit\n\n Keyword arguments:\n pattern -- (Default: None) Use only refs that match this pattern.\n prefix -- (Default: False) If True, the pattern is considered a prefix\n and does not require an exact match.\n all_refs -- (Default: False) If True, consider all refs, not just tags.\n return_unmatched -- (Default: False) If False and a pattern is given that\n cannot be matched, return the empty string. If True, return\n the unmatched description nonetheless.\n ' command = REVISION_CMD[:] if pattern: command += ['--match', (pattern + ('*' if prefix else ''))] if all_refs: command += ['--all', '--long'] description = run(command) if (pattern and (not return_unmatched) and (not description.startswith(pattern))): return '' return description
Return a description of the current commit Keyword arguments: pattern -- (Default: None) Use only refs that match this pattern. prefix -- (Default: False) If True, the pattern is considered a prefix and does not require an exact match. all_refs -- (Default: False) If True, consider all refs, not just tags. return_unmatched -- (Default: False) If False and a pattern is given that cannot be matched, return the empty string. If True, return the unmatched description nonetheless.
tools/workspace_status_release.py
revision_with_match
ArsenSysyn/gerrit
623
python
def revision_with_match(pattern=None, prefix=False, all_refs=False, return_unmatched=False): 'Return a description of the current commit\n\n Keyword arguments:\n pattern -- (Default: None) Use only refs that match this pattern.\n prefix -- (Default: False) If True, the pattern is considered a prefix\n and does not require an exact match.\n all_refs -- (Default: False) If True, consider all refs, not just tags.\n return_unmatched -- (Default: False) If False and a pattern is given that\n cannot be matched, return the empty string. If True, return\n the unmatched description nonetheless.\n ' command = REVISION_CMD[:] if pattern: command += ['--match', (pattern + ('*' if prefix else ))] if all_refs: command += ['--all', '--long'] description = run(command) if (pattern and (not return_unmatched) and (not description.startswith(pattern))): return return description
def revision_with_match(pattern=None, prefix=False, all_refs=False, return_unmatched=False): 'Return a description of the current commit\n\n Keyword arguments:\n pattern -- (Default: None) Use only refs that match this pattern.\n prefix -- (Default: False) If True, the pattern is considered a prefix\n and does not require an exact match.\n all_refs -- (Default: False) If True, consider all refs, not just tags.\n return_unmatched -- (Default: False) If False and a pattern is given that\n cannot be matched, return the empty string. If True, return\n the unmatched description nonetheless.\n ' command = REVISION_CMD[:] if pattern: command += ['--match', (pattern + ('*' if prefix else ))] if all_refs: command += ['--all', '--long'] description = run(command) if (pattern and (not return_unmatched) and (not description.startswith(pattern))): return return description<|docstring|>Return a description of the current commit Keyword arguments: pattern -- (Default: None) Use only refs that match this pattern. prefix -- (Default: False) If True, the pattern is considered a prefix and does not require an exact match. all_refs -- (Default: False) If True, consider all refs, not just tags. return_unmatched -- (Default: False) If False and a pattern is given that cannot be matched, return the empty string. If True, return the unmatched description nonetheless.<|endoftext|>
46de940cb6c13f83a8a5e5f1264d52cfb1a2e097c547bd7f447e7bc5f9ee62fd
@GuildBaseCog.group(name='купить', aliases=['магазин'], invoke_without_command=True) async def role_shop(self, ctx): 'Displays all of the roles which can be purchased from role shop.' (await ctx.message.delete(delay=5)) if (not ctx.guild_profile.roleshop): return (await ctx.send_line(f'{ctx.emotes.imortal_boost.g8} На данный момент в магазине пусто.')) paginator = ctx.get_field_paginator(ctx.guild_profile.roleshop.roles, entry_parser=self._paginator_parser) paginator.embed.description = '```css\n Добро пожаловать в лавку !```' paginator.embed.set_author(name='Магазин ролей', icon_url=ctx.author.avatar_url) (await paginator.paginate())
Displays all of the roles which can be purchased from role shop.
cosmos/galaxies/guild/roleshop/base.py
role_shop
MrFreemanser/Bot
0
python
@GuildBaseCog.group(name='купить', aliases=['магазин'], invoke_without_command=True) async def role_shop(self, ctx): (await ctx.message.delete(delay=5)) if (not ctx.guild_profile.roleshop): return (await ctx.send_line(f'{ctx.emotes.imortal_boost.g8} На данный момент в магазине пусто.')) paginator = ctx.get_field_paginator(ctx.guild_profile.roleshop.roles, entry_parser=self._paginator_parser) paginator.embed.description = '```css\n Добро пожаловать в лавку !```' paginator.embed.set_author(name='Магазин ролей', icon_url=ctx.author.avatar_url) (await paginator.paginate())
@GuildBaseCog.group(name='купить', aliases=['магазин'], invoke_without_command=True) async def role_shop(self, ctx): (await ctx.message.delete(delay=5)) if (not ctx.guild_profile.roleshop): return (await ctx.send_line(f'{ctx.emotes.imortal_boost.g8} На данный момент в магазине пусто.')) paginator = ctx.get_field_paginator(ctx.guild_profile.roleshop.roles, entry_parser=self._paginator_parser) paginator.embed.description = '```css\n Добро пожаловать в лавку !```' paginator.embed.set_author(name='Магазин ролей', icon_url=ctx.author.avatar_url) (await paginator.paginate())<|docstring|>Displays all of the roles which can be purchased from role shop.<|endoftext|>
3d3acba93ac4cf45f94ea2f9e285bb35a80aa8f862d92afbcb41c299dc7cbdb9
def new_nip_project(working_directory): ' Helper for created a bare project ' child = pexpect.spawn('nip init', cwd=working_directory) child.sendline('') child.sendline('') child.sendline('') child.sendline('') while child.isalive(): time.sleep(0.2) return child
Helper for created a bare project
tests/helpers.py
new_nip_project
RopePy/nip
1
python
def new_nip_project(working_directory): ' ' child = pexpect.spawn('nip init', cwd=working_directory) child.sendline() child.sendline() child.sendline() child.sendline() while child.isalive(): time.sleep(0.2) return child
def new_nip_project(working_directory): ' ' child = pexpect.spawn('nip init', cwd=working_directory) child.sendline() child.sendline() child.sendline() child.sendline() while child.isalive(): time.sleep(0.2) return child<|docstring|>Helper for created a bare project<|endoftext|>
7773944bfc7d1048aa0db4f42b85c885df34be3d369ef52a19e20c7cb9c7e33a
def text(img: Image, *, threshold: float=0.8) -> Text: 'Regcognize text line, background color should be black.\n\n Args:\n img (Image): Preprocessed text line.\n\n Returns:\n Text: Text content\n ' reload_on_demand() ret = '' (w, h) = (img.width, img.height) if (img.height < _LINE_HEIGHT): w = round(((_LINE_HEIGHT / h) * w)) h = _LINE_HEIGHT img = img.resize((w, h)) cv_img = np.asarray(img.convert('L')) (_, binary_img) = cv2.threshold(cv_img, 0, 255, cv2.THRESH_OTSU) (contours, _) = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) if (len(contours) == 0): app.log.image('ocr result is empty', img, level=app.DEBUG) return '' contours_with_bbox = sorted(((i, _rect2bbox(cv2.boundingRect(i))) for i in contours), key=(lambda x: x[1][0])) max_char_width = max(((bbox[2] - bbox[0]) for (_, bbox) in contours_with_bbox)) max_char_height = max(((bbox[3] - bbox[1]) for (_, bbox) in contours_with_bbox)) max_char_width = max((max_char_height + 2), max_char_width) char_img_list: List[Tuple[(Tuple[(int, int, int, int)], np.ndarray)]] = [] char_parts: List[np.ndarray] = [] char_bbox = contours_with_bbox[0][1] char_non_zero_bbox = contours_with_bbox[0][1] def _crop_char(bbox: Tuple[(int, int, int, int)], img: np.ndarray): non_zero_pos_list = cv2.findNonZero(img) (l0, t0, r0, b0) = bbox (_, _, w0, h0) = _bbox2rect(bbox) non_zero_rect = cv2.boundingRect(non_zero_pos_list) (_, _, w1, h1) = non_zero_rect (l1, t1, r1, b1) = _rect2bbox(non_zero_rect) (ml, mt, mr, mb) = (l1, t1, (w0 - r1), (h0 - b1)) ret = img if (w1 > (max_char_width * 0.3)): l0 += ml r0 -= mr ret = ret[(:, l1:r1)] if (h1 > (max_char_height * 0.5)): t0 += mt b0 -= mb ret = ret[t1:b1] return ((l0, t0, r0, b0), ret) def _push_char(): if (not char_parts): return mask = np.zeros_like(binary_img) cv2.drawContours(mask, char_parts, (- 1), (255,), thickness=cv2.FILLED) char_img = cv2.copyTo(binary_img, mask) (l, t, r, b) = char_bbox char_img = char_img[(t:b, l:r)] char_img_list.append((char_bbox, char_img)) 
def _get_expanded_bbox(index: int) -> Tuple[(int, int, int, int)]: (_, bbox) = contours_with_bbox[index] if ((index + 1) < len(contours_with_bbox)): (_, next_bbox) = contours_with_bbox[(index + 1)] if ((next_bbox[0] - bbox[2]) < 2): bbox = _union_bbox(bbox, _get_expanded_bbox((index + 1))) return bbox for (index, v) in enumerate(contours_with_bbox): (i, _) = v bbox = _get_expanded_bbox(index) (l, t, r, b) = bbox is_new_char = (char_parts and (l > char_non_zero_bbox[2]) and (((l - char_non_zero_bbox[0]) > (max_char_width * 0.8)) or ((l - char_non_zero_bbox[2]) > (max_char_width * 0.2)) or ((r - char_non_zero_bbox[0]) > max_char_width) or (((char_non_zero_bbox[3] - char_non_zero_bbox[1]) < (max_char_height * 0.6)) and (((r - l) > (max_char_width * 0.6)) or ((l - char_non_zero_bbox[2]) > (max_char_width * 0.1)))) or (((b - t) < (max_char_height * 0.4)) and (l > (char_non_zero_bbox[2] + 1)) and (l > (char_non_zero_bbox[0] + (max_char_width * 0.3))))) and (not _bbox_contains(_pad_bbox(char_bbox, 2), bbox))) if is_new_char: space_w = (l - char_bbox[2]) divide_x = int(((l - (space_w * 0.5)) - 1)) last_r = min(divide_x, (char_bbox[0] + max_char_width)) char_bbox = _union_bbox(char_bbox, (last_r, t, last_r, b)) _push_char() char_parts = [] char_bbox = (max((last_r + 1), (r - max_char_width)), char_bbox[1], r, int((char_bbox[1] + max_char_height))) char_non_zero_bbox = bbox char_parts.append(i) char_non_zero_bbox = _union_bbox(char_non_zero_bbox, bbox) char_bbox = _union_bbox(char_bbox, bbox) _push_char() cropped_char_img_list = [_crop_char(bbox, img) for (bbox, img) in char_img_list] if (os.getenv('DEBUG') == __name__): segmentation_img = cv2.cvtColor(binary_img, cv2.COLOR_GRAY2BGR) for i in contours: (x, y, w, h) = cv2.boundingRect(i) cv2.rectangle(segmentation_img, (x, y), ((x + w), (y + h)), (0, 0, 255), thickness=1) chars_img = cv2.cvtColor(binary_img, cv2.COLOR_GRAY2BGR) for (bbox, _) in char_img_list: (l, t, r, b) = bbox cv2.rectangle(chars_img, (l, t), (r, b), (0, 0, 
255), thickness=1) cropped_chars_img = cv2.cvtColor(binary_img, cv2.COLOR_GRAY2BGR) for (bbox, _) in cropped_char_img_list: (l, t, r, b) = bbox cv2.rectangle(cropped_chars_img, (l, t), (r, b), (0, 0, 255), thickness=1) app.log.image('text', cv_img, level=app.DEBUG, layers={'binary': binary_img, 'segmentation': segmentation_img, 'chars': chars_img, 'cropped chars': cropped_chars_img}) else: app.log.image('text', cv_img, level=app.DEBUG, layers={'binary': binary_img}) for (_, i) in cropped_char_img_list: ret += _text_from_image(i, threshold) app.log.text(('ocr result: %s' % ret), level=app.DEBUG) return ret
Regcognize text line, background color should be black. Args: img (Image): Preprocessed text line. Returns: Text: Text content
auto_derby/ocr.py
text
gentle-knight-13/auto-derby
0
python
def text(img: Image, *, threshold: float=0.8) -> Text: 'Regcognize text line, background color should be black.\n\n Args:\n img (Image): Preprocessed text line.\n\n Returns:\n Text: Text content\n ' reload_on_demand() ret = (w, h) = (img.width, img.height) if (img.height < _LINE_HEIGHT): w = round(((_LINE_HEIGHT / h) * w)) h = _LINE_HEIGHT img = img.resize((w, h)) cv_img = np.asarray(img.convert('L')) (_, binary_img) = cv2.threshold(cv_img, 0, 255, cv2.THRESH_OTSU) (contours, _) = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) if (len(contours) == 0): app.log.image('ocr result is empty', img, level=app.DEBUG) return contours_with_bbox = sorted(((i, _rect2bbox(cv2.boundingRect(i))) for i in contours), key=(lambda x: x[1][0])) max_char_width = max(((bbox[2] - bbox[0]) for (_, bbox) in contours_with_bbox)) max_char_height = max(((bbox[3] - bbox[1]) for (_, bbox) in contours_with_bbox)) max_char_width = max((max_char_height + 2), max_char_width) char_img_list: List[Tuple[(Tuple[(int, int, int, int)], np.ndarray)]] = [] char_parts: List[np.ndarray] = [] char_bbox = contours_with_bbox[0][1] char_non_zero_bbox = contours_with_bbox[0][1] def _crop_char(bbox: Tuple[(int, int, int, int)], img: np.ndarray): non_zero_pos_list = cv2.findNonZero(img) (l0, t0, r0, b0) = bbox (_, _, w0, h0) = _bbox2rect(bbox) non_zero_rect = cv2.boundingRect(non_zero_pos_list) (_, _, w1, h1) = non_zero_rect (l1, t1, r1, b1) = _rect2bbox(non_zero_rect) (ml, mt, mr, mb) = (l1, t1, (w0 - r1), (h0 - b1)) ret = img if (w1 > (max_char_width * 0.3)): l0 += ml r0 -= mr ret = ret[(:, l1:r1)] if (h1 > (max_char_height * 0.5)): t0 += mt b0 -= mb ret = ret[t1:b1] return ((l0, t0, r0, b0), ret) def _push_char(): if (not char_parts): return mask = np.zeros_like(binary_img) cv2.drawContours(mask, char_parts, (- 1), (255,), thickness=cv2.FILLED) char_img = cv2.copyTo(binary_img, mask) (l, t, r, b) = char_bbox char_img = char_img[(t:b, l:r)] char_img_list.append((char_bbox, char_img)) def 
_get_expanded_bbox(index: int) -> Tuple[(int, int, int, int)]: (_, bbox) = contours_with_bbox[index] if ((index + 1) < len(contours_with_bbox)): (_, next_bbox) = contours_with_bbox[(index + 1)] if ((next_bbox[0] - bbox[2]) < 2): bbox = _union_bbox(bbox, _get_expanded_bbox((index + 1))) return bbox for (index, v) in enumerate(contours_with_bbox): (i, _) = v bbox = _get_expanded_bbox(index) (l, t, r, b) = bbox is_new_char = (char_parts and (l > char_non_zero_bbox[2]) and (((l - char_non_zero_bbox[0]) > (max_char_width * 0.8)) or ((l - char_non_zero_bbox[2]) > (max_char_width * 0.2)) or ((r - char_non_zero_bbox[0]) > max_char_width) or (((char_non_zero_bbox[3] - char_non_zero_bbox[1]) < (max_char_height * 0.6)) and (((r - l) > (max_char_width * 0.6)) or ((l - char_non_zero_bbox[2]) > (max_char_width * 0.1)))) or (((b - t) < (max_char_height * 0.4)) and (l > (char_non_zero_bbox[2] + 1)) and (l > (char_non_zero_bbox[0] + (max_char_width * 0.3))))) and (not _bbox_contains(_pad_bbox(char_bbox, 2), bbox))) if is_new_char: space_w = (l - char_bbox[2]) divide_x = int(((l - (space_w * 0.5)) - 1)) last_r = min(divide_x, (char_bbox[0] + max_char_width)) char_bbox = _union_bbox(char_bbox, (last_r, t, last_r, b)) _push_char() char_parts = [] char_bbox = (max((last_r + 1), (r - max_char_width)), char_bbox[1], r, int((char_bbox[1] + max_char_height))) char_non_zero_bbox = bbox char_parts.append(i) char_non_zero_bbox = _union_bbox(char_non_zero_bbox, bbox) char_bbox = _union_bbox(char_bbox, bbox) _push_char() cropped_char_img_list = [_crop_char(bbox, img) for (bbox, img) in char_img_list] if (os.getenv('DEBUG') == __name__): segmentation_img = cv2.cvtColor(binary_img, cv2.COLOR_GRAY2BGR) for i in contours: (x, y, w, h) = cv2.boundingRect(i) cv2.rectangle(segmentation_img, (x, y), ((x + w), (y + h)), (0, 0, 255), thickness=1) chars_img = cv2.cvtColor(binary_img, cv2.COLOR_GRAY2BGR) for (bbox, _) in char_img_list: (l, t, r, b) = bbox cv2.rectangle(chars_img, (l, t), (r, b), (0, 0, 
255), thickness=1) cropped_chars_img = cv2.cvtColor(binary_img, cv2.COLOR_GRAY2BGR) for (bbox, _) in cropped_char_img_list: (l, t, r, b) = bbox cv2.rectangle(cropped_chars_img, (l, t), (r, b), (0, 0, 255), thickness=1) app.log.image('text', cv_img, level=app.DEBUG, layers={'binary': binary_img, 'segmentation': segmentation_img, 'chars': chars_img, 'cropped chars': cropped_chars_img}) else: app.log.image('text', cv_img, level=app.DEBUG, layers={'binary': binary_img}) for (_, i) in cropped_char_img_list: ret += _text_from_image(i, threshold) app.log.text(('ocr result: %s' % ret), level=app.DEBUG) return ret
def text(img: Image, *, threshold: float=0.8) -> Text: 'Regcognize text line, background color should be black.\n\n Args:\n img (Image): Preprocessed text line.\n\n Returns:\n Text: Text content\n ' reload_on_demand() ret = (w, h) = (img.width, img.height) if (img.height < _LINE_HEIGHT): w = round(((_LINE_HEIGHT / h) * w)) h = _LINE_HEIGHT img = img.resize((w, h)) cv_img = np.asarray(img.convert('L')) (_, binary_img) = cv2.threshold(cv_img, 0, 255, cv2.THRESH_OTSU) (contours, _) = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) if (len(contours) == 0): app.log.image('ocr result is empty', img, level=app.DEBUG) return contours_with_bbox = sorted(((i, _rect2bbox(cv2.boundingRect(i))) for i in contours), key=(lambda x: x[1][0])) max_char_width = max(((bbox[2] - bbox[0]) for (_, bbox) in contours_with_bbox)) max_char_height = max(((bbox[3] - bbox[1]) for (_, bbox) in contours_with_bbox)) max_char_width = max((max_char_height + 2), max_char_width) char_img_list: List[Tuple[(Tuple[(int, int, int, int)], np.ndarray)]] = [] char_parts: List[np.ndarray] = [] char_bbox = contours_with_bbox[0][1] char_non_zero_bbox = contours_with_bbox[0][1] def _crop_char(bbox: Tuple[(int, int, int, int)], img: np.ndarray): non_zero_pos_list = cv2.findNonZero(img) (l0, t0, r0, b0) = bbox (_, _, w0, h0) = _bbox2rect(bbox) non_zero_rect = cv2.boundingRect(non_zero_pos_list) (_, _, w1, h1) = non_zero_rect (l1, t1, r1, b1) = _rect2bbox(non_zero_rect) (ml, mt, mr, mb) = (l1, t1, (w0 - r1), (h0 - b1)) ret = img if (w1 > (max_char_width * 0.3)): l0 += ml r0 -= mr ret = ret[(:, l1:r1)] if (h1 > (max_char_height * 0.5)): t0 += mt b0 -= mb ret = ret[t1:b1] return ((l0, t0, r0, b0), ret) def _push_char(): if (not char_parts): return mask = np.zeros_like(binary_img) cv2.drawContours(mask, char_parts, (- 1), (255,), thickness=cv2.FILLED) char_img = cv2.copyTo(binary_img, mask) (l, t, r, b) = char_bbox char_img = char_img[(t:b, l:r)] char_img_list.append((char_bbox, char_img)) def 
_get_expanded_bbox(index: int) -> Tuple[(int, int, int, int)]: (_, bbox) = contours_with_bbox[index] if ((index + 1) < len(contours_with_bbox)): (_, next_bbox) = contours_with_bbox[(index + 1)] if ((next_bbox[0] - bbox[2]) < 2): bbox = _union_bbox(bbox, _get_expanded_bbox((index + 1))) return bbox for (index, v) in enumerate(contours_with_bbox): (i, _) = v bbox = _get_expanded_bbox(index) (l, t, r, b) = bbox is_new_char = (char_parts and (l > char_non_zero_bbox[2]) and (((l - char_non_zero_bbox[0]) > (max_char_width * 0.8)) or ((l - char_non_zero_bbox[2]) > (max_char_width * 0.2)) or ((r - char_non_zero_bbox[0]) > max_char_width) or (((char_non_zero_bbox[3] - char_non_zero_bbox[1]) < (max_char_height * 0.6)) and (((r - l) > (max_char_width * 0.6)) or ((l - char_non_zero_bbox[2]) > (max_char_width * 0.1)))) or (((b - t) < (max_char_height * 0.4)) and (l > (char_non_zero_bbox[2] + 1)) and (l > (char_non_zero_bbox[0] + (max_char_width * 0.3))))) and (not _bbox_contains(_pad_bbox(char_bbox, 2), bbox))) if is_new_char: space_w = (l - char_bbox[2]) divide_x = int(((l - (space_w * 0.5)) - 1)) last_r = min(divide_x, (char_bbox[0] + max_char_width)) char_bbox = _union_bbox(char_bbox, (last_r, t, last_r, b)) _push_char() char_parts = [] char_bbox = (max((last_r + 1), (r - max_char_width)), char_bbox[1], r, int((char_bbox[1] + max_char_height))) char_non_zero_bbox = bbox char_parts.append(i) char_non_zero_bbox = _union_bbox(char_non_zero_bbox, bbox) char_bbox = _union_bbox(char_bbox, bbox) _push_char() cropped_char_img_list = [_crop_char(bbox, img) for (bbox, img) in char_img_list] if (os.getenv('DEBUG') == __name__): segmentation_img = cv2.cvtColor(binary_img, cv2.COLOR_GRAY2BGR) for i in contours: (x, y, w, h) = cv2.boundingRect(i) cv2.rectangle(segmentation_img, (x, y), ((x + w), (y + h)), (0, 0, 255), thickness=1) chars_img = cv2.cvtColor(binary_img, cv2.COLOR_GRAY2BGR) for (bbox, _) in char_img_list: (l, t, r, b) = bbox cv2.rectangle(chars_img, (l, t), (r, b), (0, 0, 
255), thickness=1) cropped_chars_img = cv2.cvtColor(binary_img, cv2.COLOR_GRAY2BGR) for (bbox, _) in cropped_char_img_list: (l, t, r, b) = bbox cv2.rectangle(cropped_chars_img, (l, t), (r, b), (0, 0, 255), thickness=1) app.log.image('text', cv_img, level=app.DEBUG, layers={'binary': binary_img, 'segmentation': segmentation_img, 'chars': chars_img, 'cropped chars': cropped_chars_img}) else: app.log.image('text', cv_img, level=app.DEBUG, layers={'binary': binary_img}) for (_, i) in cropped_char_img_list: ret += _text_from_image(i, threshold) app.log.text(('ocr result: %s' % ret), level=app.DEBUG) return ret<|docstring|>Regcognize text line, background color should be black. Args: img (Image): Preprocessed text line. Returns: Text: Text content<|endoftext|>
d5d175b4b60185881607655ff4e46fa724fb7aa67adae3f09c08e2244d84944c
def process_mention_pairs_to_output(mention_pairs): '\n Processes the given mention pairs into a tensor, Y, with the expected output.\n ' Y = np.empty(shape=(len(mention_pairs),), dtype='int32') for idx in range(len(mention_pairs)): Y[idx] = (1 if mention_pairs[idx].is_coreferent else 0) return Y
Processes the given mention pairs into a tensor, Y, with the expected output.
coref/data.py
process_mention_pairs_to_output
AndreFCruz/coref-web-platform
9
python
def process_mention_pairs_to_output(mention_pairs): '\n \n ' Y = np.empty(shape=(len(mention_pairs),), dtype='int32') for idx in range(len(mention_pairs)): Y[idx] = (1 if mention_pairs[idx].is_coreferent else 0) return Y
def process_mention_pairs_to_output(mention_pairs): '\n \n ' Y = np.empty(shape=(len(mention_pairs),), dtype='int32') for idx in range(len(mention_pairs)): Y[idx] = (1 if mention_pairs[idx].is_coreferent else 0) return Y<|docstring|>Processes the given mention pairs into a tensor, Y, with the expected output.<|endoftext|>
cdceb48315a20fe1aebccfd9238301e533bc713478faa3be0919a5d3e059a6d9
def process_mention_pairs_to_distance_features(mention_pairs, num_features=2): '\n Processes the given mention pairs into a vector of scalar features (e.g.\n sentence distance, mention distance, one-hot encoded pos tags, ...).\n ' bins = [0, 1, 2, 3, 4, 5, 8, 16, 32, 64, math.inf] bin_distance = (lambda x: bin_scalar(x, bins)) num_mention_pairs = len(mention_pairs) X_scalar = np.empty(shape=(num_mention_pairs, num_features), dtype='float32') for (idx, mp) in enumerate(mention_pairs): features = [bin_distance(mp.sent_dist)] if (num_features >= 2): features.append(bin_distance(mp.token_dist)) if (num_features == 3): pass elif (num_features > 3): raise RuntimeWarning('Invalid number of distance features: {}'.format(num_features)) X_scalar[idx] = features return X_scalar
Processes the given mention pairs into a vector of scalar features (e.g. sentence distance, mention distance, one-hot encoded pos tags, ...).
coref/data.py
process_mention_pairs_to_distance_features
AndreFCruz/coref-web-platform
9
python
def process_mention_pairs_to_distance_features(mention_pairs, num_features=2): '\n Processes the given mention pairs into a vector of scalar features (e.g.\n sentence distance, mention distance, one-hot encoded pos tags, ...).\n ' bins = [0, 1, 2, 3, 4, 5, 8, 16, 32, 64, math.inf] bin_distance = (lambda x: bin_scalar(x, bins)) num_mention_pairs = len(mention_pairs) X_scalar = np.empty(shape=(num_mention_pairs, num_features), dtype='float32') for (idx, mp) in enumerate(mention_pairs): features = [bin_distance(mp.sent_dist)] if (num_features >= 2): features.append(bin_distance(mp.token_dist)) if (num_features == 3): pass elif (num_features > 3): raise RuntimeWarning('Invalid number of distance features: {}'.format(num_features)) X_scalar[idx] = features return X_scalar
def process_mention_pairs_to_distance_features(mention_pairs, num_features=2): '\n Processes the given mention pairs into a vector of scalar features (e.g.\n sentence distance, mention distance, one-hot encoded pos tags, ...).\n ' bins = [0, 1, 2, 3, 4, 5, 8, 16, 32, 64, math.inf] bin_distance = (lambda x: bin_scalar(x, bins)) num_mention_pairs = len(mention_pairs) X_scalar = np.empty(shape=(num_mention_pairs, num_features), dtype='float32') for (idx, mp) in enumerate(mention_pairs): features = [bin_distance(mp.sent_dist)] if (num_features >= 2): features.append(bin_distance(mp.token_dist)) if (num_features == 3): pass elif (num_features > 3): raise RuntimeWarning('Invalid number of distance features: {}'.format(num_features)) X_scalar[idx] = features return X_scalar<|docstring|>Processes the given mention pairs into a vector of scalar features (e.g. sentence distance, mention distance, one-hot encoded pos tags, ...).<|endoftext|>
8bd94dc84fce392206d9518070126af4a5db23e8e9b7e9d08e22cb7fcfe54ebd
def process_mention_pairs_to_indices(mention_pairs, tokenizer, max_mention_length=50): '\n Processes the given mention pairs into an input dataset, parsing mention tokens\n into their indices in the embedding layer according to the given tokenizer.\n ' num_mention_pairs = len(mention_pairs) X_m1 = np.empty(shape=(num_mention_pairs, max_mention_length), dtype='int32') X_m2 = np.empty(shape=(num_mention_pairs, max_mention_length), dtype='int32') for (idx, mp) in enumerate(mention_pairs): (seq_m1, seq_m2) = tokenizer.texts_to_sequences([mp.m1.full_mention, mp.m2.full_mention]) (X_m1[idx], X_m2[idx]) = pad_sequences([seq_m1, seq_m2], maxlen=max_mention_length) return (X_m1, X_m2)
Processes the given mention pairs into an input dataset, parsing mention tokens into their indices in the embedding layer according to the given tokenizer.
coref/data.py
process_mention_pairs_to_indices
AndreFCruz/coref-web-platform
9
python
def process_mention_pairs_to_indices(mention_pairs, tokenizer, max_mention_length=50): '\n Processes the given mention pairs into an input dataset, parsing mention tokens\n into their indices in the embedding layer according to the given tokenizer.\n ' num_mention_pairs = len(mention_pairs) X_m1 = np.empty(shape=(num_mention_pairs, max_mention_length), dtype='int32') X_m2 = np.empty(shape=(num_mention_pairs, max_mention_length), dtype='int32') for (idx, mp) in enumerate(mention_pairs): (seq_m1, seq_m2) = tokenizer.texts_to_sequences([mp.m1.full_mention, mp.m2.full_mention]) (X_m1[idx], X_m2[idx]) = pad_sequences([seq_m1, seq_m2], maxlen=max_mention_length) return (X_m1, X_m2)
def process_mention_pairs_to_indices(mention_pairs, tokenizer, max_mention_length=50): '\n Processes the given mention pairs into an input dataset, parsing mention tokens\n into their indices in the embedding layer according to the given tokenizer.\n ' num_mention_pairs = len(mention_pairs) X_m1 = np.empty(shape=(num_mention_pairs, max_mention_length), dtype='int32') X_m2 = np.empty(shape=(num_mention_pairs, max_mention_length), dtype='int32') for (idx, mp) in enumerate(mention_pairs): (seq_m1, seq_m2) = tokenizer.texts_to_sequences([mp.m1.full_mention, mp.m2.full_mention]) (X_m1[idx], X_m2[idx]) = pad_sequences([seq_m1, seq_m2], maxlen=max_mention_length) return (X_m1, X_m2)<|docstring|>Processes the given mention pairs into an input dataset, parsing mention tokens into their indices in the embedding layer according to the given tokenizer.<|endoftext|>
dc7b70183663b90dc8b6129a30d5169b09250c58bc6ab3813b3c22522ffb2d4a
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl'], extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False) -> Union[(StructType, DataType)]: "\n This resource allows for the definition of various types of plans as a\n sharable, consumable, and executable artifact. The resource is general enough\n to support the description of a broad range of clinical artifacts such as\n clinical decision support rules, order sets and protocols.\n\n\n id: Unique id for the element within a resource (for internal references). This\n may be any string value that does not contain spaces.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the element. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the element and that modifies the understanding of the element\n in which it is contained and/or the understanding of the containing element's\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. 
Though any\n implementer can define an extension, there is a set of requirements that SHALL\n be met as part of the definition of the extension. Applications processing a\n resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n path: The path to the element to be customized. This is the path on the resource\n that will hold the result of the calculation defined by the expression. The\n specified path SHALL be a FHIRPath resolveable on the specified target type of\n the ActivityDefinition, and SHALL consist only of identifiers, constant\n indexers, and a restricted subset of functions. The path is allowed to contain\n qualifiers (.) to traverse sub-elements, as well as indexers ([x]) to traverse\n multiple-cardinality sub-elements (see the [Simple FHIRPath\n Profile](fhirpath.html#simple) for full details).\n\n expression: An expression specifying the value of the customized element.\n\n " from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.complex_types.expression import ExpressionSchema if ((max_recursion_limit and (nesting_list.count('PlanDefinition_DynamicValue') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['PlanDefinition_DynamicValue']) schema = StructType([StructField('id', StringType(), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, 
include_modifierExtension=include_modifierExtension)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('path', StringType(), True), StructField('expression', ExpressionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema
This resource allows for the definition of various types of plans as a sharable, consumable, and executable artifact. The resource is general enough to support the description of a broad range of clinical artifacts such as clinical decision support rules, order sets and protocols. id: Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces. extension: May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. modifierExtension: May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions. Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). path: The path to the element to be customized. This is the path on the resource that will hold the result of the calculation defined by the expression. 
The specified path SHALL be a FHIRPath resolveable on the specified target type of the ActivityDefinition, and SHALL consist only of identifiers, constant indexers, and a restricted subset of functions. The path is allowed to contain qualifiers (.) to traverse sub-elements, as well as indexers ([x]) to traverse multiple-cardinality sub-elements (see the [Simple FHIRPath Profile](fhirpath.html#simple) for full details). expression: An expression specifying the value of the customized element.
spark_fhir_schemas/r4/complex_types/plandefinition_dynamicvalue.py
get_schema
imranq2/SparkFhirSchemas
2
python
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl'], extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False) -> Union[(StructType, DataType)]: "\n This resource allows for the definition of various types of plans as a\n sharable, consumable, and executable artifact. The resource is general enough\n to support the description of a broad range of clinical artifacts such as\n clinical decision support rules, order sets and protocols.\n\n\n id: Unique id for the element within a resource (for internal references). This\n may be any string value that does not contain spaces.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the element. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the element and that modifies the understanding of the element\n in which it is contained and/or the understanding of the containing element's\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. 
Though any\n implementer can define an extension, there is a set of requirements that SHALL\n be met as part of the definition of the extension. Applications processing a\n resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n path: The path to the element to be customized. This is the path on the resource\n that will hold the result of the calculation defined by the expression. The\n specified path SHALL be a FHIRPath resolveable on the specified target type of\n the ActivityDefinition, and SHALL consist only of identifiers, constant\n indexers, and a restricted subset of functions. The path is allowed to contain\n qualifiers (.) to traverse sub-elements, as well as indexers ([x]) to traverse\n multiple-cardinality sub-elements (see the [Simple FHIRPath\n Profile](fhirpath.html#simple) for full details).\n\n expression: An expression specifying the value of the customized element.\n\n " from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.complex_types.expression import ExpressionSchema if ((max_recursion_limit and (nesting_list.count('PlanDefinition_DynamicValue') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['PlanDefinition_DynamicValue']) schema = StructType([StructField('id', StringType(), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, 
include_modifierExtension=include_modifierExtension)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('path', StringType(), True), StructField('expression', ExpressionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl'], extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False) -> Union[(StructType, DataType)]: "\n This resource allows for the definition of various types of plans as a\n sharable, consumable, and executable artifact. The resource is general enough\n to support the description of a broad range of clinical artifacts such as\n clinical decision support rules, order sets and protocols.\n\n\n id: Unique id for the element within a resource (for internal references). This\n may be any string value that does not contain spaces.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the element. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the element and that modifies the understanding of the element\n in which it is contained and/or the understanding of the containing element's\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. 
Though any\n implementer can define an extension, there is a set of requirements that SHALL\n be met as part of the definition of the extension. Applications processing a\n resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n path: The path to the element to be customized. This is the path on the resource\n that will hold the result of the calculation defined by the expression. The\n specified path SHALL be a FHIRPath resolveable on the specified target type of\n the ActivityDefinition, and SHALL consist only of identifiers, constant\n indexers, and a restricted subset of functions. The path is allowed to contain\n qualifiers (.) to traverse sub-elements, as well as indexers ([x]) to traverse\n multiple-cardinality sub-elements (see the [Simple FHIRPath\n Profile](fhirpath.html#simple) for full details).\n\n expression: An expression specifying the value of the customized element.\n\n " from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.complex_types.expression import ExpressionSchema if ((max_recursion_limit and (nesting_list.count('PlanDefinition_DynamicValue') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['PlanDefinition_DynamicValue']) schema = StructType([StructField('id', StringType(), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, 
include_modifierExtension=include_modifierExtension)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('path', StringType(), True), StructField('expression', ExpressionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema<|docstring|>This resource allows for the definition of various types of plans as a sharable, consumable, and executable artifact. The resource is general enough to support the description of a broad range of clinical artifacts such as clinical decision support rules, order sets and protocols. id: Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces. extension: May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. 
Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. modifierExtension: May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions. Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). path: The path to the element to be customized. This is the path on the resource that will hold the result of the calculation defined by the expression. The specified path SHALL be a FHIRPath resolveable on the specified target type of the ActivityDefinition, and SHALL consist only of identifiers, constant indexers, and a restricted subset of functions. The path is allowed to contain qualifiers (.) to traverse sub-elements, as well as indexers ([x]) to traverse multiple-cardinality sub-elements (see the [Simple FHIRPath Profile](fhirpath.html#simple) for full details). expression: An expression specifying the value of the customized element.<|endoftext|>
6e6aeb1ed70cc7a8289fe93870252446c40e0caa716ae484e61979223fc4d7d2
def asyncReadDM4(*args, **kwargs): "\n Calls `readDM4` in a separate thread and executes it in the background.\n\n Parameters\n ----------\n Valid arguments are as for `readDM4()`. \n\n Returns\n -------\n future\n A ``concurrent.futures.Future()`` object. Calling ``future.result()`` \n will halt until the read is finished and returns the image and meta-data\n as per a normal call to `readMRC`. \n\n Example\n -------\n\n worker = asyncReadMRC( 'someones_file.mrc' )\n # Do some work\n mrcImage, mrcMeta = worker.result()\n " return _asyncExecutor.submit(readDM4, *args, **kwargs)
Calls `readDM4` in a separate thread and executes it in the background. Parameters ---------- Valid arguments are as for `readDM4()`. Returns ------- future A ``concurrent.futures.Future()`` object. Calling ``future.result()`` will halt until the read is finished and returns the image and meta-data as per a normal call to `readMRC`. Example ------- worker = asyncReadMRC( 'someones_file.mrc' ) # Do some work mrcImage, mrcMeta = worker.result()
mrcz/ioDM.py
asyncReadDM4
em-MRCZ/python-mrcz
13
python
def asyncReadDM4(*args, **kwargs): "\n Calls `readDM4` in a separate thread and executes it in the background.\n\n Parameters\n ----------\n Valid arguments are as for `readDM4()`. \n\n Returns\n -------\n future\n A ``concurrent.futures.Future()`` object. Calling ``future.result()`` \n will halt until the read is finished and returns the image and meta-data\n as per a normal call to `readMRC`. \n\n Example\n -------\n\n worker = asyncReadMRC( 'someones_file.mrc' )\n # Do some work\n mrcImage, mrcMeta = worker.result()\n " return _asyncExecutor.submit(readDM4, *args, **kwargs)
def asyncReadDM4(*args, **kwargs): "\n Calls `readDM4` in a separate thread and executes it in the background.\n\n Parameters\n ----------\n Valid arguments are as for `readDM4()`. \n\n Returns\n -------\n future\n A ``concurrent.futures.Future()`` object. Calling ``future.result()`` \n will halt until the read is finished and returns the image and meta-data\n as per a normal call to `readMRC`. \n\n Example\n -------\n\n worker = asyncReadMRC( 'someones_file.mrc' )\n # Do some work\n mrcImage, mrcMeta = worker.result()\n " return _asyncExecutor.submit(readDM4, *args, **kwargs)<|docstring|>Calls `readDM4` in a separate thread and executes it in the background. Parameters ---------- Valid arguments are as for `readDM4()`. Returns ------- future A ``concurrent.futures.Future()`` object. Calling ``future.result()`` will halt until the read is finished and returns the image and meta-data as per a normal call to `readMRC`. Example ------- worker = asyncReadMRC( 'someones_file.mrc' ) # Do some work mrcImage, mrcMeta = worker.result()<|endoftext|>
c9670631ed4cfe8619c85071c041eb08634b023fa3a66925d85612c7660a1964
def parseTag(self, parent): ' Parse a tag at the given file handle location ' tag_namelen = np.fromfile(self.f, dtype='>i2', count=1)[0] if (tag_namelen > 0): tag_name = self.f.read(tag_namelen) else: tag_name = b'' tag_fieldlen = np.fromfile(self.f, dtype='>i8', count=1)[0] loc_tag = self.f.tell() self.f.read(4) tag_ninfo = np.fromfile(self.f, dtype='>i8', count=1)[0] if self.verbose: print(('Found tag: %s%s with %d elements' % (parent, tag_name, tag_ninfo))) split_tag_name = (parent + tag_name).split(b'/') imageIndex = np.int64(split_tag_name[2]) try: self.im[imageIndex] except IndexError: self.im.append(DMImage()) if (split_tag_name[3] == b'ImageData'): dimCount = 0 if (split_tag_name[4] == b'Calibrations'): if (split_tag_name[5] == b'Brightness'): if (split_tag_name[6] == b'Origin'): self.im[imageIndex].imageInfo['IntensityOrigin'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[6] == b'Scale'): self.im[imageIndex].imageInfo['IntensityScale'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[6] == b'Units'): self.im[imageIndex].imageInfo['IntensityUnits'] = self.retrieveTagData(tag_ninfo).astype(np.uint8).tostring().decode('utf-8') elif (split_tag_name[5] == b'Dimension'): if (np.int64(split_tag_name[6]) == 0): currDim = 'X' elif (np.int64(split_tag_name[6]) == 1): currDim = 'Y' elif (np.int64(split_tag_name[6]) == 2): currDim = 'Z' else: raise ValueError(((('DM4Import: Unknown dimension ' + str(split_tag_name[6])) + ' at location ') + hex(self.f.tell()))) if (split_tag_name[7] == b'Origin'): self.im[imageIndex].imageInfo[(('Dim' + currDim) + 'Origin')] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[7] == b'Scale'): self.im[imageIndex].imageInfo[(('Dim' + currDim) + 'Scale')] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[7] == b'Units'): try: self.im[imageIndex].imageInfo[(('Dim' + currDim) + 'Units')] = self.retrieveTagData(tag_ninfo).astype(np.int16).tostring().decode('utf-8') except: try: self.im[imageIndex].imageInfo[(('Dim' + 
currDim) + 'Units')] = self.retrieveTagData(tag_ninfo).astype(np.int16).tostring().decode('ISO-8859-1') except: pass pass elif (split_tag_name[4] == b'Data'): if self.verbose: print(((('Found image %d' % imageIndex) + ' at offset : ') + str(self.f.tell()))) self.im[imageIndex].imageData = self.retrieveTagData(tag_ninfo) pass elif (split_tag_name[4] == b'Dimensions'): self.im[imageIndex].shape.append(self.retrieveTagData(tag_ninfo)) dimCount += 1 pass pass elif (split_tag_name[3] == b'ImageTags'): if (split_tag_name[4] == b'Acquisition'): if ((split_tag_name[(- 1)] == b'Horizontal Flip') and (split_tag_name[(- 4)] == b'Device')): self.im[imageIndex].imageInfo['HorzFlip'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'Vertical Flip') and (split_tag_name[(- 4)] == b'Device')): self.im[imageIndex].imageInfo['VertFlip'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'Diagonal Flip') and (split_tag_name[(- 4)] == b'Device')): self.im[imageIndex].imageInfo['DiagFlip'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'Pixel Size (um)') and (split_tag_name[(- 3)] == b'Device')): self.im[imageIndex].imageInfo['DetectorPixelSize'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'Exposure') and (split_tag_name[(- 2)] == b'Detector')): self.im[imageIndex].imageInfo['Exposure'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'hbin') and (split_tag_name[(- 2)] == b'Detector')): self.im[imageIndex].imageInfo['HorzBin'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'vbin') and (split_tag_name[(- 2)] == b'Detector')): self.im[imageIndex].imageInfo['VertBin'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[4] == b'Calibration'): if (split_tag_name[5] == b'Dose Rate'): if (split_tag_name[6] == b'Calibration'): if (not ('DoseRateCalibration' in self.im[imageIndex].imageInfo)): self.im[imageIndex].imageInfo['DoseRateCalibration'] = [] try: 
self.im[imageIndex].imageInfo['DoseRateCalibration'].append(self.retrieveTagData(tag_ninfo)) except: print('FAILED TO IMPORT DOSERATECALIBRATION') elif (split_tag_name[4] == b'Microscope Info'): if (split_tag_name[5] == b'Actual Magnification'): self.im[imageIndex].imageInfo['ActualMag'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Indicated Magnification'): self.im[imageIndex].imageInfo['NominalMag'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Cs(mm)'): self.im[imageIndex].imageInfo['C3'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Voltage'): self.im[imageIndex].imageInfo['Voltage'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Mode'): self.im[imageIndex].imageInfo['MicroscopeMode'] = self.retrieveTagData(tag_ninfo).astype(np.uint8).tostring().decode('ascii') pass elif (split_tag_name[4] == b'EELS Spectrometer'): if (split_tag_name[5] == b'Aperture index'): self.im[imageIndex].imageInfo['EELSApertureIndex'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Aperture label'): self.im[imageIndex].imageInfo['EELSApertureLabel'] = self.retrieveTagData(tag_ninfo).astype(np.uint8).tostring().decode('ascii') elif (split_tag_name[5] == b'Energy loss (eV)'): self.im[imageIndex].imageInfo['EELSEnergyLoss'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Slit inserted'): self.im[imageIndex].imageInfo['EELSSlitIn'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Slit width (eV)'): self.im[imageIndex].imageInfo['EELSSlitWidth'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[4] == b'Latitude-S'): if (split_tag_name[5] == b'Distance To Focus Position'): self.im[imageIndex].imageInfo['LatSDistanceToFocus'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Intended Defocus'): self.im[imageIndex].imageInfo['LatSIntendedDefocus'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Beam Diameter (nm)'): 
self.im[imageIndex].imageInfo['LatSBeamDiameter'] = self.retrieveTagData(tag_ninfo) self.f.seek((loc_tag + tag_fieldlen)) return
Parse a tag at the given file handle location
mrcz/ioDM.py
parseTag
em-MRCZ/python-mrcz
13
python
def parseTag(self, parent): ' ' tag_namelen = np.fromfile(self.f, dtype='>i2', count=1)[0] if (tag_namelen > 0): tag_name = self.f.read(tag_namelen) else: tag_name = b tag_fieldlen = np.fromfile(self.f, dtype='>i8', count=1)[0] loc_tag = self.f.tell() self.f.read(4) tag_ninfo = np.fromfile(self.f, dtype='>i8', count=1)[0] if self.verbose: print(('Found tag: %s%s with %d elements' % (parent, tag_name, tag_ninfo))) split_tag_name = (parent + tag_name).split(b'/') imageIndex = np.int64(split_tag_name[2]) try: self.im[imageIndex] except IndexError: self.im.append(DMImage()) if (split_tag_name[3] == b'ImageData'): dimCount = 0 if (split_tag_name[4] == b'Calibrations'): if (split_tag_name[5] == b'Brightness'): if (split_tag_name[6] == b'Origin'): self.im[imageIndex].imageInfo['IntensityOrigin'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[6] == b'Scale'): self.im[imageIndex].imageInfo['IntensityScale'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[6] == b'Units'): self.im[imageIndex].imageInfo['IntensityUnits'] = self.retrieveTagData(tag_ninfo).astype(np.uint8).tostring().decode('utf-8') elif (split_tag_name[5] == b'Dimension'): if (np.int64(split_tag_name[6]) == 0): currDim = 'X' elif (np.int64(split_tag_name[6]) == 1): currDim = 'Y' elif (np.int64(split_tag_name[6]) == 2): currDim = 'Z' else: raise ValueError(((('DM4Import: Unknown dimension ' + str(split_tag_name[6])) + ' at location ') + hex(self.f.tell()))) if (split_tag_name[7] == b'Origin'): self.im[imageIndex].imageInfo[(('Dim' + currDim) + 'Origin')] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[7] == b'Scale'): self.im[imageIndex].imageInfo[(('Dim' + currDim) + 'Scale')] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[7] == b'Units'): try: self.im[imageIndex].imageInfo[(('Dim' + currDim) + 'Units')] = self.retrieveTagData(tag_ninfo).astype(np.int16).tostring().decode('utf-8') except: try: self.im[imageIndex].imageInfo[(('Dim' + currDim) + 'Units')] = 
self.retrieveTagData(tag_ninfo).astype(np.int16).tostring().decode('ISO-8859-1') except: pass pass elif (split_tag_name[4] == b'Data'): if self.verbose: print(((('Found image %d' % imageIndex) + ' at offset : ') + str(self.f.tell()))) self.im[imageIndex].imageData = self.retrieveTagData(tag_ninfo) pass elif (split_tag_name[4] == b'Dimensions'): self.im[imageIndex].shape.append(self.retrieveTagData(tag_ninfo)) dimCount += 1 pass pass elif (split_tag_name[3] == b'ImageTags'): if (split_tag_name[4] == b'Acquisition'): if ((split_tag_name[(- 1)] == b'Horizontal Flip') and (split_tag_name[(- 4)] == b'Device')): self.im[imageIndex].imageInfo['HorzFlip'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'Vertical Flip') and (split_tag_name[(- 4)] == b'Device')): self.im[imageIndex].imageInfo['VertFlip'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'Diagonal Flip') and (split_tag_name[(- 4)] == b'Device')): self.im[imageIndex].imageInfo['DiagFlip'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'Pixel Size (um)') and (split_tag_name[(- 3)] == b'Device')): self.im[imageIndex].imageInfo['DetectorPixelSize'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'Exposure') and (split_tag_name[(- 2)] == b'Detector')): self.im[imageIndex].imageInfo['Exposure'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'hbin') and (split_tag_name[(- 2)] == b'Detector')): self.im[imageIndex].imageInfo['HorzBin'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'vbin') and (split_tag_name[(- 2)] == b'Detector')): self.im[imageIndex].imageInfo['VertBin'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[4] == b'Calibration'): if (split_tag_name[5] == b'Dose Rate'): if (split_tag_name[6] == b'Calibration'): if (not ('DoseRateCalibration' in self.im[imageIndex].imageInfo)): self.im[imageIndex].imageInfo['DoseRateCalibration'] = [] try: 
self.im[imageIndex].imageInfo['DoseRateCalibration'].append(self.retrieveTagData(tag_ninfo)) except: print('FAILED TO IMPORT DOSERATECALIBRATION') elif (split_tag_name[4] == b'Microscope Info'): if (split_tag_name[5] == b'Actual Magnification'): self.im[imageIndex].imageInfo['ActualMag'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Indicated Magnification'): self.im[imageIndex].imageInfo['NominalMag'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Cs(mm)'): self.im[imageIndex].imageInfo['C3'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Voltage'): self.im[imageIndex].imageInfo['Voltage'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Mode'): self.im[imageIndex].imageInfo['MicroscopeMode'] = self.retrieveTagData(tag_ninfo).astype(np.uint8).tostring().decode('ascii') pass elif (split_tag_name[4] == b'EELS Spectrometer'): if (split_tag_name[5] == b'Aperture index'): self.im[imageIndex].imageInfo['EELSApertureIndex'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Aperture label'): self.im[imageIndex].imageInfo['EELSApertureLabel'] = self.retrieveTagData(tag_ninfo).astype(np.uint8).tostring().decode('ascii') elif (split_tag_name[5] == b'Energy loss (eV)'): self.im[imageIndex].imageInfo['EELSEnergyLoss'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Slit inserted'): self.im[imageIndex].imageInfo['EELSSlitIn'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Slit width (eV)'): self.im[imageIndex].imageInfo['EELSSlitWidth'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[4] == b'Latitude-S'): if (split_tag_name[5] == b'Distance To Focus Position'): self.im[imageIndex].imageInfo['LatSDistanceToFocus'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Intended Defocus'): self.im[imageIndex].imageInfo['LatSIntendedDefocus'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Beam Diameter (nm)'): 
self.im[imageIndex].imageInfo['LatSBeamDiameter'] = self.retrieveTagData(tag_ninfo) self.f.seek((loc_tag + tag_fieldlen)) return
def parseTag(self, parent): ' ' tag_namelen = np.fromfile(self.f, dtype='>i2', count=1)[0] if (tag_namelen > 0): tag_name = self.f.read(tag_namelen) else: tag_name = b tag_fieldlen = np.fromfile(self.f, dtype='>i8', count=1)[0] loc_tag = self.f.tell() self.f.read(4) tag_ninfo = np.fromfile(self.f, dtype='>i8', count=1)[0] if self.verbose: print(('Found tag: %s%s with %d elements' % (parent, tag_name, tag_ninfo))) split_tag_name = (parent + tag_name).split(b'/') imageIndex = np.int64(split_tag_name[2]) try: self.im[imageIndex] except IndexError: self.im.append(DMImage()) if (split_tag_name[3] == b'ImageData'): dimCount = 0 if (split_tag_name[4] == b'Calibrations'): if (split_tag_name[5] == b'Brightness'): if (split_tag_name[6] == b'Origin'): self.im[imageIndex].imageInfo['IntensityOrigin'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[6] == b'Scale'): self.im[imageIndex].imageInfo['IntensityScale'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[6] == b'Units'): self.im[imageIndex].imageInfo['IntensityUnits'] = self.retrieveTagData(tag_ninfo).astype(np.uint8).tostring().decode('utf-8') elif (split_tag_name[5] == b'Dimension'): if (np.int64(split_tag_name[6]) == 0): currDim = 'X' elif (np.int64(split_tag_name[6]) == 1): currDim = 'Y' elif (np.int64(split_tag_name[6]) == 2): currDim = 'Z' else: raise ValueError(((('DM4Import: Unknown dimension ' + str(split_tag_name[6])) + ' at location ') + hex(self.f.tell()))) if (split_tag_name[7] == b'Origin'): self.im[imageIndex].imageInfo[(('Dim' + currDim) + 'Origin')] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[7] == b'Scale'): self.im[imageIndex].imageInfo[(('Dim' + currDim) + 'Scale')] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[7] == b'Units'): try: self.im[imageIndex].imageInfo[(('Dim' + currDim) + 'Units')] = self.retrieveTagData(tag_ninfo).astype(np.int16).tostring().decode('utf-8') except: try: self.im[imageIndex].imageInfo[(('Dim' + currDim) + 'Units')] = 
self.retrieveTagData(tag_ninfo).astype(np.int16).tostring().decode('ISO-8859-1') except: pass pass elif (split_tag_name[4] == b'Data'): if self.verbose: print(((('Found image %d' % imageIndex) + ' at offset : ') + str(self.f.tell()))) self.im[imageIndex].imageData = self.retrieveTagData(tag_ninfo) pass elif (split_tag_name[4] == b'Dimensions'): self.im[imageIndex].shape.append(self.retrieveTagData(tag_ninfo)) dimCount += 1 pass pass elif (split_tag_name[3] == b'ImageTags'): if (split_tag_name[4] == b'Acquisition'): if ((split_tag_name[(- 1)] == b'Horizontal Flip') and (split_tag_name[(- 4)] == b'Device')): self.im[imageIndex].imageInfo['HorzFlip'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'Vertical Flip') and (split_tag_name[(- 4)] == b'Device')): self.im[imageIndex].imageInfo['VertFlip'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'Diagonal Flip') and (split_tag_name[(- 4)] == b'Device')): self.im[imageIndex].imageInfo['DiagFlip'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'Pixel Size (um)') and (split_tag_name[(- 3)] == b'Device')): self.im[imageIndex].imageInfo['DetectorPixelSize'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'Exposure') and (split_tag_name[(- 2)] == b'Detector')): self.im[imageIndex].imageInfo['Exposure'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'hbin') and (split_tag_name[(- 2)] == b'Detector')): self.im[imageIndex].imageInfo['HorzBin'] = self.retrieveTagData(tag_ninfo) elif ((split_tag_name[(- 1)] == b'vbin') and (split_tag_name[(- 2)] == b'Detector')): self.im[imageIndex].imageInfo['VertBin'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[4] == b'Calibration'): if (split_tag_name[5] == b'Dose Rate'): if (split_tag_name[6] == b'Calibration'): if (not ('DoseRateCalibration' in self.im[imageIndex].imageInfo)): self.im[imageIndex].imageInfo['DoseRateCalibration'] = [] try: 
self.im[imageIndex].imageInfo['DoseRateCalibration'].append(self.retrieveTagData(tag_ninfo)) except: print('FAILED TO IMPORT DOSERATECALIBRATION') elif (split_tag_name[4] == b'Microscope Info'): if (split_tag_name[5] == b'Actual Magnification'): self.im[imageIndex].imageInfo['ActualMag'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Indicated Magnification'): self.im[imageIndex].imageInfo['NominalMag'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Cs(mm)'): self.im[imageIndex].imageInfo['C3'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Voltage'): self.im[imageIndex].imageInfo['Voltage'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Mode'): self.im[imageIndex].imageInfo['MicroscopeMode'] = self.retrieveTagData(tag_ninfo).astype(np.uint8).tostring().decode('ascii') pass elif (split_tag_name[4] == b'EELS Spectrometer'): if (split_tag_name[5] == b'Aperture index'): self.im[imageIndex].imageInfo['EELSApertureIndex'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Aperture label'): self.im[imageIndex].imageInfo['EELSApertureLabel'] = self.retrieveTagData(tag_ninfo).astype(np.uint8).tostring().decode('ascii') elif (split_tag_name[5] == b'Energy loss (eV)'): self.im[imageIndex].imageInfo['EELSEnergyLoss'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Slit inserted'): self.im[imageIndex].imageInfo['EELSSlitIn'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Slit width (eV)'): self.im[imageIndex].imageInfo['EELSSlitWidth'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[4] == b'Latitude-S'): if (split_tag_name[5] == b'Distance To Focus Position'): self.im[imageIndex].imageInfo['LatSDistanceToFocus'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Intended Defocus'): self.im[imageIndex].imageInfo['LatSIntendedDefocus'] = self.retrieveTagData(tag_ninfo) elif (split_tag_name[5] == b'Beam Diameter (nm)'): 
self.im[imageIndex].imageInfo['LatSBeamDiameter'] = self.retrieveTagData(tag_ninfo) self.f.seek((loc_tag + tag_fieldlen)) return<|docstring|>Parse a tag at the given file handle location<|endoftext|>
83ee64f9f602dd4c58614d1b4248efca1f8e2b61acc54e5a653670443a19de9a
def retrieveTagData(self, tag_ninfo): ' Get the actual data from the tag, which is then stored in one of the dicts or imageData ' tag_loc = self.f.tell() tag_infos = np.fromfile(self.f, dtype='>i8', count=tag_ninfo) for K in np.arange(0, tag_infos.size): if (tag_infos[K] == 15): self.f.seek(((tag_loc + (8 * K)) + 16)) struct_fieldcount = np.fromfile(self.f, dtype='>i8', count=1) struct_fielddtype = [] struct_fieldval = [] for J in np.arange(0, struct_fieldcount): np.fromfile(self.f, dtype='>i8', count=1) struct_fielddtype.append(self.getTagDType(np.fromfile(self.f, dtype='>i8', count=1))) for J in np.arange(0, struct_fieldcount): struct_fieldval.append(np.fromfile(self.f, dtype=struct_fielddtype[J], count=1)[0]) return struct_fieldval elif (tag_infos[K] == 20): if self.verbose: print('Found array') try: K += 1 array_dtype = self.getTagDType(tag_infos[K]) K += 1 array_ncount = tag_infos[K] if self.verbose: print(((('Array dtype: ' + array_dtype) + ', array ncount: ') + str(array_ncount))) if (array_ncount > 0): return np.fromfile(self.f, dtype=array_dtype, count=array_ncount) except IndexError: pass pass elif (tag_infos[K] == 9): tag_char = self.f.read(1) if self.verbose: print(('Found char: ' + tag_char)) pass elif (tag_infos[K] == 18): if self.verbose: print('FIXME Found string') pass else: tag_dtype = self.getTagDType(tag_infos[K]) if (tag_dtype != ''): tag_data = np.fromfile(self.f, dtype=tag_dtype, count=1)[0] if self.verbose: print(((('Singleton: ' + tag_dtype) + ': ') + str(tag_data))) return tag_data pass
Get the actual data from the tag, which is then stored in one of the dicts or imageData
mrcz/ioDM.py
retrieveTagData
em-MRCZ/python-mrcz
13
python
def retrieveTagData(self, tag_ninfo): ' ' tag_loc = self.f.tell() tag_infos = np.fromfile(self.f, dtype='>i8', count=tag_ninfo) for K in np.arange(0, tag_infos.size): if (tag_infos[K] == 15): self.f.seek(((tag_loc + (8 * K)) + 16)) struct_fieldcount = np.fromfile(self.f, dtype='>i8', count=1) struct_fielddtype = [] struct_fieldval = [] for J in np.arange(0, struct_fieldcount): np.fromfile(self.f, dtype='>i8', count=1) struct_fielddtype.append(self.getTagDType(np.fromfile(self.f, dtype='>i8', count=1))) for J in np.arange(0, struct_fieldcount): struct_fieldval.append(np.fromfile(self.f, dtype=struct_fielddtype[J], count=1)[0]) return struct_fieldval elif (tag_infos[K] == 20): if self.verbose: print('Found array') try: K += 1 array_dtype = self.getTagDType(tag_infos[K]) K += 1 array_ncount = tag_infos[K] if self.verbose: print(((('Array dtype: ' + array_dtype) + ', array ncount: ') + str(array_ncount))) if (array_ncount > 0): return np.fromfile(self.f, dtype=array_dtype, count=array_ncount) except IndexError: pass pass elif (tag_infos[K] == 9): tag_char = self.f.read(1) if self.verbose: print(('Found char: ' + tag_char)) pass elif (tag_infos[K] == 18): if self.verbose: print('FIXME Found string') pass else: tag_dtype = self.getTagDType(tag_infos[K]) if (tag_dtype != ): tag_data = np.fromfile(self.f, dtype=tag_dtype, count=1)[0] if self.verbose: print(((('Singleton: ' + tag_dtype) + ': ') + str(tag_data))) return tag_data pass
def retrieveTagData(self, tag_ninfo): ' ' tag_loc = self.f.tell() tag_infos = np.fromfile(self.f, dtype='>i8', count=tag_ninfo) for K in np.arange(0, tag_infos.size): if (tag_infos[K] == 15): self.f.seek(((tag_loc + (8 * K)) + 16)) struct_fieldcount = np.fromfile(self.f, dtype='>i8', count=1) struct_fielddtype = [] struct_fieldval = [] for J in np.arange(0, struct_fieldcount): np.fromfile(self.f, dtype='>i8', count=1) struct_fielddtype.append(self.getTagDType(np.fromfile(self.f, dtype='>i8', count=1))) for J in np.arange(0, struct_fieldcount): struct_fieldval.append(np.fromfile(self.f, dtype=struct_fielddtype[J], count=1)[0]) return struct_fieldval elif (tag_infos[K] == 20): if self.verbose: print('Found array') try: K += 1 array_dtype = self.getTagDType(tag_infos[K]) K += 1 array_ncount = tag_infos[K] if self.verbose: print(((('Array dtype: ' + array_dtype) + ', array ncount: ') + str(array_ncount))) if (array_ncount > 0): return np.fromfile(self.f, dtype=array_dtype, count=array_ncount) except IndexError: pass pass elif (tag_infos[K] == 9): tag_char = self.f.read(1) if self.verbose: print(('Found char: ' + tag_char)) pass elif (tag_infos[K] == 18): if self.verbose: print('FIXME Found string') pass else: tag_dtype = self.getTagDType(tag_infos[K]) if (tag_dtype != ): tag_data = np.fromfile(self.f, dtype=tag_dtype, count=1)[0] if self.verbose: print(((('Singleton: ' + tag_dtype) + ': ') + str(tag_data))) return tag_data pass<|docstring|>Get the actual data from the tag, which is then stored in one of the dicts or imageData<|endoftext|>
78bf3ff9c8140b66afed7cbfc0ad3d21fffc52a77ac00e160fedbb4a8ea5d3fd
def discardTag(self): " Quickly parse to the end of tag that we don't care about its information " tag_namelen = np.fromfile(self.f, dtype='>i2', count=1)[0] self.f.seek((self.f.tell() + tag_namelen)) tag_fieldlen = np.fromfile(self.f, dtype='>i8', count=1)[0] self.f.seek((self.f.tell() + tag_fieldlen)) return
Quickly parse to the end of tag that we don't care about its information
mrcz/ioDM.py
discardTag
em-MRCZ/python-mrcz
13
python
def discardTag(self): " " tag_namelen = np.fromfile(self.f, dtype='>i2', count=1)[0] self.f.seek((self.f.tell() + tag_namelen)) tag_fieldlen = np.fromfile(self.f, dtype='>i8', count=1)[0] self.f.seek((self.f.tell() + tag_fieldlen)) return
def discardTag(self): " " tag_namelen = np.fromfile(self.f, dtype='>i2', count=1)[0] self.f.seek((self.f.tell() + tag_namelen)) tag_fieldlen = np.fromfile(self.f, dtype='>i8', count=1)[0] self.f.seek((self.f.tell() + tag_fieldlen)) return<|docstring|>Quickly parse to the end of tag that we don't care about its information<|endoftext|>
c3baee0890559190a66b405c3b6e91b6239cc92e4d41a2b77c4bd9161c7a06aa
def parseTagDir(self, parent): ' Parse a tag directory at the given file handle location ' try: tag_namelen = np.fromfile(self.f, dtype='>i2', count=1)[0] if (tag_namelen > 0): tag_name = self.f.read(tag_namelen) else: tag_name = b'' tag_fieldlen = np.fromfile(self.f, dtype='>i8', count=1)[0] except IndexError: if self.verbose: print('Caught IndexError, trying to recover position in file') return self.f if (not bool(tag_name)): if self.verbose: print('Found empty tag') tag_depth = int((parent.count(b'/') - 1)) tag_name = bytearray(str(self._unnamedIndices[tag_depth]).encode('ascii')) self._unnamedIndices[tag_depth] += 1 self._unnamedIndices[(tag_depth + 1):] = 0 loc_tagdir = self.f.tell() self.f.read(2) ntags_dir = np.fromfile(self.f, dtype='>i8', count=1)[0] if self.verbose: print(((((('Found tag dir ' + str(parent)) + str(tag_name)) + ' with ') + str(ntags_dir)) + ' tags')) for I in np.arange(0, ntags_dir): try: subtag_type = np.fromfile(self.f, dtype='i1', count=1)[0] except IndexError: if self.verbose: print('Caught IndexError, trying to recover') break if (subtag_type == 20): self.parseTagDir(((bytes(parent) + bytes(tag_name)) + b'/')) elif (subtag_type == 21): self.parseTag(((bytes(parent) + bytes(tag_name)) + b'/')) elif (subtag_type == 0): break self.f.seek((loc_tagdir + tag_fieldlen)) return
Parse a tag directory at the given file handle location
mrcz/ioDM.py
parseTagDir
em-MRCZ/python-mrcz
13
python
def parseTagDir(self, parent): ' ' try: tag_namelen = np.fromfile(self.f, dtype='>i2', count=1)[0] if (tag_namelen > 0): tag_name = self.f.read(tag_namelen) else: tag_name = b tag_fieldlen = np.fromfile(self.f, dtype='>i8', count=1)[0] except IndexError: if self.verbose: print('Caught IndexError, trying to recover position in file') return self.f if (not bool(tag_name)): if self.verbose: print('Found empty tag') tag_depth = int((parent.count(b'/') - 1)) tag_name = bytearray(str(self._unnamedIndices[tag_depth]).encode('ascii')) self._unnamedIndices[tag_depth] += 1 self._unnamedIndices[(tag_depth + 1):] = 0 loc_tagdir = self.f.tell() self.f.read(2) ntags_dir = np.fromfile(self.f, dtype='>i8', count=1)[0] if self.verbose: print(((((('Found tag dir ' + str(parent)) + str(tag_name)) + ' with ') + str(ntags_dir)) + ' tags')) for I in np.arange(0, ntags_dir): try: subtag_type = np.fromfile(self.f, dtype='i1', count=1)[0] except IndexError: if self.verbose: print('Caught IndexError, trying to recover') break if (subtag_type == 20): self.parseTagDir(((bytes(parent) + bytes(tag_name)) + b'/')) elif (subtag_type == 21): self.parseTag(((bytes(parent) + bytes(tag_name)) + b'/')) elif (subtag_type == 0): break self.f.seek((loc_tagdir + tag_fieldlen)) return
def parseTagDir(self, parent): ' ' try: tag_namelen = np.fromfile(self.f, dtype='>i2', count=1)[0] if (tag_namelen > 0): tag_name = self.f.read(tag_namelen) else: tag_name = b tag_fieldlen = np.fromfile(self.f, dtype='>i8', count=1)[0] except IndexError: if self.verbose: print('Caught IndexError, trying to recover position in file') return self.f if (not bool(tag_name)): if self.verbose: print('Found empty tag') tag_depth = int((parent.count(b'/') - 1)) tag_name = bytearray(str(self._unnamedIndices[tag_depth]).encode('ascii')) self._unnamedIndices[tag_depth] += 1 self._unnamedIndices[(tag_depth + 1):] = 0 loc_tagdir = self.f.tell() self.f.read(2) ntags_dir = np.fromfile(self.f, dtype='>i8', count=1)[0] if self.verbose: print(((((('Found tag dir ' + str(parent)) + str(tag_name)) + ' with ') + str(ntags_dir)) + ' tags')) for I in np.arange(0, ntags_dir): try: subtag_type = np.fromfile(self.f, dtype='i1', count=1)[0] except IndexError: if self.verbose: print('Caught IndexError, trying to recover') break if (subtag_type == 20): self.parseTagDir(((bytes(parent) + bytes(tag_name)) + b'/')) elif (subtag_type == 21): self.parseTag(((bytes(parent) + bytes(tag_name)) + b'/')) elif (subtag_type == 0): break self.f.seek((loc_tagdir + tag_fieldlen)) return<|docstring|>Parse a tag directory at the given file handle location<|endoftext|>
9dbc5a343e409a092eaceab0b2e93f9ff14a9c8cdd0756aa540b411c1f840092
def execute(self, image): 'Forward propagation.\n \n Args:\n image: images, a array of dimensions (N, 3, 300, 300)\n Return: \n lower-level feature maps conv4_3 and conv7 \n ' out = nn.relu(self.conv1_1(image)) out = nn.relu(self.conv1_2(out)) out = self.pool1(out) out = nn.relu(self.conv2_1(out)) out = nn.relu(self.conv2_2(out)) out = self.pool2(out) out = nn.relu(self.conv3_1(out)) out = nn.relu(self.conv3_2(out)) out = nn.relu(self.conv3_3(out)) out = self.pool3(out) out = nn.relu(self.conv4_1(out)) out = nn.relu(self.conv4_2(out)) out = nn.relu(self.conv4_3(out)) conv4_3_feats = out out = self.pool4(out) out = nn.relu(self.conv5_1(out)) out = nn.relu(self.conv5_2(out)) out = nn.relu(self.conv5_3(out)) out = self.pool5(out) out = nn.relu(self.conv6(out)) conv7_feats = nn.relu(self.conv7(out)) return (conv4_3_feats, conv7_feats)
Forward propagation. Args: image: images, a array of dimensions (N, 3, 300, 300) Return: lower-level feature maps conv4_3 and conv7
ssd/model.py
execute
qixuxiang/jittor-models
4
python
def execute(self, image): 'Forward propagation.\n \n Args:\n image: images, a array of dimensions (N, 3, 300, 300)\n Return: \n lower-level feature maps conv4_3 and conv7 \n ' out = nn.relu(self.conv1_1(image)) out = nn.relu(self.conv1_2(out)) out = self.pool1(out) out = nn.relu(self.conv2_1(out)) out = nn.relu(self.conv2_2(out)) out = self.pool2(out) out = nn.relu(self.conv3_1(out)) out = nn.relu(self.conv3_2(out)) out = nn.relu(self.conv3_3(out)) out = self.pool3(out) out = nn.relu(self.conv4_1(out)) out = nn.relu(self.conv4_2(out)) out = nn.relu(self.conv4_3(out)) conv4_3_feats = out out = self.pool4(out) out = nn.relu(self.conv5_1(out)) out = nn.relu(self.conv5_2(out)) out = nn.relu(self.conv5_3(out)) out = self.pool5(out) out = nn.relu(self.conv6(out)) conv7_feats = nn.relu(self.conv7(out)) return (conv4_3_feats, conv7_feats)
def execute(self, image): 'Forward propagation.\n \n Args:\n image: images, a array of dimensions (N, 3, 300, 300)\n Return: \n lower-level feature maps conv4_3 and conv7 \n ' out = nn.relu(self.conv1_1(image)) out = nn.relu(self.conv1_2(out)) out = self.pool1(out) out = nn.relu(self.conv2_1(out)) out = nn.relu(self.conv2_2(out)) out = self.pool2(out) out = nn.relu(self.conv3_1(out)) out = nn.relu(self.conv3_2(out)) out = nn.relu(self.conv3_3(out)) out = self.pool3(out) out = nn.relu(self.conv4_1(out)) out = nn.relu(self.conv4_2(out)) out = nn.relu(self.conv4_3(out)) conv4_3_feats = out out = self.pool4(out) out = nn.relu(self.conv5_1(out)) out = nn.relu(self.conv5_2(out)) out = nn.relu(self.conv5_3(out)) out = self.pool5(out) out = nn.relu(self.conv6(out)) conv7_feats = nn.relu(self.conv7(out)) return (conv4_3_feats, conv7_feats)<|docstring|>Forward propagation. Args: image: images, a array of dimensions (N, 3, 300, 300) Return: lower-level feature maps conv4_3 and conv7<|endoftext|>
cd45b6f9fce636c73dbd93cc310d49d0d516f2d882d5b436550122cb7695f53b
def init_conv2d(self): ' Initialize convolution parameters. ' for c in self.children(): if isinstance(c, nn.Conv): init.gauss_(c.weight)
Initialize convolution parameters.
ssd/model.py
init_conv2d
qixuxiang/jittor-models
4
python
def init_conv2d(self): ' ' for c in self.children(): if isinstance(c, nn.Conv): init.gauss_(c.weight)
def init_conv2d(self): ' ' for c in self.children(): if isinstance(c, nn.Conv): init.gauss_(c.weight)<|docstring|>Initialize convolution parameters.<|endoftext|>
976d45987a5e09102be9546aef0522c072b973b2d3ad5e677d50dafb13300430
def execute(self, conv7_feats): 'Forward propagation.\n\n Args:\n conv7_feats: lower-level conv7 feature map, a array of dimensions (N, 1024, 19, 19)\n Return: \n higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2\n ' out = nn.relu(self.conv8_1(conv7_feats)) out = nn.relu(self.conv8_2(out)) conv8_2_feats = out out = nn.relu(self.conv9_1(out)) out = nn.relu(self.conv9_2(out)) conv9_2_feats = out out = nn.relu(self.conv10_1(out)) out = nn.relu(self.conv10_2(out)) conv10_2_feats = out out = nn.relu(self.conv11_1(out)) conv11_2_feats = nn.relu(self.conv11_2(out)) return (conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats)
Forward propagation. Args: conv7_feats: lower-level conv7 feature map, a array of dimensions (N, 1024, 19, 19) Return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2
ssd/model.py
execute
qixuxiang/jittor-models
4
python
def execute(self, conv7_feats): 'Forward propagation.\n\n Args:\n conv7_feats: lower-level conv7 feature map, a array of dimensions (N, 1024, 19, 19)\n Return: \n higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2\n ' out = nn.relu(self.conv8_1(conv7_feats)) out = nn.relu(self.conv8_2(out)) conv8_2_feats = out out = nn.relu(self.conv9_1(out)) out = nn.relu(self.conv9_2(out)) conv9_2_feats = out out = nn.relu(self.conv10_1(out)) out = nn.relu(self.conv10_2(out)) conv10_2_feats = out out = nn.relu(self.conv11_1(out)) conv11_2_feats = nn.relu(self.conv11_2(out)) return (conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats)
def execute(self, conv7_feats): 'Forward propagation.\n\n Args:\n conv7_feats: lower-level conv7 feature map, a array of dimensions (N, 1024, 19, 19)\n Return: \n higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2\n ' out = nn.relu(self.conv8_1(conv7_feats)) out = nn.relu(self.conv8_2(out)) conv8_2_feats = out out = nn.relu(self.conv9_1(out)) out = nn.relu(self.conv9_2(out)) conv9_2_feats = out out = nn.relu(self.conv10_1(out)) out = nn.relu(self.conv10_2(out)) conv10_2_feats = out out = nn.relu(self.conv11_1(out)) conv11_2_feats = nn.relu(self.conv11_2(out)) return (conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats)<|docstring|>Forward propagation. Args: conv7_feats: lower-level conv7 feature map, a array of dimensions (N, 1024, 19, 19) Return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2<|endoftext|>
bfc1e940a73cf5bf11a3d63c91a087d873da2cd58520211c59301a54eb543ed3
def __init__(self, n_classes):
    """Build the SSD300 prediction heads.

    One localization conv (4 offsets per prior) and one classification conv
    (n_classes scores per prior) is attached to each of the six feature maps.

    Args:
        n_classes: number of different types of objects
    """
    super(PredictionConvolutions, self).__init__()
    self.n_classes = n_classes

    # Priors per spatial position on each feature map (paper values).
    n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6,
               'conv9_2': 6, 'conv10_2': 4, 'conv11_2': 4}
    # Input channels of each feature map.
    in_channels = {'conv4_3': 512, 'conv7': 1024, 'conv8_2': 512,
                   'conv9_2': 256, 'conv10_2': 256, 'conv11_2': 256}

    # Localization heads: predict 4 box offsets per prior.
    for fmap, channels in in_channels.items():
        setattr(self, 'loc_' + fmap,
                nn.Conv(channels, n_boxes[fmap] * 4, kernel_size=3, padding=1))

    # Classification heads: predict n_classes scores per prior.
    for fmap, channels in in_channels.items():
        setattr(self, 'cl_' + fmap,
                nn.Conv(channels, n_boxes[fmap] * n_classes, kernel_size=3, padding=1))

    self.init_conv2d()
cd45b6f9fce636c73dbd93cc310d49d0d516f2d882d5b436550122cb7695f53b
def init_conv2d(self):
    """Initialize every conv layer's weights with a Gaussian distribution."""
    for child in self.children():
        if not isinstance(child, nn.Conv):
            continue
        init.gauss_(child.weight)
67dbebcad2b784b2a50cc85f2aa863ab7e0bd696bd4f475adfb9cce74e70a5d7
def execute(self, conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats):
    """Forward propagation.

    Args:
        conv4_3_feats: conv4_3 feature map, an array of dimensions (N, 512, 38, 38)
        conv7_feats: conv7 feature map, an array of dimensions (N, 1024, 19, 19)
        conv8_2_feats: conv8_2 feature map, an array of dimensions (N, 512, 10, 10)
        conv9_2_feats: conv9_2 feature map, an array of dimensions (N, 256, 5, 5)
        conv10_2_feats: conv10_2 feature map, an array of dimensions (N, 256, 3, 3)
        conv11_2_feats: conv11_2 feature map, an array of dimensions (N, 256, 1, 1)
    Return:
        8732 locations and class scores (i.e. w.r.t each prior box) for each image
    """
    batch_size = conv4_3_feats.shape[0]

    def _head(conv, feats, last_dim):
        # Apply a prediction conv, then (N, C, H, W) -> (N, H, W, C)
        # -> (N, H*W*n_boxes, last_dim) so priors line up across maps.
        out = conv(feats)
        out = jt.transpose(out, [0, 2, 3, 1])
        return jt.reshape(out, [batch_size, -1, last_dim])

    feature_maps = (conv4_3_feats, conv7_feats, conv8_2_feats,
                    conv9_2_feats, conv10_2_feats, conv11_2_feats)
    loc_heads = (self.loc_conv4_3, self.loc_conv7, self.loc_conv8_2,
                 self.loc_conv9_2, self.loc_conv10_2, self.loc_conv11_2)
    cls_heads = (self.cl_conv4_3, self.cl_conv7, self.cl_conv8_2,
                 self.cl_conv9_2, self.cl_conv10_2, self.cl_conv11_2)

    # Concatenate per-map predictions in the fixed conv4_3 .. conv11_2 order.
    locs = jt.contrib.concat(
        [_head(conv, feats, 4) for conv, feats in zip(loc_heads, feature_maps)], dim=1)
    classes_scores = jt.contrib.concat(
        [_head(conv, feats, self.n_classes) for conv, feats in zip(cls_heads, feature_maps)], dim=1)
    return (locs, classes_scores)
535c7944fa47403869bc381897bcc3ce49a7028df38344aea820765ce7ac13ed
def execute(self, image):
    """Forward propagation.

    Args:
        image: images, an array of dimensions (N, 3, 300, 300)
    Return:
        8732 locations and class scores (i.e. w.r.t each prior box) for each image
    """
    # Lower- and mid-level feature maps from the VGG base network.
    conv4_3_feats, conv7_feats = self.base(image)

    # L2-normalize conv4_3 along channels, then rescale with a learned
    # per-channel factor (as in the SSD paper).
    l2_norm = conv4_3_feats.sqr().sum(dim=1, keepdims=True).sqrt()
    conv4_3_feats = (conv4_3_feats / l2_norm) * self.rescale_factors

    # Higher-level feature maps from the auxiliary convolutions.
    conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats = self.aux_convs(conv7_feats)

    # Predict box offsets and class scores from all six feature maps.
    return self.pred_convs(conv4_3_feats, conv7_feats, conv8_2_feats,
                           conv9_2_feats, conv10_2_feats, conv11_2_feats)
ee21974f84f10e080c0f67c24b85f9974c1b9e26c11ba9d69d0b8f42e4208863
def create_prior_boxes(self):
    """Create the 8732 prior (default) boxes for the SSD300, as defined in the paper.

    Return:
        prior boxes in center-size coordinates, an array of dimensions (8732, 4)
    """
    # Per-feature-map grid size, object scale, and aspect ratios (paper values).
    fmap_names = ['conv4_3', 'conv7', 'conv8_2', 'conv9_2', 'conv10_2', 'conv11_2']
    fmap_dims = dict(zip(fmap_names, [38, 19, 10, 5, 3, 1]))
    obj_scales = dict(zip(fmap_names, [0.1, 0.2, 0.375, 0.55, 0.725, 0.9]))
    aspect_ratios = {
        'conv4_3': [1.0, 2.0, 0.5],
        'conv7': [1.0, 2.0, 3.0, 0.5, 0.333],
        'conv8_2': [1.0, 2.0, 3.0, 0.5, 0.333],
        'conv9_2': [1.0, 2.0, 3.0, 0.5, 0.333],
        'conv10_2': [1.0, 2.0, 0.5],
        'conv11_2': [1.0, 2.0, 0.5],
    }

    prior_boxes = []
    for k, fmap in enumerate(fmap_names):
        dim = fmap_dims[fmap]
        scale = obj_scales[fmap]
        for i in range(dim):
            for j in range(dim):
                # Box center, in fractional image coordinates.
                cx = (j + 0.5) / dim
                cy = (i + 0.5) / dim
                for ratio in aspect_ratios[fmap]:
                    prior_boxes.append([cx, cy, scale * sqrt(ratio), scale / sqrt(ratio)])
                    # For ratio 1, also add a prior at the geometric mean of this
                    # scale and the next map's scale (1.0 for the last map).
                    if ratio == 1.0:
                        if k + 1 < len(fmap_names):
                            extra_scale = sqrt(scale * obj_scales[fmap_names[k + 1]])
                        else:
                            extra_scale = 1.0
                        prior_boxes.append([cx, cy, extra_scale, extra_scale])

    # Clip so every prior lies inside the image.
    return np.clip(np.array(prior_boxes), 0.0, 1.0)
f9666465928d96cb9fe9a846f5ed5d681f321ab1d067d3c224a19a84bd0aabad
def detect_objects(self, predicted_locs, predicted_scores, min_score, max_overlap, top_k):
    """Decipher the 8732 locations and class scores (output of the SSD300) to detect objects.

    For each class, perform Non-Maximum Suppression (NMS) on boxes that are
    above a minimum score threshold.

    Args:
        predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes,
            a tensor of dimensions (N, 8732, 4)
        predicted_scores: class scores for each of the encoded locations/boxes,
            a tensor of dimensions (N, 8732, n_classes)
        min_score: minimum threshold for a box to be considered a match for a certain class
        max_overlap: maximum overlap two boxes can have so that the one with
            the lower score is not suppressed via NMS
        top_k: if there are a lot of resulting detections across all classes,
            keep only the top 'k'
    Return:
        detections (boxes, labels, and scores), lists of length batch_size
    """
    batch_size = predicted_locs.shape[0]
    n_priors = self.priors_cxcy.shape[0]
    predicted_scores = nn.softmax(predicted_scores, dim=2)

    all_images_boxes = list()
    all_images_labels = list()
    all_images_scores = list()

    # Move to host-side numpy data for the NMS loops.
    predicted_locs = predicted_locs.data
    predicted_scores = predicted_scores.data
    assert n_priors == predicted_locs.shape[1]

    for i in range(batch_size):
        # Decode offsets w.r.t. the priors into boundary (xy) coordinates.
        decoded_locs = cxcy_to_xy(gcxgcy_to_cxcy(predicted_locs[i], self.priors_cxcy))

        image_boxes = list()
        image_labels = list()
        image_scores = list()

        for c in range(1, self.n_classes):  # class 0 is background; skip it
            # FIX: original text had the invalid subscript `[(:, c)]`
            # (extraction artifact); the correct column slice is `[:, c]`.
            class_scores = predicted_scores[i][:, c]
            score_above_min_score = class_scores >= min_score
            n_above_min_score = score_above_min_score.sum()
            if n_above_min_score == 0:
                continue

            class_scores = class_scores[score_above_min_score]
            class_decoded_locs = decoded_locs[score_above_min_score]

            # Sort candidates by descending score.
            sort_ind = np.argsort(-class_scores, axis=0)
            class_scores = class_scores[sort_ind]
            class_decoded_locs = class_decoded_locs[sort_ind]

            # Pairwise IoU between all surviving boxes of this class.
            overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs)

            # Greedy NMS: 1 marks a suppressed box.
            suppress = np.zeros(n_above_min_score).astype('int')
            for box in range(class_decoded_locs.shape[0]):
                if suppress[box] == 1:
                    continue
                # Suppress everything overlapping this box too much,
                # but never the box itself.
                suppress = np.maximum(suppress, overlap[box] > max_overlap)
                suppress[box] = 0

            keep = (1 - suppress).astype('bool')
            image_boxes.append(class_decoded_locs[keep])
            image_labels.append(int((1 - suppress).sum()) * [c])
            image_scores.append(class_scores[keep])

        # If no object in any class is found, store a single 'background' placeholder.
        if len(image_boxes) == 0:
            image_boxes.append(np.array([[0.0, 0.0, 1.0, 1.0]]))
            image_labels.append(np.array([0]))
            image_scores.append(np.array([0.0]))

        image_boxes = np.concatenate(image_boxes, 0)
        image_labels = np.concatenate(image_labels, 0)
        image_scores = np.concatenate(image_scores, 0)

        # Keep only the top_k highest-scoring detections overall.
        n_objects = image_scores.shape[0]
        if n_objects > top_k:
            sort_ind = np.argsort(-image_scores, axis=0)
            image_scores = image_scores[sort_ind][:top_k]
            image_boxes = image_boxes[sort_ind][:top_k]
            image_labels = image_labels[sort_ind][:top_k]

        all_images_boxes.append(image_boxes)
        all_images_labels.append(image_labels)
        all_images_scores.append(image_scores)

    return (all_images_boxes, all_images_labels, all_images_scores)
5e59bc962f49428a9256cbfe0bee99326f93e7ff328871d1719bca5730c12001
def execute(self, predicted_locs, predicted_scores, boxes, labels): ' Forward propagation.\n\n Args:\n predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)\n predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)\n boxes: true object bounding boxes in boundary coordinates, a list of N tensors\n labels: true object labels, a list of N tensors\n Return: multibox loss, a scalar\n ' batch_size = predicted_locs.shape[0] n_priors = self.priors_cxcy.shape[0] n_classes = predicted_scores.shape[2] assert (n_priors == predicted_locs.shape[1] == predicted_scores.shape[1]) true_locs = np.zeros((batch_size, n_priors, 4)) true_classes = np.zeros((batch_size, n_priors)) for i in range(batch_size): n_objects = boxes[i].shape[0] overlap = find_jaccard_overlap(boxes[i], self.priors_xy) (object_for_each_prior, overlap_for_each_prior) = argmax(overlap, axis=0) (prior_for_each_object, _) = argmax(overlap, axis=1) object_for_each_prior[prior_for_each_object] = range(n_objects) overlap_for_each_prior[prior_for_each_object] = 1.0 label_for_each_prior = labels[i][object_for_each_prior] label_for_each_prior[(overlap_for_each_prior < self.threshold)] = 0 true_classes[i] = label_for_each_prior true_locs[i] = cxcy_to_gcxgcy(xy_to_cxcy(boxes[i][object_for_each_prior]), self.priors_cxcy) true_classes = jt.array(true_classes).float32().stop_grad() true_locs = jt.array(true_locs).float32().stop_grad() positive_priors = (true_classes != 0) loc_loss = self.smooth_l1((predicted_locs * positive_priors.broadcast([1, 1, 4], [2])), (true_locs * positive_priors.broadcast([1, 1, 4], [2]))) loc_loss /= (positive_priors.float32().sum() * 4) n_positives = positive_priors.float32().sum(1) n_hard_negatives = (self.neg_pos_ratio * n_positives) conf_loss_all = self.cross_entropy(jt.reshape(predicted_scores, [(- 1), n_classes]), jt.reshape(true_classes, [(- 1)])) conf_loss_all = 
jt.reshape(conf_loss_all, [batch_size, n_priors]) conf_loss_pos = (conf_loss_all * positive_priors) conf_loss_neg = (conf_loss_all * (1 - positive_priors)) (_, conf_loss_neg) = conf_loss_neg.argsort(dim=1, descending=True) hardness_ranks = jt.array(range(n_priors)).broadcast([conf_loss_neg.shape[0], conf_loss_neg.shape[1]], [0]) hard_negatives = (hardness_ranks < n_hard_negatives.broadcast(hardness_ranks.shape, [1])) conf_loss_hard_neg = (conf_loss_neg * hard_negatives) conf_loss = ((conf_loss_hard_neg.sum() + conf_loss_pos.sum()) / n_positives.float32().sum()) return ((conf_loss + (self.alpha * loc_loss)), conf_loss, loc_loss)
Forward propagation. Args: predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4) predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes) boxes: true object bounding boxes in boundary coordinates, a list of N tensors labels: true object labels, a list of N tensors Return: multibox loss, a scalar
ssd/model.py
execute
qixuxiang/jittor-models
4
python
def execute(self, predicted_locs, predicted_scores, boxes, labels): ' Forward propagation.\n\n Args:\n predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)\n predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)\n boxes: true object bounding boxes in boundary coordinates, a list of N tensors\n labels: true object labels, a list of N tensors\n Return: multibox loss, a scalar\n ' batch_size = predicted_locs.shape[0] n_priors = self.priors_cxcy.shape[0] n_classes = predicted_scores.shape[2] assert (n_priors == predicted_locs.shape[1] == predicted_scores.shape[1]) true_locs = np.zeros((batch_size, n_priors, 4)) true_classes = np.zeros((batch_size, n_priors)) for i in range(batch_size): n_objects = boxes[i].shape[0] overlap = find_jaccard_overlap(boxes[i], self.priors_xy) (object_for_each_prior, overlap_for_each_prior) = argmax(overlap, axis=0) (prior_for_each_object, _) = argmax(overlap, axis=1) object_for_each_prior[prior_for_each_object] = range(n_objects) overlap_for_each_prior[prior_for_each_object] = 1.0 label_for_each_prior = labels[i][object_for_each_prior] label_for_each_prior[(overlap_for_each_prior < self.threshold)] = 0 true_classes[i] = label_for_each_prior true_locs[i] = cxcy_to_gcxgcy(xy_to_cxcy(boxes[i][object_for_each_prior]), self.priors_cxcy) true_classes = jt.array(true_classes).float32().stop_grad() true_locs = jt.array(true_locs).float32().stop_grad() positive_priors = (true_classes != 0) loc_loss = self.smooth_l1((predicted_locs * positive_priors.broadcast([1, 1, 4], [2])), (true_locs * positive_priors.broadcast([1, 1, 4], [2]))) loc_loss /= (positive_priors.float32().sum() * 4) n_positives = positive_priors.float32().sum(1) n_hard_negatives = (self.neg_pos_ratio * n_positives) conf_loss_all = self.cross_entropy(jt.reshape(predicted_scores, [(- 1), n_classes]), jt.reshape(true_classes, [(- 1)])) conf_loss_all = 
jt.reshape(conf_loss_all, [batch_size, n_priors]) conf_loss_pos = (conf_loss_all * positive_priors) conf_loss_neg = (conf_loss_all * (1 - positive_priors)) (_, conf_loss_neg) = conf_loss_neg.argsort(dim=1, descending=True) hardness_ranks = jt.array(range(n_priors)).broadcast([conf_loss_neg.shape[0], conf_loss_neg.shape[1]], [0]) hard_negatives = (hardness_ranks < n_hard_negatives.broadcast(hardness_ranks.shape, [1])) conf_loss_hard_neg = (conf_loss_neg * hard_negatives) conf_loss = ((conf_loss_hard_neg.sum() + conf_loss_pos.sum()) / n_positives.float32().sum()) return ((conf_loss + (self.alpha * loc_loss)), conf_loss, loc_loss)
def execute(self, predicted_locs, predicted_scores, boxes, labels): ' Forward propagation.\n\n Args:\n predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)\n predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)\n boxes: true object bounding boxes in boundary coordinates, a list of N tensors\n labels: true object labels, a list of N tensors\n Return: multibox loss, a scalar\n ' batch_size = predicted_locs.shape[0] n_priors = self.priors_cxcy.shape[0] n_classes = predicted_scores.shape[2] assert (n_priors == predicted_locs.shape[1] == predicted_scores.shape[1]) true_locs = np.zeros((batch_size, n_priors, 4)) true_classes = np.zeros((batch_size, n_priors)) for i in range(batch_size): n_objects = boxes[i].shape[0] overlap = find_jaccard_overlap(boxes[i], self.priors_xy) (object_for_each_prior, overlap_for_each_prior) = argmax(overlap, axis=0) (prior_for_each_object, _) = argmax(overlap, axis=1) object_for_each_prior[prior_for_each_object] = range(n_objects) overlap_for_each_prior[prior_for_each_object] = 1.0 label_for_each_prior = labels[i][object_for_each_prior] label_for_each_prior[(overlap_for_each_prior < self.threshold)] = 0 true_classes[i] = label_for_each_prior true_locs[i] = cxcy_to_gcxgcy(xy_to_cxcy(boxes[i][object_for_each_prior]), self.priors_cxcy) true_classes = jt.array(true_classes).float32().stop_grad() true_locs = jt.array(true_locs).float32().stop_grad() positive_priors = (true_classes != 0) loc_loss = self.smooth_l1((predicted_locs * positive_priors.broadcast([1, 1, 4], [2])), (true_locs * positive_priors.broadcast([1, 1, 4], [2]))) loc_loss /= (positive_priors.float32().sum() * 4) n_positives = positive_priors.float32().sum(1) n_hard_negatives = (self.neg_pos_ratio * n_positives) conf_loss_all = self.cross_entropy(jt.reshape(predicted_scores, [(- 1), n_classes]), jt.reshape(true_classes, [(- 1)])) conf_loss_all = 
jt.reshape(conf_loss_all, [batch_size, n_priors]) conf_loss_pos = (conf_loss_all * positive_priors) conf_loss_neg = (conf_loss_all * (1 - positive_priors)) (_, conf_loss_neg) = conf_loss_neg.argsort(dim=1, descending=True) hardness_ranks = jt.array(range(n_priors)).broadcast([conf_loss_neg.shape[0], conf_loss_neg.shape[1]], [0]) hard_negatives = (hardness_ranks < n_hard_negatives.broadcast(hardness_ranks.shape, [1])) conf_loss_hard_neg = (conf_loss_neg * hard_negatives) conf_loss = ((conf_loss_hard_neg.sum() + conf_loss_pos.sum()) / n_positives.float32().sum()) return ((conf_loss + (self.alpha * loc_loss)), conf_loss, loc_loss)<|docstring|>Forward propagation. Args: predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4) predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes) boxes: true object bounding boxes in boundary coordinates, a list of N tensors labels: true object labels, a list of N tensors Return: multibox loss, a scalar<|endoftext|>
8e67c285bd57da4b0bbe6bb11ba16d43902c55882f0d04e7a1a9038a9290f005
def index(request): ' A view to render index.html page ' return render(request, 'home/index.html')
A view to render index.html page
home/views.py
index
kevin-ci/janeric2
1
python
def index(request): ' ' return render(request, 'home/index.html')
def index(request): ' ' return render(request, 'home/index.html')<|docstring|>A view to render index.html page<|endoftext|>
28b5e70f6079a50f8c958ff7ec5abd7d1131ea9b3e44cb8752b4ace919b06dbf
def __virtual__(): '\n Only make these states available if Open vSwitch module is available.\n ' if ('openvswitch.port_add' in __salt__): return True return (False, 'openvswitch module could not be loaded')
Only make these states available if Open vSwitch module is available.
salt/states/openvswitch_port.py
__virtual__
Flowdalic/salt
9,425
python
def __virtual__(): '\n \n ' if ('openvswitch.port_add' in __salt__): return True return (False, 'openvswitch module could not be loaded')
def __virtual__(): '\n \n ' if ('openvswitch.port_add' in __salt__): return True return (False, 'openvswitch module could not be loaded')<|docstring|>Only make these states available if Open vSwitch module is available.<|endoftext|>
fabed0db39f55d395e1b26ed97fed86012b45148e7e3a2e2faef17c75fb17d49
def present(name, bridge, tunnel_type=None, id=None, remote=None, dst_port=None, internal=False): "\n Ensures that the named port exists on bridge, eventually creates it.\n\n Args:\n name: The name of the port.\n bridge: The name of the bridge.\n tunnel_type: Optional type of interface to create, currently supports: vlan, vxlan and gre.\n id: Optional tunnel's key.\n remote: Remote endpoint's IP address.\n dst_port: Port to use when creating tunnelport in the switch.\n internal: Create an internal port if one does not exist\n\n " ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} tunnel_types = ('vlan', 'vxlan', 'gre') if (tunnel_type and (tunnel_type not in tunnel_types)): raise TypeError('The optional type argument must be one of these values: {}.'.format(str(tunnel_types))) bridge_exists = __salt__['openvswitch.bridge_exists'](bridge) port_list = [] if bridge_exists: port_list = __salt__['openvswitch.port_list'](bridge) comments = {} comments['comment_bridge_notexists'] = 'Bridge {} does not exist.'.format(bridge) comments['comment_port_exists'] = 'Port {} already exists.'.format(name) comments['comment_port_created'] = 'Port {} created on bridge {}.'.format(name, bridge) comments['comment_port_notcreated'] = 'Unable to create port {} on bridge {}.'.format(name, bridge) comments['changes_port_created'] = {name: {'old': 'No port named {} present.'.format(name), 'new': 'Created port {1} on bridge {0}.'.format(bridge, name)}} comments['comment_port_internal'] = 'Port {} already exists, but interface type has been changed to internal.'.format(name) comments['changes_port_internal'] = {'internal': {'old': False, 'new': True}} comments['comment_port_internal_not_changed'] = 'Port {} already exists, but the interface type could not be changed to internal.'.format(name) if tunnel_type: comments['comment_invalid_ip'] = 'Remote is not valid ip address.' 
if (tunnel_type == 'vlan'): comments['comment_vlan_invalid_id'] = 'VLANs id must be between 0 and 4095.' comments['comment_vlan_invalid_name'] = 'Could not find network interface {}.'.format(name) comments['comment_vlan_port_exists'] = 'Port {} with access to VLAN {} already exists on bridge {}.'.format(name, id, bridge) comments['comment_vlan_created'] = 'Created port {} with access to VLAN {} on bridge {}.'.format(name, id, bridge) comments['comment_vlan_notcreated'] = 'Unable to create port {} with access to VLAN {} on bridge {}.'.format(name, id, bridge) comments['changes_vlan_created'] = {name: {'old': 'No port named {} with access to VLAN {} present on bridge {} present.'.format(name, id, bridge), 'new': 'Created port {1} with access to VLAN {2} on bridge {0}.'.format(bridge, name, id)}} elif (tunnel_type == 'gre'): comments['comment_gre_invalid_id'] = 'Id of GRE tunnel must be an unsigned 32-bit integer.' comments['comment_gre_interface_exists'] = 'GRE tunnel interface {} with rempte ip {} and key {} already exists on bridge {}.'.format(name, remote, id, bridge) comments['comment_gre_created'] = 'Created GRE tunnel interface {} with remote ip {} and key {} on bridge {}.'.format(name, remote, id, bridge) comments['comment_gre_notcreated'] = 'Unable to create GRE tunnel interface {} with remote ip {} and key {} on bridge {}.'.format(name, remote, id, bridge) comments['changes_gre_created'] = {name: {'old': 'No GRE tunnel interface {} with remote ip {} and key {} on bridge {} present.'.format(name, remote, id, bridge), 'new': 'Created GRE tunnel interface {} with remote ip {} and key {} on bridge {}.'.format(name, remote, id, bridge)}} elif (tunnel_type == 'vxlan'): comments['comment_dstport'] = (((' (dst_port' + str(dst_port)) + ')') if (0 < dst_port <= 65535) else '') comments['comment_vxlan_invalid_id'] = 'Id of VXLAN tunnel must be an unsigned 64-bit integer.' 
comments['comment_vxlan_interface_exists'] = 'VXLAN tunnel interface {} with rempte ip {} and key {} already exists on bridge {}{}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['comment_vxlan_created'] = 'Created VXLAN tunnel interface {} with remote ip {} and key {} on bridge {}{}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['comment_vxlan_notcreated'] = 'Unable to create VXLAN tunnel interface {} with remote ip {} and key {} on bridge {}{}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['changes_vxlan_created'] = {name: {'old': 'No VXLAN tunnel interface {} with remote ip {} and key {} on bridge {}{} present.'.format(name, remote, id, bridge, comments['comment_dstport']), 'new': 'Created VXLAN tunnel interface {} with remote ip {} and key {} on bridge {}{}.'.format(name, remote, id, bridge, comments['comment_dstport'])}} def _check_vlan(): tag = __salt__['openvswitch.port_get_tag'](name) interfaces = __salt__['network.interfaces']() if (not (0 <= id <= 4095)): ret['result'] = False ret['comment'] = comments['comment_vlan_invalid_id'] elif ((not internal) and (name not in interfaces)): ret['result'] = False ret['comment'] = comments['comment_vlan_invalid_name'] elif (tag and (name in port_list)): try: if (int(tag[0]) == id): ret['result'] = True ret['comment'] = comments['comment_vlan_port_exists'] except (ValueError, KeyError): pass def _check_gre(): interface_options = __salt__['openvswitch.interface_get_options'](name) interface_type = __salt__['openvswitch.interface_get_type'](name) if (not (0 <= id <= (2 ** 32))): ret['result'] = False ret['comment'] = comments['comment_gre_invalid_id'] elif (not __salt__['dig.check_ip'](remote)): ret['result'] = False ret['comment'] = comments['comment_invalid_ip'] elif (interface_options and interface_type and (name in port_list)): interface_attroptions = (((('{key="' + str(id)) + '", remote_ip="') + str(remote)) + '"}') try: if 
((interface_type[0] == 'gre') and (interface_options[0] == interface_attroptions)): ret['result'] = True ret['comment'] = comments['comment_gre_interface_exists'] except KeyError: pass def _check_vxlan(): interface_options = __salt__['openvswitch.interface_get_options'](name) interface_type = __salt__['openvswitch.interface_get_type'](name) if (not (0 <= id <= (2 ** 64))): ret['result'] = False ret['comment'] = comments['comment_vxlan_invalid_id'] elif (not __salt__['dig.check_ip'](remote)): ret['result'] = False ret['comment'] = comments['comment_invalid_ip'] elif (interface_options and interface_type and (name in port_list)): opt_port = ((('dst_port="' + str(dst_port)) + '", ') if (0 < dst_port <= 65535) else '') interface_attroptions = (((('{{{0}key="'.format(opt_port) + str(id)) + '", remote_ip="') + str(remote)) + '"}') try: if ((interface_type[0] == 'vxlan') and (interface_options[0] == interface_attroptions)): ret['result'] = True ret['comment'] = comments['comment_vxlan_interface_exists'] except KeyError: pass if __opts__['test']: if bridge_exists: if (tunnel_type == 'vlan'): _check_vlan() if (not ret['comment']): ret['result'] = None ret['comment'] = comments['comment_vlan_created'] elif (tunnel_type == 'vxlan'): _check_vxlan() if (not ret['comment']): ret['result'] = None ret['comment'] = comments['comment_vxlan_created'] elif (tunnel_type == 'gre'): _check_gre() if (not ret['comment']): ret['result'] = None ret['comment'] = comments['comment_gre_created'] elif (name in port_list): ret['result'] = True current_type = __salt__['openvswitch.interface_get_type'](name) if (internal and (current_type != ['internal'])): ret['comment'] = comments['comment_port_internal'] else: ret['comment'] = comments['comment_port_exists'] else: ret['result'] = None ret['comment'] = comments['comment_port_created'] else: ret['result'] = None ret['comment'] = comments['comment_bridge_notexists'] return ret if bridge_exists: if (tunnel_type == 'vlan'): _check_vlan() if (not 
ret['comment']): port_create_vlan = __salt__['openvswitch.port_create_vlan'](bridge, name, id, internal) if port_create_vlan: ret['result'] = True ret['comment'] = comments['comment_vlan_created'] ret['changes'] = comments['changes_vlan_created'] else: ret['result'] = False ret['comment'] = comments['comment_vlan_notcreated'] elif (tunnel_type == 'vxlan'): _check_vxlan() if (not ret['comment']): port_create_vxlan = __salt__['openvswitch.port_create_vxlan'](bridge, name, id, remote, dst_port) if port_create_vxlan: ret['result'] = True ret['comment'] = comments['comment_vxlan_created'] ret['changes'] = comments['changes_vxlan_created'] else: ret['result'] = False ret['comment'] = comments['comment_vxlan_notcreated'] elif (tunnel_type == 'gre'): _check_gre() if (not ret['comment']): port_create_gre = __salt__['openvswitch.port_create_gre'](bridge, name, id, remote) if port_create_gre: ret['result'] = True ret['comment'] = comments['comment_gre_created'] ret['changes'] = comments['changes_gre_created'] else: ret['result'] = False ret['comment'] = comments['comment_gre_notcreated'] elif (name in port_list): current_type = __salt__['openvswitch.interface_get_type'](name) if (internal and (current_type != ['internal'])): port_add = __salt__['openvswitch.port_add'](bridge, name, may_exist=True, internal=internal) if port_add: ret['result'] = True ret['comment'] = comments['comment_port_internal'] ret['changes'] = comments['changes_port_internal'] else: ret['result'] = False ret['comment'] = comments['comment_port_internal_not_changed'] else: ret['result'] = True ret['comment'] = comments['comment_port_exists'] else: port_add = __salt__['openvswitch.port_add'](bridge, name, internal=internal) if port_add: ret['result'] = True ret['comment'] = comments['comment_port_created'] ret['changes'] = comments['changes_port_created'] else: ret['result'] = False ret['comment'] = comments['comment_port_notcreated'] else: ret['result'] = False ret['comment'] = 
comments['comment_bridge_notexists'] return ret
Ensures that the named port exists on bridge, eventually creates it. Args: name: The name of the port. bridge: The name of the bridge. tunnel_type: Optional type of interface to create, currently supports: vlan, vxlan and gre. id: Optional tunnel's key. remote: Remote endpoint's IP address. dst_port: Port to use when creating tunnelport in the switch. internal: Create an internal port if one does not exist
salt/states/openvswitch_port.py
present
Flowdalic/salt
9,425
python
def present(name, bridge, tunnel_type=None, id=None, remote=None, dst_port=None, internal=False): "\n Ensures that the named port exists on bridge, eventually creates it.\n\n Args:\n name: The name of the port.\n bridge: The name of the bridge.\n tunnel_type: Optional type of interface to create, currently supports: vlan, vxlan and gre.\n id: Optional tunnel's key.\n remote: Remote endpoint's IP address.\n dst_port: Port to use when creating tunnelport in the switch.\n internal: Create an internal port if one does not exist\n\n " ret = {'name': name, 'changes': {}, 'result': False, 'comment': } tunnel_types = ('vlan', 'vxlan', 'gre') if (tunnel_type and (tunnel_type not in tunnel_types)): raise TypeError('The optional type argument must be one of these values: {}.'.format(str(tunnel_types))) bridge_exists = __salt__['openvswitch.bridge_exists'](bridge) port_list = [] if bridge_exists: port_list = __salt__['openvswitch.port_list'](bridge) comments = {} comments['comment_bridge_notexists'] = 'Bridge {} does not exist.'.format(bridge) comments['comment_port_exists'] = 'Port {} already exists.'.format(name) comments['comment_port_created'] = 'Port {} created on bridge {}.'.format(name, bridge) comments['comment_port_notcreated'] = 'Unable to create port {} on bridge {}.'.format(name, bridge) comments['changes_port_created'] = {name: {'old': 'No port named {} present.'.format(name), 'new': 'Created port {1} on bridge {0}.'.format(bridge, name)}} comments['comment_port_internal'] = 'Port {} already exists, but interface type has been changed to internal.'.format(name) comments['changes_port_internal'] = {'internal': {'old': False, 'new': True}} comments['comment_port_internal_not_changed'] = 'Port {} already exists, but the interface type could not be changed to internal.'.format(name) if tunnel_type: comments['comment_invalid_ip'] = 'Remote is not valid ip address.' if (tunnel_type == 'vlan'): comments['comment_vlan_invalid_id'] = 'VLANs id must be between 0 and 4095.' 
comments['comment_vlan_invalid_name'] = 'Could not find network interface {}.'.format(name) comments['comment_vlan_port_exists'] = 'Port {} with access to VLAN {} already exists on bridge {}.'.format(name, id, bridge) comments['comment_vlan_created'] = 'Created port {} with access to VLAN {} on bridge {}.'.format(name, id, bridge) comments['comment_vlan_notcreated'] = 'Unable to create port {} with access to VLAN {} on bridge {}.'.format(name, id, bridge) comments['changes_vlan_created'] = {name: {'old': 'No port named {} with access to VLAN {} present on bridge {} present.'.format(name, id, bridge), 'new': 'Created port {1} with access to VLAN {2} on bridge {0}.'.format(bridge, name, id)}} elif (tunnel_type == 'gre'): comments['comment_gre_invalid_id'] = 'Id of GRE tunnel must be an unsigned 32-bit integer.' comments['comment_gre_interface_exists'] = 'GRE tunnel interface {} with rempte ip {} and key {} already exists on bridge {}.'.format(name, remote, id, bridge) comments['comment_gre_created'] = 'Created GRE tunnel interface {} with remote ip {} and key {} on bridge {}.'.format(name, remote, id, bridge) comments['comment_gre_notcreated'] = 'Unable to create GRE tunnel interface {} with remote ip {} and key {} on bridge {}.'.format(name, remote, id, bridge) comments['changes_gre_created'] = {name: {'old': 'No GRE tunnel interface {} with remote ip {} and key {} on bridge {} present.'.format(name, remote, id, bridge), 'new': 'Created GRE tunnel interface {} with remote ip {} and key {} on bridge {}.'.format(name, remote, id, bridge)}} elif (tunnel_type == 'vxlan'): comments['comment_dstport'] = (((' (dst_port' + str(dst_port)) + ')') if (0 < dst_port <= 65535) else ) comments['comment_vxlan_invalid_id'] = 'Id of VXLAN tunnel must be an unsigned 64-bit integer.' 
comments['comment_vxlan_interface_exists'] = 'VXLAN tunnel interface {} with rempte ip {} and key {} already exists on bridge {}{}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['comment_vxlan_created'] = 'Created VXLAN tunnel interface {} with remote ip {} and key {} on bridge {}{}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['comment_vxlan_notcreated'] = 'Unable to create VXLAN tunnel interface {} with remote ip {} and key {} on bridge {}{}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['changes_vxlan_created'] = {name: {'old': 'No VXLAN tunnel interface {} with remote ip {} and key {} on bridge {}{} present.'.format(name, remote, id, bridge, comments['comment_dstport']), 'new': 'Created VXLAN tunnel interface {} with remote ip {} and key {} on bridge {}{}.'.format(name, remote, id, bridge, comments['comment_dstport'])}} def _check_vlan(): tag = __salt__['openvswitch.port_get_tag'](name) interfaces = __salt__['network.interfaces']() if (not (0 <= id <= 4095)): ret['result'] = False ret['comment'] = comments['comment_vlan_invalid_id'] elif ((not internal) and (name not in interfaces)): ret['result'] = False ret['comment'] = comments['comment_vlan_invalid_name'] elif (tag and (name in port_list)): try: if (int(tag[0]) == id): ret['result'] = True ret['comment'] = comments['comment_vlan_port_exists'] except (ValueError, KeyError): pass def _check_gre(): interface_options = __salt__['openvswitch.interface_get_options'](name) interface_type = __salt__['openvswitch.interface_get_type'](name) if (not (0 <= id <= (2 ** 32))): ret['result'] = False ret['comment'] = comments['comment_gre_invalid_id'] elif (not __salt__['dig.check_ip'](remote)): ret['result'] = False ret['comment'] = comments['comment_invalid_ip'] elif (interface_options and interface_type and (name in port_list)): interface_attroptions = (((('{key="' + str(id)) + '", remote_ip="') + str(remote)) + '"}') try: if 
((interface_type[0] == 'gre') and (interface_options[0] == interface_attroptions)): ret['result'] = True ret['comment'] = comments['comment_gre_interface_exists'] except KeyError: pass def _check_vxlan(): interface_options = __salt__['openvswitch.interface_get_options'](name) interface_type = __salt__['openvswitch.interface_get_type'](name) if (not (0 <= id <= (2 ** 64))): ret['result'] = False ret['comment'] = comments['comment_vxlan_invalid_id'] elif (not __salt__['dig.check_ip'](remote)): ret['result'] = False ret['comment'] = comments['comment_invalid_ip'] elif (interface_options and interface_type and (name in port_list)): opt_port = ((('dst_port="' + str(dst_port)) + '", ') if (0 < dst_port <= 65535) else ) interface_attroptions = (((('{{{0}key="'.format(opt_port) + str(id)) + '", remote_ip="') + str(remote)) + '"}') try: if ((interface_type[0] == 'vxlan') and (interface_options[0] == interface_attroptions)): ret['result'] = True ret['comment'] = comments['comment_vxlan_interface_exists'] except KeyError: pass if __opts__['test']: if bridge_exists: if (tunnel_type == 'vlan'): _check_vlan() if (not ret['comment']): ret['result'] = None ret['comment'] = comments['comment_vlan_created'] elif (tunnel_type == 'vxlan'): _check_vxlan() if (not ret['comment']): ret['result'] = None ret['comment'] = comments['comment_vxlan_created'] elif (tunnel_type == 'gre'): _check_gre() if (not ret['comment']): ret['result'] = None ret['comment'] = comments['comment_gre_created'] elif (name in port_list): ret['result'] = True current_type = __salt__['openvswitch.interface_get_type'](name) if (internal and (current_type != ['internal'])): ret['comment'] = comments['comment_port_internal'] else: ret['comment'] = comments['comment_port_exists'] else: ret['result'] = None ret['comment'] = comments['comment_port_created'] else: ret['result'] = None ret['comment'] = comments['comment_bridge_notexists'] return ret if bridge_exists: if (tunnel_type == 'vlan'): _check_vlan() if (not 
ret['comment']): port_create_vlan = __salt__['openvswitch.port_create_vlan'](bridge, name, id, internal) if port_create_vlan: ret['result'] = True ret['comment'] = comments['comment_vlan_created'] ret['changes'] = comments['changes_vlan_created'] else: ret['result'] = False ret['comment'] = comments['comment_vlan_notcreated'] elif (tunnel_type == 'vxlan'): _check_vxlan() if (not ret['comment']): port_create_vxlan = __salt__['openvswitch.port_create_vxlan'](bridge, name, id, remote, dst_port) if port_create_vxlan: ret['result'] = True ret['comment'] = comments['comment_vxlan_created'] ret['changes'] = comments['changes_vxlan_created'] else: ret['result'] = False ret['comment'] = comments['comment_vxlan_notcreated'] elif (tunnel_type == 'gre'): _check_gre() if (not ret['comment']): port_create_gre = __salt__['openvswitch.port_create_gre'](bridge, name, id, remote) if port_create_gre: ret['result'] = True ret['comment'] = comments['comment_gre_created'] ret['changes'] = comments['changes_gre_created'] else: ret['result'] = False ret['comment'] = comments['comment_gre_notcreated'] elif (name in port_list): current_type = __salt__['openvswitch.interface_get_type'](name) if (internal and (current_type != ['internal'])): port_add = __salt__['openvswitch.port_add'](bridge, name, may_exist=True, internal=internal) if port_add: ret['result'] = True ret['comment'] = comments['comment_port_internal'] ret['changes'] = comments['changes_port_internal'] else: ret['result'] = False ret['comment'] = comments['comment_port_internal_not_changed'] else: ret['result'] = True ret['comment'] = comments['comment_port_exists'] else: port_add = __salt__['openvswitch.port_add'](bridge, name, internal=internal) if port_add: ret['result'] = True ret['comment'] = comments['comment_port_created'] ret['changes'] = comments['changes_port_created'] else: ret['result'] = False ret['comment'] = comments['comment_port_notcreated'] else: ret['result'] = False ret['comment'] = 
comments['comment_bridge_notexists'] return ret
def present(name, bridge, tunnel_type=None, id=None, remote=None, dst_port=None, internal=False): "\n Ensures that the named port exists on bridge, eventually creates it.\n\n Args:\n name: The name of the port.\n bridge: The name of the bridge.\n tunnel_type: Optional type of interface to create, currently supports: vlan, vxlan and gre.\n id: Optional tunnel's key.\n remote: Remote endpoint's IP address.\n dst_port: Port to use when creating tunnelport in the switch.\n internal: Create an internal port if one does not exist\n\n " ret = {'name': name, 'changes': {}, 'result': False, 'comment': } tunnel_types = ('vlan', 'vxlan', 'gre') if (tunnel_type and (tunnel_type not in tunnel_types)): raise TypeError('The optional type argument must be one of these values: {}.'.format(str(tunnel_types))) bridge_exists = __salt__['openvswitch.bridge_exists'](bridge) port_list = [] if bridge_exists: port_list = __salt__['openvswitch.port_list'](bridge) comments = {} comments['comment_bridge_notexists'] = 'Bridge {} does not exist.'.format(bridge) comments['comment_port_exists'] = 'Port {} already exists.'.format(name) comments['comment_port_created'] = 'Port {} created on bridge {}.'.format(name, bridge) comments['comment_port_notcreated'] = 'Unable to create port {} on bridge {}.'.format(name, bridge) comments['changes_port_created'] = {name: {'old': 'No port named {} present.'.format(name), 'new': 'Created port {1} on bridge {0}.'.format(bridge, name)}} comments['comment_port_internal'] = 'Port {} already exists, but interface type has been changed to internal.'.format(name) comments['changes_port_internal'] = {'internal': {'old': False, 'new': True}} comments['comment_port_internal_not_changed'] = 'Port {} already exists, but the interface type could not be changed to internal.'.format(name) if tunnel_type: comments['comment_invalid_ip'] = 'Remote is not valid ip address.' if (tunnel_type == 'vlan'): comments['comment_vlan_invalid_id'] = 'VLANs id must be between 0 and 4095.' 
comments['comment_vlan_invalid_name'] = 'Could not find network interface {}.'.format(name) comments['comment_vlan_port_exists'] = 'Port {} with access to VLAN {} already exists on bridge {}.'.format(name, id, bridge) comments['comment_vlan_created'] = 'Created port {} with access to VLAN {} on bridge {}.'.format(name, id, bridge) comments['comment_vlan_notcreated'] = 'Unable to create port {} with access to VLAN {} on bridge {}.'.format(name, id, bridge) comments['changes_vlan_created'] = {name: {'old': 'No port named {} with access to VLAN {} present on bridge {} present.'.format(name, id, bridge), 'new': 'Created port {1} with access to VLAN {2} on bridge {0}.'.format(bridge, name, id)}} elif (tunnel_type == 'gre'): comments['comment_gre_invalid_id'] = 'Id of GRE tunnel must be an unsigned 32-bit integer.' comments['comment_gre_interface_exists'] = 'GRE tunnel interface {} with rempte ip {} and key {} already exists on bridge {}.'.format(name, remote, id, bridge) comments['comment_gre_created'] = 'Created GRE tunnel interface {} with remote ip {} and key {} on bridge {}.'.format(name, remote, id, bridge) comments['comment_gre_notcreated'] = 'Unable to create GRE tunnel interface {} with remote ip {} and key {} on bridge {}.'.format(name, remote, id, bridge) comments['changes_gre_created'] = {name: {'old': 'No GRE tunnel interface {} with remote ip {} and key {} on bridge {} present.'.format(name, remote, id, bridge), 'new': 'Created GRE tunnel interface {} with remote ip {} and key {} on bridge {}.'.format(name, remote, id, bridge)}} elif (tunnel_type == 'vxlan'): comments['comment_dstport'] = (((' (dst_port' + str(dst_port)) + ')') if (0 < dst_port <= 65535) else ) comments['comment_vxlan_invalid_id'] = 'Id of VXLAN tunnel must be an unsigned 64-bit integer.' 
comments['comment_vxlan_interface_exists'] = 'VXLAN tunnel interface {} with rempte ip {} and key {} already exists on bridge {}{}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['comment_vxlan_created'] = 'Created VXLAN tunnel interface {} with remote ip {} and key {} on bridge {}{}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['comment_vxlan_notcreated'] = 'Unable to create VXLAN tunnel interface {} with remote ip {} and key {} on bridge {}{}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['changes_vxlan_created'] = {name: {'old': 'No VXLAN tunnel interface {} with remote ip {} and key {} on bridge {}{} present.'.format(name, remote, id, bridge, comments['comment_dstport']), 'new': 'Created VXLAN tunnel interface {} with remote ip {} and key {} on bridge {}{}.'.format(name, remote, id, bridge, comments['comment_dstport'])}} def _check_vlan(): tag = __salt__['openvswitch.port_get_tag'](name) interfaces = __salt__['network.interfaces']() if (not (0 <= id <= 4095)): ret['result'] = False ret['comment'] = comments['comment_vlan_invalid_id'] elif ((not internal) and (name not in interfaces)): ret['result'] = False ret['comment'] = comments['comment_vlan_invalid_name'] elif (tag and (name in port_list)): try: if (int(tag[0]) == id): ret['result'] = True ret['comment'] = comments['comment_vlan_port_exists'] except (ValueError, KeyError): pass def _check_gre(): interface_options = __salt__['openvswitch.interface_get_options'](name) interface_type = __salt__['openvswitch.interface_get_type'](name) if (not (0 <= id <= (2 ** 32))): ret['result'] = False ret['comment'] = comments['comment_gre_invalid_id'] elif (not __salt__['dig.check_ip'](remote)): ret['result'] = False ret['comment'] = comments['comment_invalid_ip'] elif (interface_options and interface_type and (name in port_list)): interface_attroptions = (((('{key="' + str(id)) + '", remote_ip="') + str(remote)) + '"}') try: if 
((interface_type[0] == 'gre') and (interface_options[0] == interface_attroptions)): ret['result'] = True ret['comment'] = comments['comment_gre_interface_exists'] except KeyError: pass def _check_vxlan(): interface_options = __salt__['openvswitch.interface_get_options'](name) interface_type = __salt__['openvswitch.interface_get_type'](name) if (not (0 <= id <= (2 ** 64))): ret['result'] = False ret['comment'] = comments['comment_vxlan_invalid_id'] elif (not __salt__['dig.check_ip'](remote)): ret['result'] = False ret['comment'] = comments['comment_invalid_ip'] elif (interface_options and interface_type and (name in port_list)): opt_port = ((('dst_port="' + str(dst_port)) + '", ') if (0 < dst_port <= 65535) else ) interface_attroptions = (((('{{{0}key="'.format(opt_port) + str(id)) + '", remote_ip="') + str(remote)) + '"}') try: if ((interface_type[0] == 'vxlan') and (interface_options[0] == interface_attroptions)): ret['result'] = True ret['comment'] = comments['comment_vxlan_interface_exists'] except KeyError: pass if __opts__['test']: if bridge_exists: if (tunnel_type == 'vlan'): _check_vlan() if (not ret['comment']): ret['result'] = None ret['comment'] = comments['comment_vlan_created'] elif (tunnel_type == 'vxlan'): _check_vxlan() if (not ret['comment']): ret['result'] = None ret['comment'] = comments['comment_vxlan_created'] elif (tunnel_type == 'gre'): _check_gre() if (not ret['comment']): ret['result'] = None ret['comment'] = comments['comment_gre_created'] elif (name in port_list): ret['result'] = True current_type = __salt__['openvswitch.interface_get_type'](name) if (internal and (current_type != ['internal'])): ret['comment'] = comments['comment_port_internal'] else: ret['comment'] = comments['comment_port_exists'] else: ret['result'] = None ret['comment'] = comments['comment_port_created'] else: ret['result'] = None ret['comment'] = comments['comment_bridge_notexists'] return ret if bridge_exists: if (tunnel_type == 'vlan'): _check_vlan() if (not 
ret['comment']): port_create_vlan = __salt__['openvswitch.port_create_vlan'](bridge, name, id, internal) if port_create_vlan: ret['result'] = True ret['comment'] = comments['comment_vlan_created'] ret['changes'] = comments['changes_vlan_created'] else: ret['result'] = False ret['comment'] = comments['comment_vlan_notcreated'] elif (tunnel_type == 'vxlan'): _check_vxlan() if (not ret['comment']): port_create_vxlan = __salt__['openvswitch.port_create_vxlan'](bridge, name, id, remote, dst_port) if port_create_vxlan: ret['result'] = True ret['comment'] = comments['comment_vxlan_created'] ret['changes'] = comments['changes_vxlan_created'] else: ret['result'] = False ret['comment'] = comments['comment_vxlan_notcreated'] elif (tunnel_type == 'gre'): _check_gre() if (not ret['comment']): port_create_gre = __salt__['openvswitch.port_create_gre'](bridge, name, id, remote) if port_create_gre: ret['result'] = True ret['comment'] = comments['comment_gre_created'] ret['changes'] = comments['changes_gre_created'] else: ret['result'] = False ret['comment'] = comments['comment_gre_notcreated'] elif (name in port_list): current_type = __salt__['openvswitch.interface_get_type'](name) if (internal and (current_type != ['internal'])): port_add = __salt__['openvswitch.port_add'](bridge, name, may_exist=True, internal=internal) if port_add: ret['result'] = True ret['comment'] = comments['comment_port_internal'] ret['changes'] = comments['changes_port_internal'] else: ret['result'] = False ret['comment'] = comments['comment_port_internal_not_changed'] else: ret['result'] = True ret['comment'] = comments['comment_port_exists'] else: port_add = __salt__['openvswitch.port_add'](bridge, name, internal=internal) if port_add: ret['result'] = True ret['comment'] = comments['comment_port_created'] ret['changes'] = comments['changes_port_created'] else: ret['result'] = False ret['comment'] = comments['comment_port_notcreated'] else: ret['result'] = False ret['comment'] = 
comments['comment_bridge_notexists'] return ret<|docstring|>Ensures that the named port exists on bridge, eventually creates it. Args: name: The name of the port. bridge: The name of the bridge. tunnel_type: Optional type of interface to create, currently supports: vlan, vxlan and gre. id: Optional tunnel's key. remote: Remote endpoint's IP address. dst_port: Port to use when creating tunnelport in the switch. internal: Create an internal port if one does not exist<|endoftext|>
4b54bb9c56ea45151d5c2ccd3eb6dc03f4b981062102fb13fb80c9c90289f43e
def absent(name, bridge=None): '\n Ensures that the named port exists on bridge, eventually deletes it.\n If bridge is not set, port is removed from whatever bridge contains it.\n\n Args:\n name: The name of the port.\n bridge: The name of the bridge.\n\n ' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} bridge_exists = False if bridge: bridge_exists = __salt__['openvswitch.bridge_exists'](bridge) if bridge_exists: port_list = __salt__['openvswitch.port_list'](bridge) else: port_list = () else: port_list = [name] comments = {} comments['comment_bridge_notexists'] = 'Bridge {} does not exist.'.format(bridge) comments['comment_port_notexists'] = 'Port {} does not exist on bridge {}.'.format(name, bridge) comments['comment_port_deleted'] = 'Port {} deleted.'.format(name) comments['comment_port_notdeleted'] = 'Unable to delete port {}.'.format(name) comments['changes_port_deleted'] = {name: {'old': 'Port named {} may exist.'.format(name), 'new': 'Deleted port {}.'.format(name)}} if __opts__['test']: if (bridge and (not bridge_exists)): ret['result'] = None ret['comment'] = comments['comment_bridge_notexists'] elif (name not in port_list): ret['result'] = True ret['comment'] = comments['comment_port_notexists'] else: ret['result'] = None ret['comment'] = comments['comment_port_deleted'] return ret if (bridge and (not bridge_exists)): ret['result'] = False ret['comment'] = comments['comment_bridge_notexists'] elif (name not in port_list): ret['result'] = True ret['comment'] = comments['comment_port_notexists'] else: if bridge: port_remove = __salt__['openvswitch.port_remove'](br=bridge, port=name) else: port_remove = __salt__['openvswitch.port_remove'](br=None, port=name) if port_remove: ret['result'] = True ret['comment'] = comments['comment_port_deleted'] ret['changes'] = comments['changes_port_deleted'] else: ret['result'] = False ret['comment'] = comments['comment_port_notdeleted'] return ret
Ensures that the named port exists on bridge, eventually deletes it. If bridge is not set, port is removed from whatever bridge contains it. Args: name: The name of the port. bridge: The name of the bridge.
salt/states/openvswitch_port.py
absent
Flowdalic/salt
9,425
python
def absent(name, bridge=None): '\n Ensures that the named port exists on bridge, eventually deletes it.\n If bridge is not set, port is removed from whatever bridge contains it.\n\n Args:\n name: The name of the port.\n bridge: The name of the bridge.\n\n ' ret = {'name': name, 'changes': {}, 'result': False, 'comment': } bridge_exists = False if bridge: bridge_exists = __salt__['openvswitch.bridge_exists'](bridge) if bridge_exists: port_list = __salt__['openvswitch.port_list'](bridge) else: port_list = () else: port_list = [name] comments = {} comments['comment_bridge_notexists'] = 'Bridge {} does not exist.'.format(bridge) comments['comment_port_notexists'] = 'Port {} does not exist on bridge {}.'.format(name, bridge) comments['comment_port_deleted'] = 'Port {} deleted.'.format(name) comments['comment_port_notdeleted'] = 'Unable to delete port {}.'.format(name) comments['changes_port_deleted'] = {name: {'old': 'Port named {} may exist.'.format(name), 'new': 'Deleted port {}.'.format(name)}} if __opts__['test']: if (bridge and (not bridge_exists)): ret['result'] = None ret['comment'] = comments['comment_bridge_notexists'] elif (name not in port_list): ret['result'] = True ret['comment'] = comments['comment_port_notexists'] else: ret['result'] = None ret['comment'] = comments['comment_port_deleted'] return ret if (bridge and (not bridge_exists)): ret['result'] = False ret['comment'] = comments['comment_bridge_notexists'] elif (name not in port_list): ret['result'] = True ret['comment'] = comments['comment_port_notexists'] else: if bridge: port_remove = __salt__['openvswitch.port_remove'](br=bridge, port=name) else: port_remove = __salt__['openvswitch.port_remove'](br=None, port=name) if port_remove: ret['result'] = True ret['comment'] = comments['comment_port_deleted'] ret['changes'] = comments['changes_port_deleted'] else: ret['result'] = False ret['comment'] = comments['comment_port_notdeleted'] return ret
def absent(name, bridge=None): '\n Ensures that the named port exists on bridge, eventually deletes it.\n If bridge is not set, port is removed from whatever bridge contains it.\n\n Args:\n name: The name of the port.\n bridge: The name of the bridge.\n\n ' ret = {'name': name, 'changes': {}, 'result': False, 'comment': } bridge_exists = False if bridge: bridge_exists = __salt__['openvswitch.bridge_exists'](bridge) if bridge_exists: port_list = __salt__['openvswitch.port_list'](bridge) else: port_list = () else: port_list = [name] comments = {} comments['comment_bridge_notexists'] = 'Bridge {} does not exist.'.format(bridge) comments['comment_port_notexists'] = 'Port {} does not exist on bridge {}.'.format(name, bridge) comments['comment_port_deleted'] = 'Port {} deleted.'.format(name) comments['comment_port_notdeleted'] = 'Unable to delete port {}.'.format(name) comments['changes_port_deleted'] = {name: {'old': 'Port named {} may exist.'.format(name), 'new': 'Deleted port {}.'.format(name)}} if __opts__['test']: if (bridge and (not bridge_exists)): ret['result'] = None ret['comment'] = comments['comment_bridge_notexists'] elif (name not in port_list): ret['result'] = True ret['comment'] = comments['comment_port_notexists'] else: ret['result'] = None ret['comment'] = comments['comment_port_deleted'] return ret if (bridge and (not bridge_exists)): ret['result'] = False ret['comment'] = comments['comment_bridge_notexists'] elif (name not in port_list): ret['result'] = True ret['comment'] = comments['comment_port_notexists'] else: if bridge: port_remove = __salt__['openvswitch.port_remove'](br=bridge, port=name) else: port_remove = __salt__['openvswitch.port_remove'](br=None, port=name) if port_remove: ret['result'] = True ret['comment'] = comments['comment_port_deleted'] ret['changes'] = comments['changes_port_deleted'] else: ret['result'] = False ret['comment'] = comments['comment_port_notdeleted'] return ret<|docstring|>Ensures that the named port exists on bridge, 
eventually deletes it. If bridge is not set, port is removed from whatever bridge contains it. Args: name: The name of the port. bridge: The name of the bridge.<|endoftext|>
68bb1986b04ed4911b0d83b0937ba427ebc107586157aa445a85521a27c98dbd
def makeOBJFile(points, filename): 'OBJ file creation test that puts cube at each point' outputFile = open(filename, 'w') endLine = '\n' outputFile.write('g cube') outputFile.write(endLine) numCubeVertices = len(v) for p in points: for i in range(numCubeVertices): outputFile.write(('v %f %f %f' % ((v[i][0] + p[0]), (v[i][1] + p[1]), (v[i][2] + p[2])))) outputFile.write(endLine) for vertexNormalString in vn: outputFile.write(vertexNormalString) outputFile.write(endLine) for cubeIndex in range(len(points)): vertexIndexOffset = (cubeIndex * numCubeVertices) for faceIndex in range(len(f)): face = f[faceIndex].makeFace(vertexIndexOffset) outputFile.write(('f ' + face.toString())) outputFile.write(endLine) outputFile.close()
OBJ file creation test that puts cube at each point
cytoseg/obj_tools.py
makeOBJFile
slash-segmentation/DP2
0
python
def makeOBJFile(points, filename): outputFile = open(filename, 'w') endLine = '\n' outputFile.write('g cube') outputFile.write(endLine) numCubeVertices = len(v) for p in points: for i in range(numCubeVertices): outputFile.write(('v %f %f %f' % ((v[i][0] + p[0]), (v[i][1] + p[1]), (v[i][2] + p[2])))) outputFile.write(endLine) for vertexNormalString in vn: outputFile.write(vertexNormalString) outputFile.write(endLine) for cubeIndex in range(len(points)): vertexIndexOffset = (cubeIndex * numCubeVertices) for faceIndex in range(len(f)): face = f[faceIndex].makeFace(vertexIndexOffset) outputFile.write(('f ' + face.toString())) outputFile.write(endLine) outputFile.close()
def makeOBJFile(points, filename): outputFile = open(filename, 'w') endLine = '\n' outputFile.write('g cube') outputFile.write(endLine) numCubeVertices = len(v) for p in points: for i in range(numCubeVertices): outputFile.write(('v %f %f %f' % ((v[i][0] + p[0]), (v[i][1] + p[1]), (v[i][2] + p[2])))) outputFile.write(endLine) for vertexNormalString in vn: outputFile.write(vertexNormalString) outputFile.write(endLine) for cubeIndex in range(len(points)): vertexIndexOffset = (cubeIndex * numCubeVertices) for faceIndex in range(len(f)): face = f[faceIndex].makeFace(vertexIndexOffset) outputFile.write(('f ' + face.toString())) outputFile.write(endLine) outputFile.close()<|docstring|>OBJ file creation test that puts cube at each point<|endoftext|>
f00ed7b22315b73c5452ee5662e600a15c7cabc91b54de1c25c4c1eb3f036ba7
def make_mask(cell, coords, relativity=0, shls_slice=None, verbose=None): 'Mask to indicate whether a shell is zero on grid.\n The resultant mask array is an extension to the mask array used in\n molecular code (see also pyscf.dft.numint.make_mask function).\n For given shell ID and block ID, the value of the extended mask array\n means the number of images in Ls that does not vanish.\n ' coords = np.asarray(coords, order='F') natm = ctypes.c_int(cell._atm.shape[0]) nbas = ctypes.c_int(cell.nbas) ngrids = len(coords) if (shls_slice is None): shls_slice = (0, cell.nbas) assert (shls_slice == (0, cell.nbas)) Ls = cell.get_lattice_Ls() Ls = Ls[np.argsort(lib.norm(Ls, axis=1))] non0tab = np.empty(((((ngrids + BLKSIZE) - 1) // BLKSIZE), cell.nbas), dtype=np.uint8) libpbc.PBCnr_ao_screen(non0tab.ctypes.data_as(ctypes.c_void_p), coords.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(ngrids), Ls.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(Ls)), cell._atm.ctypes.data_as(ctypes.c_void_p), natm, cell._bas.ctypes.data_as(ctypes.c_void_p), nbas, cell._env.ctypes.data_as(ctypes.c_void_p)) return non0tab
Mask to indicate whether a shell is zero on grid. The resultant mask array is an extension to the mask array used in molecular code (see also pyscf.dft.numint.make_mask function). For given shell ID and block ID, the value of the extended mask array means the number of images in Ls that does not vanish.
pyscf/pbc/dft/gen_grid.py
make_mask
etaffet/pyscf
1
python
def make_mask(cell, coords, relativity=0, shls_slice=None, verbose=None): 'Mask to indicate whether a shell is zero on grid.\n The resultant mask array is an extension to the mask array used in\n molecular code (see also pyscf.dft.numint.make_mask function).\n For given shell ID and block ID, the value of the extended mask array\n means the number of images in Ls that does not vanish.\n ' coords = np.asarray(coords, order='F') natm = ctypes.c_int(cell._atm.shape[0]) nbas = ctypes.c_int(cell.nbas) ngrids = len(coords) if (shls_slice is None): shls_slice = (0, cell.nbas) assert (shls_slice == (0, cell.nbas)) Ls = cell.get_lattice_Ls() Ls = Ls[np.argsort(lib.norm(Ls, axis=1))] non0tab = np.empty(((((ngrids + BLKSIZE) - 1) // BLKSIZE), cell.nbas), dtype=np.uint8) libpbc.PBCnr_ao_screen(non0tab.ctypes.data_as(ctypes.c_void_p), coords.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(ngrids), Ls.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(Ls)), cell._atm.ctypes.data_as(ctypes.c_void_p), natm, cell._bas.ctypes.data_as(ctypes.c_void_p), nbas, cell._env.ctypes.data_as(ctypes.c_void_p)) return non0tab
def make_mask(cell, coords, relativity=0, shls_slice=None, verbose=None): 'Mask to indicate whether a shell is zero on grid.\n The resultant mask array is an extension to the mask array used in\n molecular code (see also pyscf.dft.numint.make_mask function).\n For given shell ID and block ID, the value of the extended mask array\n means the number of images in Ls that does not vanish.\n ' coords = np.asarray(coords, order='F') natm = ctypes.c_int(cell._atm.shape[0]) nbas = ctypes.c_int(cell.nbas) ngrids = len(coords) if (shls_slice is None): shls_slice = (0, cell.nbas) assert (shls_slice == (0, cell.nbas)) Ls = cell.get_lattice_Ls() Ls = Ls[np.argsort(lib.norm(Ls, axis=1))] non0tab = np.empty(((((ngrids + BLKSIZE) - 1) // BLKSIZE), cell.nbas), dtype=np.uint8) libpbc.PBCnr_ao_screen(non0tab.ctypes.data_as(ctypes.c_void_p), coords.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(ngrids), Ls.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(Ls)), cell._atm.ctypes.data_as(ctypes.c_void_p), natm, cell._bas.ctypes.data_as(ctypes.c_void_p), nbas, cell._env.ctypes.data_as(ctypes.c_void_p)) return non0tab<|docstring|>Mask to indicate whether a shell is zero on grid. The resultant mask array is an extension to the mask array used in molecular code (see also pyscf.dft.numint.make_mask function). For given shell ID and block ID, the value of the extended mask array means the number of images in Ls that does not vanish.<|endoftext|>
42e2783803271c97b6d8427b5bf89d5dd34fca36809f99261fbac5cf84330f9d
def gen_becke_grids(cell, atom_grid={}, radi_method=dft.radi.gauss_chebyshev, level=3, prune=nwchem_prune): 'real-space grids using Becke scheme\n\n Args:\n cell : instance of :class:`Cell`\n\n Returns:\n coords : (ngx*ngy*ngz, 3) ndarray\n The real-space grid point coordinates.\n weights : (ngx*ngy*ngz) ndarray\n ' Ls = cell.get_lattice_Ls() atm_coords = (Ls.reshape((- 1), 1, 3) + cell.atom_coords()) atom_grids_tab = gen_atomic_grids(cell, atom_grid, radi_method, level, prune) coords_all = [] weights_all = [] b = cell.reciprocal_vectors(norm_to=1) supatm_idx = [] k = 0 for (iL, L) in enumerate(Ls): for ia in range(cell.natm): (coords, vol) = atom_grids_tab[cell.atom_symbol(ia)] coords = (coords + atm_coords[(iL, ia)]) c = b.dot(coords.T).round(8) mask = np.ones(c.shape[1], dtype=bool) if (cell.dimension >= 1): mask &= ((c[0] >= 0) & (c[0] <= 1)) if (cell.dimension >= 2): mask &= ((c[1] >= 0) & (c[1] <= 1)) if (cell.dimension == 3): mask &= ((c[2] >= 0) & (c[2] <= 1)) vol = vol[mask] if (vol.size > 8): c = c[(:, mask)] if (cell.dimension >= 1): vol[(c[0] == 0)] *= 0.5 vol[(c[0] == 1)] *= 0.5 if (cell.dimension >= 2): vol[(c[1] == 0)] *= 0.5 vol[(c[1] == 1)] *= 0.5 if (cell.dimension == 3): vol[(c[2] == 0)] *= 0.5 vol[(c[2] == 1)] *= 0.5 coords = coords[mask] coords_all.append(coords) weights_all.append(vol) supatm_idx.append(k) k += 1 offs = np.append(0, np.cumsum([w.size for w in weights_all])) coords_all = np.vstack(coords_all) weights_all = np.hstack(weights_all) atm_coords = np.asarray(atm_coords.reshape((- 1), 3)[supatm_idx], order='C') sup_natm = len(atm_coords) p_radii_table = lib.c_null_ptr() fn = dft.gen_grid.libdft.VXCgen_grid ngrids = weights_all.size max_memory = (cell.max_memory - lib.current_memory()[0]) blocksize = min(ngrids, max(2000, int((((max_memory * 1000000.0) / 8) / sup_natm)))) displs = lib.misc._blocksize_partition(offs, blocksize) for (n0, n1) in zip(displs[:(- 1)], displs[1:]): (p0, p1) = (offs[n0], offs[n1]) pbecke = np.empty((sup_natm, 
(p1 - p0))) coords = np.asarray(coords_all[p0:p1], order='F') fn(pbecke.ctypes.data_as(ctypes.c_void_p), coords.ctypes.data_as(ctypes.c_void_p), atm_coords.ctypes.data_as(ctypes.c_void_p), p_radii_table, ctypes.c_int(sup_natm), ctypes.c_int((p1 - p0))) weights_all[p0:p1] /= pbecke.sum(axis=0) for ia in range(n0, n1): (i0, i1) = (offs[ia], offs[(ia + 1)]) weights_all[i0:i1] *= pbecke[(ia, (i0 - p0):(i1 - p0))] return (coords_all, weights_all)
real-space grids using Becke scheme Args: cell : instance of :class:`Cell` Returns: coords : (ngx*ngy*ngz, 3) ndarray The real-space grid point coordinates. weights : (ngx*ngy*ngz) ndarray
pyscf/pbc/dft/gen_grid.py
gen_becke_grids
etaffet/pyscf
1
python
def gen_becke_grids(cell, atom_grid={}, radi_method=dft.radi.gauss_chebyshev, level=3, prune=nwchem_prune): 'real-space grids using Becke scheme\n\n Args:\n cell : instance of :class:`Cell`\n\n Returns:\n coords : (ngx*ngy*ngz, 3) ndarray\n The real-space grid point coordinates.\n weights : (ngx*ngy*ngz) ndarray\n ' Ls = cell.get_lattice_Ls() atm_coords = (Ls.reshape((- 1), 1, 3) + cell.atom_coords()) atom_grids_tab = gen_atomic_grids(cell, atom_grid, radi_method, level, prune) coords_all = [] weights_all = [] b = cell.reciprocal_vectors(norm_to=1) supatm_idx = [] k = 0 for (iL, L) in enumerate(Ls): for ia in range(cell.natm): (coords, vol) = atom_grids_tab[cell.atom_symbol(ia)] coords = (coords + atm_coords[(iL, ia)]) c = b.dot(coords.T).round(8) mask = np.ones(c.shape[1], dtype=bool) if (cell.dimension >= 1): mask &= ((c[0] >= 0) & (c[0] <= 1)) if (cell.dimension >= 2): mask &= ((c[1] >= 0) & (c[1] <= 1)) if (cell.dimension == 3): mask &= ((c[2] >= 0) & (c[2] <= 1)) vol = vol[mask] if (vol.size > 8): c = c[(:, mask)] if (cell.dimension >= 1): vol[(c[0] == 0)] *= 0.5 vol[(c[0] == 1)] *= 0.5 if (cell.dimension >= 2): vol[(c[1] == 0)] *= 0.5 vol[(c[1] == 1)] *= 0.5 if (cell.dimension == 3): vol[(c[2] == 0)] *= 0.5 vol[(c[2] == 1)] *= 0.5 coords = coords[mask] coords_all.append(coords) weights_all.append(vol) supatm_idx.append(k) k += 1 offs = np.append(0, np.cumsum([w.size for w in weights_all])) coords_all = np.vstack(coords_all) weights_all = np.hstack(weights_all) atm_coords = np.asarray(atm_coords.reshape((- 1), 3)[supatm_idx], order='C') sup_natm = len(atm_coords) p_radii_table = lib.c_null_ptr() fn = dft.gen_grid.libdft.VXCgen_grid ngrids = weights_all.size max_memory = (cell.max_memory - lib.current_memory()[0]) blocksize = min(ngrids, max(2000, int((((max_memory * 1000000.0) / 8) / sup_natm)))) displs = lib.misc._blocksize_partition(offs, blocksize) for (n0, n1) in zip(displs[:(- 1)], displs[1:]): (p0, p1) = (offs[n0], offs[n1]) pbecke = np.empty((sup_natm, 
(p1 - p0))) coords = np.asarray(coords_all[p0:p1], order='F') fn(pbecke.ctypes.data_as(ctypes.c_void_p), coords.ctypes.data_as(ctypes.c_void_p), atm_coords.ctypes.data_as(ctypes.c_void_p), p_radii_table, ctypes.c_int(sup_natm), ctypes.c_int((p1 - p0))) weights_all[p0:p1] /= pbecke.sum(axis=0) for ia in range(n0, n1): (i0, i1) = (offs[ia], offs[(ia + 1)]) weights_all[i0:i1] *= pbecke[(ia, (i0 - p0):(i1 - p0))] return (coords_all, weights_all)
def gen_becke_grids(cell, atom_grid={}, radi_method=dft.radi.gauss_chebyshev, level=3, prune=nwchem_prune): 'real-space grids using Becke scheme\n\n Args:\n cell : instance of :class:`Cell`\n\n Returns:\n coords : (ngx*ngy*ngz, 3) ndarray\n The real-space grid point coordinates.\n weights : (ngx*ngy*ngz) ndarray\n ' Ls = cell.get_lattice_Ls() atm_coords = (Ls.reshape((- 1), 1, 3) + cell.atom_coords()) atom_grids_tab = gen_atomic_grids(cell, atom_grid, radi_method, level, prune) coords_all = [] weights_all = [] b = cell.reciprocal_vectors(norm_to=1) supatm_idx = [] k = 0 for (iL, L) in enumerate(Ls): for ia in range(cell.natm): (coords, vol) = atom_grids_tab[cell.atom_symbol(ia)] coords = (coords + atm_coords[(iL, ia)]) c = b.dot(coords.T).round(8) mask = np.ones(c.shape[1], dtype=bool) if (cell.dimension >= 1): mask &= ((c[0] >= 0) & (c[0] <= 1)) if (cell.dimension >= 2): mask &= ((c[1] >= 0) & (c[1] <= 1)) if (cell.dimension == 3): mask &= ((c[2] >= 0) & (c[2] <= 1)) vol = vol[mask] if (vol.size > 8): c = c[(:, mask)] if (cell.dimension >= 1): vol[(c[0] == 0)] *= 0.5 vol[(c[0] == 1)] *= 0.5 if (cell.dimension >= 2): vol[(c[1] == 0)] *= 0.5 vol[(c[1] == 1)] *= 0.5 if (cell.dimension == 3): vol[(c[2] == 0)] *= 0.5 vol[(c[2] == 1)] *= 0.5 coords = coords[mask] coords_all.append(coords) weights_all.append(vol) supatm_idx.append(k) k += 1 offs = np.append(0, np.cumsum([w.size for w in weights_all])) coords_all = np.vstack(coords_all) weights_all = np.hstack(weights_all) atm_coords = np.asarray(atm_coords.reshape((- 1), 3)[supatm_idx], order='C') sup_natm = len(atm_coords) p_radii_table = lib.c_null_ptr() fn = dft.gen_grid.libdft.VXCgen_grid ngrids = weights_all.size max_memory = (cell.max_memory - lib.current_memory()[0]) blocksize = min(ngrids, max(2000, int((((max_memory * 1000000.0) / 8) / sup_natm)))) displs = lib.misc._blocksize_partition(offs, blocksize) for (n0, n1) in zip(displs[:(- 1)], displs[1:]): (p0, p1) = (offs[n0], offs[n1]) pbecke = np.empty((sup_natm, 
(p1 - p0))) coords = np.asarray(coords_all[p0:p1], order='F') fn(pbecke.ctypes.data_as(ctypes.c_void_p), coords.ctypes.data_as(ctypes.c_void_p), atm_coords.ctypes.data_as(ctypes.c_void_p), p_radii_table, ctypes.c_int(sup_natm), ctypes.c_int((p1 - p0))) weights_all[p0:p1] /= pbecke.sum(axis=0) for ia in range(n0, n1): (i0, i1) = (offs[ia], offs[(ia + 1)]) weights_all[i0:i1] *= pbecke[(ia, (i0 - p0):(i1 - p0))] return (coords_all, weights_all)<|docstring|>real-space grids using Becke scheme Args: cell : instance of :class:`Cell` Returns: coords : (ngx*ngy*ngz, 3) ndarray The real-space grid point coordinates. weights : (ngx*ngy*ngz) ndarray<|endoftext|>
ec39ec64be44b8fe0de4c327a703a3ce0c2ad56ce2f7b4d506a2c60b8684ddbc
def __init__(self, db_name: str, db_host: Optional[str], db_port: Optional[int], db_userName: Optional[str], db_password: Optional[str], driverAlias: str, name: str=None) -> None: 'Database connection through a sql driver.' from .pnsqldrivers import PNSqlDrivers super(PNConnection, self).__init__() self.currentSavePoint_ = None self.driverSql = PNSqlDrivers() self.connAux: Dict[(str, 'IConnection')] = {} if (name is None): self.name = 'default' else: self.name = name self.driverName_ = self.driverSql.aliasToName(driverAlias) if (name and (name not in ('dbAux', 'Aux'))): self._isOpen = False return if (self.driverName_ and self.driverSql.loadDriver(self.driverName_)): self.driver_ = self.driverSql.driver() self.conn = self.conectar(db_name, db_host, db_port, db_userName, db_password) if (self.conn is False): return self._isOpen = True else: logger.error("PNConnection.ERROR: No se encontro el driver '%s'", driverAlias) import sys sys.exit(0) self.transaction_ = 0 self.stackSavePoints_ = [] self.queueSavePoints_ = [] self.interactiveGUI_ = True self.lastActiveCursor_ = None self.driver().db_ = self
Database connection through a sql driver.
pineboolib/application/database/pnconnection.py
__init__
deavid/pineboo
2
python
def __init__(self, db_name: str, db_host: Optional[str], db_port: Optional[int], db_userName: Optional[str], db_password: Optional[str], driverAlias: str, name: str=None) -> None: from .pnsqldrivers import PNSqlDrivers super(PNConnection, self).__init__() self.currentSavePoint_ = None self.driverSql = PNSqlDrivers() self.connAux: Dict[(str, 'IConnection')] = {} if (name is None): self.name = 'default' else: self.name = name self.driverName_ = self.driverSql.aliasToName(driverAlias) if (name and (name not in ('dbAux', 'Aux'))): self._isOpen = False return if (self.driverName_ and self.driverSql.loadDriver(self.driverName_)): self.driver_ = self.driverSql.driver() self.conn = self.conectar(db_name, db_host, db_port, db_userName, db_password) if (self.conn is False): return self._isOpen = True else: logger.error("PNConnection.ERROR: No se encontro el driver '%s'", driverAlias) import sys sys.exit(0) self.transaction_ = 0 self.stackSavePoints_ = [] self.queueSavePoints_ = [] self.interactiveGUI_ = True self.lastActiveCursor_ = None self.driver().db_ = self
def __init__(self, db_name: str, db_host: Optional[str], db_port: Optional[int], db_userName: Optional[str], db_password: Optional[str], driverAlias: str, name: str=None) -> None: from .pnsqldrivers import PNSqlDrivers super(PNConnection, self).__init__() self.currentSavePoint_ = None self.driverSql = PNSqlDrivers() self.connAux: Dict[(str, 'IConnection')] = {} if (name is None): self.name = 'default' else: self.name = name self.driverName_ = self.driverSql.aliasToName(driverAlias) if (name and (name not in ('dbAux', 'Aux'))): self._isOpen = False return if (self.driverName_ and self.driverSql.loadDriver(self.driverName_)): self.driver_ = self.driverSql.driver() self.conn = self.conectar(db_name, db_host, db_port, db_userName, db_password) if (self.conn is False): return self._isOpen = True else: logger.error("PNConnection.ERROR: No se encontro el driver '%s'", driverAlias) import sys sys.exit(0) self.transaction_ = 0 self.stackSavePoints_ = [] self.queueSavePoints_ = [] self.interactiveGUI_ = True self.lastActiveCursor_ = None self.driver().db_ = self<|docstring|>Database connection through a sql driver.<|endoftext|>
df30388ca458a831adcd082ab18dc96de473b2f96991cda72de430e6bde54692
@decorators.NotImplementedWarn def finish(self) -> None: 'Set the connection as terminated.' pass
Set the connection as terminated.
pineboolib/application/database/pnconnection.py
finish
deavid/pineboo
2
python
@decorators.NotImplementedWarn def finish(self) -> None: pass
@decorators.NotImplementedWarn def finish(self) -> None: pass<|docstring|>Set the connection as terminated.<|endoftext|>
ef15ee5337cfcf0cc8a9bafbf71a664a77c65ace17c86e92c563bdf6ca0ceecc
def connectionName(self) -> Any: 'Get the current connection name for this cursor.' return self.name
Get the current connection name for this cursor.
pineboolib/application/database/pnconnection.py
connectionName
deavid/pineboo
2
python
def connectionName(self) -> Any: return self.name
def connectionName(self) -> Any: return self.name<|docstring|>Get the current connection name for this cursor.<|endoftext|>
8d0abc8b8fdc41222b2ac26088ec723308132e415a110091badf2eaa2c52c9c7
def useConn(self, name_or_conn: Union[(str, IConnection)]='default') -> IConnection: '\n Select another connection which can be not the default one.\n\n Allow you to select a connection.\n ' name: str if isinstance(name_or_conn, IConnection): name = str(name_or_conn.connectionName()) else: name = str(name_or_conn) if (name in ('default', None)): return self connection = self.connAux.get(name, None) if (connection is None): if (self.driverSql is None): raise Exception('No driver selected') connection = PNConnection(self.db_name, self.db_host, self.db_port, self.db_userName, self.db_password, self.driverSql.nameToAlias(self.driverName()), name) self.connAux[name] = connection return connection
Select another connection which can be not the default one. Allow you to select a connection.
pineboolib/application/database/pnconnection.py
useConn
deavid/pineboo
2
python
def useConn(self, name_or_conn: Union[(str, IConnection)]='default') -> IConnection: '\n Select another connection which can be not the default one.\n\n Allow you to select a connection.\n ' name: str if isinstance(name_or_conn, IConnection): name = str(name_or_conn.connectionName()) else: name = str(name_or_conn) if (name in ('default', None)): return self connection = self.connAux.get(name, None) if (connection is None): if (self.driverSql is None): raise Exception('No driver selected') connection = PNConnection(self.db_name, self.db_host, self.db_port, self.db_userName, self.db_password, self.driverSql.nameToAlias(self.driverName()), name) self.connAux[name] = connection return connection
def useConn(self, name_or_conn: Union[(str, IConnection)]='default') -> IConnection: '\n Select another connection which can be not the default one.\n\n Allow you to select a connection.\n ' name: str if isinstance(name_or_conn, IConnection): name = str(name_or_conn.connectionName()) else: name = str(name_or_conn) if (name in ('default', None)): return self connection = self.connAux.get(name, None) if (connection is None): if (self.driverSql is None): raise Exception('No driver selected') connection = PNConnection(self.db_name, self.db_host, self.db_port, self.db_userName, self.db_password, self.driverSql.nameToAlias(self.driverName()), name) self.connAux[name] = connection return connection<|docstring|>Select another connection which can be not the default one. Allow you to select a connection.<|endoftext|>
03fb3742da06223be389a0938a562bc52ec5bcfaa1ea003b596bef6080730a34
def dictDatabases(self) -> Dict[(str, 'IConnection')]: 'Return dict with own database connections.' return self.connAux
Return dict with own database connections.
pineboolib/application/database/pnconnection.py
dictDatabases
deavid/pineboo
2
python
def dictDatabases(self) -> Dict[(str, 'IConnection')]: return self.connAux
def dictDatabases(self) -> Dict[(str, 'IConnection')]: return self.connAux<|docstring|>Return dict with own database connections.<|endoftext|>
5d358f2b84ef2f463efd97f216a6bd1827ec120e0e4d945831da394e21cd4ec4
def removeConn(self, name='default') -> bool: 'Delete a connection specified by name.' try: conn_ = self.useConn(name).conn if (conn_ is not None): conn_.close() del self.connAux[name] except Exception: pass return True
Delete a connection specified by name.
pineboolib/application/database/pnconnection.py
removeConn
deavid/pineboo
2
python
def removeConn(self, name='default') -> bool: try: conn_ = self.useConn(name).conn if (conn_ is not None): conn_.close() del self.connAux[name] except Exception: pass return True
def removeConn(self, name='default') -> bool: try: conn_ = self.useConn(name).conn if (conn_ is not None): conn_.close() del self.connAux[name] except Exception: pass return True<|docstring|>Delete a connection specified by name.<|endoftext|>