def cublasZsyr(handle, uplo, n, alpha, x, incx, A, lda):
    """
    Rank-1 operation on complex symmetric matrix.
    """

    status = _libcublas.cublasZsyr_v2(handle,
                                      _CUBLAS_FILL_MODE[uplo], n,
                                      ctypes.byref(cuda.cuDoubleComplex(alpha.real,
                                                                        alpha.imag)),
                                      int(x), incx, int(A), lda)
    cublasCheckStatus(status)
def cublasSsyr2(handle, uplo, n, alpha, x, incx, y, incy, A, lda):
    """
    Rank-2 operation on real symmetric matrix.
    """

    status = _libcublas.cublasSsyr2_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo], n,
                                       ctypes.byref(ctypes.c_float(alpha)),
                                       int(x), incx, int(y), incy,
                                       int(A), lda)
    cublasCheckStatus(status)


def cublasDsyr2(handle, uplo, n, alpha, x, incx, y, incy, A, lda):
    """
    Rank-2 operation on real symmetric matrix.
    """

    status = _libcublas.cublasDsyr2_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo], n,
                                       ctypes.byref(ctypes.c_double(alpha)),
                                       int(x), incx, int(y), incy,
                                       int(A), lda)
    cublasCheckStatus(status)


def cublasStbmv(handle, uplo, trans, diag, n, k, A, lda, x, incx):
    """
    Matrix-vector product for real triangular-banded matrix.
    """

    status = _libcublas.cublasStbmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, k, int(A), lda, int(x), incx)
    cublasCheckStatus(status)


def cublasStpmv(handle, uplo, trans, diag, n, AP, x, incx):
    """
    Matrix-vector product for real triangular-packed matrix.
    """

    status = _libcublas.cublasStpmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)


def cublasCtpmv(handle, uplo, trans, diag, n, AP, x, incx):
    """
    Matrix-vector product for complex triangular-packed matrix.
    """

    status = _libcublas.cublasCtpmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)


def cublasDtpmv(handle, uplo, trans, diag, n, AP, x, incx):
    """
    Matrix-vector product for real triangular-packed matrix.
    """

    status = _libcublas.cublasDtpmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)


def cublasZtpmv(handle, uplo, trans, diag, n, AP, x, incx):
    """
    Matrix-vector product for complex triangular-packed matrix.
    """

    status = _libcublas.cublasZtpmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)


def cublasStpsv(handle, uplo, trans, diag, n, AP, x, incx):
    """
    Solve real triangular-packed system with one right-hand side.
    """

    status = _libcublas.cublasStpsv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)


def cublasDtpsv(handle, uplo, trans, diag, n, AP, x, incx):
    """
    Solve real triangular-packed system with one right-hand side.
    """

    status = _libcublas.cublasDtpsv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)


def cublasCtpsv(handle, uplo, trans, diag, n, AP, x, incx):
    """
    Solve complex triangular-packed system with one right-hand side.
    """

    status = _libcublas.cublasCtpsv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
def cublasZtpsv(handle, uplo, trans, diag, n, AP, x, incx):
    """
    Solve complex triangular-packed system with one right-hand side.
    """

    status = _libcublas.cublasZtpsv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
def cublasCtrmv(handle, uplo, trans, diag, n, A, lda, x, incx):
    """
    Matrix-vector product for complex triangular matrix.
    """

    status = _libcublas.cublasCtrmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(A), lda, int(x), incx)
    cublasCheckStatus(status)
def cublasDtrmv(handle, uplo, trans, diag, n, A, lda, x, incx):
    """
    Matrix-vector product for real triangular matrix.
    """

    status = _libcublas.cublasDtrmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(A), lda, int(x), incx)
    cublasCheckStatus(status)
def cublasChpmv(handle, uplo, n, alpha, AP, x, incx, beta, y, incy):
    """
    Matrix-vector product for Hermitian-packed matrix.
    """

    status = _libcublas.cublasChpmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo], n,
                                       ctypes.byref(cuda.cuFloatComplex(alpha.real,
                                                                        alpha.imag)),
                                       int(AP), int(x), incx,
                                       ctypes.byref(cuda.cuFloatComplex(beta.real,
                                                                        beta.imag)),
                                       int(y), incy)
    cublasCheckStatus(status)


def cublasZhpmv(handle, uplo, n, alpha, AP, x, incx, beta, y, incy):
    """
    Matrix-vector product for Hermitian-packed matrix.
    """

    status = _libcublas.cublasZhpmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo], n,
                                       ctypes.byref(cuda.cuDoubleComplex(alpha.real,
                                                                         alpha.imag)),
                                       int(AP), int(x), incx,
                                       ctypes.byref(cuda.cuDoubleComplex(beta.real,
                                                                         beta.imag)),
                                       int(y), incy)
    cublasCheckStatus(status)
def cublasCher(handle, uplo, n, alpha, x, incx, A, lda):
    """
    Rank-1 operation on Hermitian matrix.
    """

    # cuBLAS v2 expects a pointer to the (real) scalar alpha.
    status = _libcublas.cublasCher_v2(handle,
                                      _CUBLAS_FILL_MODE[uplo], n,
                                      ctypes.byref(ctypes.c_float(alpha)),
                                      int(x), incx, int(A), lda)
    cublasCheckStatus(status)


def cublasZher(handle, uplo, n, alpha, x, incx, A, lda):
    """
    Rank-1 operation on Hermitian matrix.
    """

    # cuBLAS v2 expects a pointer to the (real) scalar alpha.
    status = _libcublas.cublasZher_v2(handle,
                                      _CUBLAS_FILL_MODE[uplo], n,
                                      ctypes.byref(ctypes.c_double(alpha)),
                                      int(x), incx, int(A), lda)
    cublasCheckStatus(status)
def cublasChpr(handle, uplo, n, alpha, x, incx, AP):
    """
    Rank-1 operation on Hermitian-packed matrix.
    """

    status = _libcublas.cublasChpr_v2(handle,
                                      _CUBLAS_FILL_MODE[uplo], n,
                                      ctypes.byref(ctypes.c_float(alpha)),
                                      int(x), incx, int(AP))
    cublasCheckStatus(status)


def cublasZhpr(handle, uplo, n, alpha, x, incx, AP):
    """
    Rank-1 operation on Hermitian-packed matrix.
    """

    status = _libcublas.cublasZhpr_v2(handle,
                                      _CUBLAS_FILL_MODE[uplo], n,
                                      ctypes.byref(ctypes.c_double(alpha)),
                                      int(x), incx, int(AP))
    cublasCheckStatus(status)
def cublasChpr2(handle, uplo, n, alpha, x, incx, y, incy, AP):
    """
    Rank-2 operation on Hermitian-packed matrix.
    """

    status = _libcublas.cublasChpr2_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo], n,
                                       ctypes.byref(cuda.cuFloatComplex(alpha.real,
                                                                        alpha.imag)),
                                       int(x), incx, int(y), incy, int(AP))
    cublasCheckStatus(status)


def cublasZhpr2(handle, uplo, n, alpha, x, incx, y, incy, AP):
    """
    Rank-2 operation on Hermitian-packed matrix.
    """

    status = _libcublas.cublasZhpr2_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo], n,
                                       ctypes.byref(cuda.cuDoubleComplex(alpha.real,
                                                                         alpha.imag)),
                                       int(x), incx, int(y), incy, int(AP))
    cublasCheckStatus(status)
def cublasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb,
                beta, C, ldc):
    """
    Matrix-matrix product for real general matrix.
    """

    status = _libcublas.cublasSgemm_v2(handle,
                                       _CUBLAS_OP[transa],
                                       _CUBLAS_OP[transb],
                                       m, n, k,
                                       ctypes.byref(ctypes.c_float(alpha)),
                                       int(A), lda, int(B), ldb,
                                       ctypes.byref(ctypes.c_float(beta)),
                                       int(C), ldc)
    cublasCheckStatus(status)
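# A minimal usage sketch for the cublasSgemm wrapper above, assuming this
# module also exposes cublasCreate/cublasDestroy and that a CUDA context has
# been created via pycuda.autoinit. cuBLAS expects column-major storage, so
# Fortran-ordered arrays are used to avoid an explicit transpose.
import numpy as np
import pycuda.autoinit
import pycuda.gpuarray as gpuarray

a = np.asarray(np.random.rand(4, 2), np.float32, order='F')
b = np.asarray(np.random.rand(2, 3), np.float32, order='F')
c = np.zeros((4, 3), np.float32, order='F')
a_gpu, b_gpu, c_gpu = (gpuarray.to_gpu(m) for m in (a, b, c))

h = cublasCreate()
# C = 1.0 * A.dot(B) + 0.0 * C with m=4, n=3, k=2.
cublasSgemm(h, 'n', 'n', 4, 3, 2,
            1.0, a_gpu.gpudata, 4,
            b_gpu.gpudata, 2,
            0.0, c_gpu.gpudata, 4)
cublasDestroy(h)
assert np.allclose(c_gpu.get(), a.dot(b))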
def cublasDgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb,
                beta, C, ldc):
    """
    Matrix-matrix product for real general matrix.
    """

    status = _libcublas.cublasDgemm_v2(handle,
                                       _CUBLAS_OP[transa],
                                       _CUBLAS_OP[transb],
                                       m, n, k,
                                       ctypes.byref(ctypes.c_double(alpha)),
                                       int(A), lda, int(B), ldb,
                                       ctypes.byref(ctypes.c_double(beta)),
                                       int(C), ldc)
    cublasCheckStatus(status)


def cublasZgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb,
                beta, C, ldc):
    """
    Matrix-matrix product for complex general matrix.
    """

    status = _libcublas.cublasZgemm_v2(handle,
                                       _CUBLAS_OP[transa],
                                       _CUBLAS_OP[transb],
                                       m, n, k,
                                       ctypes.byref(cuda.cuDoubleComplex(alpha.real,
                                                                         alpha.imag)),
                                       int(A), lda, int(B), ldb,
                                       ctypes.byref(cuda.cuDoubleComplex(beta.real,
                                                                         beta.imag)),
                                       int(C), ldc)
    cublasCheckStatus(status)
def cublasSsymm(handle, side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc):
    """
    Matrix-matrix product for real symmetric matrix.
    """

    status = _libcublas.cublasSsymm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       m, n,
                                       ctypes.byref(ctypes.c_float(alpha)),
                                       int(A), lda, int(B), ldb,
                                       ctypes.byref(ctypes.c_float(beta)),
                                       int(C), ldc)
    cublasCheckStatus(status)
def cublasDsymm(handle, side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc):
    """
    Matrix-matrix product for real symmetric matrix.
    """

    status = _libcublas.cublasDsymm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       m, n,
                                       ctypes.byref(ctypes.c_double(alpha)),
                                       int(A), lda, int(B), ldb,
                                       ctypes.byref(ctypes.c_double(beta)),
                                       int(C), ldc)
    cublasCheckStatus(status)


def cublasCsymm(handle, side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc):
    """
    Matrix-matrix product for complex symmetric matrix.
    """

    status = _libcublas.cublasCsymm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       m, n,
                                       ctypes.byref(cuda.cuFloatComplex(alpha.real,
                                                                        alpha.imag)),
                                       int(A), lda, int(B), ldb,
                                       ctypes.byref(cuda.cuFloatComplex(beta.real,
                                                                        beta.imag)),
                                       int(C), ldc)
    cublasCheckStatus(status)


def cublasSsyrk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
    """
    Rank-k operation on real symmetric matrix.
    """

    status = _libcublas.cublasSsyrk_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       n, k,
                                       ctypes.byref(ctypes.c_float(alpha)),
                                       int(A), lda,
                                       ctypes.byref(ctypes.c_float(beta)),
                                       int(C), ldc)
    cublasCheckStatus(status)
def cublasDsyrk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
    """
    Rank-k operation on real symmetric matrix.
    """

    # alpha and beta are real doubles for Dsyrk, not single-precision
    # complex values.
    status = _libcublas.cublasDsyrk_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       n, k,
                                       ctypes.byref(ctypes.c_double(alpha)),
                                       int(A), lda,
                                       ctypes.byref(ctypes.c_double(beta)),
                                       int(C), ldc)
    cublasCheckStatus(status)
def cublasZsyrk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
    """
    Rank-k operation on complex symmetric matrix.
    """

    status = _libcublas.cublasZsyrk_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       n, k,
                                       ctypes.byref(cuda.cuDoubleComplex(alpha.real,
                                                                         alpha.imag)),
                                       int(A), lda,
                                       ctypes.byref(cuda.cuDoubleComplex(beta.real,
                                                                         beta.imag)),
                                       int(C), ldc)
    cublasCheckStatus(status)


def cublasSsyr2k(handle, uplo, trans, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
    """
    Rank-2k operation on real symmetric matrix.
    """

    status = _libcublas.cublasSsyr2k_v2(handle,
                                        _CUBLAS_FILL_MODE[uplo],
                                        _CUBLAS_OP[trans],
                                        n, k,
                                        ctypes.byref(ctypes.c_float(alpha)),
                                        int(A), lda, int(B), ldb,
                                        ctypes.byref(ctypes.c_float(beta)),
                                        int(C), ldc)
    cublasCheckStatus(status)


def cublasDsyr2k(handle, uplo, trans, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
    """
    Rank-2k operation on real symmetric matrix.
    """

    status = _libcublas.cublasDsyr2k_v2(handle,
                                        _CUBLAS_FILL_MODE[uplo],
                                        _CUBLAS_OP[trans],
                                        n, k,
                                        ctypes.byref(ctypes.c_double(alpha)),
                                        int(A), lda, int(B), ldb,
                                        ctypes.byref(ctypes.c_double(beta)),
                                        int(C), ldc)
    cublasCheckStatus(status)


def cublasStrmm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb,
                C, ldc):
    """
    Matrix-matrix product for real triangular matrix.
    """

    status = _libcublas.cublasStrmm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       m, n,
                                       ctypes.byref(ctypes.c_float(alpha)),
                                       int(A), lda, int(B), ldb,
                                       int(C), ldc)
    cublasCheckStatus(status)


def cublasZtrmm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb,
                C, ldc):
    """
    Matrix-matrix product for complex triangular matrix.
    """

    status = _libcublas.cublasZtrmm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       m, n,
                                       ctypes.byref(cuda.cuDoubleComplex(alpha.real,
                                                                         alpha.imag)),
                                       int(A), lda, int(B), ldb,
                                       int(C), ldc)
    cublasCheckStatus(status)


def cublasStrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb):
    """
    Solve a real triangular system with multiple right-hand sides.
    """

    status = _libcublas.cublasStrsm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       m, n,
                                       ctypes.byref(ctypes.c_float(alpha)),
                                       int(A), lda, int(B), ldb)
    cublasCheckStatus(status)


def cublasDtrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb):
    """
    Solve a real triangular system with multiple right-hand sides.
    """

    status = _libcublas.cublasDtrsm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       m, n,
                                       ctypes.byref(ctypes.c_double(alpha)),
                                       int(A), lda, int(B), ldb)
    cublasCheckStatus(status)
def cublasZtrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb):
    """
    Solve complex triangular system with multiple right-hand sides.
    """

    status = _libcublas.cublasZtrsm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       m, n,
                                       ctypes.byref(cuda.cuDoubleComplex(alpha.real,
                                                                         alpha.imag)),
                                       int(A), lda, int(B), ldb)
    cublasCheckStatus(status)
def cublasZherk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
    """
    Rank-k operation on Hermitian matrix.
    """

    status = _libcublas.cublasZherk_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       n, k,
                                       ctypes.byref(ctypes.c_double(alpha)),
                                       int(A), lda,
                                       ctypes.byref(ctypes.c_double(beta)),
                                       int(C), ldc)
    cublasCheckStatus(status)
def cublasCher2k(handle, uplo, trans, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
    """
    Rank-2k operation on Hermitian matrix.
    """

    # For her2k, alpha is complex but beta is real.
    status = _libcublas.cublasCher2k_v2(handle,
                                        _CUBLAS_FILL_MODE[uplo],
                                        _CUBLAS_OP[trans],
                                        n, k,
                                        ctypes.byref(cuda.cuFloatComplex(alpha.real,
                                                                         alpha.imag)),
                                        int(A), lda, int(B), ldb,
                                        ctypes.byref(ctypes.c_float(beta)),
                                        int(C), ldc)
    cublasCheckStatus(status)
def cublasSdgmm(handle, mode, m, n, A, lda, x, incx, C, ldc):
    """
    Matrix-diagonal matrix product for real general matrix.
    """

    status = _libcublas.cublasSdgmm(handle,
                                    _CUBLAS_SIDE_MODE[mode],
                                    m, n,
                                    int(A), lda, int(x), incx,
                                    int(C), ldc)
    cublasCheckStatus(status)
def load_EROS_lc(filename='lm0010n22323.time'):
    """
    Read an EROS light curve and return its data.

    Parameters
    ----------
    filename : str, optional
        A light-curve filename.

    Returns
    -------
    dates : numpy.ndarray
        An array of dates.
    magnitudes : numpy.ndarray
        An array of magnitudes.
    errors : numpy.ndarray
        An array of magnitude errors.
    """

    module_path = dirname(__file__)
    file_path = join(module_path, 'lightcurves', filename)

    data = np.loadtxt(file_path)
    date = data[:, 0]
    mag = data[:, 1]
    err = data[:, 2]

    return date, mag, err
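# A short usage sketch: load the bundled sample light curve (the default
# 'lm0010n22323.time' shipped with the package) and inspect a few values.
dates, mags, errs = load_EROS_lc()
print(len(dates), dates[0], mags[0], errs[0])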
def load_rf_model():
    """
    Return the UPSILoN random forests classifier.

    The classifier is trained using OGLE and EROS
    periodic variables (Kim et al. 2015).

    Returns
    -------
    clf : sklearn.ensemble.RandomForestClassifier
        The UPSILoN random forests classifier.
    """

    import gzip
    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    module_path = dirname(__file__)
    file_path = join(module_path, 'models/rf.model.sub.github.gz')

    # For Python 3.
    if sys.version_info.major >= 3:
        clf = pickle.load(gzip.open(file_path, 'rb'), encoding='latin1')
    # For Python 2.
    else:
        clf = pickle.load(gzip.open(file_path, 'rb'))

    return clf
def sample_dropout_mask(x, dropout_probability=.5, columns=None, stream=None,
                        target=None, dropout_mask=None,
                        dropout_prob_array=None):
    """ Samples a dropout mask and applies it in place"""

    assert x.flags.c_contiguous

    if columns is not None:
        assert len(columns) == 2
        x_tmp = x
        x = extract_columns(x, columns[0], columns[1])

    shape = x.shape

    if dropout_prob_array is None:
        dropout_prob_array = gpuarray.empty(shape, x.dtype,
                                            allocator=memory_pool.allocate)
    sampler.fill_uniform(dropout_prob_array, stream)

    if dropout_mask is None:
        dropout_mask = gpuarray.empty(shape, np.int8,
                                      allocator=memory_pool.allocate)

    if target is None:
        target = x

    all_kernels['sample_dropout_mask'](
        x, target, dropout_mask, dropout_prob_array,
        np.float32(dropout_probability))

    if columns is not None:
        insert_columns(x, x_tmp, columns[0])

    return dropout_mask
def reads(s, filename=None, loader=None, implicit_tuple=True,
          allow_errors=False):
    """Load but don't evaluate a GCL expression from a string."""
    return ast.reads(s,
                     filename=filename or '<input>',
                     loader=loader or default_loader,
                     implicit_tuple=implicit_tuple,
                     allow_errors=allow_errors)


def read(filename, loader=None, implicit_tuple=True, allow_errors=False):
    """Load but don't evaluate a GCL expression from a file."""
    with open(filename, 'r') as f:
        return reads(f.read(),
                     filename=filename,
                     loader=loader,
                     implicit_tuple=implicit_tuple,
                     allow_errors=allow_errors)
def loads(s, filename=None, loader=None, implicit_tuple=True, env={},
          schema=None):
    """Load and evaluate a GCL expression from a string."""
    ast = reads(s, filename=filename, loader=loader,
                implicit_tuple=implicit_tuple)
    if not isinstance(env, framework.Environment):
        # For backwards compatibility we accept an Environment object.
        # Otherwise assume it's a dict whose bindings will add/overwrite
        # the default bindings.
        env = framework.Environment(dict(_default_bindings, **env))
    obj = framework.eval(ast, env)
    return mod_schema.validate(obj, schema)


def load(filename, loader=None, implicit_tuple=True, env={}, schema=None):
    """Load and evaluate a GCL expression from a file."""
    with open(filename, 'r') as f:
        return loads(f.read(),
                     filename=filename,
                     loader=loader,
                     implicit_tuple=implicit_tuple,
                     env=env,
                     schema=schema)
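# A brief sketch of loads() on an inline GCL snippet; the exact expression
# syntax is an assumption here, but keys can reference earlier keys in the
# same implicit tuple.
cfg = loads('x = 1; y = x + 1')
print(cfg['y'])  # expected: 2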
def linear_scheduler_up(init_value, target_value, duration):
    """ Increases linearly and then stays flat """

    value = init_value
    t = 0
    while True:
        yield value
        t += 1
        if t < duration:
            value = init_value + t * (target_value - init_value) / duration
        else:
            value = target_value


def linear_scheduler_up_down(init_value, target_value, final_value,
                             duration_up, t_decrease, duration_down):
    """ Increases linearly to target_value, stays at target_value until
    t_decrease and then decreases linearly """

    value = init_value
    t = 0
    while True:
        yield value
        t += 1
        if t < duration_up:
            value = init_value + t * (target_value - init_value) / \
                float(duration_up)
        elif t > t_decrease:
            value = target_value - (t - t_decrease) * \
                (target_value - final_value) / \
                float(duration_down)
        else:
            value = target_value
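# A quick sketch of how these generator-based schedulers are consumed, e.g.
# for a value that ramps from 0 to 1 over 5 steps and then stays flat.
lr = linear_scheduler_up(0.0, 1.0, 5)
print([round(next(lr), 2) for _ in range(8)])
# expected: [0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.0, 1.0]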
def load(stream, overrides=None, **kwargs):
    """
    Loads a YAML configuration from a string or file-like object.

    Parameters
    ----------
    stream : str or object
        Either a string containing valid YAML or a file-like object
        supporting the .read() interface.
    overrides : dict, optional
        A dictionary containing overrides to apply. The location of
        the override is specified in the key as a dot-delimited path
        to the desired parameter, e.g. "model.corruptor.corruption_level".

    Returns
    -------
    graph : dict or object
        The dictionary or object (if the top-level element specifies a
        Python object to instantiate).

    Notes
    -----
    Other keyword arguments are passed on to `yaml.load`.
    """
    global is_initialized
    if not is_initialized:
        initialize()

    if isinstance(stream, basestring):
        string = stream
    else:
        string = '\n'.join(stream.readlines())

    # processed_string = preprocess(string)
    proxy_graph = yaml.load(string, **kwargs)

    from . import init
    init_dict = proxy_graph.get('init', {})
    init(**init_dict)

    if overrides is not None:
        handle_overrides(proxy_graph, overrides)
    return instantiate_all(proxy_graph)
def load_path(path, overrides=None, **kwargs):
    """
    Convenience function for loading a YAML configuration from a file.

    Parameters
    ----------
    path : str
        The path to the file to load on disk.
    overrides : dict, optional
        A dictionary containing overrides to apply. The location of
        the override is specified in the key as a dot-delimited path
        to the desired parameter, e.g. "model.corruptor.corruption_level".

    Returns
    -------
    graph : dict or object
        The dictionary or object (if the top-level element specifies a
        Python object to instantiate).

    Notes
    -----
    Other keyword arguments are passed on to `yaml.load`.
    """
    f = open(path, 'r')
    content = ''.join(f.readlines())
    f.close()

    if not isinstance(content, str):
        raise AssertionError("Expected content to be of type str but it is "
                             + str(type(content)))

    return load(content, overrides=overrides, **kwargs)
def handle_overrides(graph, overrides):
    """
    Handle any overrides for this model configuration.

    Parameters
    ----------
    graph : dict or object
        A dictionary (or an ObjectProxy) containing the object graph
        loaded from a YAML file.
    overrides : dict
        A dictionary containing overrides to apply. The location of
        the override is specified in the key as a dot-delimited path
        to the desired parameter, e.g. "model.corruptor.corruption_level".
    """
    for key in overrides:
        levels = key.split('.')
        part = graph
        for lvl in levels[:-1]:
            try:
                part = part[lvl]
            except KeyError:
                raise KeyError("'%s' override failed at '%s'" % (key, lvl))
        try:
            part[levels[-1]] = overrides[key]
        except KeyError:
            raise KeyError("'%s' override failed at '%s'"
                           % (key, levels[-1]))
def instantiate_all(graph):
    """
    Instantiate all ObjectProxy objects in a nested hierarchy.

    Parameters
    ----------
    graph : dict or object
        A dictionary (or an ObjectProxy) containing the object graph
        loaded from a YAML file.

    Returns
    -------
    graph : dict or object
        The dictionary or object resulting after the recursive
        instantiation.
    """
    def should_instantiate(obj):
        classes = [ObjectProxy, dict, list]
        return True in [isinstance(obj, cls) for cls in classes]

    if not isinstance(graph, list):
        for key in graph:
            if should_instantiate(graph[key]):
                graph[key] = instantiate_all(graph[key])
        if hasattr(graph, 'keys'):
            # Copy the keys so the mapping can be mutated while iterating.
            for key in list(graph.keys()):
                if should_instantiate(key):
                    new_key = instantiate_all(key)
                    graph[new_key] = graph[key]
                    del graph[key]

    if isinstance(graph, ObjectProxy):
        graph = graph.instantiate()

    if isinstance(graph, list):
        for i, elem in enumerate(graph):
            if should_instantiate(elem):
                graph[i] = instantiate_all(elem)

    return graph
def multi_constructor(loader, tag_suffix, node):
    """
    Constructor function passed to PyYAML telling it how to construct
    objects from argument descriptions. See PyYAML documentation for
    details on the call signature.
    """
    yaml_src = yaml.serialize(node)
    mapping = loader.construct_mapping(node)
    if '.' not in tag_suffix:
        classname = tag_suffix
        rval = ObjectProxy(classname, mapping, yaml_src)
    else:
        classname = try_to_import(tag_suffix)
        rval = ObjectProxy(classname, mapping, yaml_src)
    return rval


def multi_constructor_pkl(loader, tag_suffix, node):
    """
    Constructor function passed to PyYAML telling it how to load
    objects from paths to .pkl files. See PyYAML documentation for
    details on the call signature.
    """
    mapping = loader.construct_yaml_str(node)
    if tag_suffix != "" and tag_suffix != u"":
        raise AssertionError('Expected tag_suffix to be "" but it is "'
                             + tag_suffix + '"')

    rval = ObjectProxy(None, {}, yaml.serialize(node))
    rval.instance = serial.load(mapping)
    return rval
def initialize():
    """
    Initialize the configuration system by installing YAML handlers.
    Automatically done on first call to load() specified in this file.
    """
    global is_initialized

    # Add the custom multi-constructors.
    yaml.add_multi_constructor('!obj:', multi_constructor)
    yaml.add_multi_constructor('!pkl:', multi_constructor_pkl)
    yaml.add_multi_constructor('!import:', multi_constructor_import)
    yaml.add_multi_constructor('!include:', multi_constructor_include)

    def import_constructor(loader, node):
        value = loader.construct_scalar(node)
        return try_to_import(value)

    yaml.add_constructor('!import', import_constructor)
    yaml.add_implicit_resolver(
        '!import',
        re.compile(r'(?:[a-zA-Z_][\w_]+\.)+[a-zA-Z_][\w_]+')
    )

    is_initialized = True
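# A minimal sketch of the !obj: tag these handlers install: the tag suffix
# names an importable class and the YAML mapping supplies its keyword
# arguments. argparse.Namespace stands in for a real model class here, and
# this assumes Python 2 semantics (load() above uses basestring) and that
# the package-level init() tolerates an empty init section.
graph = load("model: !obj:argparse.Namespace {verbose: True}")
print(graph['model'].verbose)  # expected: True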
def instantiate(self):
    """
    Instantiate this object with the supplied parameters in `self.kwds`,
    or if already instantiated, return the cached instance.
    """
    if self.instance is None:
        self.instance = checked_call(self.cls, self.kwds)
    try:
        self.instance.yaml_src = self.yaml_src
    except AttributeError:
        pass
    return self.instance
def feed_forward(self, input_data, prediction=False):
    """Propagate forward through the layer.

    **Parameters:**

    input_data : ``GPUArray``
        Input data to compute activations for.

    prediction : bool, optional
        Whether to use prediction model. Only relevant when using
        dropout. If true, then weights are multiplied by 1 - dropout
        if the layer uses dropout.

    **Returns:**

    activations : ``GPUArray``
        The activations of the output units.
    """
    if input_data.shape[1] != self.W.shape[0]:
        raise ValueError('Number of outputs from previous layer (%d) '
                         'does not match number of inputs to this layer (%d)'
                         % (input_data.shape[1], self.W.shape[0]))

    activations = linalg.dot(input_data, self.W)
    activations = add_vec_to_mat(activations, self.b, inplace=True)

    return activations
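# The computation above is a plain affine map; a CPU sketch of the same
# arithmetic in numpy, with hypothetical shapes, for reference:
import numpy as np
X = np.random.rand(8, 4)    # batch of 8 inputs with 4 features
W = np.random.rand(4, 3)    # weights of a 4-in, 3-out layer
b = np.random.rand(3)       # biases
activations = X.dot(W) + b  # what feed_forward computes on the GPU
print(activations.shape)    # (8, 3)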
def fasper(x, y, ofac, hifac, n_threads, MACC=4):
    """
    Given abscissas x (which need not be equally spaced) and ordinates y,
    and given a desired oversampling factor ofac (a typical value being 4
    or larger), this routine creates an array wk1 with a sequence of nout
    increasing frequencies (not angular frequencies) up to hifac times the
    "average" Nyquist frequency, and creates an array wk2 with the values
    of the Lomb normalized periodogram at those frequencies. The arrays x
    and y are not altered. This routine also returns jmax such that
    wk2(jmax) is the maximum element in wk2, and prob, an estimate of the
    significance of that maximum against the hypothesis of random noise. A
    small value of prob indicates that a significant periodic signal is
    present.

    Reference:
        Press, W. H. & Rybicki, G. B. 1989
        ApJ vol. 338, p. 277-280.
        Fast algorithm for spectral analysis of unevenly sampled data
        (1989ApJ...338..277P)

    Arguments:
        X : Abscissas array, (e.g. an array of times).
        Y : Ordinates array, (e.g. corresponding counts).
        Ofac : Oversampling factor.
        Hifac : Hifac * "average" Nyquist frequency = highest frequency
            for which values of the Lomb normalized periodogram will be
            calculated.
        n_threads : Number of threads to use.
        MACC : Number of interpolation points per 1/4 cycle of highest
            frequency.

    Returns:
        Wk1 : An array of Lomb periodogram frequencies.
        Wk2 : An array of corresponding values of the Lomb periodogram.
        Nout : Wk1 & Wk2 dimensions (number of calculated frequencies).
        Jmax : The array index corresponding to the MAX( Wk2 ).
        Prob : False alarm probability of the largest periodogram value.

    History:
        02/23/2009, v1.0, MF
        Translation of IDL code (orig. Numerical Recipes)
    """
    # Check dimensions of input arrays.
    n = int(len(x))
    if n != len(y):
        print('Incompatible arrays.')
        return

    nout = int(0.5 * ofac * hifac * n)
    nfreqt = int(ofac * hifac * n * MACC)  # Size the FFT as next power
    nfreq = 64                             # of 2 above nfreqt.
    while nfreq < nfreqt:
        nfreq = 2 * nfreq
    ndim = int(2 * nfreq)

    # Compute the mean, variance and range of the data.
    ave = y.mean()
    # Sample variance because the divisor is N-1.
    var = ((y - y.mean()) ** 2).sum() / (len(y) - 1)
    xmin = x.min()
    xmax = x.max()
    xdif = xmax - xmin

    # Extirpolate the data into the workspaces.
    if is_pyfftw:
        wk1 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.
        wk2 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.
    else:
        wk1 = zeros(ndim, dtype='complex')
        wk2 = zeros(ndim, dtype='complex')

    fac = ndim / (xdif * ofac)
    fndim = ndim
    ck = ((x - xmin) * fac) % fndim
    ckk = (2.0 * ck) % fndim

    for j in range(0, n):
        __spread__(y[j] - ave, wk1, ndim, ck[j], MACC)
        __spread__(1.0, wk2, ndim, ckk[j], MACC)

    # Take the Fast Fourier Transforms.
    if is_pyfftw:
        fft_wk1 = pyfftw.builders.ifft(wk1, planner_effort='FFTW_ESTIMATE',
                                       threads=n_threads)
        wk1 = fft_wk1() * len(wk1)
        fft_wk2 = pyfftw.builders.ifft(wk2, planner_effort='FFTW_ESTIMATE',
                                       threads=n_threads)
        wk2 = fft_wk2() * len(wk2)
    else:
        wk1 = ifft(wk1) * len(wk1)
        wk2 = ifft(wk2) * len(wk2)

    wk1 = wk1[1:nout + 1]
    wk2 = wk2[1:nout + 1]
    rwk1 = wk1.real
    iwk1 = wk1.imag
    rwk2 = wk2.real
    iwk2 = wk2.imag

    df = 1.0 / (xdif * ofac)

    # Compute the Lomb value for each frequency.
    hypo2 = 2.0 * abs(wk2)
    hc2wt = rwk2 / hypo2
    hs2wt = iwk2 / hypo2
    cwt = sqrt(0.5 + hc2wt)
    swt = sign(hs2wt) * (sqrt(0.5 - hc2wt))
    den = 0.5 * n + hc2wt * rwk2 + hs2wt * iwk2
    cterm = (cwt * rwk1 + swt * iwk1) ** 2. / den
    sterm = (cwt * iwk1 - swt * rwk1) ** 2. / (n - den)

    wk1 = df * (arange(nout, dtype='float') + 1.)
    wk2 = (cterm + sterm) / (2.0 * var)
    pmax = wk2.max()
    jmax = wk2.argmax()

    # Estimate significance of the largest peak value.
    expy = exp(-pmax)
    effm = 2.0 * nout / ofac
    prob = effm * expy

    if prob > 0.01:
        prob = 1.0 - (1.0 - expy) ** effm

    return wk1, wk2, nout, jmax, prob
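# A usage sketch for fasper on an unevenly sampled sinusoid; the periodogram
# peak should land near the true 0.1 cycles/day frequency.
import numpy as np
t = np.sort(np.random.uniform(0., 100., 300))
y = np.sin(2. * np.pi * 0.1 * t) + 0.1 * np.random.randn(300)
wk1, wk2, nout, jmax, prob = fasper(t, y, 6., 1., 1)
print(wk1[jmax], prob)  # peak frequency close to 0.1, small prob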
def get_no_validate(self, key):
    """Return an item without validating the schema."""
    x, env = self.get_thunk_env(key)

    # Check if this is a Thunk that needs to be lazily evaluated before we
    # return it.
    if isinstance(x, framework.Thunk):
        x = framework.eval(x, env)

    return x


def env(self, current_scope):
    """Return an environment that will look up in current_scope for keys in
    this tuple, and the parent env otherwise.
    """
    return self.__env_cache.get(
        current_scope.ident,
        framework.Environment, current_scope,
        names=self.keys(),
        parent=framework.Environment({'self': current_scope},
                                     parent=self.__parent_env))


def get_thunk_env(self, k):
    """Return the thunk AND environment for validating it in for the given
    key.

    There might be different envs in case the thunk comes from a different
    (composed) tuple. If the thunk needs its environment bound on retrieval,
    that will be done here.
    """
    if k not in self.__items:
        raise exceptions.EvaluationError('Unknown key: %r in tuple %r'
                                         % (k, self))
    x = self.__items[k]

    env = self.env(self)

    # Bind this to the tuple's parent environment.
    if isinstance(x, framework.BindableThunk):
        return x.bind(self.__parent_env), env
    return x, env


def attach_schema(self, schem):
    """Add a tuple schema to this object (externally imposed)."""
    self.tuple_schema = schema.AndSchema.make(self.tuple_schema, schem)


def get_schema_spec(self, key):
    """Return the evaluated schema expression from a subkey."""
    member_node = self._ast_node.member.get(key, None)
    if not member_node:
        return schema.AnySchema()

    s = framework.eval(member_node.member_schema, self.env(self))
    if not isinstance(s, schema.Schema):
        raise ValueError('Node %r with schema node %r should evaluate '
                         'to Schema, got %r'
                         % (member_node, member_node.member_schema, s))
    return s


def get_required_fields(self):
    """Return the names of fields that are required according to the
    schema."""
    return [m.name for m in self._ast_node.members
            if m.member_schema.required]


def get_member_node(self, key):
    """Return the AST node for the given member, from the first tuple that
    serves it."""
    for tup, _ in self.lookups:
        if key in tup:
            return tup.get_member_node(key)
    raise RuntimeError('Key not found in composite tuple: %r' % key)


def exportable_keys(self):
    """Return a list of keys that are exportable from this tuple.

    Returns all keys that are not private in any of the tuples.
    """
    keys = collections.defaultdict(list)
    for tup in self._tuples:
        for key, private in tup._keys_and_privacy().items():
            keys[key].append(private)
    return [k for k, ps in keys.items() if not any(ps)]


def resolve(self, current_file, rel_path):
    """Search the filesystem."""
    search_path = [path.dirname(current_file)] + self.search_path

    target_path = None
    for search in search_path:
        if self.exists(path.join(search, rel_path)):
            target_path = path.normpath(path.join(search, rel_path))
            break

    if not target_path:
        raise exceptions.EvaluationError('No such file: %r, searched %s'
                                         % (rel_path,
                                            ':'.join(search_path)))

    return target_path, path.abspath(target_path)
def resolve(self, current_file, rel_path):
    """Search the fake filesystem."""
    p = path.join(path.dirname(current_file), rel_path)
    if p not in self.file_dict:
        raise RuntimeError('No such fake file: %r' % p)
    return p, p
def check_call_arguments(to_call, kwargs):
    """
    Check the call signature against a dictionary of proposed arguments,
    raising an informative exception in the case of mismatch.

    Parameters
    ----------
    to_call : class or callable
        Function or class to examine (in the case of classes, the
        constructor call signature is analyzed).
    kwargs : dict
        Dictionary mapping parameter names (including positional
        arguments) to proposed values.
    """
    if 'self' in kwargs.keys():
        raise TypeError("Your dictionary includes an entry for 'self', "
                        "which is just asking for trouble")

    orig_to_call = getattr(to_call, '__name__', str(to_call))
    if not isinstance(to_call, types.FunctionType):
        if hasattr(to_call, '__init__'):
            to_call = to_call.__init__
        elif hasattr(to_call, '__call__'):
            to_call = to_call.__call__

    args, varargs, keywords, defaults = inspect.getargspec(to_call)

    if any(not isinstance(arg, str) for arg in args):
        raise TypeError('%s uses argument unpacking, which is deprecated and '
                        'unsupported by this pylearn2' % orig_to_call)

    if varargs is not None:
        raise TypeError('%s has a variable length argument list, but '
                        'this is not supported by config resolution'
                        % orig_to_call)

    if keywords is None:
        bad_keywords = [arg_name for arg_name in kwargs.keys()
                        if arg_name not in args]

        if len(bad_keywords) > 0:
            bad = ', '.join(bad_keywords)
            args = [arg for arg in args if arg != 'self']
            if len(args) == 0:
                matched_str = '(It does not support any keywords, actually)'
            else:
                matched = [match(keyword, args)
                           for keyword in bad_keywords]
                matched_str = 'Did you mean %s?' % (', '.join(matched))
            raise TypeError('%s does not support the following '
                            'keywords: %s. %s'
                            % (orig_to_call, bad, matched_str))

    if defaults is None:
        num_defaults = 0
    else:
        num_defaults = len(defaults)

    required = args[:len(args) - num_defaults]
    missing = [arg for arg in required if arg not in kwargs]

    if len(missing) > 0:
        # Iff the im_self (or __self__) field is present, this is a
        # bound method, which has 'self' listed as an argument, but
        # which should not be supplied by kwargs.
        is_bound = hasattr(to_call, 'im_self') or hasattr(to_call, '__self__')
        if len(missing) > 1 or missing[0] != 'self' or not is_bound:
            if 'self' in missing:
                missing.remove('self')
            missing = ', '.join([str(m) for m in missing])
            raise TypeError('%s did not get these expected '
                            'arguments: %s' % (orig_to_call, missing))
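# A small sketch of the error checking, assuming the module-level match()
# helper used above for suggestions. A misspelled keyword should raise an
# informative TypeError naming the close match.
def train(model, batch_size=32):
    pass

try:
    check_call_arguments(train, {'model': None, 'batch_sizes': 64})
except TypeError as exc:
    print(exc)  # mentions 'batch_sizes' and suggests 'batch_size'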
def predict(rf_model, features):
    """
    Return label and probability estimated.

    Parameters
    ----------
    rf_model : sklearn.ensemble.RandomForestClassifier
        The UPSILoN random forests model.
    features : array_like
        A list of features estimated by UPSILoN.

    Returns
    -------
    label : str
        A predicted label (i.e. class).
    probability : float
        Class probability.
    flag : int
        Classification flag.
    """
    import numpy as np
    from upsilon.extract_features.feature_set import get_feature_set
    feature_set = get_feature_set()

    # Grab only necessary features.
    cols = [feature for feature in features if feature in feature_set]
    cols = sorted(cols)
    filtered_features = []
    for i in range(len(cols)):
        filtered_features.append(features[cols[i]])
    filtered_features = np.array(filtered_features).reshape(1, -1)

    # Classify.
    classes = rf_model.classes_
    # Note that we're classifying a single source, so [0] needs to be added.
    probabilities = rf_model.predict_proba(filtered_features)[0]

    # Classification flag.
    flag = 0
    if features['period_SNR'] < 20. or is_period_alias(features['period']):
        flag = 1

    # Return class, probability, and flag.
    max_index = np.where(probabilities == np.max(probabilities))
    return classes[max_index][0], probabilities[max_index][0], flag
def fmt(str, args=None, env=None):
    """fmt(string, [tuple]) -> string

    Interpolate a string, replacing {patterns} with the variables with the
    same name. If given a tuple, use the keys from the tuple to substitute.
    If not given a tuple, uses the current environment as the variable
    source.
    """
    # Normally, we'd just call str.format(**args), but we only want to
    # evaluate values from the tuple which are actually used in the string
    # interpolation, so we use proxy objects.

    # If no args are given, we're able to take the current environment.
    args = args or env
    proxies = {k: StringInterpolationProxy(args, k) for k in args.keys()}
    return str.format(**proxies)
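# A short sketch of fmt() with a plain dict standing in for a GCL tuple,
# assuming StringInterpolationProxy renders the looked-up value when
# formatted.
print(fmt('{greeting}, {name}!', {'greeting': 'hello', 'name': 'world'}))
# expected: hello, world!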
def compose_all(tups):
    """Compose all given tuples together."""
    from . import ast  # I weep for humanity
    return functools.reduce(lambda x, y: x.compose(y),
                            map(ast.make_tuple, tups),
                            ast.make_tuple({}))


def has_key(tup, key):
    """has(tuple, string) -> bool

    Return whether a given tuple has a key and the key is bound.
    """
    if isinstance(tup, framework.TupleLike):
        return tup.is_bound(key)
    if isinstance(tup, dict):
        return key in tup
    if isinstance(tup, list):
        if not isinstance(key, int):
            raise ValueError('Key must be integer when checking list index')
        return key < len(tup)
    raise ValueError('Not a tuple-like object: %r' % tup)


def flatten(list_of_lists):
    """flatten([[A]]) -> [A]

    Flatten a list of lists.
    """
    ret = []
    for lst in list_of_lists:
        if not isinstance(lst, list):
            raise ValueError('%r is not a list' % lst)
        ret.extend(lst)
    return ret
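# Quick sketches of has_key() and flatten() on plain Python containers.
print(has_key({'a': 1}, 'a'))      # True
print(has_key([10, 20], 1))        # True (index within range)
print(flatten([[1, 2], [3], []]))  # [1, 2, 3]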
def extract_dark(prihdr, scihdu):
    """Extract superdark data from ``DARKFILE`` or ``DRKCFILE``.

    Parameters
    ----------
    prihdr : obj
        FITS primary header HDU.

    scihdu : obj
        Extension HDU of the science image.
        This is only used to extract subarray data.

    Returns
    -------
    dark : ndarray or `None`
        Superdark, if any. Subtract this to apply ``DARKCORR``.
    """
    if prihdr.get('PCTECORR', 'OMIT') == 'COMPLETE':
        darkfile = prihdr.get('DRKCFILE', 'N/A')
    else:
        darkfile = prihdr.get('DARKFILE', 'N/A')

    if darkfile == 'N/A':
        return None

    darkfile = from_irafpath(darkfile)
    ampstring = prihdr['CCDAMP']

    # Calculate DARKTIME.
    exptime = prihdr.get('EXPTIME', 0.0)
    flashdur = prihdr.get('FLASHDUR', 0.0)
    darktime = exptime + flashdur
    if exptime > 0:  # Not BIAS
        darktime += 3.0

    with fits.open(darkfile) as hdudark:
        if ampstring == 'ABCD':
            dark = np.concatenate(
                (hdudark['sci', 1].data,
                 hdudark['sci', 2].data[::-1, :]), axis=1)
        elif ampstring in ('A', 'B', 'AB'):
            dark = extract_ref(scihdu, hdudark['sci', 2])
        else:
            dark = extract_ref(scihdu, hdudark['sci', 1])

    dark = dark * darktime

    return dark


def extract_flash(prihdr, scihdu):
    """Extract postflash data from ``FLSHFILE``.

    Parameters
    ----------
    prihdr : obj
        FITS primary header HDU.

    scihdu : obj
        Extension HDU of the science image.
        This is only used to extract subarray data.

    Returns
    -------
    flash : ndarray or `None`
        Postflash, if any. Subtract this to apply ``FLSHCORR``.
    """
    flshfile = prihdr.get('FLSHFILE', 'N/A')
    flashsta = prihdr.get('FLASHSTA', 'N/A')
    flashdur = prihdr.get('FLASHDUR', 0.0)

    if flshfile == 'N/A' or flashdur <= 0:
        return None

    if flashsta != 'SUCCESSFUL':
        warnings.warn('Flash status is {0}'.format(flashsta),
                      AstropyUserWarning)

    flshfile = from_irafpath(flshfile)
    ampstring = prihdr['CCDAMP']

    with fits.open(flshfile) as hduflash:
        if ampstring == 'ABCD':
            flash = np.concatenate(
                (hduflash['sci', 1].data,
                 hduflash['sci', 2].data[::-1, :]), axis=1)
        elif ampstring in ('A', 'B', 'AB'):
            flash = extract_ref(scihdu, hduflash['sci', 2])
        else:
            flash = extract_ref(scihdu, hduflash['sci', 1])

    flash = flash * flashdur

    return flash


def extract_flatfield(prihdr, scihdu):
    """Extract flatfield data from ``PFLTFILE``.

    Parameters
    ----------
    prihdr : obj
        FITS primary header HDU.

    scihdu : obj
        Extension HDU of the science image.
        This is only used to extract subarray data.

    Returns
    -------
    invflat : ndarray or `None`
        Inverse flatfield, if any. Multiply this to apply ``FLATCORR``.
    """
    for ff in ['DFLTFILE', 'LFLTFILE']:
        vv = prihdr.get(ff, 'N/A')
        if vv != 'N/A':
            warnings.warn('{0}={1} is not accounted for'.format(ff, vv),
                          AstropyUserWarning)

    flatfile = prihdr.get('PFLTFILE', 'N/A')

    if flatfile == 'N/A':
        return None

    flatfile = from_irafpath(flatfile)
    ampstring = prihdr['CCDAMP']

    with fits.open(flatfile) as hduflat:
        if ampstring == 'ABCD':
            invflat = np.concatenate(
                (1 / hduflat['sci', 1].data,
                 1 / hduflat['sci', 2].data[::-1, :]), axis=1)
        elif ampstring in ('A', 'B', 'AB'):
            invflat = 1 / extract_ref(scihdu, hduflat['sci', 2])
        else:
            invflat = 1 / extract_ref(scihdu, hduflat['sci', 1])

    return invflat
def from_irafpath(irafpath):
    """Resolve IRAF path like ``jref$`` into actual file path.

    Parameters
    ----------
    irafpath : str
        Path containing IRAF syntax.

    Returns
    -------
    realpath : str
        Actual file path. If input does not follow ``path$filename``
        format, then this is the same as input.

    Raises
    ------
    ValueError
        The required environment variable is undefined.
    """
    s = irafpath.split('$')

    if len(s) != 2:
        return irafpath
    if len(s[0]) == 0:
        return irafpath

    try:
        refdir = os.environ[s[0]]
    except KeyError:
        raise ValueError('{0} environment variable undefined'.format(s[0]))

    return os.path.join(refdir, s[1])
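# A sketch of from_irafpath(), with a hypothetical reference directory bound
# to the jref environment variable as in a typical CALACS setup.
import os
os.environ['jref'] = '/my/ref/dir'        # hypothetical path
print(from_irafpath('jref$dark.fits'))    # /my/ref/dir/dark.fits
print(from_irafpath('plain.fits'))        # unchanged: plain.fits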
def extract_ref(scihdu, refhdu):
    """Extract section of the reference image that corresponds to the given
    science image.

    This only returns a view, not a copy of the reference image's array.

    Parameters
    ----------
    scihdu, refhdu : obj
        Extension HDU's of the science and reference image, respectively.

    Returns
    -------
    refdata : array-like
        Section of the relevant reference image.

    Raises
    ------
    NotImplementedError
        Either science or reference data are binned.

    ValueError
        Extracted section size mismatch.
    """
    same_size, rx, ry, x0, y0 = find_line(scihdu, refhdu)

    # Use the whole reference image.
    if same_size:
        return refhdu.data

    # Binned data.
    if rx != 1 or ry != 1:
        raise NotImplementedError(
            'Either science or reference data are binned')

    # Extract a view of the sub-section.
    ny, nx = scihdu.data.shape
    refdata = refhdu.data[y0:y0+ny, x0:x0+nx]
    if refdata.shape != (ny, nx):
        raise ValueError('Extracted reference image is {0} but science '
                         'image is {1}'.format(refdata.shape, (ny, nx)))

    return refdata
def find_line(scihdu, refhdu):
    """Obtain bin factors and corner location to extract and bin the
    appropriate subset of a reference image to match a science image.

    If the science image has zero offset and is the same size and
    binning as the reference image, ``same_size`` will be set to
    `True`. Otherwise, the values of ``rx``, ``ry``, ``x0``, and
    ``y0`` will be assigned.

    Normally the science image will be binned the same or more
    than the reference image. In that case, ``rx`` and ``ry``
    will be the bin size of the science image divided by the
    bin size of the reference image.

    If the binning of the reference image is greater than the
    binning of the science image, the ratios (``rx`` and ``ry``)
    of the bin sizes will be the reference image size divided by
    the science image bin size. This is not necessarily an error.

    .. note:: Translated from ``calacs/lib/findbin.c``.

    Parameters
    ----------
    scihdu, refhdu : obj
        Extension HDU's of the science and reference image, respectively.

    Returns
    -------
    same_size : bool
        `True` if zero offset and same size and binning.

    rx, ry : int
        Ratio of bin sizes.

    x0, y0 : int
        Location of start of subimage in reference image.

    Raises
    ------
    ValueError
        Science and reference data size mismatch.
    """
    sci_bin, sci_corner = get_corner(scihdu.header)
    ref_bin, ref_corner = get_corner(refhdu.header)

    # We can use the reference image directly, without binning
    # and without extracting a subset.
    if (sci_corner[0] == ref_corner[0] and sci_corner[1] == ref_corner[1] and
            sci_bin[0] == ref_bin[0] and sci_bin[1] == ref_bin[1] and
            scihdu.data.shape[1] == refhdu.data.shape[1]):
        same_size = True
        rx = 1
        ry = 1
        x0 = 0
        y0 = 0

    # Reference image is binned more than the science image.
    elif ref_bin[0] > sci_bin[0] or ref_bin[1] > sci_bin[1]:
        same_size = False
        rx = ref_bin[0] / sci_bin[0]
        ry = ref_bin[1] / sci_bin[1]
        x0 = (sci_corner[0] - ref_corner[0]) / ref_bin[0]
        y0 = (sci_corner[1] - ref_corner[1]) / ref_bin[1]

    # For subarray input images, whether they are binned or not.
    else:
        same_size = False

        # Ratio of bin sizes.
        ratiox = sci_bin[0] / ref_bin[0]
        ratioy = sci_bin[1] / ref_bin[1]
        if (ratiox * ref_bin[0] != sci_bin[0] or
                ratioy * ref_bin[1] != sci_bin[1]):
            raise ValueError('Science and reference data size mismatch')

        # cshift is the offset in units of unbinned pixels.
        # Divide by ref_bin to convert to units of pixels in the ref image.
        cshift = (sci_corner[0] - ref_corner[0],
                  sci_corner[1] - ref_corner[1])
        xzero = cshift[0] / ref_bin[0]
        yzero = cshift[1] / ref_bin[1]
        if (xzero * ref_bin[0] != cshift[0] or
                yzero * ref_bin[1] != cshift[1]):
            warnings.warn('Subimage offset not divisible by bin size',
                          AstropyUserWarning)
        rx = ratiox
        ry = ratioy
        x0 = xzero
        y0 = yzero

    # Ensure integer index.
    x0 = int(x0)
    y0 = int(y0)

    return same_size, rx, ry, x0, y0
def get_corner(hdr, rsize=1):
    """Obtain bin and corner information for a subarray.

    ``LTV1``, ``LTV2``, ``LTM1_1``, and ``LTM2_2`` keywords are extracted
    from the given extension header and converted to bin and corner values
    (0-indexed).

    ``LTV1`` for the CCD uses the beginning of the illuminated portion as
    the origin, not the beginning of the overscan region. Thus, the
    computed X-corner has the same origin as ``LTV1``, which is what we
    want, but it differs from the ``CENTERA1`` header keyword, which has
    the beginning of the overscan region as origin.

    .. note:: Translated from ``calacs/lib/getcorner.c``.

    Parameters
    ----------
    hdr : obj
        Extension header.

    rsize : int, optional
        Size of reference pixel in units of high-res pixels.

    Returns
    -------
    bin : tuple of int
        Pixel size in X and Y.

    corner : tuple of int
        Corner of subarray in X and Y.
    """
    ltm, ltv = get_lt(hdr)
    return from_lt(rsize, ltm, ltv)
Obtain the LTV and LTM keyword values. Note that this returns the values just as read from the header, which means in particular that the LTV values are for one-indexed pixel coordinates. The LTM keywords are the diagonal elements of the MWCS linear transformation matrix, while the LTV keywords are the elements of the MWCS linear transformation vector (1-indexed). .. note:: Translated from ``calacs/lib/getlt.c``. Parameters ---------- hdr : obj Extension header. Returns ------- ltm, ltv : tuple of float ``(LTM1_1, LTM2_2)`` and ``(LTV1, LTV2)``. Values are ``(1, 1)`` and ``(0, 0)`` if not found, to accommodate reference files with missing info. Raises ------ ValueError Invalid LTM* values. def get_lt(hdr): """Obtain the LTV and LTM keyword values. Note that this returns the values just as read from the header, which means in particular that the LTV values are for one-indexed pixel coordinates. The LTM keywords are the diagonal elements of the MWCS linear transformation matrix, while the LTV keywords are the elements of the MWCS linear transformation vector (1-indexed). .. note:: Translated from ``calacs/lib/getlt.c``. Parameters ---------- hdr : obj Extension header. Returns ------- ltm, ltv : tuple of float ``(LTM1_1, LTM2_2)`` and ``(LTV1, LTV2)``. Values are ``(1, 1)`` and ``(0, 0)`` if not found, to accommodate reference files with missing info. Raises ------ ValueError Invalid LTM* values. """ ltm = (hdr.get('LTM1_1', 1.0), hdr.get('LTM2_2', 1.0)) if ltm[0] <= 0 or ltm[1] <= 0: raise ValueError('(LTM1_1, LTM2_2) = {0} is invalid'.format(ltm)) ltv = (hdr.get('LTV1', 0.0), hdr.get('LTV2', 0.0)) return ltm, ltv
Compute the corner location and pixel size in units of unbinned pixels. .. note:: Translated from ``calacs/lib/fromlt.c``. Parameters ---------- rsize : int Reference pixel size. Usually 1. ltm, ltv : tuple of float See :func:`get_lt`. Returns ------- bin : tuple of int Pixel size in X and Y. corner : tuple of int Corner of subarray in X and Y. def from_lt(rsize, ltm, ltv): """Compute the corner location and pixel size in units of unbinned pixels. .. note:: Translated from ``calacs/lib/fromlt.c``. Parameters ---------- rsize : int Reference pixel size. Usually 1. ltm, ltv : tuple of float See :func:`get_lt`. Returns ------- bin : tuple of int Pixel size in X and Y. corner : tuple of int Corner of subarray in X and Y. """ dbinx = rsize / ltm[0] dbiny = rsize / ltm[1] dxcorner = (dbinx - rsize) - dbinx * ltv[0] dycorner = (dbiny - rsize) - dbiny * ltv[1] # Round off to the nearest integer. bin = (_nint(dbinx), _nint(dbiny)) corner = (_nint(dxcorner), _nint(dycorner)) return bin, corner
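A worked example with hypothetical keyword values: ``LTM1_1 = LTM2_2 = 0.5`` corresponds to 2x2 binning, and ``LTV1 = LTV2 = -10`` offsets the corner::

    >>> from_lt(1, (1.0, 1.0), (0.0, 0.0))  # unbinned, no offset
    ((1, 1), (0, 0))
    >>> # dbinx = 1 / 0.5 = 2; dxcorner = (2 - 1) - 2 * (-10) = 21
    >>> from_lt(1, (0.5, 0.5), (-10.0, -10.0))
    ((2, 2), (21, 21))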
Retrieve header keyword values from RAW and SPT FITS files to pass on to :func:`check_oscntab` and :func:`check_overscan`. Parameters ---------- root : str Rootname of the observation. Can be relative path to the file excluding the type of FITS file and extension, e.g., '/my/path/jxxxxxxxq'. Returns ------- ccdamp : str Amplifiers used to read out the CCDs. xstart : int Starting column of the readout in detector coordinates. ystart : int Starting row of the readout in detector coordinates. xsize : int Number of columns in the readout. ysize : int Number of rows in the readout. def hdr_vals_for_overscan(root): """Retrieve header keyword values from RAW and SPT FITS files to pass on to :func:`check_oscntab` and :func:`check_overscan`. Parameters ---------- root : str Rootname of the observation. Can be relative path to the file excluding the type of FITS file and extension, e.g., '/my/path/jxxxxxxxq'. Returns ------- ccdamp : str Amplifiers used to read out the CCDs. xstart : int Starting column of the readout in detector coordinates. ystart : int Starting row of the readout in detector coordinates. xsize : int Number of columns in the readout. ysize : int Number of rows in the readout. """ with fits.open(root + '_spt.fits') as hdu: spthdr = hdu[0].header with fits.open(root + '_raw.fits') as hdu: prihdr = hdu[0].header xstart = spthdr['SS_A1CRN'] ystart = spthdr['SS_A2CRN'] xsize = spthdr['SS_A1SZE'] ysize = spthdr['SS_A2SZE'] ccdamp = prihdr['CCDAMP'] return ccdamp, xstart, ystart, xsize, ysize
Check if the supplied parameters are in the ``OSCNTAB`` reference file. .. note:: Even if an entry does not exist in ``OSCNTAB``, as long as the subarray does not have any overscan, it should not be a problem for CALACS. .. note:: This function does not check the virtual bias rows. Parameters ---------- oscntab : str Path to the ``OSCNTAB`` reference file being checked against. ccdamp : str Amplifier(s) used to read out the CCDs. xsize : int Number of columns in the readout. ysize : int Number of rows in the readout. leading : int Number of columns in the bias section ("TRIMX1" to be trimmed off by ``BLEVCORR``) on the A/C amplifiers side of the CCDs. trailing : int Number of columns in the bias section ("TRIMX2" to be trimmed off by ``BLEVCORR``) on the B/D amplifiers side of the CCDs. Returns ------- supported : bool Result of test if input parameters are in ``OSCNTAB``. def check_oscntab(oscntab, ccdamp, xsize, ysize, leading, trailing): """Check if the supplied parameters are in the ``OSCNTAB`` reference file. .. note:: Even if an entry does not exist in ``OSCNTAB``, as long as the subarray does not have any overscan, it should not be a problem for CALACS. .. note:: This function does not check the virtual bias rows. Parameters ---------- oscntab : str Path to the ``OSCNTAB`` reference file being checked against. ccdamp : str Amplifier(s) used to read out the CCDs. xsize : int Number of columns in the readout. ysize : int Number of rows in the readout. leading : int Number of columns in the bias section ("TRIMX1" to be trimmed off by ``BLEVCORR``) on the A/C amplifiers side of the CCDs. trailing : int Number of columns in the bias section ("TRIMX2" to be trimmed off by ``BLEVCORR``) on the B/D amplifiers side of the CCDs. Returns ------- supported : bool Result of test if input parameters are in ``OSCNTAB``. """ tab = Table.read(oscntab) ccdamp = ccdamp.lower().rstrip() for row in tab: if (row['CCDAMP'].lower().rstrip() in ccdamp and row['NX'] == xsize and row['NY'] == ysize and row['TRIMX1'] == leading and row['TRIMX2'] == trailing): return True return False
Check image for bias columns. Parameters ---------- xstart : int Starting column of the readout in detector coordinates. xsize : int Number of columns in the readout. total_prescan_pixels : int Total prescan pixels for a single amplifier on a detector. Default is 24 for WFC. total_science_pixels : int Total science pixels across a detector. Default is 4096 for WFC (across two amplifiers). Returns ------- hasoverscan : bool Indication if there are bias columns in the image. leading : int Number of bias columns on the A/C amplifiers side of the CCDs ("TRIMX1" in ``OSCNTAB``). trailing : int Number of bias columns on the B/D amplifiers side of the CCDs ("TRIMX2" in ``OSCNTAB``). def check_overscan(xstart, xsize, total_prescan_pixels=24, total_science_pixels=4096): """Check image for bias columns. Parameters ---------- xstart : int Starting column of the readout in detector coordinates. xsize : int Number of columns in the readout. total_prescan_pixels : int Total prescan pixels for a single amplifier on a detector. Default is 24 for WFC. total_science_pixels : int Total science pixels across a detector. Default is 4096 for WFC (across two amplifiers). Returns ------- hasoverscan : bool Indication if there are bias columns in the image. leading : int Number of bias columns on the A/C amplifiers side of the CCDs ("TRIMX1" in ``OSCNTAB``). trailing : int Number of bias columns on the B/D amplifiers side of the CCDs ("TRIMX2" in ``OSCNTAB``). """ hasoverscan = False leading = 0 trailing = 0 if xstart < total_prescan_pixels: hasoverscan = True leading = abs(xstart - total_prescan_pixels) if (xstart + xsize) > total_science_pixels: hasoverscan = True trailing = abs(total_science_pixels - (xstart + xsize - total_prescan_pixels)) return hasoverscan, leading, trailing
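The three helpers above are meant to chain together. A sketch, assuming a hypothetical observation rootname and ``OSCNTAB`` file::

    ccdamp, xstart, ystart, xsize, ysize = hdr_vals_for_overscan('/my/path/jxxxxxxxq')
    hasoverscan, leading, trailing = check_overscan(xstart, xsize)
    if hasoverscan:
        # 'my_oscntab.fits' is a placeholder for the real OSCNTAB reference file.
        supported = check_oscntab('my_oscntab.fits', ccdamp, xsize, ysize,
                                  leading, trailing)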
Ignore static file requests, POSTs, and AJAX requests. We only want to redirect after a 'natural' GET navigation to the `wagtailadmin_explore_root` or `wagtailadmin_explore` views. def process_request(self, request): """ Ignore static file requests, POSTs, and AJAX requests. We only want to redirect after a 'natural' GET navigation to the `wagtailadmin_explore_root` or `wagtailadmin_explore` views. """ referer_url = request.META.get('HTTP_REFERER') return_to_index_url = request.session.get('return_to_index_url') try: if all(( return_to_index_url, referer_url, request.method == 'GET', not request.is_ajax(), resolve(request.path).url_name in ('wagtailadmin_explore_root', 'wagtailadmin_explore'), )): perform_redirection = False referer_match = resolve(urlparse(referer_url).path) if all(( referer_match.namespace == 'wagtailadmin_pages', referer_match.url_name in ( 'add', 'edit', 'delete', 'unpublish', 'copy' ), )): perform_redirection = True elif all(( not referer_match.namespace, referer_match.url_name in ( 'wagtailadmin_pages_create', 'wagtailadmin_pages_edit', 'wagtailadmin_pages_delete', 'wagtailadmin_pages_unpublish' ), )): perform_redirection = True if perform_redirection: del request.session['return_to_index_url'] return HttpResponseRedirect(return_to_index_url) except Resolver404: pass return None
r""" Run the acs2d.e executable as from the shell. Output is automatically named based on input suffix: +--------------------+----------------+------------------------------+ | INPUT | OUTPUT | EXPECTED DATA | +====================+================+==============================+ | ``*_raw.fits`` | ``*_flt.fits`` | SBC image. | +--------------------+----------------+------------------------------+ | ``*_blv_tmp.fits`` | ``*_flt.fits`` | ACSCCD output. | +--------------------+----------------+------------------------------+ | ``*_blc_tmp.fits`` | ``*_flc.fits`` | ACSCCD output with PCTECORR. | +--------------------+----------------+------------------------------+ | ``*_crj_tmp.fits`` | ``*_crj.fits`` | ACSREJ output. | +--------------------+----------------+------------------------------+ | ``*_crc_tmp.fits`` | ``*_crc.fits`` | ACSREJ output with PCTECORR. | +--------------------+----------------+------------------------------+ Parameters ---------- input : str or list of str Input filenames in one of these formats: * a single filename ('j1234567q_blv_tmp.fits') * a Python list of filenames * a partial filename with wildcards ('\*blv_tmp.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) exec_path : str, optional The complete path to ACS2D executable. If not given, run ACS2D given by 'acs2d.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. quiet : bool, optional Set to True for quiet output. exe_args : list, optional Arbitrary arguments passed to underlying executable call. Note: Implementation uses subprocess.call and whitespace is not permitted. E.g. use exe_args=['--nThreads', '1'] def acs2d(input, exec_path='', time_stamps=False, verbose=False, quiet=False, exe_args=None): r""" Run the acs2d.e executable as from the shell. Output is automatically named based on input suffix: +--------------------+----------------+------------------------------+ | INPUT | OUTPUT | EXPECTED DATA | +====================+================+==============================+ | ``*_raw.fits`` | ``*_flt.fits`` | SBC image. | +--------------------+----------------+------------------------------+ | ``*_blv_tmp.fits`` | ``*_flt.fits`` | ACSCCD output. | +--------------------+----------------+------------------------------+ | ``*_blc_tmp.fits`` | ``*_flc.fits`` | ACSCCD output with PCTECORR. | +--------------------+----------------+------------------------------+ | ``*_crj_tmp.fits`` | ``*_crj.fits`` | ACSREJ output. | +--------------------+----------------+------------------------------+ | ``*_crc_tmp.fits`` | ``*_crc.fits`` | ACSREJ output with PCTECORR. | +--------------------+----------------+------------------------------+ Parameters ---------- input : str or list of str Input filenames in one of these formats: * a single filename ('j1234567q_blv_tmp.fits') * a Python list of filenames * a partial filename with wildcards ('\*blv_tmp.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) exec_path : str, optional The complete path to ACS2D executable. If not given, run ACS2D given by 'acs2d.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. quiet : bool, optional Set to True for quiet output. exe_args : list, optional Arbitrary arguments passed to underlying executable call. Note: Implementation uses subprocess.call and whitespace is not permitted. E.g. 
use exe_args=['--nThreads', '1'] """ from stsci.tools import parseinput # Optional package dependency if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['acs2d.e'] # Parse input to get list of filenames to process. # acs2d.e only takes 'file1,file2,...' infiles, dummy_out = parseinput.parseinput(input) call_list.append(','.join(infiles)) if time_stamps: call_list.append('-t') if verbose: call_list.append('-v') if quiet: call_list.append('-q') if exe_args: call_list.extend(exe_args) subprocess.check_call(call_list)
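A typical call, assuming ACSCCD output files sit in the current directory (the import path follows the usual acstools layout and is an assumption here)::

    from acstools import acs2d  # assumed module location

    acs2d.acs2d('*_blv_tmp.fits', time_stamps=True, verbose=True)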
TEAL interface for the `acs2d` function. def run(configobj=None): """ TEAL interface for the `acs2d` function. """ acs2d(configobj['input'], exec_path=configobj['exec_path'], time_stamps=configobj['time_stamps'], verbose=configobj['verbose'], quiet=configobj['quiet'] )
TEAL interface for the `acscte` function. def run(configobj=None): """ TEAL interface for the `acscte` function. """ acscte(configobj['input'], exec_path=configobj['exec_path'], time_stamps=configobj['time_stamps'], verbose=configobj['verbose'], quiet=configobj['quiet'], single_core=configobj['single_core'] )
Check the inputs to ensure they are valid. Returns ------- status : bool True if all inputs are valid, False if one is not. def _check_inputs(self): """Check the inputs to ensure they are valid. Returns ------- status : bool True if all inputs are valid, False if one is not. """ valid_detector = True valid_filter = True valid_date = True # Determine if the submitted detector is valid if self.detector not in self._valid_detectors: msg = ('{} is not a valid detector option.\n' 'Please choose one of the following:\n{}\n' '{}'.format(self.detector, '\n'.join(self._valid_detectors), self._msg_div)) LOG.error(msg) valid_detector = False # Determine if the submitted filter is valid if (self.filt is not None and valid_detector and self.filt not in self.valid_filters[self.detector]): msg = ('{} is not a valid filter for {}\n' 'Please choose one of the following:\n{}\n' '{}'.format(self.filt, self.detector, '\n'.join(self.valid_filters[self.detector]), self._msg_div)) LOG.error(msg) valid_filter = False # Determine if the submitted date is valid date_check = self._check_date() if date_check is not None: LOG.error('{}\n{}'.format(date_check, self._msg_div)) valid_date = False if not valid_detector or not valid_filter or not valid_date: return False return True
Convenience method for determining if the input date is valid. Parameters ---------- fmt : str The format of the date string. The default is ``%Y-%m-%d``, which corresponds to ``YYYY-MM-DD``. Returns ------- status : str or `None` If the date is valid, returns `None`. If the date is invalid, returns a message explaining the issue. def _check_date(self, fmt='%Y-%m-%d'): """Convenience method for determining if the input date is valid. Parameters ---------- fmt : str The format of the date string. The default is ``%Y-%m-%d``, which corresponds to ``YYYY-MM-DD``. Returns ------- status : str or `None` If the date is valid, returns `None`. If the date is invalid, returns a message explaining the issue. """ result = None try: dt_obj = dt.datetime.strptime(self.date, fmt) except ValueError: result = '{} does not match YYYY-MM-DD format'.format(self.date) else: if dt_obj < self._acs_installation_date: result = ('The observation date cannot occur ' 'before ACS was installed ({})' .format(self._acs_installation_date.strftime(fmt))) elif dt_obj > self._extrapolation_date: result = ('The observation date cannot occur after the ' 'maximum allowable date, {}. Extrapolations of the ' 'instrument throughput after this date lead to ' 'high uncertainties and are therefore invalid.' .format(self._extrapolation_date.strftime(fmt))) finally: return result
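The same validation pattern in isolation, for reuse outside the class (the function name and the bounds are illustrative, not the class's actual limits)::

    import datetime as dt

    def validate_date(date, fmt='%Y-%m-%d',
                      lower=dt.datetime(2002, 3, 7),   # illustrative lower bound
                      upper=dt.datetime(2028, 1, 1)):  # illustrative upper bound
        """Return None if ``date`` is valid, else an error message."""
        try:
            dt_obj = dt.datetime.strptime(date, fmt)
        except ValueError:
            return '{} does not match YYYY-MM-DD format'.format(date)
        if not (lower <= dt_obj <= upper):
            return '{} is outside the supported range'.format(date)
        return None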
Submit a request to the ACS Zeropoint Calculator. If an exception is raised during the request, an error message is given. Otherwise, the response is saved in the corresponding attribute. def _submit_request(self): """Submit a request to the ACS Zeropoint Calculator. If an exception is raised during the request, an error message is given. Otherwise, the response is saved in the corresponding attribute. """ try: self._response = urlopen(self._url) except URLError as e: msg = ('{}\n{}\nThe query failed! Please check your inputs. ' 'If the error persists, submit a ticket to the ' 'ACS Help Desk at hsthelp.stsci.edu with the error message ' 'displayed above.'.format(str(e), self._msg_div)) LOG.error(msg) self._failed = True else: self._failed = False
Parse and format the results returned by the ACS Zeropoint Calculator. Using ``beautifulsoup4``, find all the ``<td> </td>`` tags present in the response. Format the results into an astropy.table.QTable with corresponding units and assign it to the zpt_table attribute. def _parse_and_format(self): """ Parse and format the results returned by the ACS Zeropoint Calculator. Using ``beautifulsoup4``, find all the ``<td> </td>`` tags present in the response. Format the results into an astropy.table.QTable with corresponding units and assign it to the zpt_table attribute. """ soup = BeautifulSoup(self._response.read(), 'html.parser') # Grab all elements in the table returned by the ZPT calc. td = soup.find_all('td') # Remove the units attached to PHOTFLAM and PHOTPLAM column names. td = [val.text.split(' ')[0] for val in td] # Turn the single list into a 2-D numpy array data = np.reshape(td, (int(len(td) / self._block_size), self._block_size)) # Create the QTable, note that sometimes self._response will be empty # even though the return was successful; hence the try/except to catch # any potential index errors. Provide the user with a message and # set the zpt_table to None. try: tab = QTable(data[1:, :], names=data[0], dtype=[str, float, float, float, float, float]) except IndexError as e: msg = ('{}\n{}\n There was an issue parsing the request. ' 'Try resubmitting the query. If this issue persists, please ' 'submit a ticket to the Help Desk at ' 'https://stsci.service-now.com/hst' .format(e, self._msg_div)) LOG.info(msg) self._zpt_table = None else: # If and only if no exception was raised, attach the units to each # column of the QTable. Note we skip the FILTER column because # Quantity objects in astropy must be numerical (i.e. not str) for col in tab.colnames: if col.lower() == 'filter': continue tab[col].unit = self._data_units[col] self._zpt_table = tab
Submit the request to the ACS Zeropoints Calculator. This method will: * submit the request * parse the response * format the results into a table with the correct units Returns ------- tab : `astropy.table.QTable` or `None` If the request was successful, returns a table; otherwise, `None`. def fetch(self): """Submit the request to the ACS Zeropoints Calculator. This method will: * submit the request * parse the response * format the results into a table with the correct units Returns ------- tab : `astropy.table.QTable` or `None` If the request was successful, returns a table; otherwise, `None`. """ LOG.info('Checking inputs...') valid_inputs = self._check_inputs() if valid_inputs: LOG.info('Submitting request to {}'.format(self._url)) self._submit_request() if self._failed: return LOG.info('Parsing the response and formatting the results...') self._parse_and_format() return self.zpt_table LOG.error('Please fix the incorrect input(s)')
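End-to-end, the workflow reduces to building the query object and calling ``fetch`` (the class name ``Query`` is assumed here; substitute the actual class that exposes this method)::

    q = Query(date='2017-01-01', detector='WFC', filt='F555W')  # hypothetical class name
    tab = q.fetch()
    if tab is not None:
        print(tab)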
Returns a QuerySet of all model instances that can be edited by the admin site. def get_queryset(self, request): """ Returns a QuerySet of all model instances that can be edited by the admin site. """ qs = self.model._default_manager.get_queryset() ordering = self.get_ordering(request) if ordering: qs = qs.order_by(*ordering) return qs
Instantiates a class-based view to provide listing functionality for the assigned model. The view class used can be overridden by changing the 'index_view_class' attribute. def index_view(self, request): """ Instantiates a class-based view to provide listing functionality for the assigned model. The view class used can be overridden by changing the 'index_view_class' attribute. """ kwargs = {'model_admin': self} view_class = self.index_view_class return view_class.as_view(**kwargs)(request)
Instantiates a class-based view to provide 'creation' functionality for the assigned model, or redirect to Wagtail's create view if the assigned model extends 'Page'. The view class used can be overridden by changing the 'create_view_class' attribute. def create_view(self, request): """ Instantiates a class-based view to provide 'creation' functionality for the assigned model, or redirect to Wagtail's create view if the assigned model extends 'Page'. The view class used can be overridden by changing the 'create_view_class' attribute. """ kwargs = {'model_admin': self} view_class = self.create_view_class return view_class.as_view(**kwargs)(request)
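Overriding the view class is the intended extension point. A sketch, assuming Wagtail's ``modeladmin`` contrib app (import paths may vary by version) and a hypothetical ``Book`` model::

    from wagtail.contrib.modeladmin.options import ModelAdmin
    from wagtail.contrib.modeladmin.views import IndexView

    class LiveOnlyIndexView(IndexView):
        # Hypothetical tweak: list only live objects in the index view.
        def get_queryset(self, request=None):
            return super().get_queryset(request).filter(live=True)

    class BookAdmin(ModelAdmin):
        model = Book  # assumes a Book model is defined elsewhere
        index_view_class = LiveOnlyIndexView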