body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
f79c21abfdba4d8fbe002038a240e75dc58fe6602fd61d1209887ea37aca63cc
def build_encoder(model_name, logger=None): 'Builds encoder module by model name.' if (model_name not in MODEL_POOL): raise ValueError(f'Model `{model_name}` is not registered in `MODEL_POOL` in `model_settings.py`!') gan_type = model_name.split('_')[0] if (gan_type == 'styleganinv'): return StyleGANEncoder(model_name, logger=logger) raise NotImplementedError(f'Unsupported GAN type `{gan_type}`!')
Builds encoder module by model name.
models/helper.py
build_encoder
Tommy-Ngx/In-domainGAN
319
python
def build_encoder(model_name, logger=None): if (model_name not in MODEL_POOL): raise ValueError(f'Model `{model_name}` is not registered in `MODEL_POOL` in `model_settings.py`!') gan_type = model_name.split('_')[0] if (gan_type == 'styleganinv'): return StyleGANEncoder(model_name, logger=logger) raise NotImplementedError(f'Unsupported GAN type `{gan_type}`!')
def build_encoder(model_name, logger=None): if (model_name not in MODEL_POOL): raise ValueError(f'Model `{model_name}` is not registered in `MODEL_POOL` in `model_settings.py`!') gan_type = model_name.split('_')[0] if (gan_type == 'styleganinv'): return StyleGANEncoder(model_name, logger=logger) raise NotImplementedError(f'Unsupported GAN type `{gan_type}`!')<|docstring|>Builds encoder module by model name.<|endoftext|>
c5f8f2aeb968c5ec13b4157553fe10042d2cd6541d6c8697faade68c3c8767b7
def unpack_feature(byte_arr: bytearray) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]: 'Unpack the flatten feature (in byte array format) from c++\n\n Parameters\n ----------\n byte_arr: bytearray\n The two-dimensional feature vector in serialized byte array format\n\n Returns\n -------\n features: np.ndarray\n Feature vectors\n normalized_throughputs: np.ndarray\n Normalized throughputs\n task_ids: np.ndarray\n Task ids\n ' vec_len = DEFAULT_FEATURE_VEC_LEN offset = 0 n = struct.unpack_from('1i', byte_arr, offset=offset)[0] offset += SIZE_OF_INT32 sizes = struct.unpack_from(('%di' % (n + 2)), byte_arr, offset=offset) offset += (SIZE_OF_INT32 * (n + 2)) features = [] for size in sizes[:(- 2)]: row = [] if (size == 0): features.append(np.zeros((1, vec_len))) else: n_stmts = struct.unpack_from('f', byte_arr, offset=offset) offset += SIZE_OF_FLOAT32 n_stmts = int((n_stmts[0] + 0.5)) tmp_vec_len = ((size - 1) // n_stmts) assert (tmp_vec_len == vec_len), ('The lenght of feature vector is wrong. Expected %d but got %d.' % (vec_len, tmp_vec_len)) assert ((tmp_vec_len * n_stmts) == (size - 1)) for _ in range(n_stmts): x = struct.unpack_from(('%df' % vec_len), byte_arr, offset=offset) offset += (vec_len * SIZE_OF_FLOAT32) row.append(x) features.append(np.array(row)) m = sizes[(- 2)] normalized_throughputs = struct.unpack_from(('%df' % m), byte_arr, offset=offset) offset += (m * SIZE_OF_INT32) m = sizes[(- 1)] task_ids = struct.unpack_from(('%di' % m), byte_arr, offset=offset) offset += (m * SIZE_OF_INT32) assert (offset == len(byte_arr)), ('%d vs %d' % (offset, len(byte_arr))) return (np.array(features, dtype=object), np.array(normalized_throughputs), np.array(task_ids))
Unpack the flatten feature (in byte array format) from c++ Parameters ---------- byte_arr: bytearray The two-dimensional feature vector in serialized byte array format Returns ------- features: np.ndarray Feature vectors normalized_throughputs: np.ndarray Normalized throughputs task_ids: np.ndarray Task ids
python/tvm/auto_scheduler/feature.py
unpack_feature
iswariyam/incubator-tvm
0
python
def unpack_feature(byte_arr: bytearray) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]: 'Unpack the flatten feature (in byte array format) from c++\n\n Parameters\n ----------\n byte_arr: bytearray\n The two-dimensional feature vector in serialized byte array format\n\n Returns\n -------\n features: np.ndarray\n Feature vectors\n normalized_throughputs: np.ndarray\n Normalized throughputs\n task_ids: np.ndarray\n Task ids\n ' vec_len = DEFAULT_FEATURE_VEC_LEN offset = 0 n = struct.unpack_from('1i', byte_arr, offset=offset)[0] offset += SIZE_OF_INT32 sizes = struct.unpack_from(('%di' % (n + 2)), byte_arr, offset=offset) offset += (SIZE_OF_INT32 * (n + 2)) features = [] for size in sizes[:(- 2)]: row = [] if (size == 0): features.append(np.zeros((1, vec_len))) else: n_stmts = struct.unpack_from('f', byte_arr, offset=offset) offset += SIZE_OF_FLOAT32 n_stmts = int((n_stmts[0] + 0.5)) tmp_vec_len = ((size - 1) // n_stmts) assert (tmp_vec_len == vec_len), ('The lenght of feature vector is wrong. Expected %d but got %d.' % (vec_len, tmp_vec_len)) assert ((tmp_vec_len * n_stmts) == (size - 1)) for _ in range(n_stmts): x = struct.unpack_from(('%df' % vec_len), byte_arr, offset=offset) offset += (vec_len * SIZE_OF_FLOAT32) row.append(x) features.append(np.array(row)) m = sizes[(- 2)] normalized_throughputs = struct.unpack_from(('%df' % m), byte_arr, offset=offset) offset += (m * SIZE_OF_INT32) m = sizes[(- 1)] task_ids = struct.unpack_from(('%di' % m), byte_arr, offset=offset) offset += (m * SIZE_OF_INT32) assert (offset == len(byte_arr)), ('%d vs %d' % (offset, len(byte_arr))) return (np.array(features, dtype=object), np.array(normalized_throughputs), np.array(task_ids))
def unpack_feature(byte_arr: bytearray) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]: 'Unpack the flatten feature (in byte array format) from c++\n\n Parameters\n ----------\n byte_arr: bytearray\n The two-dimensional feature vector in serialized byte array format\n\n Returns\n -------\n features: np.ndarray\n Feature vectors\n normalized_throughputs: np.ndarray\n Normalized throughputs\n task_ids: np.ndarray\n Task ids\n ' vec_len = DEFAULT_FEATURE_VEC_LEN offset = 0 n = struct.unpack_from('1i', byte_arr, offset=offset)[0] offset += SIZE_OF_INT32 sizes = struct.unpack_from(('%di' % (n + 2)), byte_arr, offset=offset) offset += (SIZE_OF_INT32 * (n + 2)) features = [] for size in sizes[:(- 2)]: row = [] if (size == 0): features.append(np.zeros((1, vec_len))) else: n_stmts = struct.unpack_from('f', byte_arr, offset=offset) offset += SIZE_OF_FLOAT32 n_stmts = int((n_stmts[0] + 0.5)) tmp_vec_len = ((size - 1) // n_stmts) assert (tmp_vec_len == vec_len), ('The lenght of feature vector is wrong. Expected %d but got %d.' 
% (vec_len, tmp_vec_len)) assert ((tmp_vec_len * n_stmts) == (size - 1)) for _ in range(n_stmts): x = struct.unpack_from(('%df' % vec_len), byte_arr, offset=offset) offset += (vec_len * SIZE_OF_FLOAT32) row.append(x) features.append(np.array(row)) m = sizes[(- 2)] normalized_throughputs = struct.unpack_from(('%df' % m), byte_arr, offset=offset) offset += (m * SIZE_OF_INT32) m = sizes[(- 1)] task_ids = struct.unpack_from(('%di' % m), byte_arr, offset=offset) offset += (m * SIZE_OF_INT32) assert (offset == len(byte_arr)), ('%d vs %d' % (offset, len(byte_arr))) return (np.array(features, dtype=object), np.array(normalized_throughputs), np.array(task_ids))<|docstring|>Unpack the flatten feature (in byte array format) from c++ Parameters ---------- byte_arr: bytearray The two-dimensional feature vector in serialized byte array format Returns ------- features: np.ndarray Feature vectors normalized_throughputs: np.ndarray Normalized throughputs task_ids: np.ndarray Task ids<|endoftext|>
1132d482447cc144a32cda645733b5b1acbc8e3cbc1c1d867fac0e373304a989
def get_per_store_features_from_file(filename: str, max_lines: int, max_n_bufs: Optional[int]=None) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]: 'Get per_store features from a log file\n\n Parameters\n ----------\n filename: str\n The input filename\n max_lines: int\n Only extract the first n lines of the file\n max_n_bufs: Optional[int]\n The maximum number of extracted buffers for one statement\n\n Returns\n -------\n features: np.ndarray\n Feature vectors\n normalized_throughputs: np.ndarray\n Normalized throughputs\n task_ids: np.ndarray\n Task ids\n ' byte_arr = _ffi_api.GetPerStoreFeaturesFromFile(filename, max_lines, (max_n_bufs or DEFAULT_MAX_N_BUFS)) return unpack_feature(byte_arr)
Get per_store features from a log file Parameters ---------- filename: str The input filename max_lines: int Only extract the first n lines of the file max_n_bufs: Optional[int] The maximum number of extracted buffers for one statement Returns ------- features: np.ndarray Feature vectors normalized_throughputs: np.ndarray Normalized throughputs task_ids: np.ndarray Task ids
python/tvm/auto_scheduler/feature.py
get_per_store_features_from_file
iswariyam/incubator-tvm
0
python
def get_per_store_features_from_file(filename: str, max_lines: int, max_n_bufs: Optional[int]=None) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]: 'Get per_store features from a log file\n\n Parameters\n ----------\n filename: str\n The input filename\n max_lines: int\n Only extract the first n lines of the file\n max_n_bufs: Optional[int]\n The maximum number of extracted buffers for one statement\n\n Returns\n -------\n features: np.ndarray\n Feature vectors\n normalized_throughputs: np.ndarray\n Normalized throughputs\n task_ids: np.ndarray\n Task ids\n ' byte_arr = _ffi_api.GetPerStoreFeaturesFromFile(filename, max_lines, (max_n_bufs or DEFAULT_MAX_N_BUFS)) return unpack_feature(byte_arr)
def get_per_store_features_from_file(filename: str, max_lines: int, max_n_bufs: Optional[int]=None) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]: 'Get per_store features from a log file\n\n Parameters\n ----------\n filename: str\n The input filename\n max_lines: int\n Only extract the first n lines of the file\n max_n_bufs: Optional[int]\n The maximum number of extracted buffers for one statement\n\n Returns\n -------\n features: np.ndarray\n Feature vectors\n normalized_throughputs: np.ndarray\n Normalized throughputs\n task_ids: np.ndarray\n Task ids\n ' byte_arr = _ffi_api.GetPerStoreFeaturesFromFile(filename, max_lines, (max_n_bufs or DEFAULT_MAX_N_BUFS)) return unpack_feature(byte_arr)<|docstring|>Get per_store features from a log file Parameters ---------- filename: str The input filename max_lines: int Only extract the first n lines of the file max_n_bufs: Optional[int] The maximum number of extracted buffers for one statement Returns ------- features: np.ndarray Feature vectors normalized_throughputs: np.ndarray Normalized throughputs task_ids: np.ndarray Task ids<|endoftext|>
45b6fa27f19e651cc5d40f134254482e2fc06065e46268e85c0772b87843710d
def get_per_store_features_from_measure_pairs(inputs: List[MeasureInput], results: List[MeasureResult], skip_first_n_feature_extraction: int=0, max_n_bufs: Optional[int]=None) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]: 'Get per_store features from measurement input/result pairs\n\n Parameters\n ----------\n inputs: List[MeasureInput]\n The measure inputs\n results: List[MeasureResult]\n The measure results\n skip_first_n_feature_extraction: int\n Skip feature extraction for the first n states\n max_n_bufs: int\n The maximum number of extracted buffers for one statement\n\n Returns\n -------\n features: np.ndarray\n Feature vectors\n normalized_throughputs: np.ndarray\n Normalized throughputs\n task_ids: np.ndarray\n Task ids\n ' byte_arr = _ffi_api.GetPerStoreFeaturesFromMeasurePairs(inputs, results, skip_first_n_feature_extraction, (max_n_bufs or DEFAULT_MAX_N_BUFS)) return unpack_feature(byte_arr)
Get per_store features from measurement input/result pairs Parameters ---------- inputs: List[MeasureInput] The measure inputs results: List[MeasureResult] The measure results skip_first_n_feature_extraction: int Skip feature extraction for the first n states max_n_bufs: int The maximum number of extracted buffers for one statement Returns ------- features: np.ndarray Feature vectors normalized_throughputs: np.ndarray Normalized throughputs task_ids: np.ndarray Task ids
python/tvm/auto_scheduler/feature.py
get_per_store_features_from_measure_pairs
iswariyam/incubator-tvm
0
python
def get_per_store_features_from_measure_pairs(inputs: List[MeasureInput], results: List[MeasureResult], skip_first_n_feature_extraction: int=0, max_n_bufs: Optional[int]=None) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]: 'Get per_store features from measurement input/result pairs\n\n Parameters\n ----------\n inputs: List[MeasureInput]\n The measure inputs\n results: List[MeasureResult]\n The measure results\n skip_first_n_feature_extraction: int\n Skip feature extraction for the first n states\n max_n_bufs: int\n The maximum number of extracted buffers for one statement\n\n Returns\n -------\n features: np.ndarray\n Feature vectors\n normalized_throughputs: np.ndarray\n Normalized throughputs\n task_ids: np.ndarray\n Task ids\n ' byte_arr = _ffi_api.GetPerStoreFeaturesFromMeasurePairs(inputs, results, skip_first_n_feature_extraction, (max_n_bufs or DEFAULT_MAX_N_BUFS)) return unpack_feature(byte_arr)
def get_per_store_features_from_measure_pairs(inputs: List[MeasureInput], results: List[MeasureResult], skip_first_n_feature_extraction: int=0, max_n_bufs: Optional[int]=None) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]: 'Get per_store features from measurement input/result pairs\n\n Parameters\n ----------\n inputs: List[MeasureInput]\n The measure inputs\n results: List[MeasureResult]\n The measure results\n skip_first_n_feature_extraction: int\n Skip feature extraction for the first n states\n max_n_bufs: int\n The maximum number of extracted buffers for one statement\n\n Returns\n -------\n features: np.ndarray\n Feature vectors\n normalized_throughputs: np.ndarray\n Normalized throughputs\n task_ids: np.ndarray\n Task ids\n ' byte_arr = _ffi_api.GetPerStoreFeaturesFromMeasurePairs(inputs, results, skip_first_n_feature_extraction, (max_n_bufs or DEFAULT_MAX_N_BUFS)) return unpack_feature(byte_arr)<|docstring|>Get per_store features from measurement input/result pairs Parameters ---------- inputs: List[MeasureInput] The measure inputs results: List[MeasureResult] The measure results skip_first_n_feature_extraction: int Skip feature extraction for the first n states max_n_bufs: int The maximum number of extracted buffers for one statement Returns ------- features: np.ndarray Feature vectors normalized_throughputs: np.ndarray Normalized throughputs task_ids: np.ndarray Task ids<|endoftext|>
899e02781e3c609c23dbdceab8d48642b6e63c2482eddb1e9929248d74b63c50
def get_per_store_features_from_states(states: List[Union[(State, StateObject)]], task: 'SearchTask', max_n_bufs: Optional[int]=None) -> List[np.ndarray]: 'Get per_store features from measurement input/result pairs\n\n Parameters\n ----------\n states: List[Union[State, StateObject]]\n The input states\n task: SearchTask\n The search task of the input states\n max_n_bufs: Optional[int]\n The maximum number of extracted buffers for one statement\n\n Returns\n -------\n features: np.ndarray\n Feature vectors\n normalized_throughputs: np.ndarray\n Normalized throughputs\n task_ids: np.ndarray\n Task ids\n ' if isinstance(states[0], State): state_objects = [s.state_object for s in states] elif isinstance(states[0], StateObject): state_objects = states byte_arr = _ffi_api.GetPerStoreFeaturesFromStates(state_objects, task, (max_n_bufs or DEFAULT_MAX_N_BUFS)) return unpack_feature(byte_arr)[0]
Get per_store features from measurement input/result pairs Parameters ---------- states: List[Union[State, StateObject]] The input states task: SearchTask The search task of the input states max_n_bufs: Optional[int] The maximum number of extracted buffers for one statement Returns ------- features: np.ndarray Feature vectors normalized_throughputs: np.ndarray Normalized throughputs task_ids: np.ndarray Task ids
python/tvm/auto_scheduler/feature.py
get_per_store_features_from_states
iswariyam/incubator-tvm
0
python
def get_per_store_features_from_states(states: List[Union[(State, StateObject)]], task: 'SearchTask', max_n_bufs: Optional[int]=None) -> List[np.ndarray]: 'Get per_store features from measurement input/result pairs\n\n Parameters\n ----------\n states: List[Union[State, StateObject]]\n The input states\n task: SearchTask\n The search task of the input states\n max_n_bufs: Optional[int]\n The maximum number of extracted buffers for one statement\n\n Returns\n -------\n features: np.ndarray\n Feature vectors\n normalized_throughputs: np.ndarray\n Normalized throughputs\n task_ids: np.ndarray\n Task ids\n ' if isinstance(states[0], State): state_objects = [s.state_object for s in states] elif isinstance(states[0], StateObject): state_objects = states byte_arr = _ffi_api.GetPerStoreFeaturesFromStates(state_objects, task, (max_n_bufs or DEFAULT_MAX_N_BUFS)) return unpack_feature(byte_arr)[0]
def get_per_store_features_from_states(states: List[Union[(State, StateObject)]], task: 'SearchTask', max_n_bufs: Optional[int]=None) -> List[np.ndarray]: 'Get per_store features from measurement input/result pairs\n\n Parameters\n ----------\n states: List[Union[State, StateObject]]\n The input states\n task: SearchTask\n The search task of the input states\n max_n_bufs: Optional[int]\n The maximum number of extracted buffers for one statement\n\n Returns\n -------\n features: np.ndarray\n Feature vectors\n normalized_throughputs: np.ndarray\n Normalized throughputs\n task_ids: np.ndarray\n Task ids\n ' if isinstance(states[0], State): state_objects = [s.state_object for s in states] elif isinstance(states[0], StateObject): state_objects = states byte_arr = _ffi_api.GetPerStoreFeaturesFromStates(state_objects, task, (max_n_bufs or DEFAULT_MAX_N_BUFS)) return unpack_feature(byte_arr)[0]<|docstring|>Get per_store features from measurement input/result pairs Parameters ---------- states: List[Union[State, StateObject]] The input states task: SearchTask The search task of the input states max_n_bufs: Optional[int] The maximum number of extracted buffers for one statement Returns ------- features: np.ndarray Feature vectors normalized_throughputs: np.ndarray Normalized throughputs task_ids: np.ndarray Task ids<|endoftext|>
a2af60d1fc28e9fda4ecbf750d9a6804501a42c240ec3c953f0c473647cd8a60
def get_per_store_feature_names(max_n_bufs: Optional[int]=None) -> List[str]: 'Get the name of every element in the feature vector. Use this for debug and inspection.\n\n Parameters\n ----------\n max_n_bufs: int\n The maximum number of extracted buffers for one statement\n\n Returns\n -------\n names: List[str]\n The names of elements in the flatten feature vector\n ' return _ffi_api.GetPerStoreFeatureNames((max_n_bufs or DEFAULT_MAX_N_BUFS))
Get the name of every element in the feature vector. Use this for debug and inspection. Parameters ---------- max_n_bufs: int The maximum number of extracted buffers for one statement Returns ------- names: List[str] The names of elements in the flatten feature vector
python/tvm/auto_scheduler/feature.py
get_per_store_feature_names
iswariyam/incubator-tvm
0
python
def get_per_store_feature_names(max_n_bufs: Optional[int]=None) -> List[str]: 'Get the name of every element in the feature vector. Use this for debug and inspection.\n\n Parameters\n ----------\n max_n_bufs: int\n The maximum number of extracted buffers for one statement\n\n Returns\n -------\n names: List[str]\n The names of elements in the flatten feature vector\n ' return _ffi_api.GetPerStoreFeatureNames((max_n_bufs or DEFAULT_MAX_N_BUFS))
def get_per_store_feature_names(max_n_bufs: Optional[int]=None) -> List[str]: 'Get the name of every element in the feature vector. Use this for debug and inspection.\n\n Parameters\n ----------\n max_n_bufs: int\n The maximum number of extracted buffers for one statement\n\n Returns\n -------\n names: List[str]\n The names of elements in the flatten feature vector\n ' return _ffi_api.GetPerStoreFeatureNames((max_n_bufs or DEFAULT_MAX_N_BUFS))<|docstring|>Get the name of every element in the feature vector. Use this for debug and inspection. Parameters ---------- max_n_bufs: int The maximum number of extracted buffers for one statement Returns ------- names: List[str] The names of elements in the flatten feature vector<|endoftext|>
309f87ab8d79d88c7154f6ed9b187da34bef2ed52c6827cdf06c2dc0504bee25
def __init__(self): ' consctruct new config object\n ' self.last_updated = 0 self._conf_handle = None self._update()
consctruct new config object
src/opnsense/scripts/OPNsense/CaptivePortal/lib/__init__.py
__init__
ServiusHack/core
2,109
python
def __init__(self): ' \n ' self.last_updated = 0 self._conf_handle = None self._update()
def __init__(self): ' \n ' self.last_updated = 0 self._conf_handle = None self._update()<|docstring|>consctruct new config object<|endoftext|>
8f2836743a1bb191d8ae2e6bc423461f4514ba6461c6274c21937bf3e80a6e8c
def _update(self): ' check if config is changed and (re)load\n ' mod_time = os.stat(self._cnf_filename)[stat.ST_MTIME] if (os.path.exists(self._cnf_filename) and (self.last_updated != mod_time)): self._conf_handle = ConfigParser() self._conf_handle.read(self._cnf_filename) self.last_updated = mod_time
check if config is changed and (re)load
src/opnsense/scripts/OPNsense/CaptivePortal/lib/__init__.py
_update
ServiusHack/core
2,109
python
def _update(self): ' \n ' mod_time = os.stat(self._cnf_filename)[stat.ST_MTIME] if (os.path.exists(self._cnf_filename) and (self.last_updated != mod_time)): self._conf_handle = ConfigParser() self._conf_handle.read(self._cnf_filename) self.last_updated = mod_time
def _update(self): ' \n ' mod_time = os.stat(self._cnf_filename)[stat.ST_MTIME] if (os.path.exists(self._cnf_filename) and (self.last_updated != mod_time)): self._conf_handle = ConfigParser() self._conf_handle.read(self._cnf_filename) self.last_updated = mod_time<|docstring|>check if config is changed and (re)load<|endoftext|>
5b321ace790966a35077af8775123df2bd50a996745ff04b46efe560cea5e829
def get_zones(self): ' return list of configured zones\n :return: dictionary index by zoneid, containing dictionaries with zone properties\n ' result = dict() self._update() if (self._conf_handle is not None): for section in self._conf_handle.sections(): if (section.find('zone_') == 0): zoneid = section.split('_')[1] result[zoneid] = dict() for item in self._conf_handle.items(section): result[zoneid][item[0]] = item[1] if (('allowedaddresses' in result[zoneid]) and (result[zoneid]['allowedaddresses'].strip() != '')): result[zoneid]['allowedaddresses'] = [x.strip() for x in result[zoneid]['allowedaddresses'].split(',')] else: result[zoneid]['allowedaddresses'] = list() if (('allowedmacaddresses' in result[zoneid]) and (result[zoneid]['allowedmacaddresses'].strip() != '')): result[zoneid]['allowedmacaddresses'] = [x.strip() for x in result[zoneid]['allowedmacaddresses'].split(',')] else: result[zoneid]['allowedmacaddresses'] = list() return result
return list of configured zones :return: dictionary index by zoneid, containing dictionaries with zone properties
src/opnsense/scripts/OPNsense/CaptivePortal/lib/__init__.py
get_zones
ServiusHack/core
2,109
python
def get_zones(self): ' return list of configured zones\n :return: dictionary index by zoneid, containing dictionaries with zone properties\n ' result = dict() self._update() if (self._conf_handle is not None): for section in self._conf_handle.sections(): if (section.find('zone_') == 0): zoneid = section.split('_')[1] result[zoneid] = dict() for item in self._conf_handle.items(section): result[zoneid][item[0]] = item[1] if (('allowedaddresses' in result[zoneid]) and (result[zoneid]['allowedaddresses'].strip() != )): result[zoneid]['allowedaddresses'] = [x.strip() for x in result[zoneid]['allowedaddresses'].split(',')] else: result[zoneid]['allowedaddresses'] = list() if (('allowedmacaddresses' in result[zoneid]) and (result[zoneid]['allowedmacaddresses'].strip() != )): result[zoneid]['allowedmacaddresses'] = [x.strip() for x in result[zoneid]['allowedmacaddresses'].split(',')] else: result[zoneid]['allowedmacaddresses'] = list() return result
def get_zones(self): ' return list of configured zones\n :return: dictionary index by zoneid, containing dictionaries with zone properties\n ' result = dict() self._update() if (self._conf_handle is not None): for section in self._conf_handle.sections(): if (section.find('zone_') == 0): zoneid = section.split('_')[1] result[zoneid] = dict() for item in self._conf_handle.items(section): result[zoneid][item[0]] = item[1] if (('allowedaddresses' in result[zoneid]) and (result[zoneid]['allowedaddresses'].strip() != )): result[zoneid]['allowedaddresses'] = [x.strip() for x in result[zoneid]['allowedaddresses'].split(',')] else: result[zoneid]['allowedaddresses'] = list() if (('allowedmacaddresses' in result[zoneid]) and (result[zoneid]['allowedmacaddresses'].strip() != )): result[zoneid]['allowedmacaddresses'] = [x.strip() for x in result[zoneid]['allowedmacaddresses'].split(',')] else: result[zoneid]['allowedmacaddresses'] = list() return result<|docstring|>return list of configured zones :return: dictionary index by zoneid, containing dictionaries with zone properties<|endoftext|>
9c4f7967aad3f2cc131ea885ea6aa661da09de385d679fd72d5bf35099f6aca3
def fetch_template_data(self, zoneid): ' fetch template content from config\n ' for section in self._conf_handle.sections(): if ((section.find('template_for_zone_') == 0) and (section.split('_')[(- 1)] == str(zoneid))): if self._conf_handle.has_option(section, 'content'): return self._conf_handle.get(section, 'content') return None
fetch template content from config
src/opnsense/scripts/OPNsense/CaptivePortal/lib/__init__.py
fetch_template_data
ServiusHack/core
2,109
python
def fetch_template_data(self, zoneid): ' \n ' for section in self._conf_handle.sections(): if ((section.find('template_for_zone_') == 0) and (section.split('_')[(- 1)] == str(zoneid))): if self._conf_handle.has_option(section, 'content'): return self._conf_handle.get(section, 'content') return None
def fetch_template_data(self, zoneid): ' \n ' for section in self._conf_handle.sections(): if ((section.find('template_for_zone_') == 0) and (section.split('_')[(- 1)] == str(zoneid))): if self._conf_handle.has_option(section, 'content'): return self._conf_handle.get(section, 'content') return None<|docstring|>fetch template content from config<|endoftext|>
7ff47eba6a91f58f071c21d9df1e2de628ac5193ec79f6d6cc138ad6f8932252
def load_config(self): ' load config.xml\n ' tree = xml.etree.ElementTree.parse('/conf/config.xml') self.rootNode = tree.getroot()
load config.xml
src/opnsense/scripts/OPNsense/CaptivePortal/lib/__init__.py
load_config
ServiusHack/core
2,109
python
def load_config(self): ' \n ' tree = xml.etree.ElementTree.parse('/conf/config.xml') self.rootNode = tree.getroot()
def load_config(self): ' \n ' tree = xml.etree.ElementTree.parse('/conf/config.xml') self.rootNode = tree.getroot()<|docstring|>load config.xml<|endoftext|>
6bce3bdb397a2e943fc1245000feff2297c7f841d78b2d0d79509d14ba18c7ea
def get_template(self, fileid): ' fetch template content from config.xml\n :param fileid: internal fileid (field in template node)\n :return: string, bse64 encoded data or None if not found\n ' templates = self.rootNode.findall('./OPNsense/captiveportal/templates/template') if (templates is not None): for template in templates: if ((template.find('fileid') is not None) and (template.find('content') is not None)): if (template.find('fileid').text == fileid): return template.find('content').text return None
fetch template content from config.xml :param fileid: internal fileid (field in template node) :return: string, bse64 encoded data or None if not found
src/opnsense/scripts/OPNsense/CaptivePortal/lib/__init__.py
get_template
ServiusHack/core
2,109
python
def get_template(self, fileid): ' fetch template content from config.xml\n :param fileid: internal fileid (field in template node)\n :return: string, bse64 encoded data or None if not found\n ' templates = self.rootNode.findall('./OPNsense/captiveportal/templates/template') if (templates is not None): for template in templates: if ((template.find('fileid') is not None) and (template.find('content') is not None)): if (template.find('fileid').text == fileid): return template.find('content').text return None
def get_template(self, fileid): ' fetch template content from config.xml\n :param fileid: internal fileid (field in template node)\n :return: string, bse64 encoded data or None if not found\n ' templates = self.rootNode.findall('./OPNsense/captiveportal/templates/template') if (templates is not None): for template in templates: if ((template.find('fileid') is not None) and (template.find('content') is not None)): if (template.find('fileid').text == fileid): return template.find('content').text return None<|docstring|>fetch template content from config.xml :param fileid: internal fileid (field in template node) :return: string, bse64 encoded data or None if not found<|endoftext|>
0384f329873bb6a07ca16fc447b4d9f6361d7b9f0170b195b7ba8a5676b7f801
def ignore_aiohttp_ssl_eror(loop): 'Ignore aiohttp #3535 / cpython #13548 issue with SSL data after close\n\n\tThere is an issue in Python 3.7 up to 3.7.3 that over-reports a\n\tssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data\n\tafter close notify (_ssl.c:2609)) after we are already done with the\n\tconnection. See GitHub issues aio-libs/aiohttp#3535 and\n\tpython/cpython#13548.\n\n\tGiven a loop, this sets up an exception handler that ignores this specific\n\texception, but passes everything else on to the previous exception handler\n\tthis one replaces.\n\n\tChecks for fixed Python versions, disabling itself when running on 3.7.4+\n\tor 3.8.\n\n\t' orig_handler = loop.get_exception_handler() def ignore_ssl_error(loop, context): SSL_PROTOCOLS = (asyncio.sslproto.SSLProtocol,) try: import uvloop.loop except ImportError: pass else: SSL_PROTOCOLS = (*SSL_PROTOCOLS, uvloop.loop.SSLProtocol) if (context.get('message') in {'SSL error in data received', 'Fatal error on transport', 'SSL handshake failed'}): exception = context.get('exception') protocol = context.get('protocol') if (isinstance(exception, ssl.SSLError) and (exception.reason == 'KRB5_S_INIT') and isinstance(protocol, SSL_PROTOCOLS)): if loop.get_debug(): asyncio.log.logger.debug('Ignoring asyncio SSL KRB5_S_INIT error') return if (orig_handler is not None): orig_handler(loop, context) else: loop.default_exception_handler(context) loop.set_exception_handler(ignore_ssl_error)
Ignore aiohttp #3535 / cpython #13548 issue with SSL data after close There is an issue in Python 3.7 up to 3.7.3 that over-reports a ssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data after close notify (_ssl.c:2609)) after we are already done with the connection. See GitHub issues aio-libs/aiohttp#3535 and python/cpython#13548. Given a loop, this sets up an exception handler that ignores this specific exception, but passes everything else on to the previous exception handler this one replaces. Checks for fixed Python versions, disabling itself when running on 3.7.4+ or 3.8.
modules/userlink_checker.py
ignore_aiohttp_ssl_eror
BUND-development/proxy-master
11
python
def ignore_aiohttp_ssl_eror(loop): 'Ignore aiohttp #3535 / cpython #13548 issue with SSL data after close\n\n\tThere is an issue in Python 3.7 up to 3.7.3 that over-reports a\n\tssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data\n\tafter close notify (_ssl.c:2609)) after we are already done with the\n\tconnection. See GitHub issues aio-libs/aiohttp#3535 and\n\tpython/cpython#13548.\n\n\tGiven a loop, this sets up an exception handler that ignores this specific\n\texception, but passes everything else on to the previous exception handler\n\tthis one replaces.\n\n\tChecks for fixed Python versions, disabling itself when running on 3.7.4+\n\tor 3.8.\n\n\t' orig_handler = loop.get_exception_handler() def ignore_ssl_error(loop, context): SSL_PROTOCOLS = (asyncio.sslproto.SSLProtocol,) try: import uvloop.loop except ImportError: pass else: SSL_PROTOCOLS = (*SSL_PROTOCOLS, uvloop.loop.SSLProtocol) if (context.get('message') in {'SSL error in data received', 'Fatal error on transport', 'SSL handshake failed'}): exception = context.get('exception') protocol = context.get('protocol') if (isinstance(exception, ssl.SSLError) and (exception.reason == 'KRB5_S_INIT') and isinstance(protocol, SSL_PROTOCOLS)): if loop.get_debug(): asyncio.log.logger.debug('Ignoring asyncio SSL KRB5_S_INIT error') return if (orig_handler is not None): orig_handler(loop, context) else: loop.default_exception_handler(context) loop.set_exception_handler(ignore_ssl_error)
def ignore_aiohttp_ssl_eror(loop): 'Ignore aiohttp #3535 / cpython #13548 issue with SSL data after close\n\n\tThere is an issue in Python 3.7 up to 3.7.3 that over-reports a\n\tssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data\n\tafter close notify (_ssl.c:2609)) after we are already done with the\n\tconnection. See GitHub issues aio-libs/aiohttp#3535 and\n\tpython/cpython#13548.\n\n\tGiven a loop, this sets up an exception handler that ignores this specific\n\texception, but passes everything else on to the previous exception handler\n\tthis one replaces.\n\n\tChecks for fixed Python versions, disabling itself when running on 3.7.4+\n\tor 3.8.\n\n\t' orig_handler = loop.get_exception_handler() def ignore_ssl_error(loop, context): SSL_PROTOCOLS = (asyncio.sslproto.SSLProtocol,) try: import uvloop.loop except ImportError: pass else: SSL_PROTOCOLS = (*SSL_PROTOCOLS, uvloop.loop.SSLProtocol) if (context.get('message') in {'SSL error in data received', 'Fatal error on transport', 'SSL handshake failed'}): exception = context.get('exception') protocol = context.get('protocol') if (isinstance(exception, ssl.SSLError) and (exception.reason == 'KRB5_S_INIT') and isinstance(protocol, SSL_PROTOCOLS)): if loop.get_debug(): asyncio.log.logger.debug('Ignoring asyncio SSL KRB5_S_INIT error') return if (orig_handler is not None): orig_handler(loop, context) else: loop.default_exception_handler(context) loop.set_exception_handler(ignore_ssl_error)<|docstring|>Ignore aiohttp #3535 / cpython #13548 issue with SSL data after close There is an issue in Python 3.7 up to 3.7.3 that over-reports a ssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data after close notify (_ssl.c:2609)) after we are already done with the connection. See GitHub issues aio-libs/aiohttp#3535 and python/cpython#13548. 
Given a loop, this sets up an exception handler that ignores this specific exception, but passes everything else on to the previous exception handler this one replaces. Checks for fixed Python versions, disabling itself when running on 3.7.4+ or 3.8.<|endoftext|>
af479bee808b98b140bc54c413a00723b2ad383e8a68ee8eb3eb376e2441d2ea
def checkingData(self): '\n\t\tПроверка запрашиваемого типа запроса\n\t\t' if (self.TYPE in 'GET POST HEAD'): pass else: raise UnsupportedType(f'Unknown request type: {self.TYPE}')
Проверка запрашиваемого типа запроса
modules/userlink_checker.py
checkingData
BUND-development/proxy-master
11
python
def checkingData(self): '\n\t\t\n\t\t' if (self.TYPE in 'GET POST HEAD'): pass else: raise UnsupportedType(f'Unknown request type: {self.TYPE}')
def checkingData(self): '\n\t\t\n\t\t' if (self.TYPE in 'GET POST HEAD'): pass else: raise UnsupportedType(f'Unknown request type: {self.TYPE}')<|docstring|>Проверка запрашиваемого типа запроса<|endoftext|>
f0de7e9ec2c644e9631875bf8b8dc25e444d9ab03d068ee46c7577bd62c0176b
async def sendWithProxy(self, proxy, requestKwargs, **kwargs): '\n\t\tSending request\n\t\t' async with aiohttp.ClientSession(connector=ProxyConnector.from_url(proxy.formated), **kwargs) as session: if (self.TYPE == 'POST'): async with session.post(self.URL, ssl=False, **requestKwargs) as response: return (await response.text()) elif (self.TYPE == 'GET'): async with session.get(self.URL, ssl=False, **requestKwargs) as response: return (await response.text()) elif (self.TYPE == 'HEAD'): async with session.head(self.URL, ssl=False, **requestKwargs) as response: return (await response.text()) else: raise UnsupportedType(f'Unknown request type: {self.TYPE}')
Sending request
modules/userlink_checker.py
sendWithProxy
BUND-development/proxy-master
11
python
async def sendWithProxy(self, proxy, requestKwargs, **kwargs): '\n\t\t\n\t\t' async with aiohttp.ClientSession(connector=ProxyConnector.from_url(proxy.formated), **kwargs) as session: if (self.TYPE == 'POST'): async with session.post(self.URL, ssl=False, **requestKwargs) as response: return (await response.text()) elif (self.TYPE == 'GET'): async with session.get(self.URL, ssl=False, **requestKwargs) as response: return (await response.text()) elif (self.TYPE == 'HEAD'): async with session.head(self.URL, ssl=False, **requestKwargs) as response: return (await response.text()) else: raise UnsupportedType(f'Unknown request type: {self.TYPE}')
async def sendWithProxy(self, proxy, requestKwargs, **kwargs): '\n\t\t\n\t\t' async with aiohttp.ClientSession(connector=ProxyConnector.from_url(proxy.formated), **kwargs) as session: if (self.TYPE == 'POST'): async with session.post(self.URL, ssl=False, **requestKwargs) as response: return (await response.text()) elif (self.TYPE == 'GET'): async with session.get(self.URL, ssl=False, **requestKwargs) as response: return (await response.text()) elif (self.TYPE == 'HEAD'): async with session.head(self.URL, ssl=False, **requestKwargs) as response: return (await response.text()) else: raise UnsupportedType(f'Unknown request type: {self.TYPE}')<|docstring|>Sending request<|endoftext|>
f2e5236892859aaa17b88d2719e0129141dae549746077e026d1147f380f6eb4
async def userFilter(self, response): '\n\t\tUser filter function. Gets string of response. True if answer good, False if answer Bad\n\t\t' return True
User filter function. Gets string of response. True if answer good, False if answer Bad
modules/userlink_checker.py
userFilter
BUND-development/proxy-master
11
python
async def userFilter(self, response): '\n\t\t\n\t\t' return True
async def userFilter(self, response): '\n\t\t\n\t\t' return True<|docstring|>User filter function. Gets string of response. True if answer good, False if answer Bad<|endoftext|>
52c71046d16c2558e560a89e222015ff4a08bc655d8bb9920fc085b4be7ad9f0
async def startingCheck(self): ' \n\t\tAsync task for checking countries\n\t\t' send = backoff.on_exception(backoff.expo, Exception, max_time=(30 * self.MAXTRIES), max_tries=self.MAXTRIES, jitter=None)(self.sendWithProxy) while True: async with self.lock: if len(self.proxies): proxy = self.proxies.pop() else: break kwargs = {} if self.PARAMS: kwargs['params'] = self.params if self.DATA: kwargs['data'] = self.data headers = copy.deepcopy(self.headers) headers['User-Agent'] = self.agents[random.randint(0, (len(self.agents) - 1))] try: response = (await send(proxy, kwargs, timeout=self.TIMEOUT, headers=headers)) except UnsupportedType: raise UnsupportedType break except KeyboardInterrupt: for i in asyncio.all_tasks(): i.cancel() loop = asyncio.get_running_loop() loop.stop() break except Exception as e: async with self.lock: self.died.append(proxy) print((self.NAME + f'[{str(len(self.proxies))}]Died proxy: {proxy.normal}')) else: if (await self.userFilter(response)): async with self.lock: self.green.append(proxy) print(((self.NAME + colorama.Fore.GREEN) + f'[{str(len(self.proxies))}]Good proxy: {proxy.normal}')) else: async with self.lock: self.bad.append(proxy) print(((self.NAME + colorama.Fore.YELLOW) + f'[{str(len(self.proxies))}]Bad user`s check proxy: {proxy.normal}'))
Async task for checking countries
modules/userlink_checker.py
startingCheck
BUND-development/proxy-master
11
python
async def startingCheck(self): ' \n\t\t\n\t\t' send = backoff.on_exception(backoff.expo, Exception, max_time=(30 * self.MAXTRIES), max_tries=self.MAXTRIES, jitter=None)(self.sendWithProxy) while True: async with self.lock: if len(self.proxies): proxy = self.proxies.pop() else: break kwargs = {} if self.PARAMS: kwargs['params'] = self.params if self.DATA: kwargs['data'] = self.data headers = copy.deepcopy(self.headers) headers['User-Agent'] = self.agents[random.randint(0, (len(self.agents) - 1))] try: response = (await send(proxy, kwargs, timeout=self.TIMEOUT, headers=headers)) except UnsupportedType: raise UnsupportedType break except KeyboardInterrupt: for i in asyncio.all_tasks(): i.cancel() loop = asyncio.get_running_loop() loop.stop() break except Exception as e: async with self.lock: self.died.append(proxy) print((self.NAME + f'[{str(len(self.proxies))}]Died proxy: {proxy.normal}')) else: if (await self.userFilter(response)): async with self.lock: self.green.append(proxy) print(((self.NAME + colorama.Fore.GREEN) + f'[{str(len(self.proxies))}]Good proxy: {proxy.normal}')) else: async with self.lock: self.bad.append(proxy) print(((self.NAME + colorama.Fore.YELLOW) + f'[{str(len(self.proxies))}]Bad user`s check proxy: {proxy.normal}'))
async def startingCheck(self): ' \n\t\t\n\t\t' send = backoff.on_exception(backoff.expo, Exception, max_time=(30 * self.MAXTRIES), max_tries=self.MAXTRIES, jitter=None)(self.sendWithProxy) while True: async with self.lock: if len(self.proxies): proxy = self.proxies.pop() else: break kwargs = {} if self.PARAMS: kwargs['params'] = self.params if self.DATA: kwargs['data'] = self.data headers = copy.deepcopy(self.headers) headers['User-Agent'] = self.agents[random.randint(0, (len(self.agents) - 1))] try: response = (await send(proxy, kwargs, timeout=self.TIMEOUT, headers=headers)) except UnsupportedType: raise UnsupportedType break except KeyboardInterrupt: for i in asyncio.all_tasks(): i.cancel() loop = asyncio.get_running_loop() loop.stop() break except Exception as e: async with self.lock: self.died.append(proxy) print((self.NAME + f'[{str(len(self.proxies))}]Died proxy: {proxy.normal}')) else: if (await self.userFilter(response)): async with self.lock: self.green.append(proxy) print(((self.NAME + colorama.Fore.GREEN) + f'[{str(len(self.proxies))}]Good proxy: {proxy.normal}')) else: async with self.lock: self.bad.append(proxy) print(((self.NAME + colorama.Fore.YELLOW) + f'[{str(len(self.proxies))}]Bad user`s check proxy: {proxy.normal}'))<|docstring|>Async task for checking countries<|endoftext|>
0e964532c776cea2112bf2717ee5033d015f8f94802e792d27f6a341c2f7f42d
def get_fields_dict(table_name, fields): '\n\n This method goes into list of fields of particular table and find out the fields which match a specified conditions,\n if found they will be added to a dictionary along with the field which needs to be joined on.\n\n :param table_name: Name of a domain table\n :param fields: list of fields of a particular table\n :return: a dictionary\n ' fields_to_replace = dict() prefix_counter = 0 for field in fields: prefix_counter += 1 if (('_source_value' in field) and ((field[:(- 5)] + 'concept_id') in fields)): fields_to_replace[field] = {'name': field, 'join_field': (field[:(- 5)] + 'concept_id'), 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} elif (('_source_value' in field) and ((field[:(- 12)] + 'concept_id') in fields)): fields_to_replace[field] = {'name': field, 'join_field': (field[:(- 12)] + 'concept_id'), 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} elif (('_source_value' in field) and ((field[:(- 12)] + 'as_concept_id') in fields)): fields_to_replace[field] = {'name': field, 'join_field': (field[:(- 12)] + 'as_concept_id'), 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} elif (('_as_string' in field) and ((field[:(- 6)] + 'concept_id') in fields)): fields_to_replace[field] = {'name': field, 'join_field': (field[:(- 6)] + 'concept_id'), 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} elif ((table_name == cdr_consts.PROCEDURE_OCCURRENCE) and (field == cdr_consts.QUALIFIER_SOURCE_VALUE)): fields_to_replace[field] = {'name': field, 'join_field': 'modifier_concept_id', 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} return fields_to_replace
This method goes into list of fields of particular table and find out the fields which match a specified conditions, if found they will be added to a dictionary along with the field which needs to be joined on. :param table_name: Name of a domain table :param fields: list of fields of a particular table :return: a dictionary
data_steward/cdr_cleaner/cleaning_rules/fill_free_text_source_value.py
get_fields_dict
dcampbell-vumc/curation
0
python
def get_fields_dict(table_name, fields): '\n\n This method goes into list of fields of particular table and find out the fields which match a specified conditions,\n if found they will be added to a dictionary along with the field which needs to be joined on.\n\n :param table_name: Name of a domain table\n :param fields: list of fields of a particular table\n :return: a dictionary\n ' fields_to_replace = dict() prefix_counter = 0 for field in fields: prefix_counter += 1 if (('_source_value' in field) and ((field[:(- 5)] + 'concept_id') in fields)): fields_to_replace[field] = {'name': field, 'join_field': (field[:(- 5)] + 'concept_id'), 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} elif (('_source_value' in field) and ((field[:(- 12)] + 'concept_id') in fields)): fields_to_replace[field] = {'name': field, 'join_field': (field[:(- 12)] + 'concept_id'), 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} elif (('_source_value' in field) and ((field[:(- 12)] + 'as_concept_id') in fields)): fields_to_replace[field] = {'name': field, 'join_field': (field[:(- 12)] + 'as_concept_id'), 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} elif (('_as_string' in field) and ((field[:(- 6)] + 'concept_id') in fields)): fields_to_replace[field] = {'name': field, 'join_field': (field[:(- 6)] + 'concept_id'), 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} elif ((table_name == cdr_consts.PROCEDURE_OCCURRENCE) and (field == cdr_consts.QUALIFIER_SOURCE_VALUE)): fields_to_replace[field] = {'name': field, 'join_field': 'modifier_concept_id', 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} return fields_to_replace
def get_fields_dict(table_name, fields): '\n\n This method goes into list of fields of particular table and find out the fields which match a specified conditions,\n if found they will be added to a dictionary along with the field which needs to be joined on.\n\n :param table_name: Name of a domain table\n :param fields: list of fields of a particular table\n :return: a dictionary\n ' fields_to_replace = dict() prefix_counter = 0 for field in fields: prefix_counter += 1 if (('_source_value' in field) and ((field[:(- 5)] + 'concept_id') in fields)): fields_to_replace[field] = {'name': field, 'join_field': (field[:(- 5)] + 'concept_id'), 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} elif (('_source_value' in field) and ((field[:(- 12)] + 'concept_id') in fields)): fields_to_replace[field] = {'name': field, 'join_field': (field[:(- 12)] + 'concept_id'), 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} elif (('_source_value' in field) and ((field[:(- 12)] + 'as_concept_id') in fields)): fields_to_replace[field] = {'name': field, 'join_field': (field[:(- 12)] + 'as_concept_id'), 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} elif (('_as_string' in field) and ((field[:(- 6)] + 'concept_id') in fields)): fields_to_replace[field] = {'name': field, 'join_field': (field[:(- 6)] + 'concept_id'), 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} elif ((table_name == cdr_consts.PROCEDURE_OCCURRENCE) and (field == cdr_consts.QUALIFIER_SOURCE_VALUE)): fields_to_replace[field] = {'name': field, 'join_field': 'modifier_concept_id', 'prefix': (field[:3] + '_{counter}'.format(counter=prefix_counter))} return fields_to_replace<|docstring|>This method goes into list of fields of particular table and find out the fields which match a specified conditions, if found they will be added to a dictionary along with the field which needs to be joined on. 
:param table_name: Name of a domain table :param fields: list of fields of a particular table :return: a dictionary<|endoftext|>
12e76e81a296ec06d8ff359cb833aec8c99e378812c3db55d2cd47ad36c03347
def get_modified_columns(fields, fields_to_replace): '\n\n This method updates the columns by adding prefix to each column if the column is being replaced and\n joins it with other columns.\n\n :param fields: list of fields of a particular table\n :param fields_to_replace: dictionary of fields of a table which needs to be updated\n :return: a string\n ' col_exprs = [] for field in fields: if (field in fields_to_replace): col_expr = '{prefix}.concept_code as {name}'.format(prefix=fields_to_replace[field]['prefix'], name=fields_to_replace[field]['name']) else: col_expr = field col_exprs.append(col_expr) cols = ', '.join(col_exprs) return cols
This method updates the columns by adding prefix to each column if the column is being replaced and joins it with other columns. :param fields: list of fields of a particular table :param fields_to_replace: dictionary of fields of a table which needs to be updated :return: a string
data_steward/cdr_cleaner/cleaning_rules/fill_free_text_source_value.py
get_modified_columns
dcampbell-vumc/curation
0
python
def get_modified_columns(fields, fields_to_replace): '\n\n This method updates the columns by adding prefix to each column if the column is being replaced and\n joins it with other columns.\n\n :param fields: list of fields of a particular table\n :param fields_to_replace: dictionary of fields of a table which needs to be updated\n :return: a string\n ' col_exprs = [] for field in fields: if (field in fields_to_replace): col_expr = '{prefix}.concept_code as {name}'.format(prefix=fields_to_replace[field]['prefix'], name=fields_to_replace[field]['name']) else: col_expr = field col_exprs.append(col_expr) cols = ', '.join(col_exprs) return cols
def get_modified_columns(fields, fields_to_replace): '\n\n This method updates the columns by adding prefix to each column if the column is being replaced and\n joins it with other columns.\n\n :param fields: list of fields of a particular table\n :param fields_to_replace: dictionary of fields of a table which needs to be updated\n :return: a string\n ' col_exprs = [] for field in fields: if (field in fields_to_replace): col_expr = '{prefix}.concept_code as {name}'.format(prefix=fields_to_replace[field]['prefix'], name=fields_to_replace[field]['name']) else: col_expr = field col_exprs.append(col_expr) cols = ', '.join(col_exprs) return cols<|docstring|>This method updates the columns by adding prefix to each column if the column is being replaced and joins it with other columns. :param fields: list of fields of a particular table :param fields_to_replace: dictionary of fields of a table which needs to be updated :return: a string<|endoftext|>
5b8909c4b6aa74f6907477e49b4d28f8c006dd807bcfa3d8554d7affbbc8b36c
def get_full_join_expression(dataset_id, project_id, fields_to_replace): '\n\n This collects all the join expressions and joins them as a string and returns a string.\n\n :param dataset_id: Name of the dataset\n :param project_id: Name of the project\n :param fields_to_replace: dictionary of fields to be joined\n :return:\n ' join_expr = [] for field in fields_to_replace: left_join = LEFT_JOIN.format(project=project_id, dataset=dataset_id, concept_id_field=fields_to_replace[field]['join_field'], prefix='{}'.format(fields_to_replace[field]['prefix'])) join_expr.append(left_join) return ' '.join(join_expr)
This collects all the join expressions and joins them as a string and returns a string. :param dataset_id: Name of the dataset :param project_id: Name of the project :param fields_to_replace: dictionary of fields to be joined :return:
data_steward/cdr_cleaner/cleaning_rules/fill_free_text_source_value.py
get_full_join_expression
dcampbell-vumc/curation
0
python
def get_full_join_expression(dataset_id, project_id, fields_to_replace): '\n\n This collects all the join expressions and joins them as a string and returns a string.\n\n :param dataset_id: Name of the dataset\n :param project_id: Name of the project\n :param fields_to_replace: dictionary of fields to be joined\n :return:\n ' join_expr = [] for field in fields_to_replace: left_join = LEFT_JOIN.format(project=project_id, dataset=dataset_id, concept_id_field=fields_to_replace[field]['join_field'], prefix='{}'.format(fields_to_replace[field]['prefix'])) join_expr.append(left_join) return ' '.join(join_expr)
def get_full_join_expression(dataset_id, project_id, fields_to_replace): '\n\n This collects all the join expressions and joins them as a string and returns a string.\n\n :param dataset_id: Name of the dataset\n :param project_id: Name of the project\n :param fields_to_replace: dictionary of fields to be joined\n :return:\n ' join_expr = [] for field in fields_to_replace: left_join = LEFT_JOIN.format(project=project_id, dataset=dataset_id, concept_id_field=fields_to_replace[field]['join_field'], prefix='{}'.format(fields_to_replace[field]['prefix'])) join_expr.append(left_join) return ' '.join(join_expr)<|docstring|>This collects all the join expressions and joins them as a string and returns a string. :param dataset_id: Name of the dataset :param project_id: Name of the project :param fields_to_replace: dictionary of fields to be joined :return:<|endoftext|>
7167687e84e05791acd24b5a22e3fd2bb53449d15c5236d0bcf5e2a97cdec0dc
def get_fill_freetext_source_value_fields_queries(project_id, dataset_id): '\n\n Generates queries to replace the source_value_fields with the concept_code.\n\n :param project_id: Name of the project where the dataset on which the rules are to be applied on\n :param dataset_id: Name of the dataset on which the rules are to be applied on\n :return: A list of queries to be run.\n ' queries_list = [] for table in resources.CDM_TABLES: fields = [field['name'] for field in resources.fields_for(table)] fields_to_replace = get_fields_dict(table, fields) if fields_to_replace: cols = get_modified_columns(fields, fields_to_replace) full_join_expression = get_full_join_expression(dataset_id, project_id, fields_to_replace) query = dict() query[cdr_consts.QUERY] = FIELD_REPLACE_QUERY.format(columns=cols, table_name=table, dataset=dataset_id, project=project_id, join_expression=full_join_expression) query[cdr_consts.DESTINATION_TABLE] = table query[cdr_consts.DISPOSITION] = bq_consts.WRITE_TRUNCATE query[cdr_consts.DESTINATION_DATASET] = dataset_id queries_list.append(query) return queries_list
Generates queries to replace the source_value_fields with the concept_code. :param project_id: Name of the project where the dataset on which the rules are to be applied on :param dataset_id: Name of the dataset on which the rules are to be applied on :return: A list of queries to be run.
data_steward/cdr_cleaner/cleaning_rules/fill_free_text_source_value.py
get_fill_freetext_source_value_fields_queries
dcampbell-vumc/curation
0
python
def get_fill_freetext_source_value_fields_queries(project_id, dataset_id): '\n\n Generates queries to replace the source_value_fields with the concept_code.\n\n :param project_id: Name of the project where the dataset on which the rules are to be applied on\n :param dataset_id: Name of the dataset on which the rules are to be applied on\n :return: A list of queries to be run.\n ' queries_list = [] for table in resources.CDM_TABLES: fields = [field['name'] for field in resources.fields_for(table)] fields_to_replace = get_fields_dict(table, fields) if fields_to_replace: cols = get_modified_columns(fields, fields_to_replace) full_join_expression = get_full_join_expression(dataset_id, project_id, fields_to_replace) query = dict() query[cdr_consts.QUERY] = FIELD_REPLACE_QUERY.format(columns=cols, table_name=table, dataset=dataset_id, project=project_id, join_expression=full_join_expression) query[cdr_consts.DESTINATION_TABLE] = table query[cdr_consts.DISPOSITION] = bq_consts.WRITE_TRUNCATE query[cdr_consts.DESTINATION_DATASET] = dataset_id queries_list.append(query) return queries_list
def get_fill_freetext_source_value_fields_queries(project_id, dataset_id): '\n\n Generates queries to replace the source_value_fields with the concept_code.\n\n :param project_id: Name of the project where the dataset on which the rules are to be applied on\n :param dataset_id: Name of the dataset on which the rules are to be applied on\n :return: A list of queries to be run.\n ' queries_list = [] for table in resources.CDM_TABLES: fields = [field['name'] for field in resources.fields_for(table)] fields_to_replace = get_fields_dict(table, fields) if fields_to_replace: cols = get_modified_columns(fields, fields_to_replace) full_join_expression = get_full_join_expression(dataset_id, project_id, fields_to_replace) query = dict() query[cdr_consts.QUERY] = FIELD_REPLACE_QUERY.format(columns=cols, table_name=table, dataset=dataset_id, project=project_id, join_expression=full_join_expression) query[cdr_consts.DESTINATION_TABLE] = table query[cdr_consts.DISPOSITION] = bq_consts.WRITE_TRUNCATE query[cdr_consts.DESTINATION_DATASET] = dataset_id queries_list.append(query) return queries_list<|docstring|>Generates queries to replace the source_value_fields with the concept_code. :param project_id: Name of the project where the dataset on which the rules are to be applied on :param dataset_id: Name of the dataset on which the rules are to be applied on :return: A list of queries to be run.<|endoftext|>
b13c6480b1705649ab2622d209b947ba986dfc7c8416ba5251f969299c0e0f3f
def read_ants_stats(ants_stats_file, ants_brainvols_file, mri_file, force_error=True): '\n Reads in an ANTS stats file along with associated mri_file (for voxel sizes) and converts to a measures dictionary with keys:\n [\'structure\':XX, \'items\': [{\'name\': \'NVoxels\', \'description\': \'Number of voxels\',\'value\':XX, \'units\':\'unitless\'},\n {\'name\': \'Volume_mm3\', \'description\': \'\'Volume\', \'value\':XX, \'units\':\'mm^3\'}]]\n :param ants_stats_file: path to ANTS segmentation output file named "antslabelstats"\n :param ants_brainvols_file: path to ANTS segmentation output for Bvol, Gvol, Wvol, and ThicknessSum (called antsbrainvols"\n :param mri_file: mri file to extract voxel sizes from\n :param freesurfer_lookup_table: Lookup table used to map 1st column of ants_stats_file label numbers to structure names\n :return: measures is a list of dictionaries as defined above\n ' ants_stats = pd.read_csv(ants_stats_file) brain_vols = pd.read_csv(ants_brainvols_file) img = nib.load(mri_file) vox_size = np.product(list(img.header.get_zooms())) with open(cde_file, 'r') as fp: ants_cde = json.load(fp) measures = [] changed = False for (key, j) in brain_vols.T.iterrows(): value = j.values[0] keytuple = ANTSDKT(structure=(key if ('vol' in key.lower()) else 'Brain'), hemi=None, measure=('Volume' if ('vol' in key.lower()) else key), unit=('mm^3' if ('vol' in key.lower()) else ('mm' if ('Thickness' in key) else None))) if (str(keytuple) not in ants_cde): ants_cde['count'] += 1 ants_cde[str(keytuple)] = {'id': f"{ants_cde['count']:0>6d}", 'label': f'{key} ({keytuple.unit})'} if force_error: raise ValueError(f'Key {keytuple} not found in ANTS data elements file') changed = True if ('vol' in key.lower()): measures.append((f"{ants_cde[str(keytuple)]['id']}", str(int(value)))) else: measures.append((f"{ants_cde[str(keytuple)]['id']}", str(value))) for row in ants_stats.iterrows(): structure = None for (key, val) in row[1].items(): if (key == 'Label'): segid = int(val) 
structure = get_id_to_struct(segid) if (structure is None): raise ValueError(f'{int(val):d} did not return any structure') continue if (('VolumeInVoxels' not in key) and ('Area' not in key)): continue (hemi, measure, unit) = get_details(key, structure) key_tuple = ANTSDKT(structure=structure, hemi=hemi, measure=measure, unit=unit) label = f'{structure} {measure} ({unit})' if (str(key_tuple) not in ants_cde): ants_cde['count'] += 1 ants_cde[str(key_tuple)] = {'id': f"{ants_cde['count']:0>6d}", 'structure_id': segid, 'label': label} if force_error: raise ValueError(f'Key {key_tuple} not found in ANTS data elements file') changed = True if ('VolumeInVoxels' in key): measure = 'Volume' unit = 'mm^3' key_tuple = ANTSDKT(structure=structure, hemi=hemi, measure=measure, unit=unit) label = f'{structure} {measure} ({unit})' if (str(key_tuple) not in ants_cde): ants_cde['count'] += 1 ants_cde[str(key_tuple)] = {'id': f"{ants_cde['count']:0>6d}", 'structure_id': segid, 'label': label} if force_error: raise ValueError(f'Key {key_tuple} not found in ANTS data elements file') changed = True measures.append((f"{ants_cde[str(key_tuple)]['id']}", str((val * vox_size)))) if changed: with open(cde_file, 'w') as fp: json.dump(ants_cde, fp, indent=2) return measures
Reads in an ANTS stats file along with associated mri_file (for voxel sizes) and converts to a measures dictionary with keys: ['structure':XX, 'items': [{'name': 'NVoxels', 'description': 'Number of voxels','value':XX, 'units':'unitless'}, {'name': 'Volume_mm3', 'description': ''Volume', 'value':XX, 'units':'mm^3'}]] :param ants_stats_file: path to ANTS segmentation output file named "antslabelstats" :param ants_brainvols_file: path to ANTS segmentation output for Bvol, Gvol, Wvol, and ThicknessSum (called antsbrainvols" :param mri_file: mri file to extract voxel sizes from :param freesurfer_lookup_table: Lookup table used to map 1st column of ants_stats_file label numbers to structure names :return: measures is a list of dictionaries as defined above
ants_seg_to_nidm/antsutils.py
read_ants_stats
satra/ants_seg_to_nidm
0
python
def read_ants_stats(ants_stats_file, ants_brainvols_file, mri_file, force_error=True): '\n Reads in an ANTS stats file along with associated mri_file (for voxel sizes) and converts to a measures dictionary with keys:\n [\'structure\':XX, \'items\': [{\'name\': \'NVoxels\', \'description\': \'Number of voxels\',\'value\':XX, \'units\':\'unitless\'},\n {\'name\': \'Volume_mm3\', \'description\': \'\'Volume\', \'value\':XX, \'units\':\'mm^3\'}]]\n :param ants_stats_file: path to ANTS segmentation output file named "antslabelstats"\n :param ants_brainvols_file: path to ANTS segmentation output for Bvol, Gvol, Wvol, and ThicknessSum (called antsbrainvols"\n :param mri_file: mri file to extract voxel sizes from\n :param freesurfer_lookup_table: Lookup table used to map 1st column of ants_stats_file label numbers to structure names\n :return: measures is a list of dictionaries as defined above\n ' ants_stats = pd.read_csv(ants_stats_file) brain_vols = pd.read_csv(ants_brainvols_file) img = nib.load(mri_file) vox_size = np.product(list(img.header.get_zooms())) with open(cde_file, 'r') as fp: ants_cde = json.load(fp) measures = [] changed = False for (key, j) in brain_vols.T.iterrows(): value = j.values[0] keytuple = ANTSDKT(structure=(key if ('vol' in key.lower()) else 'Brain'), hemi=None, measure=('Volume' if ('vol' in key.lower()) else key), unit=('mm^3' if ('vol' in key.lower()) else ('mm' if ('Thickness' in key) else None))) if (str(keytuple) not in ants_cde): ants_cde['count'] += 1 ants_cde[str(keytuple)] = {'id': f"{ants_cde['count']:0>6d}", 'label': f'{key} ({keytuple.unit})'} if force_error: raise ValueError(f'Key {keytuple} not found in ANTS data elements file') changed = True if ('vol' in key.lower()): measures.append((f"{ants_cde[str(keytuple)]['id']}", str(int(value)))) else: measures.append((f"{ants_cde[str(keytuple)]['id']}", str(value))) for row in ants_stats.iterrows(): structure = None for (key, val) in row[1].items(): if (key == 'Label'): segid = int(val) 
structure = get_id_to_struct(segid) if (structure is None): raise ValueError(f'{int(val):d} did not return any structure') continue if (('VolumeInVoxels' not in key) and ('Area' not in key)): continue (hemi, measure, unit) = get_details(key, structure) key_tuple = ANTSDKT(structure=structure, hemi=hemi, measure=measure, unit=unit) label = f'{structure} {measure} ({unit})' if (str(key_tuple) not in ants_cde): ants_cde['count'] += 1 ants_cde[str(key_tuple)] = {'id': f"{ants_cde['count']:0>6d}", 'structure_id': segid, 'label': label} if force_error: raise ValueError(f'Key {key_tuple} not found in ANTS data elements file') changed = True if ('VolumeInVoxels' in key): measure = 'Volume' unit = 'mm^3' key_tuple = ANTSDKT(structure=structure, hemi=hemi, measure=measure, unit=unit) label = f'{structure} {measure} ({unit})' if (str(key_tuple) not in ants_cde): ants_cde['count'] += 1 ants_cde[str(key_tuple)] = {'id': f"{ants_cde['count']:0>6d}", 'structure_id': segid, 'label': label} if force_error: raise ValueError(f'Key {key_tuple} not found in ANTS data elements file') changed = True measures.append((f"{ants_cde[str(key_tuple)]['id']}", str((val * vox_size)))) if changed: with open(cde_file, 'w') as fp: json.dump(ants_cde, fp, indent=2) return measures
def read_ants_stats(ants_stats_file, ants_brainvols_file, mri_file, force_error=True): '\n Reads in an ANTS stats file along with associated mri_file (for voxel sizes) and converts to a measures dictionary with keys:\n [\'structure\':XX, \'items\': [{\'name\': \'NVoxels\', \'description\': \'Number of voxels\',\'value\':XX, \'units\':\'unitless\'},\n {\'name\': \'Volume_mm3\', \'description\': \'\'Volume\', \'value\':XX, \'units\':\'mm^3\'}]]\n :param ants_stats_file: path to ANTS segmentation output file named "antslabelstats"\n :param ants_brainvols_file: path to ANTS segmentation output for Bvol, Gvol, Wvol, and ThicknessSum (called antsbrainvols"\n :param mri_file: mri file to extract voxel sizes from\n :param freesurfer_lookup_table: Lookup table used to map 1st column of ants_stats_file label numbers to structure names\n :return: measures is a list of dictionaries as defined above\n ' ants_stats = pd.read_csv(ants_stats_file) brain_vols = pd.read_csv(ants_brainvols_file) img = nib.load(mri_file) vox_size = np.product(list(img.header.get_zooms())) with open(cde_file, 'r') as fp: ants_cde = json.load(fp) measures = [] changed = False for (key, j) in brain_vols.T.iterrows(): value = j.values[0] keytuple = ANTSDKT(structure=(key if ('vol' in key.lower()) else 'Brain'), hemi=None, measure=('Volume' if ('vol' in key.lower()) else key), unit=('mm^3' if ('vol' in key.lower()) else ('mm' if ('Thickness' in key) else None))) if (str(keytuple) not in ants_cde): ants_cde['count'] += 1 ants_cde[str(keytuple)] = {'id': f"{ants_cde['count']:0>6d}", 'label': f'{key} ({keytuple.unit})'} if force_error: raise ValueError(f'Key {keytuple} not found in ANTS data elements file') changed = True if ('vol' in key.lower()): measures.append((f"{ants_cde[str(keytuple)]['id']}", str(int(value)))) else: measures.append((f"{ants_cde[str(keytuple)]['id']}", str(value))) for row in ants_stats.iterrows(): structure = None for (key, val) in row[1].items(): if (key == 'Label'): segid = int(val) 
structure = get_id_to_struct(segid) if (structure is None): raise ValueError(f'{int(val):d} did not return any structure') continue if (('VolumeInVoxels' not in key) and ('Area' not in key)): continue (hemi, measure, unit) = get_details(key, structure) key_tuple = ANTSDKT(structure=structure, hemi=hemi, measure=measure, unit=unit) label = f'{structure} {measure} ({unit})' if (str(key_tuple) not in ants_cde): ants_cde['count'] += 1 ants_cde[str(key_tuple)] = {'id': f"{ants_cde['count']:0>6d}", 'structure_id': segid, 'label': label} if force_error: raise ValueError(f'Key {key_tuple} not found in ANTS data elements file') changed = True if ('VolumeInVoxels' in key): measure = 'Volume' unit = 'mm^3' key_tuple = ANTSDKT(structure=structure, hemi=hemi, measure=measure, unit=unit) label = f'{structure} {measure} ({unit})' if (str(key_tuple) not in ants_cde): ants_cde['count'] += 1 ants_cde[str(key_tuple)] = {'id': f"{ants_cde['count']:0>6d}", 'structure_id': segid, 'label': label} if force_error: raise ValueError(f'Key {key_tuple} not found in ANTS data elements file') changed = True measures.append((f"{ants_cde[str(key_tuple)]['id']}", str((val * vox_size)))) if changed: with open(cde_file, 'w') as fp: json.dump(ants_cde, fp, indent=2) return measures<|docstring|>Reads in an ANTS stats file along with associated mri_file (for voxel sizes) and converts to a measures dictionary with keys: ['structure':XX, 'items': [{'name': 'NVoxels', 'description': 'Number of voxels','value':XX, 'units':'unitless'}, {'name': 'Volume_mm3', 'description': ''Volume', 'value':XX, 'units':'mm^3'}]] :param ants_stats_file: path to ANTS segmentation output file named "antslabelstats" :param ants_brainvols_file: path to ANTS segmentation output for Bvol, Gvol, Wvol, and ThicknessSum (called antsbrainvols" :param mri_file: mri file to extract voxel sizes from :param freesurfer_lookup_table: Lookup table used to map 1st column of ants_stats_file label numbers to structure names :return: measures is 
a list of dictionaries as defined above<|endoftext|>
2c3d0388620e9a33ab813cc4e041bcaf1666802c27df91a3c21972c50a11a453
def create_ants_mapper(): 'Create FreeSurfer to ReproNim mapping information\n ' with open(map_file, 'r') as fp: ants_map = json.load(fp) with open(cde_file, 'r') as fp: ants_cde = json.load(fp) s = ants_map['Structures'] m = ants_map['Measures'] for key in ants_cde: if (key == 'count'): continue key_tuple = eval(key) sk = key_tuple.structure mk = key_tuple.measure hk = hemiless(sk) if (hk in s): if (sk not in s[hk]['antskey']): s[hk]['antskey'].append(sk) else: s[hk] = dict(isAbout=None, antskey=[sk]) if (mk not in m): m[mk] = dict(measureOf=None, datumType=None, hasUnit=key_tuple.unit) if ((s[hk]['isAbout'] is not None) and (('UNKNOWN' not in s[hk]['isAbout']) and ('CUSTOM' not in s[hk]['isAbout']))): ants_cde[key]['isAbout'] = s[hk]['isAbout'] if (m[key_tuple.measure]['measureOf'] is not None): ants_cde[key].update(**m[key_tuple.measure]) with open(map_file, 'w') as fp: json.dump(ants_map, fp, sort_keys=True, indent=2) fp.write('\n') with open(cde_file, 'w') as fp: json.dump(ants_cde, fp, indent=2) fp.write('\n') return (ants_map, ants_cde)
Create FreeSurfer to ReproNim mapping information
ants_seg_to_nidm/antsutils.py
create_ants_mapper
satra/ants_seg_to_nidm
0
python
def create_ants_mapper(): '\n ' with open(map_file, 'r') as fp: ants_map = json.load(fp) with open(cde_file, 'r') as fp: ants_cde = json.load(fp) s = ants_map['Structures'] m = ants_map['Measures'] for key in ants_cde: if (key == 'count'): continue key_tuple = eval(key) sk = key_tuple.structure mk = key_tuple.measure hk = hemiless(sk) if (hk in s): if (sk not in s[hk]['antskey']): s[hk]['antskey'].append(sk) else: s[hk] = dict(isAbout=None, antskey=[sk]) if (mk not in m): m[mk] = dict(measureOf=None, datumType=None, hasUnit=key_tuple.unit) if ((s[hk]['isAbout'] is not None) and (('UNKNOWN' not in s[hk]['isAbout']) and ('CUSTOM' not in s[hk]['isAbout']))): ants_cde[key]['isAbout'] = s[hk]['isAbout'] if (m[key_tuple.measure]['measureOf'] is not None): ants_cde[key].update(**m[key_tuple.measure]) with open(map_file, 'w') as fp: json.dump(ants_map, fp, sort_keys=True, indent=2) fp.write('\n') with open(cde_file, 'w') as fp: json.dump(ants_cde, fp, indent=2) fp.write('\n') return (ants_map, ants_cde)
def create_ants_mapper(): '\n ' with open(map_file, 'r') as fp: ants_map = json.load(fp) with open(cde_file, 'r') as fp: ants_cde = json.load(fp) s = ants_map['Structures'] m = ants_map['Measures'] for key in ants_cde: if (key == 'count'): continue key_tuple = eval(key) sk = key_tuple.structure mk = key_tuple.measure hk = hemiless(sk) if (hk in s): if (sk not in s[hk]['antskey']): s[hk]['antskey'].append(sk) else: s[hk] = dict(isAbout=None, antskey=[sk]) if (mk not in m): m[mk] = dict(measureOf=None, datumType=None, hasUnit=key_tuple.unit) if ((s[hk]['isAbout'] is not None) and (('UNKNOWN' not in s[hk]['isAbout']) and ('CUSTOM' not in s[hk]['isAbout']))): ants_cde[key]['isAbout'] = s[hk]['isAbout'] if (m[key_tuple.measure]['measureOf'] is not None): ants_cde[key].update(**m[key_tuple.measure]) with open(map_file, 'w') as fp: json.dump(ants_map, fp, sort_keys=True, indent=2) fp.write('\n') with open(cde_file, 'w') as fp: json.dump(ants_cde, fp, indent=2) fp.write('\n') return (ants_map, ants_cde)<|docstring|>Create FreeSurfer to ReproNim mapping information<|endoftext|>
25b33258d789b7dacbc0906369f1ae525482348e1e005f3b501c6ad8a9f2c47f
def create_cde_graph(restrict_to=None): 'Create an RDFLIB graph with the FreeSurfer CDEs\n\n Any CDE that has a mapping will be mapped\n ' with open(cde_file, 'r') as fp: ants_cde = json.load(fp) from nidm.core import Constants ants = Constants.ANTS nidm = Constants.NIDM g = rl.Graph() g.bind('ants', ants) g.bind('nidm', nidm) g.bind('uberon', 'http://purl.obolibrary.org/obo/UBERON_') g.bind('ilx', 'http://uri.interlex.org/base/ilx_') for (key, value) in ants_cde.items(): if (key == 'count'): continue if (restrict_to is not None): if (value['id'] not in restrict_to): continue for (subkey, item) in value.items(): if (subkey == 'id'): antsid = ('ants_' + item) g.add((ants[antsid], rl.RDF.type, ants['DataElement'])) continue if ((item is None) or ('unknown' in str(item))): continue if (subkey in ['isAbout', 'datumType', 'measureOf']): g.add((ants[antsid], nidm[subkey], rl.URIRef(item))) elif (subkey in ['hasUnit']): g.add((ants[antsid], nidm[subkey], rl.Literal(item))) elif isinstance(item, rl.URIRef): g.add((ants[antsid], ants[subkey], item)) else: g.add((ants[antsid], ants[subkey], rl.Literal(item))) key_tuple = eval(key) for (subkey, item) in key_tuple._asdict().items(): if (item is None): continue if (subkey == 'hemi'): g.add((ants[antsid], nidm['hasLaterality'], rl.Literal(item))) else: g.add((ants[antsid], ants[subkey], rl.Literal(item))) return g
Create an RDFLIB graph with the FreeSurfer CDEs Any CDE that has a mapping will be mapped
ants_seg_to_nidm/antsutils.py
create_cde_graph
satra/ants_seg_to_nidm
0
python
def create_cde_graph(restrict_to=None): 'Create an RDFLIB graph with the FreeSurfer CDEs\n\n Any CDE that has a mapping will be mapped\n ' with open(cde_file, 'r') as fp: ants_cde = json.load(fp) from nidm.core import Constants ants = Constants.ANTS nidm = Constants.NIDM g = rl.Graph() g.bind('ants', ants) g.bind('nidm', nidm) g.bind('uberon', 'http://purl.obolibrary.org/obo/UBERON_') g.bind('ilx', 'http://uri.interlex.org/base/ilx_') for (key, value) in ants_cde.items(): if (key == 'count'): continue if (restrict_to is not None): if (value['id'] not in restrict_to): continue for (subkey, item) in value.items(): if (subkey == 'id'): antsid = ('ants_' + item) g.add((ants[antsid], rl.RDF.type, ants['DataElement'])) continue if ((item is None) or ('unknown' in str(item))): continue if (subkey in ['isAbout', 'datumType', 'measureOf']): g.add((ants[antsid], nidm[subkey], rl.URIRef(item))) elif (subkey in ['hasUnit']): g.add((ants[antsid], nidm[subkey], rl.Literal(item))) elif isinstance(item, rl.URIRef): g.add((ants[antsid], ants[subkey], item)) else: g.add((ants[antsid], ants[subkey], rl.Literal(item))) key_tuple = eval(key) for (subkey, item) in key_tuple._asdict().items(): if (item is None): continue if (subkey == 'hemi'): g.add((ants[antsid], nidm['hasLaterality'], rl.Literal(item))) else: g.add((ants[antsid], ants[subkey], rl.Literal(item))) return g
def create_cde_graph(restrict_to=None): 'Create an RDFLIB graph with the FreeSurfer CDEs\n\n Any CDE that has a mapping will be mapped\n ' with open(cde_file, 'r') as fp: ants_cde = json.load(fp) from nidm.core import Constants ants = Constants.ANTS nidm = Constants.NIDM g = rl.Graph() g.bind('ants', ants) g.bind('nidm', nidm) g.bind('uberon', 'http://purl.obolibrary.org/obo/UBERON_') g.bind('ilx', 'http://uri.interlex.org/base/ilx_') for (key, value) in ants_cde.items(): if (key == 'count'): continue if (restrict_to is not None): if (value['id'] not in restrict_to): continue for (subkey, item) in value.items(): if (subkey == 'id'): antsid = ('ants_' + item) g.add((ants[antsid], rl.RDF.type, ants['DataElement'])) continue if ((item is None) or ('unknown' in str(item))): continue if (subkey in ['isAbout', 'datumType', 'measureOf']): g.add((ants[antsid], nidm[subkey], rl.URIRef(item))) elif (subkey in ['hasUnit']): g.add((ants[antsid], nidm[subkey], rl.Literal(item))) elif isinstance(item, rl.URIRef): g.add((ants[antsid], ants[subkey], item)) else: g.add((ants[antsid], ants[subkey], rl.Literal(item))) key_tuple = eval(key) for (subkey, item) in key_tuple._asdict().items(): if (item is None): continue if (subkey == 'hemi'): g.add((ants[antsid], nidm['hasLaterality'], rl.Literal(item))) else: g.add((ants[antsid], ants[subkey], rl.Literal(item))) return g<|docstring|>Create an RDFLIB graph with the FreeSurfer CDEs Any CDE that has a mapping will be mapped<|endoftext|>
97cd44fede60408c9f9ec7ab939decf9aeffa06bac85fc709a238df1456416af
def convert_stats_to_nidm(stats): 'Convert a stats record into a NIDM entity\n\n Returns the entity and the prov document\n ' from nidm.core import Constants from nidm.experiment.Core import getUUID import prov ants = prov.model.Namespace('ants', str(Constants.ANTS)) niiri = prov.model.Namespace('niiri', str(Constants.NIIRI)) nidm = prov.model.Namespace('nidm', 'http://purl.org/nidash/nidm#') doc = prov.model.ProvDocument() e = doc.entity(identifier=niiri[getUUID()]) e.add_asserted_type(nidm['ANTSStatsCollection']) e.add_attributes({ants[('ants_' + val[0])]: prov.model.Literal(val[1], datatype=(prov.model.XSD['float'] if ('.' in val[1]) else prov.model.XSD['integer'])) for val in stats}) return (e, doc)
Convert a stats record into a NIDM entity Returns the entity and the prov document
ants_seg_to_nidm/antsutils.py
convert_stats_to_nidm
satra/ants_seg_to_nidm
0
python
def convert_stats_to_nidm(stats): 'Convert a stats record into a NIDM entity\n\n Returns the entity and the prov document\n ' from nidm.core import Constants from nidm.experiment.Core import getUUID import prov ants = prov.model.Namespace('ants', str(Constants.ANTS)) niiri = prov.model.Namespace('niiri', str(Constants.NIIRI)) nidm = prov.model.Namespace('nidm', 'http://purl.org/nidash/nidm#') doc = prov.model.ProvDocument() e = doc.entity(identifier=niiri[getUUID()]) e.add_asserted_type(nidm['ANTSStatsCollection']) e.add_attributes({ants[('ants_' + val[0])]: prov.model.Literal(val[1], datatype=(prov.model.XSD['float'] if ('.' in val[1]) else prov.model.XSD['integer'])) for val in stats}) return (e, doc)
def convert_stats_to_nidm(stats): 'Convert a stats record into a NIDM entity\n\n Returns the entity and the prov document\n ' from nidm.core import Constants from nidm.experiment.Core import getUUID import prov ants = prov.model.Namespace('ants', str(Constants.ANTS)) niiri = prov.model.Namespace('niiri', str(Constants.NIIRI)) nidm = prov.model.Namespace('nidm', 'http://purl.org/nidash/nidm#') doc = prov.model.ProvDocument() e = doc.entity(identifier=niiri[getUUID()]) e.add_asserted_type(nidm['ANTSStatsCollection']) e.add_attributes({ants[('ants_' + val[0])]: prov.model.Literal(val[1], datatype=(prov.model.XSD['float'] if ('.' in val[1]) else prov.model.XSD['integer'])) for val in stats}) return (e, doc)<|docstring|>Convert a stats record into a NIDM entity Returns the entity and the prov document<|endoftext|>
6ceca7db8d6c99cc3163dea1ac0e6ccc703af654f0912abca896cbeb167be61e
def get_commands(servo): 'Get specific flash commands for Zork\n\n Each board needs specific commands including the voltage for Vref, to turn\n on and turn off the SPI flashThe get_*_commands() functions provide a\n board-specific set of commands for these tasks. The voltage for this board\n needs to be set to 1.8 V.\n\n Args:\n servo (servo_lib.Servo): The servo connected to the target DUT.\n\n Returns:\n list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]\n dut_control*=2d arrays formmated like [["cmd1", "arg1", "arg2"],\n ["cmd2", "arg3", "arg4"]]\n where cmd1 will be run before cmd2\n flashrom_cmd=command to flash via flashrom\n futility_cmd=command to flash via futility\n ' dut_control_on = [] dut_control_off = [] if servo.is_v2: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', 'servo_present:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', 'servo_present:off']) programmer = ('ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial) elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on', 'cold_reset:on', 'servo_present:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off', 'cold_reset:off', 'servo_present:off']) programmer = ('raiden_debug_spi:serial=%s' % servo.serial) elif servo.is_ccd: programmer = ('raiden_debug_spi:target=AP,serial=%s' % servo.serial) else: raise Exception(('%s not supported' % servo.version)) flashrom_cmd = ['flashrom', '-p', programmer, '-w'] futility_cmd = ['futility', 'update', '-p', programmer, '-i'] return [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]
Get specific flash commands for Zork Each board needs specific commands including the voltage for Vref, to turn on and turn off the SPI flashThe get_*_commands() functions provide a board-specific set of commands for these tasks. The voltage for this board needs to be set to 1.8 V. Args: servo (servo_lib.Servo): The servo connected to the target DUT. Returns: list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd] dut_control*=2d arrays formmated like [["cmd1", "arg1", "arg2"], ["cmd2", "arg3", "arg4"]] where cmd1 will be run before cmd2 flashrom_cmd=command to flash via flashrom futility_cmd=command to flash via futility
lib/firmware/ap_firmware_config/zork.py
get_commands
khromiumos/chromiumos-chromite
0
python
def get_commands(servo): 'Get specific flash commands for Zork\n\n Each board needs specific commands including the voltage for Vref, to turn\n on and turn off the SPI flashThe get_*_commands() functions provide a\n board-specific set of commands for these tasks. The voltage for this board\n needs to be set to 1.8 V.\n\n Args:\n servo (servo_lib.Servo): The servo connected to the target DUT.\n\n Returns:\n list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]\n dut_control*=2d arrays formmated like [["cmd1", "arg1", "arg2"],\n ["cmd2", "arg3", "arg4"]]\n where cmd1 will be run before cmd2\n flashrom_cmd=command to flash via flashrom\n futility_cmd=command to flash via futility\n ' dut_control_on = [] dut_control_off = [] if servo.is_v2: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', 'servo_present:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', 'servo_present:off']) programmer = ('ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial) elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on', 'cold_reset:on', 'servo_present:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off', 'cold_reset:off', 'servo_present:off']) programmer = ('raiden_debug_spi:serial=%s' % servo.serial) elif servo.is_ccd: programmer = ('raiden_debug_spi:target=AP,serial=%s' % servo.serial) else: raise Exception(('%s not supported' % servo.version)) flashrom_cmd = ['flashrom', '-p', programmer, '-w'] futility_cmd = ['futility', 'update', '-p', programmer, '-i'] return [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]
def get_commands(servo): 'Get specific flash commands for Zork\n\n Each board needs specific commands including the voltage for Vref, to turn\n on and turn off the SPI flashThe get_*_commands() functions provide a\n board-specific set of commands for these tasks. The voltage for this board\n needs to be set to 1.8 V.\n\n Args:\n servo (servo_lib.Servo): The servo connected to the target DUT.\n\n Returns:\n list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]\n dut_control*=2d arrays formmated like [["cmd1", "arg1", "arg2"],\n ["cmd2", "arg3", "arg4"]]\n where cmd1 will be run before cmd2\n flashrom_cmd=command to flash via flashrom\n futility_cmd=command to flash via futility\n ' dut_control_on = [] dut_control_off = [] if servo.is_v2: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', 'servo_present:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', 'servo_present:off']) programmer = ('ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial) elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on', 'cold_reset:on', 'servo_present:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off', 'cold_reset:off', 'servo_present:off']) programmer = ('raiden_debug_spi:serial=%s' % servo.serial) elif servo.is_ccd: programmer = ('raiden_debug_spi:target=AP,serial=%s' % servo.serial) else: raise Exception(('%s not supported' % servo.version)) flashrom_cmd = ['flashrom', '-p', programmer, '-w'] futility_cmd = ['futility', 'update', '-p', programmer, '-i'] return [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]<|docstring|>Get specific flash commands for Zork Each board needs specific commands including the voltage for Vref, to turn on and turn off the SPI flashThe get_*_commands() functions provide a board-specific set of commands for these tasks. The voltage for this board needs to be set to 1.8 V. 
Args: servo (servo_lib.Servo): The servo connected to the target DUT. Returns: list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd] dut_control*=2d arrays formmated like [["cmd1", "arg1", "arg2"], ["cmd2", "arg3", "arg4"]] where cmd1 will be run before cmd2 flashrom_cmd=command to flash via flashrom futility_cmd=command to flash via futility<|endoftext|>
dd0cd75bbb850ecc54180faf67e659af174b2bc5765eb452969f339ed1a2c6c7
@staticmethod def dumps(obj): 'Helper to format json.' return json.dumps(obj, cls=ReplicationManagerJsonEncoder)
Helper to format json.
s3/replication/replicator/src/s3replicator/replication_managers.py
dumps
gauravchaudhari02/cortx-multisite
1
python
@staticmethod def dumps(obj): return json.dumps(obj, cls=ReplicationManagerJsonEncoder)
@staticmethod def dumps(obj): return json.dumps(obj, cls=ReplicationManagerJsonEncoder)<|docstring|>Helper to format json.<|endoftext|>
a59bf5498c4d585a2892727ec98abfee88e8851714fb8e41cfa7289c28e543df
def __init__(self): 'Initialise ReplicationManagers collection.' super(ReplicationManagers, self).__init__()
Initialise ReplicationManagers collection.
s3/replication/replicator/src/s3replicator/replication_managers.py
__init__
gauravchaudhari02/cortx-multisite
1
python
def __init__(self): super(ReplicationManagers, self).__init__()
def __init__(self): super(ReplicationManagers, self).__init__()<|docstring|>Initialise ReplicationManagers collection.<|endoftext|>
f34f60933f8437f99143f3df22a41054de8c3d7b571c7a620259cff243f611dc
async def close(self): 'Resets and closes all replication manager sessions.' for manager in self.values(): (await manager.close())
Resets and closes all replication manager sessions.
s3/replication/replicator/src/s3replicator/replication_managers.py
close
gauravchaudhari02/cortx-multisite
1
python
async def close(self): for manager in self.values(): (await manager.close())
async def close(self): for manager in self.values(): (await manager.close())<|docstring|>Resets and closes all replication manager sessions.<|endoftext|>
a3fe36211f43ca885f7c953efa301f819e414b1b482133017f55d456e12a67b2
def push_results_to_db(db_url, details, logger): '\n POST results to the Result target DB\n ' url = (db_url + '/results') headers = {'Content-Type': 'application/json'} try: if logger: jsonified_params = json.dumps(details) logger.info(('Pushing results to %s' % url)) logger.debug(('Parameters: %s' % details)) r = requests.post(url, data=jsonified_params, headers=headers) if logger: logger.debug(r) logger.debug(r.status_code) logger.debug(r.content) return json.loads(r.content) except Exception: if logger: logger.exception(("Error [push_results_to_db('%s', '%s')]:" % (db_url, details))) return None
POST results to the Result target DB
docker/storperf-master/storperf/db/test_results_db.py
push_results_to_db
hashnfv/hashnfv-storperf
0
python
def push_results_to_db(db_url, details, logger): '\n \n ' url = (db_url + '/results') headers = {'Content-Type': 'application/json'} try: if logger: jsonified_params = json.dumps(details) logger.info(('Pushing results to %s' % url)) logger.debug(('Parameters: %s' % details)) r = requests.post(url, data=jsonified_params, headers=headers) if logger: logger.debug(r) logger.debug(r.status_code) logger.debug(r.content) return json.loads(r.content) except Exception: if logger: logger.exception(("Error [push_results_to_db('%s', '%s')]:" % (db_url, details))) return None
def push_results_to_db(db_url, details, logger): '\n \n ' url = (db_url + '/results') headers = {'Content-Type': 'application/json'} try: if logger: jsonified_params = json.dumps(details) logger.info(('Pushing results to %s' % url)) logger.debug(('Parameters: %s' % details)) r = requests.post(url, data=jsonified_params, headers=headers) if logger: logger.debug(r) logger.debug(r.status_code) logger.debug(r.content) return json.loads(r.content) except Exception: if logger: logger.exception(("Error [push_results_to_db('%s', '%s')]:" % (db_url, details))) return None<|docstring|>POST results to the Result target DB<|endoftext|>
b72db8f48a70cd256df5dd45f0581a42bb25eaa451afe6b4311dcd43dda18e2b
def setup_cuda_environment(gpu_id): 'Setup the GPU/CPU configuration for PyTorch.\n ' if (gpu_id < 0): print('Running on CPU...') os.environ['CUDA_VISIBLE_DEVICES'] = '' return False else: print('Running on GPU {0}...'.format(gpu_id)) os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id) return True
Setup the GPU/CPU configuration for PyTorch.
mm_action_prediction/tools/support.py
setup_cuda_environment
boychaboy/simmc
2
python
def setup_cuda_environment(gpu_id): '\n ' if (gpu_id < 0): print('Running on CPU...') os.environ['CUDA_VISIBLE_DEVICES'] = return False else: print('Running on GPU {0}...'.format(gpu_id)) os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id) return True
def setup_cuda_environment(gpu_id): '\n ' if (gpu_id < 0): print('Running on CPU...') os.environ['CUDA_VISIBLE_DEVICES'] = return False else: print('Running on GPU {0}...'.format(gpu_id)) os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id) return True<|docstring|>Setup the GPU/CPU configuration for PyTorch.<|endoftext|>
cfe511e42758eb79e120f1303d2fbb887963804772f60a9acda9818a7a329edf
def pretty_print_dict(parsed): 'Pretty print a parsed dictionary.\n ' max_len = max((len(ii) for ii in parsed.keys())) format_str = '\t{{:<{width}}}: {{}}'.format(width=max_len) print('Arguments:') for key in sorted(parsed.keys()): print(format_str.format(key, parsed[key])) print('')
Pretty print a parsed dictionary.
mm_action_prediction/tools/support.py
pretty_print_dict
boychaboy/simmc
2
python
def pretty_print_dict(parsed): '\n ' max_len = max((len(ii) for ii in parsed.keys())) format_str = '\t{{:<{width}}}: {{}}'.format(width=max_len) print('Arguments:') for key in sorted(parsed.keys()): print(format_str.format(key, parsed[key])) print()
def pretty_print_dict(parsed): '\n ' max_len = max((len(ii) for ii in parsed.keys())) format_str = '\t{{:<{width}}}: {{}}'.format(width=max_len) print('Arguments:') for key in sorted(parsed.keys()): print(format_str.format(key, parsed[key])) print()<|docstring|>Pretty print a parsed dictionary.<|endoftext|>
b6ea338d51c5cb33770290adf0ba0338511f653c91364821ccb71af3abb90b88
def print_distribution(counts, label=None): 'Prints distribution for a given histogram of counts.\n\n Args:\n counts: Dictionary of count histograms\n ' total_items = sum(counts.values()) max_length = max((len(str(ii)) for ii in counts.keys())) if (label is not None): print(label) format_str = '\t{{:<{width}}} [{{:.0f}}%]: {{}}'.format(width=max_length) sorted_counts = sorted(counts.items(), key=(lambda x: x[1]), reverse=True) for (key, val) in sorted_counts: print(format_str.format(key, ((100 * float(val)) / total_items), val))
Prints distribution for a given histogram of counts. Args: counts: Dictionary of count histograms
mm_action_prediction/tools/support.py
print_distribution
boychaboy/simmc
2
python
def print_distribution(counts, label=None): 'Prints distribution for a given histogram of counts.\n\n Args:\n counts: Dictionary of count histograms\n ' total_items = sum(counts.values()) max_length = max((len(str(ii)) for ii in counts.keys())) if (label is not None): print(label) format_str = '\t{{:<{width}}} [{{:.0f}}%]: {{}}'.format(width=max_length) sorted_counts = sorted(counts.items(), key=(lambda x: x[1]), reverse=True) for (key, val) in sorted_counts: print(format_str.format(key, ((100 * float(val)) / total_items), val))
def print_distribution(counts, label=None): 'Prints distribution for a given histogram of counts.\n\n Args:\n counts: Dictionary of count histograms\n ' total_items = sum(counts.values()) max_length = max((len(str(ii)) for ii in counts.keys())) if (label is not None): print(label) format_str = '\t{{:<{width}}} [{{:.0f}}%]: {{}}'.format(width=max_length) sorted_counts = sorted(counts.items(), key=(lambda x: x[1]), reverse=True) for (key, val) in sorted_counts: print(format_str.format(key, ((100 * float(val)) / total_items), val))<|docstring|>Prints distribution for a given histogram of counts. Args: counts: Dictionary of count histograms<|endoftext|>
de78969fcdadf077f2c0b718d6de5794014f49e817965e9ead6c005a7b49e43b
def sort_eval_metrics(eval_metrics): 'Sort a dictionary of evaluation metrics.\n\n Args:\n eval_metrics: Dict of evaluation metrics.\n\n Returns:\n sorted_evals: Sorted evaluated metrics, best first.\n ' def mean_relative_increase(arg1, arg2): (_, metric1) = arg1 (_, metric2) = arg2 rel_gain = [] for (higher_better, key) in [((- 1), 'perplexity'), (1, 'action_accuracy'), (1, 'action_attribute')]: rel_gain.append(((higher_better * (metric1[key] - metric2[key])) / ((metric1[key] + metric2[key]) + 1e-05))) return np.mean(rel_gain) sorted_evals = sorted(eval_metrics.items(), key=functools.cmp_to_key(mean_relative_increase), reverse=True) return sorted_evals
Sort a dictionary of evaluation metrics. Args: eval_metrics: Dict of evaluation metrics. Returns: sorted_evals: Sorted evaluated metrics, best first.
mm_action_prediction/tools/support.py
sort_eval_metrics
boychaboy/simmc
2
python
def sort_eval_metrics(eval_metrics): 'Sort a dictionary of evaluation metrics.\n\n Args:\n eval_metrics: Dict of evaluation metrics.\n\n Returns:\n sorted_evals: Sorted evaluated metrics, best first.\n ' def mean_relative_increase(arg1, arg2): (_, metric1) = arg1 (_, metric2) = arg2 rel_gain = [] for (higher_better, key) in [((- 1), 'perplexity'), (1, 'action_accuracy'), (1, 'action_attribute')]: rel_gain.append(((higher_better * (metric1[key] - metric2[key])) / ((metric1[key] + metric2[key]) + 1e-05))) return np.mean(rel_gain) sorted_evals = sorted(eval_metrics.items(), key=functools.cmp_to_key(mean_relative_increase), reverse=True) return sorted_evals
def sort_eval_metrics(eval_metrics): 'Sort a dictionary of evaluation metrics.\n\n Args:\n eval_metrics: Dict of evaluation metrics.\n\n Returns:\n sorted_evals: Sorted evaluated metrics, best first.\n ' def mean_relative_increase(arg1, arg2): (_, metric1) = arg1 (_, metric2) = arg2 rel_gain = [] for (higher_better, key) in [((- 1), 'perplexity'), (1, 'action_accuracy'), (1, 'action_attribute')]: rel_gain.append(((higher_better * (metric1[key] - metric2[key])) / ((metric1[key] + metric2[key]) + 1e-05))) return np.mean(rel_gain) sorted_evals = sorted(eval_metrics.items(), key=functools.cmp_to_key(mean_relative_increase), reverse=True) return sorted_evals<|docstring|>Sort a dictionary of evaluation metrics. Args: eval_metrics: Dict of evaluation metrics. Returns: sorted_evals: Sorted evaluated metrics, best first.<|endoftext|>
c1a8938328c531edaa792cf36192a81b1ecdfc1c11b15a7c178b9e2a89690a0f
def extract_split_from_filename(file_name): 'Extract the split from the filename.\n\n Args:\n file_name: JSON path to the split\n Return:\n split: Name of the split (train | dev | devtest | test)\n ' for split in ('train', 'devtest', 'dev', 'test'): if (split in file_name.split('/')[(- 1)]): return split
Extract the split from the filename. Args: file_name: JSON path to the split Return: split: Name of the split (train | dev | devtest | test)
mm_action_prediction/tools/support.py
extract_split_from_filename
boychaboy/simmc
2
python
def extract_split_from_filename(file_name): 'Extract the split from the filename.\n\n Args:\n file_name: JSON path to the split\n Return:\n split: Name of the split (train | dev | devtest | test)\n ' for split in ('train', 'devtest', 'dev', 'test'): if (split in file_name.split('/')[(- 1)]): return split
def extract_split_from_filename(file_name): 'Extract the split from the filename.\n\n Args:\n file_name: JSON path to the split\n Return:\n split: Name of the split (train | dev | devtest | test)\n ' for split in ('train', 'devtest', 'dev', 'test'): if (split in file_name.split('/')[(- 1)]): return split<|docstring|>Extract the split from the filename. Args: file_name: JSON path to the split Return: split: Name of the split (train | dev | devtest | test)<|endoftext|>
95090cc0ccfb0fd052ce4a100d5e6ad52f724059a1a5f140d23b38ee3cb45120
def report(self, new_val): 'Add a new score.\n\n Args:\n new_val: New value to record.\n ' if (self.value is None): self.value = new_val else: self.value = {key: self.op(value, new_val[key]) for (key, value) in self.value.items()} return self.value
Add a new score. Args: new_val: New value to record.
mm_action_prediction/tools/support.py
report
boychaboy/simmc
2
python
def report(self, new_val): 'Add a new score.\n\n Args:\n new_val: New value to record.\n ' if (self.value is None): self.value = new_val else: self.value = {key: self.op(value, new_val[key]) for (key, value) in self.value.items()} return self.value
def report(self, new_val): 'Add a new score.\n\n Args:\n new_val: New value to record.\n ' if (self.value is None): self.value = new_val else: self.value = {key: self.op(value, new_val[key]) for (key, value) in self.value.items()} return self.value<|docstring|>Add a new score. Args: new_val: New value to record.<|endoftext|>
bf99a74c3272df72cf13dba453b6a5c8b282ac54cdefbf7de982e7042b4afa38
def callbackfunc(blocknum, blocksize, totalsize): '回调函数\n @blocknum: 已经下载的数据块\n @blocksize: 数据块的大小\n @totalsize: 远程文件的大小\n ' percent = (((100.0 * blocknum) * blocksize) / totalsize) if (percent > 100): percent = 100 sys.stdout.write(('%.2f%%\r' % percent))
回调函数 @blocknum: 已经下载的数据块 @blocksize: 数据块的大小 @totalsize: 远程文件的大小
agrspy/chinaaqi.py
callbackfunc
soonyenju/histaqi
2
python
def callbackfunc(blocknum, blocksize, totalsize): '回调函数\n @blocknum: 已经下载的数据块\n @blocksize: 数据块的大小\n @totalsize: 远程文件的大小\n ' percent = (((100.0 * blocknum) * blocksize) / totalsize) if (percent > 100): percent = 100 sys.stdout.write(('%.2f%%\r' % percent))
def callbackfunc(blocknum, blocksize, totalsize): '回调函数\n @blocknum: 已经下载的数据块\n @blocksize: 数据块的大小\n @totalsize: 远程文件的大小\n ' percent = (((100.0 * blocknum) * blocksize) / totalsize) if (percent > 100): percent = 100 sys.stdout.write(('%.2f%%\r' % percent))<|docstring|>回调函数 @blocknum: 已经下载的数据块 @blocksize: 数据块的大小 @totalsize: 远程文件的大小<|endoftext|>
f032fc0c1b9f1642b51c881988b9fcf7206209bc3ebb3c4dafc521a64c76f79b
def run_task(*_): 'Implement the run_task method needed to run experiments with rllab.' sim_params = SumoParams(sim_step=0.1, render=True) vehicles = VehicleParams() vehicles.add(veh_id='rl', acceleration_controller=(RLController, {}), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams(speed_mode='obey_safe_speed', decel=1.5), num_vehicles=1) vehicles.add(veh_id='idm', acceleration_controller=(IDMController, {'noise': 0.2}), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams(speed_mode='obey_safe_speed', decel=1.5), num_vehicles=13) additional_env_params = {'target_velocity': 20, 'max_accel': 3, 'max_decel': 3, 'sort_vehicles': False} env_params = EnvParams(horizon=HORIZON, additional_params=additional_env_params) additional_net_params = {'radius_ring': 30, 'lanes': 1, 'speed_limit': 30, 'resolution': 40} net_params = NetParams(additional_params=additional_net_params) initial_config = InitialConfig(spacing='uniform') print('XXX name', exp_tag) scenario = Figure8Scenario(exp_tag, vehicles, net_params, initial_config=initial_config) env_name = 'AccelEnv' pass_params = (env_name, sim_params, vehicles, env_params, net_params, initial_config, scenario) env = GymEnv(env_name, record_video=False, register_params=pass_params) horizon = env.horizon env = normalize(env) policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(16, 16)) baseline = LinearFeatureBaseline(env_spec=env.spec) algo = TRPO(env=env, policy=policy, baseline=baseline, batch_size=15000, max_path_length=horizon, n_itr=500, discount=0.999) (algo.train(),)
Implement the run_task method needed to run experiments with rllab.
examples/rllab/figure_eight.py
run_task
kjang96/flow-1
71
python
def run_task(*_): sim_params = SumoParams(sim_step=0.1, render=True) vehicles = VehicleParams() vehicles.add(veh_id='rl', acceleration_controller=(RLController, {}), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams(speed_mode='obey_safe_speed', decel=1.5), num_vehicles=1) vehicles.add(veh_id='idm', acceleration_controller=(IDMController, {'noise': 0.2}), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams(speed_mode='obey_safe_speed', decel=1.5), num_vehicles=13) additional_env_params = {'target_velocity': 20, 'max_accel': 3, 'max_decel': 3, 'sort_vehicles': False} env_params = EnvParams(horizon=HORIZON, additional_params=additional_env_params) additional_net_params = {'radius_ring': 30, 'lanes': 1, 'speed_limit': 30, 'resolution': 40} net_params = NetParams(additional_params=additional_net_params) initial_config = InitialConfig(spacing='uniform') print('XXX name', exp_tag) scenario = Figure8Scenario(exp_tag, vehicles, net_params, initial_config=initial_config) env_name = 'AccelEnv' pass_params = (env_name, sim_params, vehicles, env_params, net_params, initial_config, scenario) env = GymEnv(env_name, record_video=False, register_params=pass_params) horizon = env.horizon env = normalize(env) policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(16, 16)) baseline = LinearFeatureBaseline(env_spec=env.spec) algo = TRPO(env=env, policy=policy, baseline=baseline, batch_size=15000, max_path_length=horizon, n_itr=500, discount=0.999) (algo.train(),)
def run_task(*_): sim_params = SumoParams(sim_step=0.1, render=True) vehicles = VehicleParams() vehicles.add(veh_id='rl', acceleration_controller=(RLController, {}), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams(speed_mode='obey_safe_speed', decel=1.5), num_vehicles=1) vehicles.add(veh_id='idm', acceleration_controller=(IDMController, {'noise': 0.2}), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams(speed_mode='obey_safe_speed', decel=1.5), num_vehicles=13) additional_env_params = {'target_velocity': 20, 'max_accel': 3, 'max_decel': 3, 'sort_vehicles': False} env_params = EnvParams(horizon=HORIZON, additional_params=additional_env_params) additional_net_params = {'radius_ring': 30, 'lanes': 1, 'speed_limit': 30, 'resolution': 40} net_params = NetParams(additional_params=additional_net_params) initial_config = InitialConfig(spacing='uniform') print('XXX name', exp_tag) scenario = Figure8Scenario(exp_tag, vehicles, net_params, initial_config=initial_config) env_name = 'AccelEnv' pass_params = (env_name, sim_params, vehicles, env_params, net_params, initial_config, scenario) env = GymEnv(env_name, record_video=False, register_params=pass_params) horizon = env.horizon env = normalize(env) policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(16, 16)) baseline = LinearFeatureBaseline(env_spec=env.spec) algo = TRPO(env=env, policy=policy, baseline=baseline, batch_size=15000, max_path_length=horizon, n_itr=500, discount=0.999) (algo.train(),)<|docstring|>Implement the run_task method needed to run experiments with rllab.<|endoftext|>
d96e0703d4c888c71b1c810ba774f5dad175fb20f3baf7d114c8d7a04a99a742
def dbn_writer(writer=None, hints: dict=None, positions: dict=None, boxes: set=None, factor_positions: dict=None, binary_edges=False, **kwargs): ' Create a DotWriter depending on input arguments:\n If writer is supplied, we will add but not overwrite hints or positions.\n ' if ((writer is None) and (hints is None) and (positions is None) and (boxes is None) and (factor_positions is None) and (binary_edges == False)): return None writer = (GraphvizFormatting() if (writer is None) else writer) writer.paperHorizontalAxis = Axis.X writer.paperVerticalAxis = Axis.Y if (hints is not None): assert isinstance(hints, dict) ph: dict = writer.positionHints for (key, y) in hints.items(): if (key not in ph): ph[key] = y writer.positionHints = ph if (positions is not None): assert isinstance(positions, dict) kp: dict = writer.variablePositions for (key, position) in positions.items(): if (key not in kp): kp[key] = position writer.variablePositions = kp if (boxes is not None): assert isinstance(boxes, set) bx: set = writer.boxes for key in boxes: bx.add(key) writer.boxes = bx if (factor_positions is not None): assert isinstance(factor_positions, dict) kp: dict = writer.factorPositions for (i, position) in factor_positions.items(): if (i not in kp): kp[i] = position writer.factorPositions = kp writer.binaryEdges = binary_edges return writer
Create a DotWriter depending on input arguments: If writer is supplied, we will add but not overwrite hints or positions.
gtbook/dbn.py
dbn_writer
dellaert/nbdev_test
5
python
def dbn_writer(writer=None, hints: dict=None, positions: dict=None, boxes: set=None, factor_positions: dict=None, binary_edges=False, **kwargs): ' Create a DotWriter depending on input arguments:\n If writer is supplied, we will add but not overwrite hints or positions.\n ' if ((writer is None) and (hints is None) and (positions is None) and (boxes is None) and (factor_positions is None) and (binary_edges == False)): return None writer = (GraphvizFormatting() if (writer is None) else writer) writer.paperHorizontalAxis = Axis.X writer.paperVerticalAxis = Axis.Y if (hints is not None): assert isinstance(hints, dict) ph: dict = writer.positionHints for (key, y) in hints.items(): if (key not in ph): ph[key] = y writer.positionHints = ph if (positions is not None): assert isinstance(positions, dict) kp: dict = writer.variablePositions for (key, position) in positions.items(): if (key not in kp): kp[key] = position writer.variablePositions = kp if (boxes is not None): assert isinstance(boxes, set) bx: set = writer.boxes for key in boxes: bx.add(key) writer.boxes = bx if (factor_positions is not None): assert isinstance(factor_positions, dict) kp: dict = writer.factorPositions for (i, position) in factor_positions.items(): if (i not in kp): kp[i] = position writer.factorPositions = kp writer.binaryEdges = binary_edges return writer
def dbn_writer(writer=None, hints: dict=None, positions: dict=None, boxes: set=None, factor_positions: dict=None, binary_edges=False, **kwargs): ' Create a DotWriter depending on input arguments:\n If writer is supplied, we will add but not overwrite hints or positions.\n ' if ((writer is None) and (hints is None) and (positions is None) and (boxes is None) and (factor_positions is None) and (binary_edges == False)): return None writer = (GraphvizFormatting() if (writer is None) else writer) writer.paperHorizontalAxis = Axis.X writer.paperVerticalAxis = Axis.Y if (hints is not None): assert isinstance(hints, dict) ph: dict = writer.positionHints for (key, y) in hints.items(): if (key not in ph): ph[key] = y writer.positionHints = ph if (positions is not None): assert isinstance(positions, dict) kp: dict = writer.variablePositions for (key, position) in positions.items(): if (key not in kp): kp[key] = position writer.variablePositions = kp if (boxes is not None): assert isinstance(boxes, set) bx: set = writer.boxes for key in boxes: bx.add(key) writer.boxes = bx if (factor_positions is not None): assert isinstance(factor_positions, dict) kp: dict = writer.factorPositions for (i, position) in factor_positions.items(): if (i not in kp): kp[i] = position writer.factorPositions = kp writer.binaryEdges = binary_edges return writer<|docstring|>Create a DotWriter depending on input arguments: If writer is supplied, we will add but not overwrite hints or positions.<|endoftext|>
aece204fd2070deff1238492800bce8a87e518362e7b5ba9a0e9bd20a5f122a7
def has_positions(writer): 'Check if writer has positions for engine selection' if (writer is None): return False return ((len(writer.positionHints) > 0) or (len(writer.variablePositions) > 0) or (len(writer.factorPositions) > 0))
Check if writer has positions for engine selection
gtbook/dbn.py
has_positions
dellaert/nbdev_test
5
python
def has_positions(writer): if (writer is None): return False return ((len(writer.positionHints) > 0) or (len(writer.variablePositions) > 0) or (len(writer.factorPositions) > 0))
def has_positions(writer): if (writer is None): return False return ((len(writer.positionHints) > 0) or (len(writer.variablePositions) > 0) or (len(writer.factorPositions) > 0))<|docstring|>Check if writer has positions for engine selection<|endoftext|>
611735210ca3f4781f5643666d3688adf439bf4998ef4ab7d70926a9243304df
def Process2JSON(self): '\n Returns\n ------\n str\n processSTR\n ' processSTR = json.dumps(self.process) return processSTR
Returns ------ str processSTR
pds_pipelines/process.py
Process2JSON
amystamile-usgs/PDS-Pipelines
8
python
def Process2JSON(self): '\n Returns\n ------\n str\n processSTR\n ' processSTR = json.dumps(self.process) return processSTR
def Process2JSON(self): '\n Returns\n ------\n str\n processSTR\n ' processSTR = json.dumps(self.process) return processSTR<|docstring|>Returns ------ str processSTR<|endoftext|>
fe243908da51f53ef1e927ecf9f96e63d7b61bb13cadfa352668984db637e789
def JSON2Process(self, element): '\n Parameters\n ----------\n element\n\n Returns\n -------\n str\n JSONout\n ' JSONout = json.loads(element, object_pairs_hook=OrderedDict) processDict = {} for process in JSONout: processDict[str(process)] = OrderedDict() self.process = processDict self.processName = process for (key, value) in JSONout[process].items(): self.process[self.processName][str(key)] = str(value) return JSONout
Parameters ---------- element Returns ------- str JSONout
pds_pipelines/process.py
JSON2Process
amystamile-usgs/PDS-Pipelines
8
python
def JSON2Process(self, element): '\n Parameters\n ----------\n element\n\n Returns\n -------\n str\n JSONout\n ' JSONout = json.loads(element, object_pairs_hook=OrderedDict) processDict = {} for process in JSONout: processDict[str(process)] = OrderedDict() self.process = processDict self.processName = process for (key, value) in JSONout[process].items(): self.process[self.processName][str(key)] = str(value) return JSONout
def JSON2Process(self, element): '\n Parameters\n ----------\n element\n\n Returns\n -------\n str\n JSONout\n ' JSONout = json.loads(element, object_pairs_hook=OrderedDict) processDict = {} for process in JSONout: processDict[str(process)] = OrderedDict() self.process = processDict self.processName = process for (key, value) in JSONout[process].items(): self.process[self.processName][str(key)] = str(value) return JSONout<|docstring|>Parameters ---------- element Returns ------- str JSONout<|endoftext|>
e8ff1c37eb6f2f88cda500b40be5b86a664cc7aee34f83bb6c6f0824a9252a26
def Process2Redis(self, redisOBJ): '\n Parameters\n ----------\n redisOBJ\n ' jsonSTR = json.dumps(self.process) redisOBJ.QueueAdd(jsonSTR)
Parameters ---------- redisOBJ
pds_pipelines/process.py
Process2Redis
amystamile-usgs/PDS-Pipelines
8
python
def Process2Redis(self, redisOBJ): '\n Parameters\n ----------\n redisOBJ\n ' jsonSTR = json.dumps(self.process) redisOBJ.QueueAdd(jsonSTR)
def Process2Redis(self, redisOBJ): '\n Parameters\n ----------\n redisOBJ\n ' jsonSTR = json.dumps(self.process) redisOBJ.QueueAdd(jsonSTR)<|docstring|>Parameters ---------- redisOBJ<|endoftext|>
04965806a7c0537b8041d932ec0fb685814a1f2229f6ff9a48468ff157941e6a
def setProcess(self, process): '\n Parameters\n ----------\n process\n ' self.processName = str(process)
Parameters ---------- process
pds_pipelines/process.py
setProcess
amystamile-usgs/PDS-Pipelines
8
python
def setProcess(self, process): '\n Parameters\n ----------\n process\n ' self.processName = str(process)
def setProcess(self, process): '\n Parameters\n ----------\n process\n ' self.processName = str(process)<|docstring|>Parameters ---------- process<|endoftext|>
d726abeb958db80ad6e44658030a4cead10f586defe756d95a5f319856fdd9d0
def ChangeProcess(self, newproc): '\n Parameters\n ----------\n newproc\n ' NewDict = {} NewDict[newproc] = OrderedDict() for (k, v) in self.process[self.processName].items(): NewDict[newproc][k] = v self.process = NewDict self.processName = newproc
Parameters ---------- newproc
pds_pipelines/process.py
ChangeProcess
amystamile-usgs/PDS-Pipelines
8
python
def ChangeProcess(self, newproc): '\n Parameters\n ----------\n newproc\n ' NewDict = {} NewDict[newproc] = OrderedDict() for (k, v) in self.process[self.processName].items(): NewDict[newproc][k] = v self.process = NewDict self.processName = newproc
def ChangeProcess(self, newproc): '\n Parameters\n ----------\n newproc\n ' NewDict = {} NewDict[newproc] = OrderedDict() for (k, v) in self.process[self.processName].items(): NewDict[newproc][k] = v self.process = NewDict self.processName = newproc<|docstring|>Parameters ---------- newproc<|endoftext|>
a193d92a0c7b62c2738947e04bb707f4bca444c9ae7afeb2e63acfae5261938a
def getProcess(self): '\n Returns\n -------\n dict\n process\n ' return self.process
Returns ------- dict process
pds_pipelines/process.py
getProcess
amystamile-usgs/PDS-Pipelines
8
python
def getProcess(self): '\n Returns\n -------\n dict\n process\n ' return self.process
def getProcess(self): '\n Returns\n -------\n dict\n process\n ' return self.process<|docstring|>Returns ------- dict process<|endoftext|>
1d912dfbd58861965e35433f68a12e8c1ec4f93757a1401de6cdbb3bd9941e07
def getProcessName(self): '\n Returns\n ------\n str\n processName\n ' return self.processName
Returns ------ str processName
pds_pipelines/process.py
getProcessName
amystamile-usgs/PDS-Pipelines
8
python
def getProcessName(self): '\n Returns\n ------\n str\n processName\n ' return self.processName
def getProcessName(self): '\n Returns\n ------\n str\n processName\n ' return self.processName<|docstring|>Returns ------ str processName<|endoftext|>
371f8b279562ea459822051f200da005f1c1ef2ebd87d5a3f07bfc3ca590c58c
def LogCommandline(self): '\n Returns\n -------\n str\n commandSTr\n ' tempSTR = self.processName for (key, value) in self.process[self.processName].items(): if ((key == 'from_') or (key == 'to') or (key == 'map')): subfile = value.split('/') value = subfile[(- 1)] tempSTR += (((' ' + key) + '=') + value) commandSTR = tempSTR.replace('from_', 'from') return commandSTR
Returns ------- str commandSTr
pds_pipelines/process.py
LogCommandline
amystamile-usgs/PDS-Pipelines
8
python
def LogCommandline(self): '\n Returns\n -------\n str\n commandSTr\n ' tempSTR = self.processName for (key, value) in self.process[self.processName].items(): if ((key == 'from_') or (key == 'to') or (key == 'map')): subfile = value.split('/') value = subfile[(- 1)] tempSTR += (((' ' + key) + '=') + value) commandSTR = tempSTR.replace('from_', 'from') return commandSTR
def LogCommandline(self): '\n Returns\n -------\n str\n commandSTr\n ' tempSTR = self.processName for (key, value) in self.process[self.processName].items(): if ((key == 'from_') or (key == 'to') or (key == 'map')): subfile = value.split('/') value = subfile[(- 1)] tempSTR += (((' ' + key) + '=') + value) commandSTR = tempSTR.replace('from_', 'from') return commandSTR<|docstring|>Returns ------- str commandSTr<|endoftext|>
3a62bd6b8ffcb731e5a5469868be026973d8f545d584d91a4fb023798d275669
def LogHelpLink(self): '\n Returns\n -------\n str\n helplink\n ' helplink = (((('https://isis.astrogeology.usgs.gov/Application/presentation/Tabbed/' + self.processName) + '/') + self.processName) + '.html') return helplink
Returns ------- str helplink
pds_pipelines/process.py
LogHelpLink
amystamile-usgs/PDS-Pipelines
8
python
def LogHelpLink(self): '\n Returns\n -------\n str\n helplink\n ' helplink = (((('https://isis.astrogeology.usgs.gov/Application/presentation/Tabbed/' + self.processName) + '/') + self.processName) + '.html') return helplink
def LogHelpLink(self): '\n Returns\n -------\n str\n helplink\n ' helplink = (((('https://isis.astrogeology.usgs.gov/Application/presentation/Tabbed/' + self.processName) + '/') + self.processName) + '.html') return helplink<|docstring|>Returns ------- str helplink<|endoftext|>
d116e5e8058fac6d2273c217d4dac84cec271c2a41fd97b73662b568d4ef3d53
def ProcessFromRecipe(self, process, recipe): '\n Returns\n -------\n dict\n process\n ' for Rprocess in recipe: for (key, value) in Rprocess.items(): if (key == process): self.processName = key self.process = Rprocess return self.process
Returns ------- dict process
pds_pipelines/process.py
ProcessFromRecipe
amystamile-usgs/PDS-Pipelines
8
python
def ProcessFromRecipe(self, process, recipe): '\n Returns\n -------\n dict\n process\n ' for Rprocess in recipe: for (key, value) in Rprocess.items(): if (key == process): self.processName = key self.process = Rprocess return self.process
def ProcessFromRecipe(self, process, recipe): '\n Returns\n -------\n dict\n process\n ' for Rprocess in recipe: for (key, value) in Rprocess.items(): if (key == process): self.processName = key self.process = Rprocess return self.process<|docstring|>Returns ------- dict process<|endoftext|>
4ccb49a2ac834f801e5e4635ce95fa78d346aa7dbda12a53786c652a6f863d80
def updateParameter(self, param, newValue): '\n Parameters\n ----------\n param\n newValue\n ' for (key, value) in self.process[self.processName].items(): if (key == param): self.process[self.processName][key] = newValue
Parameters ---------- param newValue
pds_pipelines/process.py
updateParameter
amystamile-usgs/PDS-Pipelines
8
python
def updateParameter(self, param, newValue): '\n Parameters\n ----------\n param\n newValue\n ' for (key, value) in self.process[self.processName].items(): if (key == param): self.process[self.processName][key] = newValue
def updateParameter(self, param, newValue): '\n Parameters\n ----------\n param\n newValue\n ' for (key, value) in self.process[self.processName].items(): if (key == param): self.process[self.processName][key] = newValue<|docstring|>Parameters ---------- param newValue<|endoftext|>
815dc7cb90866bfe0e775b3734433c4ce6cedff4374dba17169823942779d896
def newProcess(self, process): '\n Parameters\n ----------\n process\n ' processDict = {} processDict[process] = OrderedDict() self.process = processDict self.processName = process
Parameters ---------- process
pds_pipelines/process.py
newProcess
amystamile-usgs/PDS-Pipelines
8
python
def newProcess(self, process): '\n Parameters\n ----------\n process\n ' processDict = {} processDict[process] = OrderedDict() self.process = processDict self.processName = process
def newProcess(self, process): '\n Parameters\n ----------\n process\n ' processDict = {} processDict[process] = OrderedDict() self.process = processDict self.processName = process<|docstring|>Parameters ---------- process<|endoftext|>
be83ebeea2b2b95d35526cf452433314a81d5d731ad5e2a081da933d58bd8852
def AddParameter(self, param, newValue): '\n Parameters\n ----------\n param\n newValue\n ' testDict = {param: newValue} for (k, v) in testDict.items(): self.process[self.processName][str(k)] = str(v)
Parameters ---------- param newValue
pds_pipelines/process.py
AddParameter
amystamile-usgs/PDS-Pipelines
8
python
def AddParameter(self, param, newValue): '\n Parameters\n ----------\n param\n newValue\n ' testDict = {param: newValue} for (k, v) in testDict.items(): self.process[self.processName][str(k)] = str(v)
def AddParameter(self, param, newValue): '\n Parameters\n ----------\n param\n newValue\n ' testDict = {param: newValue} for (k, v) in testDict.items(): self.process[self.processName][str(k)] = str(v)<|docstring|>Parameters ---------- param newValue<|endoftext|>
81180aa7432a6a7e07f1304709f53b1600a1a255a4bc6c4aef2d3c9ce2b33fc7
def GDAL_OBit(self, ibit): '\n Parameters\n ----------\n ibit\n\n Returns\n -------\n dict\n bitDICT[ibit]\n ' bitDICT = {'unsignedbyte': 'Byte', 'signedword': 'Int16', 'real': 'Float32'} try: return bitDICT[ibit] except KeyError: raise Exception((f'Unsupported ibit type given {ibit}. ' + f'Currently supported bit types are {list(bitDICT.keys())}'))
Parameters ---------- ibit Returns ------- dict bitDICT[ibit]
pds_pipelines/process.py
GDAL_OBit
amystamile-usgs/PDS-Pipelines
8
python
def GDAL_OBit(self, ibit): '\n Parameters\n ----------\n ibit\n\n Returns\n -------\n dict\n bitDICT[ibit]\n ' bitDICT = {'unsignedbyte': 'Byte', 'signedword': 'Int16', 'real': 'Float32'} try: return bitDICT[ibit] except KeyError: raise Exception((f'Unsupported ibit type given {ibit}. ' + f'Currently supported bit types are {list(bitDICT.keys())}'))
def GDAL_OBit(self, ibit): '\n Parameters\n ----------\n ibit\n\n Returns\n -------\n dict\n bitDICT[ibit]\n ' bitDICT = {'unsignedbyte': 'Byte', 'signedword': 'Int16', 'real': 'Float32'} try: return bitDICT[ibit] except KeyError: raise Exception((f'Unsupported ibit type given {ibit}. ' + f'Currently supported bit types are {list(bitDICT.keys())}'))<|docstring|>Parameters ---------- ibit Returns ------- dict bitDICT[ibit]<|endoftext|>
82f07837bd6beefe681506b1544efd3a1e81571ed3b4c257292b7dffdb7b3cd7
def GDAL_Creation(self, format): '\n Parameters\n ----------\n format\n\n Returns\n -------\n dict\n cDICT[format]\n ' cDICT = {'JPEG': 'quality=100', 'JP2KAK': 'quality=100', 'GTiff': 'bigtiff=if_safer'} try: return cDICT[format] except KeyError: raise Exception((f'Unsupported format {format}. ' + f'Currently supported bit types are {list(cDICT.keys())}'))
Parameters ---------- format Returns ------- dict cDICT[format]
pds_pipelines/process.py
GDAL_Creation
amystamile-usgs/PDS-Pipelines
8
python
def GDAL_Creation(self, format): '\n Parameters\n ----------\n format\n\n Returns\n -------\n dict\n cDICT[format]\n ' cDICT = {'JPEG': 'quality=100', 'JP2KAK': 'quality=100', 'GTiff': 'bigtiff=if_safer'} try: return cDICT[format] except KeyError: raise Exception((f'Unsupported format {format}. ' + f'Currently supported bit types are {list(cDICT.keys())}'))
def GDAL_Creation(self, format): '\n Parameters\n ----------\n format\n\n Returns\n -------\n dict\n cDICT[format]\n ' cDICT = {'JPEG': 'quality=100', 'JP2KAK': 'quality=100', 'GTiff': 'bigtiff=if_safer'} try: return cDICT[format] except KeyError: raise Exception((f'Unsupported format {format}. ' + f'Currently supported bit types are {list(cDICT.keys())}'))<|docstring|>Parameters ---------- format Returns ------- dict cDICT[format]<|endoftext|>
a7c31a652525b038f758b24c531c1bc4face8a205781144818aa1870a0dd4c9a
@DataChannel def time(self, chan): 'The current time, updated every second.\n\n Args:\n format (str) : Format spec. Defaults to ``%I:%M:%S %p``.\n See http://strftime.org for supported formats.\n\n Returns:\n The current time as a formatted string. Default HH:MM:SS AM\n\n Channel syntax::\n\n clock:time\n clock:time?string\n clock:time?string&format=%S\n\n ' return chan.value
The current time, updated every second. Args: format (str) : Format spec. Defaults to ``%I:%M:%S %p``. See http://strftime.org for supported formats. Returns: The current time as a formatted string. Default HH:MM:SS AM Channel syntax:: clock:time clock:time?string clock:time?string&format=%S
qtpyvcp/plugins/clock.py
time
robertspark/qtpyvcp
71
python
@DataChannel def time(self, chan): 'The current time, updated every second.\n\n Args:\n format (str) : Format spec. Defaults to ``%I:%M:%S %p``.\n See http://strftime.org for supported formats.\n\n Returns:\n The current time as a formatted string. Default HH:MM:SS AM\n\n Channel syntax::\n\n clock:time\n clock:time?string\n clock:time?string&format=%S\n\n ' return chan.value
@DataChannel def time(self, chan): 'The current time, updated every second.\n\n Args:\n format (str) : Format spec. Defaults to ``%I:%M:%S %p``.\n See http://strftime.org for supported formats.\n\n Returns:\n The current time as a formatted string. Default HH:MM:SS AM\n\n Channel syntax::\n\n clock:time\n clock:time?string\n clock:time?string&format=%S\n\n ' return chan.value<|docstring|>The current time, updated every second. Args: format (str) : Format spec. Defaults to ``%I:%M:%S %p``. See http://strftime.org for supported formats. Returns: The current time as a formatted string. Default HH:MM:SS AM Channel syntax:: clock:time clock:time?string clock:time?string&format=%S<|endoftext|>
937cfff15f2e85dc10d8f74bb8e7ff3dc2199a53e29724f38321bacd0e83e913
@DataChannel def date(self, chan): 'The current date, updated every second.\n\n Args:\n format (str) : Format spec. Defaults to ``%m/%d/%Y``.\n See http://strftime.org for supported formats.\n\n Returns:\n The current date as a formatted string. Default MM/DD/YYYY\n\n Channel syntax::\n\n clock:date\n clock:date?string\n clock:date?string&format=%Y\n\n ' return chan.value
The current date, updated every second. Args: format (str) : Format spec. Defaults to ``%m/%d/%Y``. See http://strftime.org for supported formats. Returns: The current date as a formatted string. Default MM/DD/YYYY Channel syntax:: clock:date clock:date?string clock:date?string&format=%Y
qtpyvcp/plugins/clock.py
date
robertspark/qtpyvcp
71
python
@DataChannel def date(self, chan): 'The current date, updated every second.\n\n Args:\n format (str) : Format spec. Defaults to ``%m/%d/%Y``.\n See http://strftime.org for supported formats.\n\n Returns:\n The current date as a formatted string. Default MM/DD/YYYY\n\n Channel syntax::\n\n clock:date\n clock:date?string\n clock:date?string&format=%Y\n\n ' return chan.value
@DataChannel def date(self, chan): 'The current date, updated every second.\n\n Args:\n format (str) : Format spec. Defaults to ``%m/%d/%Y``.\n See http://strftime.org for supported formats.\n\n Returns:\n The current date as a formatted string. Default MM/DD/YYYY\n\n Channel syntax::\n\n clock:date\n clock:date?string\n clock:date?string&format=%Y\n\n ' return chan.value<|docstring|>The current date, updated every second. Args: format (str) : Format spec. Defaults to ``%m/%d/%Y``. See http://strftime.org for supported formats. Returns: The current date as a formatted string. Default MM/DD/YYYY Channel syntax:: clock:date clock:date?string clock:date?string&format=%Y<|endoftext|>
2b625ce43b24a7509c98bedbc4fba022a2a5b605453acfeabceb079644d22117
def collect(): "\n Garbage-collect any items that don't have any references to them anymore.\n " if sys_tools.is_pypy: for _ in range(3): gc.collect() else: gc.collect()
Garbage-collect any items that don't have any references to them anymore.
python_toolbox/gc_tools.py
collect
hboshnak/python_toolbox
119
python
def collect(): "\n \n " if sys_tools.is_pypy: for _ in range(3): gc.collect() else: gc.collect()
def collect(): "\n \n " if sys_tools.is_pypy: for _ in range(3): gc.collect() else: gc.collect()<|docstring|>Garbage-collect any items that don't have any references to them anymore.<|endoftext|>
f7f0af5977e16d5f015ed9301eee19fde65ccf39fb26d1643caf47a8afa63806
def voc_colormap(labels) -> np.ndarray: 'Color map used in PASCAL VOC\n Args:\n labels (iterable of ints): Class ids.\n Returns:\n numpy.ndarray: Colors in RGB order. The shape is :math:`(N, 3)`,\n where :math:`N` is the size of :obj:`labels`. The range of the values\n is :math:`[0, 255]`.\n ' colors = [] for label in labels: (r, g, b) = (0, 0, 0) i = label for j in range(8): if (i & (1 << 0)): r |= (1 << (7 - j)) if (i & (1 << 1)): g |= (1 << (7 - j)) if (i & (1 << 2)): b |= (1 << (7 - j)) i >>= 3 colors.append((r, g, b)) return np.array(colors, dtype=np.float32)
Color map used in PASCAL VOC Args: labels (iterable of ints): Class ids. Returns: numpy.ndarray: Colors in RGB order. The shape is :math:`(N, 3)`, where :math:`N` is the size of :obj:`labels`. The range of the values is :math:`[0, 255]`.
utils/colormap.py
voc_colormap
kktsubota/manga-character-screentone
2
python
def voc_colormap(labels) -> np.ndarray: 'Color map used in PASCAL VOC\n Args:\n labels (iterable of ints): Class ids.\n Returns:\n numpy.ndarray: Colors in RGB order. The shape is :math:`(N, 3)`,\n where :math:`N` is the size of :obj:`labels`. The range of the values\n is :math:`[0, 255]`.\n ' colors = [] for label in labels: (r, g, b) = (0, 0, 0) i = label for j in range(8): if (i & (1 << 0)): r |= (1 << (7 - j)) if (i & (1 << 1)): g |= (1 << (7 - j)) if (i & (1 << 2)): b |= (1 << (7 - j)) i >>= 3 colors.append((r, g, b)) return np.array(colors, dtype=np.float32)
def voc_colormap(labels) -> np.ndarray: 'Color map used in PASCAL VOC\n Args:\n labels (iterable of ints): Class ids.\n Returns:\n numpy.ndarray: Colors in RGB order. The shape is :math:`(N, 3)`,\n where :math:`N` is the size of :obj:`labels`. The range of the values\n is :math:`[0, 255]`.\n ' colors = [] for label in labels: (r, g, b) = (0, 0, 0) i = label for j in range(8): if (i & (1 << 0)): r |= (1 << (7 - j)) if (i & (1 << 1)): g |= (1 << (7 - j)) if (i & (1 << 2)): b |= (1 << (7 - j)) i >>= 3 colors.append((r, g, b)) return np.array(colors, dtype=np.float32)<|docstring|>Color map used in PASCAL VOC Args: labels (iterable of ints): Class ids. Returns: numpy.ndarray: Colors in RGB order. The shape is :math:`(N, 3)`, where :math:`N` is the size of :obj:`labels`. The range of the values is :math:`[0, 255]`.<|endoftext|>
427301773a92331d3395f00b4fec5facbcfe963764bf50ec7cadea30bbbb22c3
def parse(self, data, _stock_id): 'parse data from hexun request\n\n :raise:\n exceptions if data from hexun is not well-formated\n ' def prepare_data(data): 'because hexun does not return a standard json,\n we need to extract the real json part\n ' regroup = re.match('^_ntes_quote_callback\\((.*)\\)', data) if regroup: return regroup.group(1) else: raise ParserException(('Unable to extact json from %s' % data)) json_string = prepare_data(data) obj = json.loads(json_string) return (self._generate_stock(obj),)
parse data from hexun request :raise: exceptions if data from hexun is not well-formated
cstock/hexun_engine.py
parse
dwarf-miner/midas
0
python
def parse(self, data, _stock_id): 'parse data from hexun request\n\n :raise:\n exceptions if data from hexun is not well-formated\n ' def prepare_data(data): 'because hexun does not return a standard json,\n we need to extract the real json part\n ' regroup = re.match('^_ntes_quote_callback\\((.*)\\)', data) if regroup: return regroup.group(1) else: raise ParserException(('Unable to extact json from %s' % data)) json_string = prepare_data(data) obj = json.loads(json_string) return (self._generate_stock(obj),)
def parse(self, data, _stock_id): 'parse data from hexun request\n\n :raise:\n exceptions if data from hexun is not well-formated\n ' def prepare_data(data): 'because hexun does not return a standard json,\n we need to extract the real json part\n ' regroup = re.match('^_ntes_quote_callback\\((.*)\\)', data) if regroup: return regroup.group(1) else: raise ParserException(('Unable to extact json from %s' % data)) json_string = prepare_data(data) obj = json.loads(json_string) return (self._generate_stock(obj),)<|docstring|>parse data from hexun request :raise: exceptions if data from hexun is not well-formated<|endoftext|>
9b5ac2f7938b257d6e2920e0d4a40f6e658f33847795851f970979597f5c325d
@staticmethod def _generate_stock(obj): "obj structure is {'1000626': {'code': ...}}\n " stock = obj.values()[0] code = stock.get('code', None) if (code is not None): code = code[1:] timestr = stock.get('time', None) if (timestr is not None): times = timestr.split(' ') date = datetime.datetime.strptime(times[0], '%Y/%m/%d').date() time = datetime.datetime.strptime(times[1], '%H:%M:%S').time() else: time = None date = None return Stock(code=code, name=stock.get('name', None), price=stock.get('price', None), time=time, date=date, open=stock.get('open', None), yesterday_close=stock.get('yestclose', None), low=stock.get('low', None), high=stock.get('high', None), volume=stock.get('volume', None), turnover=stock.get('turnover', None))
obj structure is {'1000626': {'code': ...}}
cstock/hexun_engine.py
_generate_stock
dwarf-miner/midas
0
python
@staticmethod def _generate_stock(obj): "\n " stock = obj.values()[0] code = stock.get('code', None) if (code is not None): code = code[1:] timestr = stock.get('time', None) if (timestr is not None): times = timestr.split(' ') date = datetime.datetime.strptime(times[0], '%Y/%m/%d').date() time = datetime.datetime.strptime(times[1], '%H:%M:%S').time() else: time = None date = None return Stock(code=code, name=stock.get('name', None), price=stock.get('price', None), time=time, date=date, open=stock.get('open', None), yesterday_close=stock.get('yestclose', None), low=stock.get('low', None), high=stock.get('high', None), volume=stock.get('volume', None), turnover=stock.get('turnover', None))
@staticmethod def _generate_stock(obj): "\n " stock = obj.values()[0] code = stock.get('code', None) if (code is not None): code = code[1:] timestr = stock.get('time', None) if (timestr is not None): times = timestr.split(' ') date = datetime.datetime.strptime(times[0], '%Y/%m/%d').date() time = datetime.datetime.strptime(times[1], '%H:%M:%S').time() else: time = None date = None return Stock(code=code, name=stock.get('name', None), price=stock.get('price', None), time=time, date=date, open=stock.get('open', None), yesterday_close=stock.get('yestclose', None), low=stock.get('low', None), high=stock.get('high', None), volume=stock.get('volume', None), turnover=stock.get('turnover', None))<|docstring|>obj structure is {'1000626': {'code': ...}}<|endoftext|>
1273ba7fc1602f29594e5f08b3a388e7c5dbeaa7fc6b48989d13d5ef94837777
def prepare_data(data): 'because hexun does not return a standard json,\n we need to extract the real json part\n ' regroup = re.match('^_ntes_quote_callback\\((.*)\\)', data) if regroup: return regroup.group(1) else: raise ParserException(('Unable to extact json from %s' % data))
because hexun does not return a standard json, we need to extract the real json part
cstock/hexun_engine.py
prepare_data
dwarf-miner/midas
0
python
def prepare_data(data): 'because hexun does not return a standard json,\n we need to extract the real json part\n ' regroup = re.match('^_ntes_quote_callback\\((.*)\\)', data) if regroup: return regroup.group(1) else: raise ParserException(('Unable to extact json from %s' % data))
def prepare_data(data): 'because hexun does not return a standard json,\n we need to extract the real json part\n ' regroup = re.match('^_ntes_quote_callback\\((.*)\\)', data) if regroup: return regroup.group(1) else: raise ParserException(('Unable to extact json from %s' % data))<|docstring|>because hexun does not return a standard json, we need to extract the real json part<|endoftext|>
dc3220e2a748eaa8178a89fd9979bba876ece0c4c4020747a6f4d6a8db16c336
@staticmethod def convert_shortcut_buttons(items): "\n support shortcut buttons [{'type':'web_url', 'title':'open web url', 'value':'https://~~'}]\n " if ((items is not None) and isinstance(items, list)): result = [] for item in items: if isinstance(item, BaseButton): result.append(item) elif isinstance(item, dict): if (item.get('type') in ['web_url', 'postback', 'phone_number']): type = item.get('type') title = item.get('title') value = item.get('value', item.get('url', item.get('payload'))) if (type == 'web_url'): result.append(ButtonWeb(title=title, url=value)) elif (type == 'postback'): result.append(ButtonPostBack(title=title, payload=value)) elif (type == 'phone_number'): result.append(ButtonPhoneNumber(title=title, payload=value)) else: raise ValueError('Invalid button type') else: raise ValueError('Invalid buttons variables') return result else: return items
support shortcut buttons [{'type':'web_url', 'title':'open web url', 'value':'https://~~'}]
fbmq/template.py
convert_shortcut_buttons
antikytheraton/supa-bbot
4
python
@staticmethod def convert_shortcut_buttons(items): "\n \n " if ((items is not None) and isinstance(items, list)): result = [] for item in items: if isinstance(item, BaseButton): result.append(item) elif isinstance(item, dict): if (item.get('type') in ['web_url', 'postback', 'phone_number']): type = item.get('type') title = item.get('title') value = item.get('value', item.get('url', item.get('payload'))) if (type == 'web_url'): result.append(ButtonWeb(title=title, url=value)) elif (type == 'postback'): result.append(ButtonPostBack(title=title, payload=value)) elif (type == 'phone_number'): result.append(ButtonPhoneNumber(title=title, payload=value)) else: raise ValueError('Invalid button type') else: raise ValueError('Invalid buttons variables') return result else: return items
@staticmethod def convert_shortcut_buttons(items): "\n \n " if ((items is not None) and isinstance(items, list)): result = [] for item in items: if isinstance(item, BaseButton): result.append(item) elif isinstance(item, dict): if (item.get('type') in ['web_url', 'postback', 'phone_number']): type = item.get('type') title = item.get('title') value = item.get('value', item.get('url', item.get('payload'))) if (type == 'web_url'): result.append(ButtonWeb(title=title, url=value)) elif (type == 'postback'): result.append(ButtonPostBack(title=title, payload=value)) elif (type == 'phone_number'): result.append(ButtonPhoneNumber(title=title, payload=value)) else: raise ValueError('Invalid button type') else: raise ValueError('Invalid buttons variables') return result else: return items<|docstring|>support shortcut buttons [{'type':'web_url', 'title':'open web url', 'value':'https://~~'}]<|endoftext|>
4ffe8f08c43d59b0c972b34a8044b22b5e70046cc185dbe2b0ed7f1ba8f0167d
def cost_func_ce(self, saver, model, y, data, T, lr, lmd=None, name=None): '\n BASELINE EXECUTION (valid also for oracle and final training,\n with optimized values of lambda)\n\n :param saver: `Saver` object (can be None)\n :param name: optional name for the saver\n :param data: `Datasets` object\n :param T: number of iterations\n :param lmd: weights for the examples, if None sets to 1.\n :param model: a model (should comply with `rf.Network`)\n :param y: placeholder for output\n :param lr: learning rate\n :return:\n ' x = model.inp[0] train_s = data.train.create_supplier(x, y) valid_s = data.validation.create_supplier(x, y) error2 = tf.reduce_mean((lmd * hozo.cross_entropy_loss(y, model.out))) correct_prediction2 = tf.equal(tf.argmax(model.out, 1), tf.argmax(y, 1)) accuracy2 = tf.reduce_mean(tf.cast(correct_prediction2, 'float')) error = tf.reduce_mean(hozo.cross_entropy_loss(y, model.out)) opt = tf.train.GradientDescentOptimizer(lr) ts1 = opt.minimize(error2, var_list=model.var_list) if saver: saver.clear_items() saver.add_items('Test Accuracy', accuracy2, tst_s) with tf.Session(config=hozo.CONFIG_GPU_GROWTH).as_default(): tf.variables_initializer(model.var_list).run() for _ in range(T): ts1.run(feed_dict=train_s()) if saver: saver.save(name) baseline_test_accuracy = accuracy2.eval(feed_dict=valid_s()) test_error = error.eval(feed_dict=valid_s()) return (- test_error)
BASELINE EXECUTION (valid also for oracle and final training, with optimized values of lambda) :param saver: `Saver` object (can be None) :param name: optional name for the saver :param data: `Datasets` object :param T: number of iterations :param lmd: weights for the examples, if None sets to 1. :param model: a model (should comply with `rf.Network`) :param y: placeholder for output :param lr: learning rate :return:
HOZO/hozo/data_hyper_cleaning_bo.py
cost_func_ce
jsgubin/HOZOG
4
python
def cost_func_ce(self, saver, model, y, data, T, lr, lmd=None, name=None): '\n BASELINE EXECUTION (valid also for oracle and final training,\n with optimized values of lambda)\n\n :param saver: `Saver` object (can be None)\n :param name: optional name for the saver\n :param data: `Datasets` object\n :param T: number of iterations\n :param lmd: weights for the examples, if None sets to 1.\n :param model: a model (should comply with `rf.Network`)\n :param y: placeholder for output\n :param lr: learning rate\n :return:\n ' x = model.inp[0] train_s = data.train.create_supplier(x, y) valid_s = data.validation.create_supplier(x, y) error2 = tf.reduce_mean((lmd * hozo.cross_entropy_loss(y, model.out))) correct_prediction2 = tf.equal(tf.argmax(model.out, 1), tf.argmax(y, 1)) accuracy2 = tf.reduce_mean(tf.cast(correct_prediction2, 'float')) error = tf.reduce_mean(hozo.cross_entropy_loss(y, model.out)) opt = tf.train.GradientDescentOptimizer(lr) ts1 = opt.minimize(error2, var_list=model.var_list) if saver: saver.clear_items() saver.add_items('Test Accuracy', accuracy2, tst_s) with tf.Session(config=hozo.CONFIG_GPU_GROWTH).as_default(): tf.variables_initializer(model.var_list).run() for _ in range(T): ts1.run(feed_dict=train_s()) if saver: saver.save(name) baseline_test_accuracy = accuracy2.eval(feed_dict=valid_s()) test_error = error.eval(feed_dict=valid_s()) return (- test_error)
def cost_func_ce(self, saver, model, y, data, T, lr, lmd=None, name=None): '\n BASELINE EXECUTION (valid also for oracle and final training,\n with optimized values of lambda)\n\n :param saver: `Saver` object (can be None)\n :param name: optional name for the saver\n :param data: `Datasets` object\n :param T: number of iterations\n :param lmd: weights for the examples, if None sets to 1.\n :param model: a model (should comply with `rf.Network`)\n :param y: placeholder for output\n :param lr: learning rate\n :return:\n ' x = model.inp[0] train_s = data.train.create_supplier(x, y) valid_s = data.validation.create_supplier(x, y) error2 = tf.reduce_mean((lmd * hozo.cross_entropy_loss(y, model.out))) correct_prediction2 = tf.equal(tf.argmax(model.out, 1), tf.argmax(y, 1)) accuracy2 = tf.reduce_mean(tf.cast(correct_prediction2, 'float')) error = tf.reduce_mean(hozo.cross_entropy_loss(y, model.out)) opt = tf.train.GradientDescentOptimizer(lr) ts1 = opt.minimize(error2, var_list=model.var_list) if saver: saver.clear_items() saver.add_items('Test Accuracy', accuracy2, tst_s) with tf.Session(config=hozo.CONFIG_GPU_GROWTH).as_default(): tf.variables_initializer(model.var_list).run() for _ in range(T): ts1.run(feed_dict=train_s()) if saver: saver.save(name) baseline_test_accuracy = accuracy2.eval(feed_dict=valid_s()) test_error = error.eval(feed_dict=valid_s()) return (- test_error)<|docstring|>BASELINE EXECUTION (valid also for oracle and final training, with optimized values of lambda) :param saver: `Saver` object (can be None) :param name: optional name for the saver :param data: `Datasets` object :param T: number of iterations :param lmd: weights for the examples, if None sets to 1. :param model: a model (should comply with `rf.Network`) :param y: placeholder for output :param lr: learning rate :return:<|endoftext|>
9261b4cebcc6c19dd5261c369b9c28d2bfff5972cb1d7f7fc9b044d43161bf1d
def cost_func_01(self, saver, model, y, data, T, lr, lmd=None, name=None): '\n BASELINE EXECUTION (valid also for oracle and final training,\n with optimized values of lambda)\n\n :param saver: `Saver` object (can be None)\n :param name: optional name for the saver\n :param data: `Datasets` object\n :param T: number of iterations\n :param lmd: weights for the examples, if None sets to 1.\n :param model: a model (should comply with `rf.Network`)\n :param y: placeholder for output\n :param lr: learning rate\n :return:\n ' x = model.inp[0] train_s = data.train.create_supplier(x, y) valid_s = data.validation.create_supplier(x, y) error2 = tf.reduce_mean((lmd * hozo.cross_entropy_loss(y, model.out))) correct_prediction2 = tf.equal(tf.argmax(model.out, 1), tf.argmax(y, 1)) accuracy2 = tf.reduce_mean(tf.cast(correct_prediction2, 'float')) error = tf.reduce_mean(hozo.cross_entropy_loss(y, model.out)) opt = tf.train.GradientDescentOptimizer(lr) ts1 = opt.minimize(error2, var_list=model.var_list) if saver: saver.clear_items() saver.add_items('Test Accuracy', accuracy2, tst_s) with tf.Session(config=hozo.CONFIG_GPU_GROWTH).as_default(): tf.variables_initializer(model.var_list).run() for _ in range(T): ts1.run(feed_dict=train_s()) if saver: saver.save(name) baseline_test_accuracy = accuracy2.eval(feed_dict=valid_s()) test_error = error.eval(feed_dict=valid_s()) return (baseline_test_accuracy - 1)
BASELINE EXECUTION (valid also for oracle and final training, with optimized values of lambda) :param saver: `Saver` object (can be None) :param name: optional name for the saver :param data: `Datasets` object :param T: number of iterations :param lmd: weights for the examples, if None sets to 1. :param model: a model (should comply with `rf.Network`) :param y: placeholder for output :param lr: learning rate :return:
HOZO/hozo/data_hyper_cleaning_bo.py
cost_func_01
jsgubin/HOZOG
4
python
def cost_func_01(self, saver, model, y, data, T, lr, lmd=None, name=None): '\n BASELINE EXECUTION (valid also for oracle and final training,\n with optimized values of lambda)\n\n :param saver: `Saver` object (can be None)\n :param name: optional name for the saver\n :param data: `Datasets` object\n :param T: number of iterations\n :param lmd: weights for the examples, if None sets to 1.\n :param model: a model (should comply with `rf.Network`)\n :param y: placeholder for output\n :param lr: learning rate\n :return:\n ' x = model.inp[0] train_s = data.train.create_supplier(x, y) valid_s = data.validation.create_supplier(x, y) error2 = tf.reduce_mean((lmd * hozo.cross_entropy_loss(y, model.out))) correct_prediction2 = tf.equal(tf.argmax(model.out, 1), tf.argmax(y, 1)) accuracy2 = tf.reduce_mean(tf.cast(correct_prediction2, 'float')) error = tf.reduce_mean(hozo.cross_entropy_loss(y, model.out)) opt = tf.train.GradientDescentOptimizer(lr) ts1 = opt.minimize(error2, var_list=model.var_list) if saver: saver.clear_items() saver.add_items('Test Accuracy', accuracy2, tst_s) with tf.Session(config=hozo.CONFIG_GPU_GROWTH).as_default(): tf.variables_initializer(model.var_list).run() for _ in range(T): ts1.run(feed_dict=train_s()) if saver: saver.save(name) baseline_test_accuracy = accuracy2.eval(feed_dict=valid_s()) test_error = error.eval(feed_dict=valid_s()) return (baseline_test_accuracy - 1)
def cost_func_01(self, saver, model, y, data, T, lr, lmd=None, name=None): '\n BASELINE EXECUTION (valid also for oracle and final training,\n with optimized values of lambda)\n\n :param saver: `Saver` object (can be None)\n :param name: optional name for the saver\n :param data: `Datasets` object\n :param T: number of iterations\n :param lmd: weights for the examples, if None sets to 1.\n :param model: a model (should comply with `rf.Network`)\n :param y: placeholder for output\n :param lr: learning rate\n :return:\n ' x = model.inp[0] train_s = data.train.create_supplier(x, y) valid_s = data.validation.create_supplier(x, y) error2 = tf.reduce_mean((lmd * hozo.cross_entropy_loss(y, model.out))) correct_prediction2 = tf.equal(tf.argmax(model.out, 1), tf.argmax(y, 1)) accuracy2 = tf.reduce_mean(tf.cast(correct_prediction2, 'float')) error = tf.reduce_mean(hozo.cross_entropy_loss(y, model.out)) opt = tf.train.GradientDescentOptimizer(lr) ts1 = opt.minimize(error2, var_list=model.var_list) if saver: saver.clear_items() saver.add_items('Test Accuracy', accuracy2, tst_s) with tf.Session(config=hozo.CONFIG_GPU_GROWTH).as_default(): tf.variables_initializer(model.var_list).run() for _ in range(T): ts1.run(feed_dict=train_s()) if saver: saver.save(name) baseline_test_accuracy = accuracy2.eval(feed_dict=valid_s()) test_error = error.eval(feed_dict=valid_s()) return (baseline_test_accuracy - 1)<|docstring|>BASELINE EXECUTION (valid also for oracle and final training, with optimized values of lambda) :param saver: `Saver` object (can be None) :param name: optional name for the saver :param data: `Datasets` object :param T: number of iterations :param lmd: weights for the examples, if None sets to 1. :param model: a model (should comply with `rf.Network`) :param y: placeholder for output :param lr: learning rate :return:<|endoftext|>
4f92830d52d5eeef9c1955c39c0e9136bb6003ba5e25e7a32a6dca988ab16925
def __init__(self, M=1): 'Create a metric object for a slice of Schwarzschild spacetime in Painleve-Gullstrand coordinates.\n\n @param M\n Mass parameter. Default is 1.\n ' super().__init__() self._M = float(M)
Create a metric object for a slice of Schwarzschild spacetime in Painleve-Gullstrand coordinates. @param M Mass parameter. Default is 1.
motsfinder/metric/analytical/schwarzschildpg.py
__init__
daniel-dpk/distorted-motsfinder-public
4
python
def __init__(self, M=1): 'Create a metric object for a slice of Schwarzschild spacetime in Painleve-Gullstrand coordinates.\n\n @param M\n Mass parameter. Default is 1.\n ' super().__init__() self._M = float(M)
def __init__(self, M=1): 'Create a metric object for a slice of Schwarzschild spacetime in Painleve-Gullstrand coordinates.\n\n @param M\n Mass parameter. Default is 1.\n ' super().__init__() self._M = float(M)<|docstring|>Create a metric object for a slice of Schwarzschild spacetime in Painleve-Gullstrand coordinates. @param M Mass parameter. Default is 1.<|endoftext|>
95f96b95b3bee64c27c59f5ec2c7398b5331c1f1eb4782d5dca0875dcad5c22a
@property def M(self): 'ADM mass of the Schwarzschild spacetime.' return self._M
ADM mass of the Schwarzschild spacetime.
motsfinder/metric/analytical/schwarzschildpg.py
M
daniel-dpk/distorted-motsfinder-public
4
python
@property def M(self): return self._M
@property def M(self): return self._M<|docstring|>ADM mass of the Schwarzschild spacetime.<|endoftext|>
851c2db435f8746140b08b6439ef7140dc07bd2bbbd543d67b80a49aa88a9328
def _mat_at(self, point): 'Three metric at a given point in Cartesian (x,y,z) coordinates.' return np.identity(3)
Three metric at a given point in Cartesian (x,y,z) coordinates.
motsfinder/metric/analytical/schwarzschildpg.py
_mat_at
daniel-dpk/distorted-motsfinder-public
4
python
def _mat_at(self, point): return np.identity(3)
def _mat_at(self, point): return np.identity(3)<|docstring|>Three metric at a given point in Cartesian (x,y,z) coordinates.<|endoftext|>
2ec423f0fbf8f3e525ff6115e2c503a45fc261ac629857eddead59e83e9b8c0e
def recursive_glob(env, root: str, extensions: list, ignored_dirs: list=[], ignored_files: list=[]): '\n Finds all files in a root directory matching the provided file extensions while\n skipping any file within ignored directories.\n\n Returns a flattened list of paths\n ' sources = [] rootabs = env.Dir(root).abspath vt = VariantTool(env) extensions = tuple(extensions) for (dirpath, dirs, files) in os.walk(vt.to_src(rootabs)): if any(((ignored_dir in dirpath) for ignored_dir in ignored_dirs)): continue '\n For every file in the list of files found within the directory, if the extension exists within the list of desired extensions,\n\n Create the full path by combining the directory path and filename (with extension). From there, translate this path to the\n corresponding location in the variant build directory (so that SCons is made aware that it is required for the given variant).\n\n Finally, normalize the path and add it to the list of discovered source files.\n ' sources.extend((os.path.normpath(vt.to_variant(os.path.join(dirpath, file))) for file in files if (file.endswith(extensions) and (file not in ignored_files)))) return sources
Finds all files in a root directory matching the provided file extensions while skipping any file within ignored directories. Returns a flattened list of paths
Scones/glob.py
recursive_glob
panix-os-dev-team/Panix
6
python
def recursive_glob(env, root: str, extensions: list, ignored_dirs: list=[], ignored_files: list=[]): '\n Finds all files in a root directory matching the provided file extensions while\n skipping any file within ignored directories.\n\n Returns a flattened list of paths\n ' sources = [] rootabs = env.Dir(root).abspath vt = VariantTool(env) extensions = tuple(extensions) for (dirpath, dirs, files) in os.walk(vt.to_src(rootabs)): if any(((ignored_dir in dirpath) for ignored_dir in ignored_dirs)): continue '\n For every file in the list of files found within the directory, if the extension exists within the list of desired extensions,\n\n Create the full path by combining the directory path and filename (with extension). From there, translate this path to the\n corresponding location in the variant build directory (so that SCons is made aware that it is required for the given variant).\n\n Finally, normalize the path and add it to the list of discovered source files.\n ' sources.extend((os.path.normpath(vt.to_variant(os.path.join(dirpath, file))) for file in files if (file.endswith(extensions) and (file not in ignored_files)))) return sources
def recursive_glob(env, root: str, extensions: list, ignored_dirs: list=[], ignored_files: list=[]): '\n Finds all files in a root directory matching the provided file extensions while\n skipping any file within ignored directories.\n\n Returns a flattened list of paths\n ' sources = [] rootabs = env.Dir(root).abspath vt = VariantTool(env) extensions = tuple(extensions) for (dirpath, dirs, files) in os.walk(vt.to_src(rootabs)): if any(((ignored_dir in dirpath) for ignored_dir in ignored_dirs)): continue '\n For every file in the list of files found within the directory, if the extension exists within the list of desired extensions,\n\n Create the full path by combining the directory path and filename (with extension). From there, translate this path to the\n corresponding location in the variant build directory (so that SCons is made aware that it is required for the given variant).\n\n Finally, normalize the path and add it to the list of discovered source files.\n ' sources.extend((os.path.normpath(vt.to_variant(os.path.join(dirpath, file))) for file in files if (file.endswith(extensions) and (file not in ignored_files)))) return sources<|docstring|>Finds all files in a root directory matching the provided file extensions while skipping any file within ignored directories. Returns a flattened list of paths<|endoftext|>
9da1d14b62f689b9262f22e054cde13f1a28da6b2af47951061fe67f87e02a91
def to_src(self, variant): '\n Translate a variant build path into the corresponding path in the root source tree\n ' rel = ('.' + variant.removeprefix(self.abs)) return os.path.join(self.src, rel)
Translate a variant build path into the corresponding path in the root source tree
Scones/glob.py
to_src
panix-os-dev-team/Panix
6
python
def to_src(self, variant): '\n \n ' rel = ('.' + variant.removeprefix(self.abs)) return os.path.join(self.src, rel)
def to_src(self, variant): '\n \n ' rel = ('.' + variant.removeprefix(self.abs)) return os.path.join(self.src, rel)<|docstring|>Translate a variant build path into the corresponding path in the root source tree<|endoftext|>
ed27febd3c43e7a4fc34d23ec3f23835b2050b5161aaca5f3cbe3412b65c9e48
def to_variant(self, src): '\n Translate a source tree path into the corresponding variant build path\n ' rel = ('.' + src.removeprefix(self.src)) return os.path.join(self.abs, rel)
Translate a source tree path into the corresponding variant build path
Scones/glob.py
to_variant
panix-os-dev-team/Panix
6
python
def to_variant(self, src): '\n \n ' rel = ('.' + src.removeprefix(self.src)) return os.path.join(self.abs, rel)
def to_variant(self, src): '\n \n ' rel = ('.' + src.removeprefix(self.src)) return os.path.join(self.abs, rel)<|docstring|>Translate a source tree path into the corresponding variant build path<|endoftext|>
f90518245f6f636f0f02b46a9fe3063137f688112f2fe7c1a854b60db0d73a63
def parse_options(): 'Handle command-line options\n\n Return parser object and list of arguments\n ' parser = configargparse.ArgumentParser() parser.add_argument('-p', '--profile', required=False) parser.add_argument('-r', '--region', required=False) parser.add_argument('--version', action='version', version=__about__.__version__) subparsers = parser.add_subparsers(title='available subcommands', dest='subcommand') parser_resources = subparsers.add_parser('resources', help='List stack resources') parser_resources.add_argument('name', help='Stack name') parser_resources.add_argument('logical_id', nargs='?', default=None, help='Logical resource id. Returns physical_resource_id.') parser_outputs = subparsers.add_parser('outputs', help='List stack outputs') parser_outputs.add_argument('name', help='Stack name') parser_outputs.add_argument('output_name', nargs='?', default=None, help='Output name. Returns output value.') parser_config = subparsers.add_parser('config', help='Print config properties') parser_config.add_argument('-e', '--env', env_var='STACKS_ENV') parser_config.add_argument('-o', '--output', default='text', choices=['text', 'yaml', 'json'], dest='output_format', help='Output format') parser_config.add_argument('-c', '--config', default='config.yaml', env_var='STACKS_CONFIG', required=False, type=_is_file) parser_config.add_argument('--config-dir', default='config.d', env_var='STACKS_CONFIG_DIR', required=False, type=_is_dir) parser_config.add_argument('property_name', nargs='?', default=None) parser_list = subparsers.add_parser('list', help='List stacks') parser_list.add_argument('-v', '--verbose', action='store_true') parser_list.add_argument('name', default='*', nargs='?', help='Stack name or unix shell-style pattern') parser_create = subparsers.add_parser('create', help='Create a new stack') parser_create.add_argument('-t', '--template', required=True, type=configargparse.FileType()) parser_create.add_argument('-c', '--config', default='config.yaml', 
env_var='STACKS_CONFIG', required=False, type=_is_file) parser_create.add_argument('--config-dir', default='config.d', env_var='STACKS_CONFIG_DIR', required=False, type=_is_dir) parser_create.add_argument('name', nargs='?', default=None) parser_create.add_argument('-e', '--env', env_var='STACKS_ENV', required=True) parser_create.add_argument('-P', '--property', required=False, action='append') parser_create.add_argument('-d', '--dry-run', action='store_true') parser_create.add_argument('-f', '--follow', dest='events_follow', help='Follow stack events', action='store_true') parser_update = subparsers.add_parser('update', help='Update an existing stack') parser_update.add_argument('-t', '--template', required=True, type=configargparse.FileType()) parser_update.add_argument('-c', '--config', env_var='STACKS_CONFIG', default='config.yaml', required=False, type=_is_file) parser_update.add_argument('--config-dir', default='config.d', env_var='STACKS_CONFIG_DIR', required=False, type=_is_dir) parser_update.add_argument('name', nargs='?', default=None) parser_update.add_argument('-e', '--env', env_var='STACKS_ENV', required=True) parser_update.add_argument('-P', '--property', required=False, action='append') parser_update.add_argument('-d', '--dry-run', action='store_true') parser_update.add_argument('--create', dest='create_on_update', help='Create if stack does not exist.', action='store_true') parser_update.add_argument('-f', '--follow', dest='events_follow', help='Follow stack events', action='store_true') parser_delete = subparsers.add_parser('delete', help='Delete an existing stack') parser_delete.add_argument('-f', '--follow', dest='events_follow', help='Follow stack events', action='store_true') parser_delete.add_argument('-y', '--yes', help='Confirm stack deletion.', action='store_true') parser_delete.add_argument('name') parser_events = subparsers.add_parser('events', help='List events from a stack') parser_events.add_argument('name') 
parser_events.add_argument('-f', '--follow', dest='events_follow', action='store_true', help='Poll for new events until stopped.') parser_events.add_argument('-n', '--lines', default='10', type=int, help='Maximum number of lines of CF output returned per 5 second iteration') return (parser, parser.parse_args())
Handle command-line options Return parser object and list of arguments
stacks/cli.py
parse_options
hmrc/stacks
0
python
def parse_options(): 'Handle command-line options\n\n Return parser object and list of arguments\n ' parser = configargparse.ArgumentParser() parser.add_argument('-p', '--profile', required=False) parser.add_argument('-r', '--region', required=False) parser.add_argument('--version', action='version', version=__about__.__version__) subparsers = parser.add_subparsers(title='available subcommands', dest='subcommand') parser_resources = subparsers.add_parser('resources', help='List stack resources') parser_resources.add_argument('name', help='Stack name') parser_resources.add_argument('logical_id', nargs='?', default=None, help='Logical resource id. Returns physical_resource_id.') parser_outputs = subparsers.add_parser('outputs', help='List stack outputs') parser_outputs.add_argument('name', help='Stack name') parser_outputs.add_argument('output_name', nargs='?', default=None, help='Output name. Returns output value.') parser_config = subparsers.add_parser('config', help='Print config properties') parser_config.add_argument('-e', '--env', env_var='STACKS_ENV') parser_config.add_argument('-o', '--output', default='text', choices=['text', 'yaml', 'json'], dest='output_format', help='Output format') parser_config.add_argument('-c', '--config', default='config.yaml', env_var='STACKS_CONFIG', required=False, type=_is_file) parser_config.add_argument('--config-dir', default='config.d', env_var='STACKS_CONFIG_DIR', required=False, type=_is_dir) parser_config.add_argument('property_name', nargs='?', default=None) parser_list = subparsers.add_parser('list', help='List stacks') parser_list.add_argument('-v', '--verbose', action='store_true') parser_list.add_argument('name', default='*', nargs='?', help='Stack name or unix shell-style pattern') parser_create = subparsers.add_parser('create', help='Create a new stack') parser_create.add_argument('-t', '--template', required=True, type=configargparse.FileType()) parser_create.add_argument('-c', '--config', default='config.yaml', 
env_var='STACKS_CONFIG', required=False, type=_is_file) parser_create.add_argument('--config-dir', default='config.d', env_var='STACKS_CONFIG_DIR', required=False, type=_is_dir) parser_create.add_argument('name', nargs='?', default=None) parser_create.add_argument('-e', '--env', env_var='STACKS_ENV', required=True) parser_create.add_argument('-P', '--property', required=False, action='append') parser_create.add_argument('-d', '--dry-run', action='store_true') parser_create.add_argument('-f', '--follow', dest='events_follow', help='Follow stack events', action='store_true') parser_update = subparsers.add_parser('update', help='Update an existing stack') parser_update.add_argument('-t', '--template', required=True, type=configargparse.FileType()) parser_update.add_argument('-c', '--config', env_var='STACKS_CONFIG', default='config.yaml', required=False, type=_is_file) parser_update.add_argument('--config-dir', default='config.d', env_var='STACKS_CONFIG_DIR', required=False, type=_is_dir) parser_update.add_argument('name', nargs='?', default=None) parser_update.add_argument('-e', '--env', env_var='STACKS_ENV', required=True) parser_update.add_argument('-P', '--property', required=False, action='append') parser_update.add_argument('-d', '--dry-run', action='store_true') parser_update.add_argument('--create', dest='create_on_update', help='Create if stack does not exist.', action='store_true') parser_update.add_argument('-f', '--follow', dest='events_follow', help='Follow stack events', action='store_true') parser_delete = subparsers.add_parser('delete', help='Delete an existing stack') parser_delete.add_argument('-f', '--follow', dest='events_follow', help='Follow stack events', action='store_true') parser_delete.add_argument('-y', '--yes', help='Confirm stack deletion.', action='store_true') parser_delete.add_argument('name') parser_events = subparsers.add_parser('events', help='List events from a stack') parser_events.add_argument('name') 
parser_events.add_argument('-f', '--follow', dest='events_follow', action='store_true', help='Poll for new events until stopped.') parser_events.add_argument('-n', '--lines', default='10', type=int, help='Maximum number of lines of CF output returned per 5 second iteration') return (parser, parser.parse_args())
def parse_options(): 'Handle command-line options\n\n Return parser object and list of arguments\n ' parser = configargparse.ArgumentParser() parser.add_argument('-p', '--profile', required=False) parser.add_argument('-r', '--region', required=False) parser.add_argument('--version', action='version', version=__about__.__version__) subparsers = parser.add_subparsers(title='available subcommands', dest='subcommand') parser_resources = subparsers.add_parser('resources', help='List stack resources') parser_resources.add_argument('name', help='Stack name') parser_resources.add_argument('logical_id', nargs='?', default=None, help='Logical resource id. Returns physical_resource_id.') parser_outputs = subparsers.add_parser('outputs', help='List stack outputs') parser_outputs.add_argument('name', help='Stack name') parser_outputs.add_argument('output_name', nargs='?', default=None, help='Output name. Returns output value.') parser_config = subparsers.add_parser('config', help='Print config properties') parser_config.add_argument('-e', '--env', env_var='STACKS_ENV') parser_config.add_argument('-o', '--output', default='text', choices=['text', 'yaml', 'json'], dest='output_format', help='Output format') parser_config.add_argument('-c', '--config', default='config.yaml', env_var='STACKS_CONFIG', required=False, type=_is_file) parser_config.add_argument('--config-dir', default='config.d', env_var='STACKS_CONFIG_DIR', required=False, type=_is_dir) parser_config.add_argument('property_name', nargs='?', default=None) parser_list = subparsers.add_parser('list', help='List stacks') parser_list.add_argument('-v', '--verbose', action='store_true') parser_list.add_argument('name', default='*', nargs='?', help='Stack name or unix shell-style pattern') parser_create = subparsers.add_parser('create', help='Create a new stack') parser_create.add_argument('-t', '--template', required=True, type=configargparse.FileType()) parser_create.add_argument('-c', '--config', default='config.yaml', 
env_var='STACKS_CONFIG', required=False, type=_is_file) parser_create.add_argument('--config-dir', default='config.d', env_var='STACKS_CONFIG_DIR', required=False, type=_is_dir) parser_create.add_argument('name', nargs='?', default=None) parser_create.add_argument('-e', '--env', env_var='STACKS_ENV', required=True) parser_create.add_argument('-P', '--property', required=False, action='append') parser_create.add_argument('-d', '--dry-run', action='store_true') parser_create.add_argument('-f', '--follow', dest='events_follow', help='Follow stack events', action='store_true') parser_update = subparsers.add_parser('update', help='Update an existing stack') parser_update.add_argument('-t', '--template', required=True, type=configargparse.FileType()) parser_update.add_argument('-c', '--config', env_var='STACKS_CONFIG', default='config.yaml', required=False, type=_is_file) parser_update.add_argument('--config-dir', default='config.d', env_var='STACKS_CONFIG_DIR', required=False, type=_is_dir) parser_update.add_argument('name', nargs='?', default=None) parser_update.add_argument('-e', '--env', env_var='STACKS_ENV', required=True) parser_update.add_argument('-P', '--property', required=False, action='append') parser_update.add_argument('-d', '--dry-run', action='store_true') parser_update.add_argument('--create', dest='create_on_update', help='Create if stack does not exist.', action='store_true') parser_update.add_argument('-f', '--follow', dest='events_follow', help='Follow stack events', action='store_true') parser_delete = subparsers.add_parser('delete', help='Delete an existing stack') parser_delete.add_argument('-f', '--follow', dest='events_follow', help='Follow stack events', action='store_true') parser_delete.add_argument('-y', '--yes', help='Confirm stack deletion.', action='store_true') parser_delete.add_argument('name') parser_events = subparsers.add_parser('events', help='List events from a stack') parser_events.add_argument('name') 
parser_events.add_argument('-f', '--follow', dest='events_follow', action='store_true', help='Poll for new events until stopped.') parser_events.add_argument('-n', '--lines', default='10', type=int, help='Maximum number of lines of CF output returned per 5 second iteration') return (parser, parser.parse_args())<|docstring|>Handle command-line options Return parser object and list of arguments<|endoftext|>
584084a21f413ab349d544a6430a6cedef41691fb9be3344d8bc2ae97a9a4108
def _is_file(fname): 'Check whether fname is a file\n\n To be used as a type argument in add_argument()\n ' return (fname if os.path.isfile(fname) else None)
Check whether fname is a file To be used as a type argument in add_argument()
stacks/cli.py
_is_file
hmrc/stacks
0
python
def _is_file(fname): 'Check whether fname is a file\n\n To be used as a type argument in add_argument()\n ' return (fname if os.path.isfile(fname) else None)
def _is_file(fname): 'Check whether fname is a file\n\n To be used as a type argument in add_argument()\n ' return (fname if os.path.isfile(fname) else None)<|docstring|>Check whether fname is a file To be used as a type argument in add_argument()<|endoftext|>
6cd0a6a9e678a4195848dbd7699ca03ed3736aa236a067ceccca0c8fca889c63
def _is_dir(dirname): 'Check whether dirname is a dir\n\n To be used as a type argument in add_argument()\n ' return (dirname if os.path.isdir(dirname) else None)
Check whether dirname is a dir To be used as a type argument in add_argument()
stacks/cli.py
_is_dir
hmrc/stacks
0
python
def _is_dir(dirname): 'Check whether dirname is a dir\n\n To be used as a type argument in add_argument()\n ' return (dirname if os.path.isdir(dirname) else None)
def _is_dir(dirname): 'Check whether dirname is a dir\n\n To be used as a type argument in add_argument()\n ' return (dirname if os.path.isdir(dirname) else None)<|docstring|>Check whether dirname is a dir To be used as a type argument in add_argument()<|endoftext|>
6fe5c64c5c120c241f0893f3f43f5d84ba3fac14dd83e81d09c5b7a5ad1486ea
@mock_logs @mock_ec2 @mock_ecs @mock_iam @mock_batch def test_cancel_running_job(): "\n Test verifies that the moment the job has started, we can't cancel anymore\n " (ec2_client, iam_client, _, _, batch_client) = _get_clients() (_, _, _, iam_arn) = _setup(ec2_client, iam_client) job_def_name = 'echo-o-o' commands = ['echo', 'start'] (job_def_arn, queue_arn) = prepare_job(batch_client, commands, iam_arn, job_def_name) resp = batch_client.submit_job(jobName='test_job_name', jobQueue=queue_arn, jobDefinition=job_def_arn) job_id = resp['jobId'] _wait_for_job_status(batch_client, job_id, 'STARTING') batch_client.cancel_job(jobId=job_id, reason='test_cancel') _wait_for_job_status(batch_client, job_id, 'SUCCEEDED', seconds_to_wait=5) resp = batch_client.describe_jobs(jobs=[job_id]) resp['jobs'][0]['jobName'].should.equal('test_job_name') resp['jobs'][0].shouldnt.have.key('statusReason')
Test verifies that the moment the job has started, we can't cancel anymore
tests/test_batch/test_batch_jobs.py
test_cancel_running_job
danielreisrodrigues/moto
5,460
python
@mock_logs @mock_ec2 @mock_ecs @mock_iam @mock_batch def test_cancel_running_job(): "\n \n " (ec2_client, iam_client, _, _, batch_client) = _get_clients() (_, _, _, iam_arn) = _setup(ec2_client, iam_client) job_def_name = 'echo-o-o' commands = ['echo', 'start'] (job_def_arn, queue_arn) = prepare_job(batch_client, commands, iam_arn, job_def_name) resp = batch_client.submit_job(jobName='test_job_name', jobQueue=queue_arn, jobDefinition=job_def_arn) job_id = resp['jobId'] _wait_for_job_status(batch_client, job_id, 'STARTING') batch_client.cancel_job(jobId=job_id, reason='test_cancel') _wait_for_job_status(batch_client, job_id, 'SUCCEEDED', seconds_to_wait=5) resp = batch_client.describe_jobs(jobs=[job_id]) resp['jobs'][0]['jobName'].should.equal('test_job_name') resp['jobs'][0].shouldnt.have.key('statusReason')
@mock_logs @mock_ec2 @mock_ecs @mock_iam @mock_batch def test_cancel_running_job(): "\n \n " (ec2_client, iam_client, _, _, batch_client) = _get_clients() (_, _, _, iam_arn) = _setup(ec2_client, iam_client) job_def_name = 'echo-o-o' commands = ['echo', 'start'] (job_def_arn, queue_arn) = prepare_job(batch_client, commands, iam_arn, job_def_name) resp = batch_client.submit_job(jobName='test_job_name', jobQueue=queue_arn, jobDefinition=job_def_arn) job_id = resp['jobId'] _wait_for_job_status(batch_client, job_id, 'STARTING') batch_client.cancel_job(jobId=job_id, reason='test_cancel') _wait_for_job_status(batch_client, job_id, 'SUCCEEDED', seconds_to_wait=5) resp = batch_client.describe_jobs(jobs=[job_id]) resp['jobs'][0]['jobName'].should.equal('test_job_name') resp['jobs'][0].shouldnt.have.key('statusReason')<|docstring|>Test verifies that the moment the job has started, we can't cancel anymore<|endoftext|>
cc2e22f4974a0bbebe6bafc6e869aa79dc32d889701a02556e02634938b03c17
@mock_logs @mock_ec2 @mock_ecs @mock_iam @mock_batch def test_container_overrides(): '\n Test if container overrides have any effect.\n Overwrites should be reflected in container description.\n Environment variables should be accessible inside docker container\n ' (ec2_client, iam_client, _, logs_client, batch_client) = _get_clients() (_, _, _, iam_arn) = _setup(ec2_client, iam_client) compute_name = str(uuid4())[0:6] resp = batch_client.create_compute_environment(computeEnvironmentName=compute_name, type='UNMANAGED', state='ENABLED', serviceRole=iam_arn) arn = resp['computeEnvironmentArn'] resp = batch_client.create_job_queue(jobQueueName=str(uuid4())[0:6], state='ENABLED', priority=123, computeEnvironmentOrder=[{'order': 123, 'computeEnvironment': arn}]) queue_arn = resp['jobQueueArn'] job_definition_name = f'sleep10_{str(uuid4())[0:6]}' resp = batch_client.register_job_definition(jobDefinitionName=job_definition_name, type='container', containerProperties={'image': 'busybox', 'vcpus': 1, 'memory': 512, 'command': ['sleep', '10'], 'environment': [{'name': 'TEST0', 'value': 'from job definition'}, {'name': 'TEST1', 'value': 'from job definition'}]}) job_definition_arn = resp['jobDefinitionArn'] resp = batch_client.submit_job(jobName='test1', jobQueue=queue_arn, jobDefinition=job_definition_name, containerOverrides={'vcpus': 2, 'memory': 1024, 'command': ['printenv'], 'environment': [{'name': 'TEST0', 'value': 'from job'}, {'name': 'TEST2', 'value': 'from job'}]}) job_id = resp['jobId'] future = (datetime.datetime.now() + datetime.timedelta(seconds=30)) while (datetime.datetime.now() < future): resp_jobs = batch_client.describe_jobs(jobs=[job_id]) if (resp_jobs['jobs'][0]['status'] == 'FAILED'): raise RuntimeError('Batch job failed') if (resp_jobs['jobs'][0]['status'] == 'SUCCEEDED'): break time.sleep(0.5) else: raise RuntimeError('Batch job timed out') resp = logs_client.describe_log_streams(logGroupName='/aws/batch/job') env_var = list() for stream in 
resp['logStreams']: ls_name = stream['logStreamName'] stream_resp = logs_client.get_log_events(logGroupName='/aws/batch/job', logStreamName=ls_name) for event in stream_resp['events']: if (('TEST' in event['message']) or ('AWS' in event['message'])): (key, value) = tuple(event['message'].split('=')) env_var.append({'name': key, 'value': value}) len(resp_jobs['jobs']).should.equal(1) resp_jobs['jobs'][0]['jobId'].should.equal(job_id) resp_jobs['jobs'][0]['jobQueue'].should.equal(queue_arn) resp_jobs['jobs'][0]['jobDefinition'].should.equal(job_definition_arn) resp_jobs['jobs'][0]['container']['vcpus'].should.equal(2) resp_jobs['jobs'][0]['container']['memory'].should.equal(1024) resp_jobs['jobs'][0]['container']['command'].should.equal(['printenv']) sure.expect(resp_jobs['jobs'][0]['container']['environment']).to.contain({'name': 'TEST0', 'value': 'from job'}) sure.expect(resp_jobs['jobs'][0]['container']['environment']).to.contain({'name': 'TEST1', 'value': 'from job definition'}) sure.expect(resp_jobs['jobs'][0]['container']['environment']).to.contain({'name': 'TEST2', 'value': 'from job'}) sure.expect(resp_jobs['jobs'][0]['container']['environment']).to.contain({'name': 'AWS_BATCH_JOB_ID', 'value': job_id}) sure.expect(env_var).to.contain({'name': 'TEST0', 'value': 'from job'}) sure.expect(env_var).to.contain({'name': 'TEST1', 'value': 'from job definition'}) sure.expect(env_var).to.contain({'name': 'TEST2', 'value': 'from job'}) sure.expect(env_var).to.contain({'name': 'AWS_BATCH_JOB_ID', 'value': job_id})
Test if container overrides have any effect. Overwrites should be reflected in container description. Environment variables should be accessible inside docker container
tests/test_batch/test_batch_jobs.py
test_container_overrides
danielreisrodrigues/moto
5,460
python
@mock_logs @mock_ec2 @mock_ecs @mock_iam @mock_batch def test_container_overrides(): '\n Test if container overrides have any effect.\n Overwrites should be reflected in container description.\n Environment variables should be accessible inside docker container\n ' (ec2_client, iam_client, _, logs_client, batch_client) = _get_clients() (_, _, _, iam_arn) = _setup(ec2_client, iam_client) compute_name = str(uuid4())[0:6] resp = batch_client.create_compute_environment(computeEnvironmentName=compute_name, type='UNMANAGED', state='ENABLED', serviceRole=iam_arn) arn = resp['computeEnvironmentArn'] resp = batch_client.create_job_queue(jobQueueName=str(uuid4())[0:6], state='ENABLED', priority=123, computeEnvironmentOrder=[{'order': 123, 'computeEnvironment': arn}]) queue_arn = resp['jobQueueArn'] job_definition_name = f'sleep10_{str(uuid4())[0:6]}' resp = batch_client.register_job_definition(jobDefinitionName=job_definition_name, type='container', containerProperties={'image': 'busybox', 'vcpus': 1, 'memory': 512, 'command': ['sleep', '10'], 'environment': [{'name': 'TEST0', 'value': 'from job definition'}, {'name': 'TEST1', 'value': 'from job definition'}]}) job_definition_arn = resp['jobDefinitionArn'] resp = batch_client.submit_job(jobName='test1', jobQueue=queue_arn, jobDefinition=job_definition_name, containerOverrides={'vcpus': 2, 'memory': 1024, 'command': ['printenv'], 'environment': [{'name': 'TEST0', 'value': 'from job'}, {'name': 'TEST2', 'value': 'from job'}]}) job_id = resp['jobId'] future = (datetime.datetime.now() + datetime.timedelta(seconds=30)) while (datetime.datetime.now() < future): resp_jobs = batch_client.describe_jobs(jobs=[job_id]) if (resp_jobs['jobs'][0]['status'] == 'FAILED'): raise RuntimeError('Batch job failed') if (resp_jobs['jobs'][0]['status'] == 'SUCCEEDED'): break time.sleep(0.5) else: raise RuntimeError('Batch job timed out') resp = logs_client.describe_log_streams(logGroupName='/aws/batch/job') env_var = list() for stream in 
resp['logStreams']: ls_name = stream['logStreamName'] stream_resp = logs_client.get_log_events(logGroupName='/aws/batch/job', logStreamName=ls_name) for event in stream_resp['events']: if (('TEST' in event['message']) or ('AWS' in event['message'])): (key, value) = tuple(event['message'].split('=')) env_var.append({'name': key, 'value': value}) len(resp_jobs['jobs']).should.equal(1) resp_jobs['jobs'][0]['jobId'].should.equal(job_id) resp_jobs['jobs'][0]['jobQueue'].should.equal(queue_arn) resp_jobs['jobs'][0]['jobDefinition'].should.equal(job_definition_arn) resp_jobs['jobs'][0]['container']['vcpus'].should.equal(2) resp_jobs['jobs'][0]['container']['memory'].should.equal(1024) resp_jobs['jobs'][0]['container']['command'].should.equal(['printenv']) sure.expect(resp_jobs['jobs'][0]['container']['environment']).to.contain({'name': 'TEST0', 'value': 'from job'}) sure.expect(resp_jobs['jobs'][0]['container']['environment']).to.contain({'name': 'TEST1', 'value': 'from job definition'}) sure.expect(resp_jobs['jobs'][0]['container']['environment']).to.contain({'name': 'TEST2', 'value': 'from job'}) sure.expect(resp_jobs['jobs'][0]['container']['environment']).to.contain({'name': 'AWS_BATCH_JOB_ID', 'value': job_id}) sure.expect(env_var).to.contain({'name': 'TEST0', 'value': 'from job'}) sure.expect(env_var).to.contain({'name': 'TEST1', 'value': 'from job definition'}) sure.expect(env_var).to.contain({'name': 'TEST2', 'value': 'from job'}) sure.expect(env_var).to.contain({'name': 'AWS_BATCH_JOB_ID', 'value': job_id})
@mock_logs @mock_ec2 @mock_ecs @mock_iam @mock_batch def test_container_overrides(): '\n Test if container overrides have any effect.\n Overwrites should be reflected in container description.\n Environment variables should be accessible inside docker container\n ' (ec2_client, iam_client, _, logs_client, batch_client) = _get_clients() (_, _, _, iam_arn) = _setup(ec2_client, iam_client) compute_name = str(uuid4())[0:6] resp = batch_client.create_compute_environment(computeEnvironmentName=compute_name, type='UNMANAGED', state='ENABLED', serviceRole=iam_arn) arn = resp['computeEnvironmentArn'] resp = batch_client.create_job_queue(jobQueueName=str(uuid4())[0:6], state='ENABLED', priority=123, computeEnvironmentOrder=[{'order': 123, 'computeEnvironment': arn}]) queue_arn = resp['jobQueueArn'] job_definition_name = f'sleep10_{str(uuid4())[0:6]}' resp = batch_client.register_job_definition(jobDefinitionName=job_definition_name, type='container', containerProperties={'image': 'busybox', 'vcpus': 1, 'memory': 512, 'command': ['sleep', '10'], 'environment': [{'name': 'TEST0', 'value': 'from job definition'}, {'name': 'TEST1', 'value': 'from job definition'}]}) job_definition_arn = resp['jobDefinitionArn'] resp = batch_client.submit_job(jobName='test1', jobQueue=queue_arn, jobDefinition=job_definition_name, containerOverrides={'vcpus': 2, 'memory': 1024, 'command': ['printenv'], 'environment': [{'name': 'TEST0', 'value': 'from job'}, {'name': 'TEST2', 'value': 'from job'}]}) job_id = resp['jobId'] future = (datetime.datetime.now() + datetime.timedelta(seconds=30)) while (datetime.datetime.now() < future): resp_jobs = batch_client.describe_jobs(jobs=[job_id]) if (resp_jobs['jobs'][0]['status'] == 'FAILED'): raise RuntimeError('Batch job failed') if (resp_jobs['jobs'][0]['status'] == 'SUCCEEDED'): break time.sleep(0.5) else: raise RuntimeError('Batch job timed out') resp = logs_client.describe_log_streams(logGroupName='/aws/batch/job') env_var = list() for stream in 
resp['logStreams']: ls_name = stream['logStreamName'] stream_resp = logs_client.get_log_events(logGroupName='/aws/batch/job', logStreamName=ls_name) for event in stream_resp['events']: if (('TEST' in event['message']) or ('AWS' in event['message'])): (key, value) = tuple(event['message'].split('=')) env_var.append({'name': key, 'value': value}) len(resp_jobs['jobs']).should.equal(1) resp_jobs['jobs'][0]['jobId'].should.equal(job_id) resp_jobs['jobs'][0]['jobQueue'].should.equal(queue_arn) resp_jobs['jobs'][0]['jobDefinition'].should.equal(job_definition_arn) resp_jobs['jobs'][0]['container']['vcpus'].should.equal(2) resp_jobs['jobs'][0]['container']['memory'].should.equal(1024) resp_jobs['jobs'][0]['container']['command'].should.equal(['printenv']) sure.expect(resp_jobs['jobs'][0]['container']['environment']).to.contain({'name': 'TEST0', 'value': 'from job'}) sure.expect(resp_jobs['jobs'][0]['container']['environment']).to.contain({'name': 'TEST1', 'value': 'from job definition'}) sure.expect(resp_jobs['jobs'][0]['container']['environment']).to.contain({'name': 'TEST2', 'value': 'from job'}) sure.expect(resp_jobs['jobs'][0]['container']['environment']).to.contain({'name': 'AWS_BATCH_JOB_ID', 'value': job_id}) sure.expect(env_var).to.contain({'name': 'TEST0', 'value': 'from job'}) sure.expect(env_var).to.contain({'name': 'TEST1', 'value': 'from job definition'}) sure.expect(env_var).to.contain({'name': 'TEST2', 'value': 'from job'}) sure.expect(env_var).to.contain({'name': 'AWS_BATCH_JOB_ID', 'value': job_id})<|docstring|>Test if container overrides have any effect. Overwrites should be reflected in container description. Environment variables should be accessible inside docker container<|endoftext|>
cf1c680da6e4a2b13dfd176832badfa6ad0a0edb581aa18ee8ac7e3b3d4ec05e
def load_config(): 'Return configuration, from YAML file, for this application.\n Raises:\n Exception: If not able to read or parse the configuration file for any\n reason or if "base branch" isn\'t set in the configuration\n file.\n Returns:\n dict: Configuration information.\n ' config_filename = '.cft.yml' config_path = os.path.join(os.path.expanduser('~'), config_filename) try: config = yaml.safe_load(open(config_path)) except IOError: raise Exception('Unable to load ~/{}: does it exist (or is there a YAML error)?'.format(config_filename)) config['filename'] = config_filename if ('api key' not in config): raise Exception('Please set Clockify API key as "api key" in {}.'.format(config_filename)) return config
Return configuration, from YAML file, for this application. Raises: Exception: If not able to read or parse the configuration file for any reason or if "base branch" isn't set in the configuration file. Returns: dict: Configuration information.
app.py
load_config
mcantelon/clockify-tool
0
python
def load_config(): 'Return configuration, from YAML file, for this application.\n Raises:\n Exception: If not able to read or parse the configuration file for any\n reason or if "base branch" isn\'t set in the configuration\n file.\n Returns:\n dict: Configuration information.\n ' config_filename = '.cft.yml' config_path = os.path.join(os.path.expanduser('~'), config_filename) try: config = yaml.safe_load(open(config_path)) except IOError: raise Exception('Unable to load ~/{}: does it exist (or is there a YAML error)?'.format(config_filename)) config['filename'] = config_filename if ('api key' not in config): raise Exception('Please set Clockify API key as "api key" in {}.'.format(config_filename)) return config
def load_config(): 'Return configuration, from YAML file, for this application.\n Raises:\n Exception: If not able to read or parse the configuration file for any\n reason or if "base branch" isn\'t set in the configuration\n file.\n Returns:\n dict: Configuration information.\n ' config_filename = '.cft.yml' config_path = os.path.join(os.path.expanduser('~'), config_filename) try: config = yaml.safe_load(open(config_path)) except IOError: raise Exception('Unable to load ~/{}: does it exist (or is there a YAML error)?'.format(config_filename)) config['filename'] = config_filename if ('api key' not in config): raise Exception('Please set Clockify API key as "api key" in {}.'.format(config_filename)) return config<|docstring|>Return configuration, from YAML file, for this application. Raises: Exception: If not able to read or parse the configuration file for any reason or if "base branch" isn't set in the configuration file. Returns: dict: Configuration information.<|endoftext|>
3588b270eaccdf95156e7766d243f68e95af235d670a3e9393790dd667bbb9d6
def get_systeminfo(ipaddress, config, interactive=False): 'Run data plane discovery using this module against a host.\n\n :param ipaddress: address to the host to discover.\n :param config: arguments and configuration suppplied to satori.\n :keyword interactive: whether to prompt the user for information.\n ' if ((ipaddress in utils.get_local_ips()) or ipaddress_module.ip_address(six.text_type(ipaddress)).is_loopback): client = bash.LocalShell() client.host = 'localhost' client.port = 0 perform_install(client) return system_info(client) else: with bash.RemoteShell(ipaddress, username=config['host_username'], private_key=config['host_key'], interactive=interactive) as client: perform_install(client) return system_info(client)
Run data plane discovery using this module against a host. :param ipaddress: address to the host to discover. :param config: arguments and configuration suppplied to satori. :keyword interactive: whether to prompt the user for information.
satori/sysinfo/ohai_solo.py
get_systeminfo
samstav/satori
1
python
def get_systeminfo(ipaddress, config, interactive=False): 'Run data plane discovery using this module against a host.\n\n :param ipaddress: address to the host to discover.\n :param config: arguments and configuration suppplied to satori.\n :keyword interactive: whether to prompt the user for information.\n ' if ((ipaddress in utils.get_local_ips()) or ipaddress_module.ip_address(six.text_type(ipaddress)).is_loopback): client = bash.LocalShell() client.host = 'localhost' client.port = 0 perform_install(client) return system_info(client) else: with bash.RemoteShell(ipaddress, username=config['host_username'], private_key=config['host_key'], interactive=interactive) as client: perform_install(client) return system_info(client)
def get_systeminfo(ipaddress, config, interactive=False): 'Run data plane discovery using this module against a host.\n\n :param ipaddress: address to the host to discover.\n :param config: arguments and configuration suppplied to satori.\n :keyword interactive: whether to prompt the user for information.\n ' if ((ipaddress in utils.get_local_ips()) or ipaddress_module.ip_address(six.text_type(ipaddress)).is_loopback): client = bash.LocalShell() client.host = 'localhost' client.port = 0 perform_install(client) return system_info(client) else: with bash.RemoteShell(ipaddress, username=config['host_username'], private_key=config['host_key'], interactive=interactive) as client: perform_install(client) return system_info(client)<|docstring|>Run data plane discovery using this module against a host. :param ipaddress: address to the host to discover. :param config: arguments and configuration suppplied to satori. :keyword interactive: whether to prompt the user for information.<|endoftext|>
339445e8d0f257bfcfe44591264f537065c7b0d3d99eb7d19ecbec85ce6cab8d
def system_info(client, with_install=False, install_dir=None): 'Run ohai-solo on a remote system and gather the output.\n\n :param client: :class:`ssh.SSH` instance\n :param with_install Will install ohai-solo if set to True\n :param install_dir string containing directory to install to\n :returns: dict -- system information from ohai-solo\n :raises: SystemInfoCommandMissing, SystemInfoCommandOld, SystemInfoNotJson\n SystemInfoMissingJson\n\n SystemInfoCommandMissing if `ohai` is not installed.\n SystemInfoCommandOld if `ohai` is not the latest.\n SystemInfoNotJson if `ohai` does not return valid JSON.\n SystemInfoMissingJson if `ohai` does not return any JSON.\n ' if with_install: perform_install(client, install_dir=install_dir) if client.is_windows(): raise errors.UnsupportedPlatform('ohai-solo is a linux-only sytem info provider. Target platform was %s', client.platform_info['dist']) ohai_solo_prefix = (install_dir or '/opt') ohai_solo_command = six.moves.shlex_quote(('%s/ohai-solo/bin/ohai-solo' % ohai_solo_prefix)) command = ('unset GEM_CACHE GEM_HOME GEM_PATH && sudo %s' % ohai_solo_command) output = client.execute(command, escalate=True, allow_many=False) not_found_msgs = ['command not found', 'Could not find ohai'] if any(((m in k) for m in not_found_msgs for k in list(output.values()) if isinstance(k, six.string_types))): LOG.warning('SystemInfoCommandMissing on host: [%s]', client.host) raise errors.SystemInfoCommandMissing(('ohai-solo missing on %s' % client.host)) unicode_output = ('%s' % output['stdout']) try: results = json.loads(unicode_output) except ValueError as exc: try: clean_output = get_json(unicode_output) results = json.loads(clean_output) except ValueError as exc: raise errors.SystemInfoNotJson(exc) return results
Run ohai-solo on a remote system and gather the output. :param client: :class:`ssh.SSH` instance :param with_install Will install ohai-solo if set to True :param install_dir string containing directory to install to :returns: dict -- system information from ohai-solo :raises: SystemInfoCommandMissing, SystemInfoCommandOld, SystemInfoNotJson SystemInfoMissingJson SystemInfoCommandMissing if `ohai` is not installed. SystemInfoCommandOld if `ohai` is not the latest. SystemInfoNotJson if `ohai` does not return valid JSON. SystemInfoMissingJson if `ohai` does not return any JSON.
satori/sysinfo/ohai_solo.py
system_info
samstav/satori
1
python
def system_info(client, with_install=False, install_dir=None): 'Run ohai-solo on a remote system and gather the output.\n\n :param client: :class:`ssh.SSH` instance\n :param with_install Will install ohai-solo if set to True\n :param install_dir string containing directory to install to\n :returns: dict -- system information from ohai-solo\n :raises: SystemInfoCommandMissing, SystemInfoCommandOld, SystemInfoNotJson\n SystemInfoMissingJson\n\n SystemInfoCommandMissing if `ohai` is not installed.\n SystemInfoCommandOld if `ohai` is not the latest.\n SystemInfoNotJson if `ohai` does not return valid JSON.\n SystemInfoMissingJson if `ohai` does not return any JSON.\n ' if with_install: perform_install(client, install_dir=install_dir) if client.is_windows(): raise errors.UnsupportedPlatform('ohai-solo is a linux-only sytem info provider. Target platform was %s', client.platform_info['dist']) ohai_solo_prefix = (install_dir or '/opt') ohai_solo_command = six.moves.shlex_quote(('%s/ohai-solo/bin/ohai-solo' % ohai_solo_prefix)) command = ('unset GEM_CACHE GEM_HOME GEM_PATH && sudo %s' % ohai_solo_command) output = client.execute(command, escalate=True, allow_many=False) not_found_msgs = ['command not found', 'Could not find ohai'] if any(((m in k) for m in not_found_msgs for k in list(output.values()) if isinstance(k, six.string_types))): LOG.warning('SystemInfoCommandMissing on host: [%s]', client.host) raise errors.SystemInfoCommandMissing(('ohai-solo missing on %s' % client.host)) unicode_output = ('%s' % output['stdout']) try: results = json.loads(unicode_output) except ValueError as exc: try: clean_output = get_json(unicode_output) results = json.loads(clean_output) except ValueError as exc: raise errors.SystemInfoNotJson(exc) return results
def system_info(client, with_install=False, install_dir=None): 'Run ohai-solo on a remote system and gather the output.\n\n :param client: :class:`ssh.SSH` instance\n :param with_install Will install ohai-solo if set to True\n :param install_dir string containing directory to install to\n :returns: dict -- system information from ohai-solo\n :raises: SystemInfoCommandMissing, SystemInfoCommandOld, SystemInfoNotJson\n SystemInfoMissingJson\n\n SystemInfoCommandMissing if `ohai` is not installed.\n SystemInfoCommandOld if `ohai` is not the latest.\n SystemInfoNotJson if `ohai` does not return valid JSON.\n SystemInfoMissingJson if `ohai` does not return any JSON.\n ' if with_install: perform_install(client, install_dir=install_dir) if client.is_windows(): raise errors.UnsupportedPlatform('ohai-solo is a linux-only sytem info provider. Target platform was %s', client.platform_info['dist']) ohai_solo_prefix = (install_dir or '/opt') ohai_solo_command = six.moves.shlex_quote(('%s/ohai-solo/bin/ohai-solo' % ohai_solo_prefix)) command = ('unset GEM_CACHE GEM_HOME GEM_PATH && sudo %s' % ohai_solo_command) output = client.execute(command, escalate=True, allow_many=False) not_found_msgs = ['command not found', 'Could not find ohai'] if any(((m in k) for m in not_found_msgs for k in list(output.values()) if isinstance(k, six.string_types))): LOG.warning('SystemInfoCommandMissing on host: [%s]', client.host) raise errors.SystemInfoCommandMissing(('ohai-solo missing on %s' % client.host)) unicode_output = ('%s' % output['stdout']) try: results = json.loads(unicode_output) except ValueError as exc: try: clean_output = get_json(unicode_output) results = json.loads(clean_output) except ValueError as exc: raise errors.SystemInfoNotJson(exc) return results<|docstring|>Run ohai-solo on a remote system and gather the output. 
:param client: :class:`ssh.SSH` instance :param with_install Will install ohai-solo if set to True :param install_dir string containing directory to install to :returns: dict -- system information from ohai-solo :raises: SystemInfoCommandMissing, SystemInfoCommandOld, SystemInfoNotJson SystemInfoMissingJson SystemInfoCommandMissing if `ohai` is not installed. SystemInfoCommandOld if `ohai` is not the latest. SystemInfoNotJson if `ohai` does not return valid JSON. SystemInfoMissingJson if `ohai` does not return any JSON.<|endoftext|>
06835e00415b19bb3cc1b51ce06fa705a591007902d974d6a00a80a10980f1b1
def perform_install(client, install_dir=None): 'Install ohai-solo on remote system.\n\n :param client: :class:`ssh.SSH` instance\n :param install_dir string containing directory to install to\n ' LOG.info('Installing (or updating) ohai-solo on device %s at %s:%d', client.host, client.host, client.port) is_windows = False try: is_windows = client.is_windows() except Exception: pass if is_windows: raise errors.UnsupportedPlatform('ohai-solo is a linux-only sytem info provider. Target platform was %s', client.platform_info['dist']) else: command = 'wget -N http://readonly.configdiscovery.rackspace.com/install.sh' output = client.execute(command, cwd='/tmp', escalate=True, allow_many=False) LOG.debug('Downloaded ohai-solo | %s', output['stdout']) command = 'bash install.sh' if install_dir: command = ('%s -t -i %s' % (command, six.moves.shlex_quote(install_dir))) install_output = client.execute(command, cwd='/tmp', with_exit_code=True, escalate=True, allow_many=False) LOG.debug('Ran ohai-solo install script. | %s.', install_output['stdout']) command = 'rm install.sh' client.execute(command, cwd='/tmp', escalate=True, allow_many=False) if (install_output['exit_code'] != 0): raise errors.SystemInfoCommandInstallFailed(install_output['stderr'][:256]) else: return install_output
Install ohai-solo on remote system. :param client: :class:`ssh.SSH` instance :param install_dir string containing directory to install to
satori/sysinfo/ohai_solo.py
perform_install
samstav/satori
1
python
def perform_install(client, install_dir=None): 'Install ohai-solo on remote system.\n\n :param client: :class:`ssh.SSH` instance\n :param install_dir string containing directory to install to\n ' LOG.info('Installing (or updating) ohai-solo on device %s at %s:%d', client.host, client.host, client.port) is_windows = False try: is_windows = client.is_windows() except Exception: pass if is_windows: raise errors.UnsupportedPlatform('ohai-solo is a linux-only sytem info provider. Target platform was %s', client.platform_info['dist']) else: command = 'wget -N http://readonly.configdiscovery.rackspace.com/install.sh' output = client.execute(command, cwd='/tmp', escalate=True, allow_many=False) LOG.debug('Downloaded ohai-solo | %s', output['stdout']) command = 'bash install.sh' if install_dir: command = ('%s -t -i %s' % (command, six.moves.shlex_quote(install_dir))) install_output = client.execute(command, cwd='/tmp', with_exit_code=True, escalate=True, allow_many=False) LOG.debug('Ran ohai-solo install script. | %s.', install_output['stdout']) command = 'rm install.sh' client.execute(command, cwd='/tmp', escalate=True, allow_many=False) if (install_output['exit_code'] != 0): raise errors.SystemInfoCommandInstallFailed(install_output['stderr'][:256]) else: return install_output
def perform_install(client, install_dir=None): 'Install ohai-solo on remote system.\n\n :param client: :class:`ssh.SSH` instance\n :param install_dir string containing directory to install to\n ' LOG.info('Installing (or updating) ohai-solo on device %s at %s:%d', client.host, client.host, client.port) is_windows = False try: is_windows = client.is_windows() except Exception: pass if is_windows: raise errors.UnsupportedPlatform('ohai-solo is a linux-only sytem info provider. Target platform was %s', client.platform_info['dist']) else: command = 'wget -N http://readonly.configdiscovery.rackspace.com/install.sh' output = client.execute(command, cwd='/tmp', escalate=True, allow_many=False) LOG.debug('Downloaded ohai-solo | %s', output['stdout']) command = 'bash install.sh' if install_dir: command = ('%s -t -i %s' % (command, six.moves.shlex_quote(install_dir))) install_output = client.execute(command, cwd='/tmp', with_exit_code=True, escalate=True, allow_many=False) LOG.debug('Ran ohai-solo install script. | %s.', install_output['stdout']) command = 'rm install.sh' client.execute(command, cwd='/tmp', escalate=True, allow_many=False) if (install_output['exit_code'] != 0): raise errors.SystemInfoCommandInstallFailed(install_output['stderr'][:256]) else: return install_output<|docstring|>Install ohai-solo on remote system. :param client: :class:`ssh.SSH` instance :param install_dir string containing directory to install to<|endoftext|>
354abe472d4bf5a991c7b50b98f907c44950fcb1720549f3c40501fee98ebced
def remove_remote(client, install_dir=None): 'Remove ohai-solo from specifc remote system.\n\n :param install_dir string containing directory ohai-solo was installed in\n Currently supports:\n - ubuntu [10.x, 12.x]\n - debian [6.x, 7.x]\n - redhat [5.x, 6.x]\n - centos [5.x, 6.x]\n ' if client.is_windows(): raise errors.UnsupportedPlatform('ohai-solo is a linux-only sytem info provider. Target platform was %s', client.platform_info['dist']) else: platform_info = client.platform_info if (install_dir is not None): install_dir = six.moves.shlex_quote(('%s/ohai-solo/' % install_dir)) remove = ('rm -rf %s' % install_dir) elif client.is_debian(): remove = 'dpkg --purge ohai-solo' elif client.is_fedora(): remove = 'yum -y erase ohai-solo' else: raise errors.UnsupportedPlatform(('Unknown distro: %s' % platform_info['dist'])) command = ('%s' % remove) output = client.execute(command, cwd='/tmp', escalate=True) return output
Remove ohai-solo from specifc remote system. :param install_dir string containing directory ohai-solo was installed in Currently supports: - ubuntu [10.x, 12.x] - debian [6.x, 7.x] - redhat [5.x, 6.x] - centos [5.x, 6.x]
satori/sysinfo/ohai_solo.py
remove_remote
samstav/satori
1
python
def remove_remote(client, install_dir=None): 'Remove ohai-solo from specifc remote system.\n\n :param install_dir string containing directory ohai-solo was installed in\n Currently supports:\n - ubuntu [10.x, 12.x]\n - debian [6.x, 7.x]\n - redhat [5.x, 6.x]\n - centos [5.x, 6.x]\n ' if client.is_windows(): raise errors.UnsupportedPlatform('ohai-solo is a linux-only sytem info provider. Target platform was %s', client.platform_info['dist']) else: platform_info = client.platform_info if (install_dir is not None): install_dir = six.moves.shlex_quote(('%s/ohai-solo/' % install_dir)) remove = ('rm -rf %s' % install_dir) elif client.is_debian(): remove = 'dpkg --purge ohai-solo' elif client.is_fedora(): remove = 'yum -y erase ohai-solo' else: raise errors.UnsupportedPlatform(('Unknown distro: %s' % platform_info['dist'])) command = ('%s' % remove) output = client.execute(command, cwd='/tmp', escalate=True) return output
def remove_remote(client, install_dir=None): 'Remove ohai-solo from specifc remote system.\n\n :param install_dir string containing directory ohai-solo was installed in\n Currently supports:\n - ubuntu [10.x, 12.x]\n - debian [6.x, 7.x]\n - redhat [5.x, 6.x]\n - centos [5.x, 6.x]\n ' if client.is_windows(): raise errors.UnsupportedPlatform('ohai-solo is a linux-only sytem info provider. Target platform was %s', client.platform_info['dist']) else: platform_info = client.platform_info if (install_dir is not None): install_dir = six.moves.shlex_quote(('%s/ohai-solo/' % install_dir)) remove = ('rm -rf %s' % install_dir) elif client.is_debian(): remove = 'dpkg --purge ohai-solo' elif client.is_fedora(): remove = 'yum -y erase ohai-solo' else: raise errors.UnsupportedPlatform(('Unknown distro: %s' % platform_info['dist'])) command = ('%s' % remove) output = client.execute(command, cwd='/tmp', escalate=True) return output<|docstring|>Remove ohai-solo from specifc remote system. :param install_dir string containing directory ohai-solo was installed in Currently supports: - ubuntu [10.x, 12.x] - debian [6.x, 7.x] - redhat [5.x, 6.x] - centos [5.x, 6.x]<|endoftext|>
49ef53e727091d5bbbc20b1a4e065995d7757249e9e6ce86af778b46f212d0a6
def get_json(data): 'Find the JSON string in data and return a string.\n\n :param data: :string:\n :returns: string -- JSON string stripped of non-JSON data\n :raises: SystemInfoMissingJson\n\n SystemInfoMissingJson if `ohai` does not return any JSON.\n ' try: first = data.index('{') last = data.rindex('}') return data[first:(last + 1)] except ValueError as exc: context = {'ValueError': ('%s' % exc)} raise errors.SystemInfoMissingJson(context)
Find the JSON string in data and return a string. :param data: :string: :returns: string -- JSON string stripped of non-JSON data :raises: SystemInfoMissingJson SystemInfoMissingJson if `ohai` does not return any JSON.
satori/sysinfo/ohai_solo.py
get_json
samstav/satori
1
python
def get_json(data): 'Find the JSON string in data and return a string.\n\n :param data: :string:\n :returns: string -- JSON string stripped of non-JSON data\n :raises: SystemInfoMissingJson\n\n SystemInfoMissingJson if `ohai` does not return any JSON.\n ' try: first = data.index('{') last = data.rindex('}') return data[first:(last + 1)] except ValueError as exc: context = {'ValueError': ('%s' % exc)} raise errors.SystemInfoMissingJson(context)
def get_json(data): 'Find the JSON string in data and return a string.\n\n :param data: :string:\n :returns: string -- JSON string stripped of non-JSON data\n :raises: SystemInfoMissingJson\n\n SystemInfoMissingJson if `ohai` does not return any JSON.\n ' try: first = data.index('{') last = data.rindex('}') return data[first:(last + 1)] except ValueError as exc: context = {'ValueError': ('%s' % exc)} raise errors.SystemInfoMissingJson(context)<|docstring|>Find the JSON string in data and return a string. :param data: :string: :returns: string -- JSON string stripped of non-JSON data :raises: SystemInfoMissingJson SystemInfoMissingJson if `ohai` does not return any JSON.<|endoftext|>
265c77867653f20c338d797a71610ebf7e995a221af6d1f1211a68bfe4df6929
def get_pipeline_options(key, pipeline_cfg_uri): 'Returns a dict with the options/hyperparameters for a pipeline run.' pipeline_dict = file_to_json(pipeline_cfg_uri) solver = pipeline_dict['backend']['solver'] data = pipeline_dict['backend']['data'] num_epochs = solver['num_epochs'] train_sz = data['train_sz_rel'] opts = {'key': key, 'num_epochs': num_epochs, 'train_sz': train_sz} return opts
Returns a dict with the options/hyperparameters for a pipeline run.
spacenet/ssl_analysis.py
get_pipeline_options
lewfish/ssl
0
python
def get_pipeline_options(key, pipeline_cfg_uri): pipeline_dict = file_to_json(pipeline_cfg_uri) solver = pipeline_dict['backend']['solver'] data = pipeline_dict['backend']['data'] num_epochs = solver['num_epochs'] train_sz = data['train_sz_rel'] opts = {'key': key, 'num_epochs': num_epochs, 'train_sz': train_sz} return opts
def get_pipeline_options(key, pipeline_cfg_uri): pipeline_dict = file_to_json(pipeline_cfg_uri) solver = pipeline_dict['backend']['solver'] data = pipeline_dict['backend']['data'] num_epochs = solver['num_epochs'] train_sz = data['train_sz_rel'] opts = {'key': key, 'num_epochs': num_epochs, 'train_sz': train_sz} return opts<|docstring|>Returns a dict with the options/hyperparameters for a pipeline run.<|endoftext|>
9fdf8c1947ad4c6e1589385f93ff94530da0d7181e694de47ea75e991489344e
def pad_rows(arr1, arr2): "\n Pad the array with the least numer of rows with NaN's\n " if (arr2.ndim == 1): pass elif (arr2.ndim == 2): if (arr1.shape[0] < arr2.shape[0]): buff = arr1.copy() arr1 = np.full(arr2.shape, np.nan, dtype=arr2.dtype) arr1[(0:buff.shape[0], :)] = buff elif (arr1.shape[0] > arr2.shape[0]): buff = arr2.copy() arr2 = np.full(arr1.shape, np.nan, dtype=arr2.dtype) arr2[(0:buff.shape[0], :)] = buff elif (arr1.shape[1] < arr2.shape[1]): buff = arr1.copy() arr1 = np.full(arr2.shape, np.nan, dtype=arr2.dtype) arr1[(:, 0:buff.shape[1], :)] = buff elif (arr1.shape[1] > arr2.shape[1]): buff = arr2.copy() arr2 = np.full(arr1.shape, np.nan, dtype=arr2.dtype) arr2[(:, 0:buff.shape[1], :)] = buff return (arr1, arr2)
Pad the array with the least numer of rows with NaN's
src/pys5p/l1b_io.py
pad_rows
rmvanhees/pys5p
10
python
def pad_rows(arr1, arr2): "\n \n " if (arr2.ndim == 1): pass elif (arr2.ndim == 2): if (arr1.shape[0] < arr2.shape[0]): buff = arr1.copy() arr1 = np.full(arr2.shape, np.nan, dtype=arr2.dtype) arr1[(0:buff.shape[0], :)] = buff elif (arr1.shape[0] > arr2.shape[0]): buff = arr2.copy() arr2 = np.full(arr1.shape, np.nan, dtype=arr2.dtype) arr2[(0:buff.shape[0], :)] = buff elif (arr1.shape[1] < arr2.shape[1]): buff = arr1.copy() arr1 = np.full(arr2.shape, np.nan, dtype=arr2.dtype) arr1[(:, 0:buff.shape[1], :)] = buff elif (arr1.shape[1] > arr2.shape[1]): buff = arr2.copy() arr2 = np.full(arr1.shape, np.nan, dtype=arr2.dtype) arr2[(:, 0:buff.shape[1], :)] = buff return (arr1, arr2)
def pad_rows(arr1, arr2): "\n \n " if (arr2.ndim == 1): pass elif (arr2.ndim == 2): if (arr1.shape[0] < arr2.shape[0]): buff = arr1.copy() arr1 = np.full(arr2.shape, np.nan, dtype=arr2.dtype) arr1[(0:buff.shape[0], :)] = buff elif (arr1.shape[0] > arr2.shape[0]): buff = arr2.copy() arr2 = np.full(arr1.shape, np.nan, dtype=arr2.dtype) arr2[(0:buff.shape[0], :)] = buff elif (arr1.shape[1] < arr2.shape[1]): buff = arr1.copy() arr1 = np.full(arr2.shape, np.nan, dtype=arr2.dtype) arr1[(:, 0:buff.shape[1], :)] = buff elif (arr1.shape[1] > arr2.shape[1]): buff = arr2.copy() arr2 = np.full(arr1.shape, np.nan, dtype=arr2.dtype) arr2[(:, 0:buff.shape[1], :)] = buff return (arr1, arr2)<|docstring|>Pad the array with the least numer of rows with NaN's<|endoftext|>
6331a11b2cbb87c0fd22f2ca4eae84c6a430676cf2e7f42df363e8d8415db4c0
def __init__(self, l1b_product, readwrite=False, verbose=False): '\n Initialize access to a Tropomi offline L1b product\n ' if (not Path(l1b_product).is_file()): raise FileNotFoundError(f'{l1b_product} does not exist') self.__rw = readwrite self.__verbose = verbose self.__msm_path = None self.__patched_msm = [] self.filename = l1b_product self.bands = '' if readwrite: self.fid = h5py.File(l1b_product, 'r+') else: self.fid = h5py.File(l1b_product, 'r')
Initialize access to a Tropomi offline L1b product
src/pys5p/l1b_io.py
__init__
rmvanhees/pys5p
10
python
def __init__(self, l1b_product, readwrite=False, verbose=False): '\n \n ' if (not Path(l1b_product).is_file()): raise FileNotFoundError(f'{l1b_product} does not exist') self.__rw = readwrite self.__verbose = verbose self.__msm_path = None self.__patched_msm = [] self.filename = l1b_product self.bands = if readwrite: self.fid = h5py.File(l1b_product, 'r+') else: self.fid = h5py.File(l1b_product, 'r')
def __init__(self, l1b_product, readwrite=False, verbose=False): '\n \n ' if (not Path(l1b_product).is_file()): raise FileNotFoundError(f'{l1b_product} does not exist') self.__rw = readwrite self.__verbose = verbose self.__msm_path = None self.__patched_msm = [] self.filename = l1b_product self.bands = if readwrite: self.fid = h5py.File(l1b_product, 'r+') else: self.fid = h5py.File(l1b_product, 'r')<|docstring|>Initialize access to a Tropomi offline L1b product<|endoftext|>
3fe98a939b862bbfeda66c816f8a32ab391f5a19a2a86e4b74d710e34c75127e
def __enter__(self): '\n method called to initiate the context manager\n ' return self
method called to initiate the context manager
src/pys5p/l1b_io.py
__enter__
rmvanhees/pys5p
10
python
def __enter__(self): '\n \n ' return self
def __enter__(self): '\n \n ' return self<|docstring|>method called to initiate the context manager<|endoftext|>
a845abdbeae5f39d06b5d1da3e7bb86971111311cbc47b27bbe00cbf9d7b283f
def __exit__(self, exc_type, exc_value, traceback): '\n method called when exiting the context manager\n ' self.close() return False
method called when exiting the context manager
src/pys5p/l1b_io.py
__exit__
rmvanhees/pys5p
10
python
def __exit__(self, exc_type, exc_value, traceback): '\n \n ' self.close() return False
def __exit__(self, exc_type, exc_value, traceback): '\n \n ' self.close() return False<|docstring|>method called when exiting the context manager<|endoftext|>
c3ab55ba2a2fd9a4f4d34d2569af7b347731123b4e083c6c5fea90090049b3b1
def close(self): '\n Close resources.\n\n Notes\n -----\n Before closing the product, we make sure that the output product\n describes what has been altered by the S/W. To keep any change\n traceable.\n\n In case the L1b product is altered, the attributes listed below are\n added to the group: "/METADATA/SRON_METADATA":\n - dateStamp (\'now\')\n - Git-version of S/W\n - list of patched datasets\n - auxiliary datasets used by patch-routines\n ' if (self.fid is None): return if self.__patched_msm: sgrp = self.fid.require_group('/METADATA/SRON_METADATA') sgrp.attrs['dateStamp'] = datetime.utcnow().isoformat() sgrp.attrs['git_tag'] = get_version(root='..', relative_to=__file__) if ('patched_datasets' not in sgrp): dtype = h5py.special_dtype(vlen=str) dset = sgrp.create_dataset('patched_datasets', (len(self.__patched_msm),), maxshape=(None,), dtype=dtype) dset[:] = np.asarray(self.__patched_msm) else: dset = sgrp['patched_datasets'] dset.resize((dset.shape[0] + len(self.__patched_msm)), axis=0) dset[(dset.shape[0] - 1):] = np.asarray(self.__patched_msm) self.fid.close() self.fid = None
Close resources. Notes ----- Before closing the product, we make sure that the output product describes what has been altered by the S/W. To keep any change traceable. In case the L1b product is altered, the attributes listed below are added to the group: "/METADATA/SRON_METADATA": - dateStamp ('now') - Git-version of S/W - list of patched datasets - auxiliary datasets used by patch-routines
src/pys5p/l1b_io.py
close
rmvanhees/pys5p
10
python
def close(self): '\n Close resources.\n\n Notes\n -----\n Before closing the product, we make sure that the output product\n describes what has been altered by the S/W. To keep any change\n traceable.\n\n In case the L1b product is altered, the attributes listed below are\n added to the group: "/METADATA/SRON_METADATA":\n - dateStamp (\'now\')\n - Git-version of S/W\n - list of patched datasets\n - auxiliary datasets used by patch-routines\n ' if (self.fid is None): return if self.__patched_msm: sgrp = self.fid.require_group('/METADATA/SRON_METADATA') sgrp.attrs['dateStamp'] = datetime.utcnow().isoformat() sgrp.attrs['git_tag'] = get_version(root='..', relative_to=__file__) if ('patched_datasets' not in sgrp): dtype = h5py.special_dtype(vlen=str) dset = sgrp.create_dataset('patched_datasets', (len(self.__patched_msm),), maxshape=(None,), dtype=dtype) dset[:] = np.asarray(self.__patched_msm) else: dset = sgrp['patched_datasets'] dset.resize((dset.shape[0] + len(self.__patched_msm)), axis=0) dset[(dset.shape[0] - 1):] = np.asarray(self.__patched_msm) self.fid.close() self.fid = None
def close(self): '\n Close resources.\n\n Notes\n -----\n Before closing the product, we make sure that the output product\n describes what has been altered by the S/W. To keep any change\n traceable.\n\n In case the L1b product is altered, the attributes listed below are\n added to the group: "/METADATA/SRON_METADATA":\n - dateStamp (\'now\')\n - Git-version of S/W\n - list of patched datasets\n - auxiliary datasets used by patch-routines\n ' if (self.fid is None): return if self.__patched_msm: sgrp = self.fid.require_group('/METADATA/SRON_METADATA') sgrp.attrs['dateStamp'] = datetime.utcnow().isoformat() sgrp.attrs['git_tag'] = get_version(root='..', relative_to=__file__) if ('patched_datasets' not in sgrp): dtype = h5py.special_dtype(vlen=str) dset = sgrp.create_dataset('patched_datasets', (len(self.__patched_msm),), maxshape=(None,), dtype=dtype) dset[:] = np.asarray(self.__patched_msm) else: dset = sgrp['patched_datasets'] dset.resize((dset.shape[0] + len(self.__patched_msm)), axis=0) dset[(dset.shape[0] - 1):] = np.asarray(self.__patched_msm) self.fid.close() self.fid = None<|docstring|>Close resources. Notes ----- Before closing the product, we make sure that the output product describes what has been altered by the S/W. To keep any change traceable. In case the L1b product is altered, the attributes listed below are added to the group: "/METADATA/SRON_METADATA": - dateStamp ('now') - Git-version of S/W - list of patched datasets - auxiliary datasets used by patch-routines<|endoftext|>
c47a75ae91a189db08c3e4526b673ae8b09f5412a39402abc4fa6921d82ed12f
def get_attr(self, attr_name): '\n Obtain value of an HDF5 file attribute\n\n Parameters\n ----------\n attr_name : string\n Name of the attribute\n ' if (attr_name not in self.fid.attrs.keys()): return None attr = self.fid.attrs[attr_name] if (attr.shape is None): return None return attr
Obtain value of an HDF5 file attribute Parameters ---------- attr_name : string Name of the attribute
src/pys5p/l1b_io.py
get_attr
rmvanhees/pys5p
10
python
def get_attr(self, attr_name): '\n Obtain value of an HDF5 file attribute\n\n Parameters\n ----------\n attr_name : string\n Name of the attribute\n ' if (attr_name not in self.fid.attrs.keys()): return None attr = self.fid.attrs[attr_name] if (attr.shape is None): return None return attr
def get_attr(self, attr_name): '\n Obtain value of an HDF5 file attribute\n\n Parameters\n ----------\n attr_name : string\n Name of the attribute\n ' if (attr_name not in self.fid.attrs.keys()): return None attr = self.fid.attrs[attr_name] if (attr.shape is None): return None return attr<|docstring|>Obtain value of an HDF5 file attribute Parameters ---------- attr_name : string Name of the attribute<|endoftext|>
8cebf5becd45e824ffdfbb60aa554c79403787cd0d77670998873d0678ae7c03
def get_orbit(self): '\n Returns absolute orbit number\n ' res = self.get_attr('orbit') if (res is None): return None return int(res)
Returns absolute orbit number
src/pys5p/l1b_io.py
get_orbit
rmvanhees/pys5p
10
python
def get_orbit(self): '\n \n ' res = self.get_attr('orbit') if (res is None): return None return int(res)
def get_orbit(self): '\n \n ' res = self.get_attr('orbit') if (res is None): return None return int(res)<|docstring|>Returns absolute orbit number<|endoftext|>
84fe905dc79d32d37a43761f4e804059ae9d5d3a8732c42f5cc0743099b0eff5
def get_processor_version(self): '\n Returns version of the L01b processor\n ' attr = self.get_attr('processor_version') if (attr is None): return None return attr.decode('ascii')
Returns version of the L01b processor
src/pys5p/l1b_io.py
get_processor_version
rmvanhees/pys5p
10
python
def get_processor_version(self): '\n \n ' attr = self.get_attr('processor_version') if (attr is None): return None return attr.decode('ascii')
def get_processor_version(self): '\n \n ' attr = self.get_attr('processor_version') if (attr is None): return None return attr.decode('ascii')<|docstring|>Returns version of the L01b processor<|endoftext|>
b75a6e4230de265a80abf91e3ce7292b442b33ffe29bb6d35ec0bcc384888548
def get_coverage_time(self): '\n Returns start and end of the measurement coverage time\n ' attr_start = self.get_attr('time_coverage_start') if (attr_start is None): return None attr_end = self.get_attr('time_coverage_end') if (attr_end is None): return None return (attr_start.decode('ascii'), attr_end.decode('ascii'))
Returns start and end of the measurement coverage time
src/pys5p/l1b_io.py
get_coverage_time
rmvanhees/pys5p
10
python
def get_coverage_time(self): '\n \n ' attr_start = self.get_attr('time_coverage_start') if (attr_start is None): return None attr_end = self.get_attr('time_coverage_end') if (attr_end is None): return None return (attr_start.decode('ascii'), attr_end.decode('ascii'))
def get_coverage_time(self): '\n \n ' attr_start = self.get_attr('time_coverage_start') if (attr_start is None): return None attr_end = self.get_attr('time_coverage_end') if (attr_end is None): return None return (attr_start.decode('ascii'), attr_end.decode('ascii'))<|docstring|>Returns start and end of the measurement coverage time<|endoftext|>
052e7187d24235786bf1e3729f0f53bbca9d2be91bdc86a936950954a87b7d46
def get_creation_time(self): '\n Returns datetime when the L1b product was created\n ' grp = self.fid['/METADATA/ESA_METADATA/earth_explorer_header'] dset = grp['fixed_header/source'] if ('Creation_Date' in self.fid.attrs.keys()): attr = dset.attrs['Creation_Date'] if isinstance(attr, bytes): return attr.decode('ascii') return attr return None
Returns datetime when the L1b product was created
src/pys5p/l1b_io.py
get_creation_time
rmvanhees/pys5p
10
python
def get_creation_time(self): '\n \n ' grp = self.fid['/METADATA/ESA_METADATA/earth_explorer_header'] dset = grp['fixed_header/source'] if ('Creation_Date' in self.fid.attrs.keys()): attr = dset.attrs['Creation_Date'] if isinstance(attr, bytes): return attr.decode('ascii') return attr return None
def get_creation_time(self): '\n \n ' grp = self.fid['/METADATA/ESA_METADATA/earth_explorer_header'] dset = grp['fixed_header/source'] if ('Creation_Date' in self.fid.attrs.keys()): attr = dset.attrs['Creation_Date'] if isinstance(attr, bytes): return attr.decode('ascii') return attr return None<|docstring|>Returns datetime when the L1b product was created<|endoftext|>
a902289cc49e278fc68e9e9dca612d6d07b12804b3b9dc13fa7fff375a70c59e
def select(self, msm_type=None):
    """
    Select a calibration measurement as <processing class>_<ic_id>.

    Parameters
    ----------
    msm_type : string
        Name of calibration measurement group as <processing class>_<ic_id>

    Returns
    -------
    out : string
        String with spectral bands found in product

    Updated object attributes:
     - bands : available spectral bands
    """
    if msm_type is None:
        if self.msm_type is None:
            raise ValueError('parameter msm_type is not defined')
        msm_type = self.msm_type

    self.bands = ''
    for grp_name in self.band_groups:
        # Probe all eight spectral bands within this detector group.
        for band in '12345678':
            grp_path = PurePosixPath(grp_name.replace('%', band), msm_type)
            if str(grp_path) not in self.fid:
                continue
            if self.__verbose:
                print('*** INFO: found: ', grp_path)
            self.bands += band
        # Stop at the first group that contains any band; remember the
        # (still band-templated) measurement path for later access.
        if self.bands:
            self.__msm_path = str(PurePosixPath(grp_name, msm_type))
            break

    return self.bands
Select a calibration measurement as <processing class>_<ic_id> Parameters ---------- msm_type : string Name of calibration measurement group as <processing class>_<ic_id> Returns ------- out : string String with spectral bands found in product Updated object attributes: - bands : available spectral bands
src/pys5p/l1b_io.py
select
rmvanhees/pys5p
10
python
def select(self, msm_type=None): '\n Select a calibration measurement as <processing class>_<ic_id>\n\n Parameters\n ----------\n msm_type : string\n Name of calibration measurement group as <processing class>_<ic_id>\n\n Returns\n -------\n out : string\n String with spectral bands found in product\n\n Updated object attributes:\n - bands : available spectral bands\n ' if (msm_type is None): if (self.msm_type is None): raise ValueError('parameter msm_type is not defined') msm_type = self.msm_type self.bands = for name in self.band_groups: for ii in '12345678': grp_path = PurePosixPath(name.replace('%', ii), msm_type) if (str(grp_path) in self.fid): if self.__verbose: print('*** INFO: found: ', grp_path) self.bands += ii if self.bands: self.__msm_path = str(PurePosixPath(name, msm_type)) break return self.bands
def select(self, msm_type=None): '\n Select a calibration measurement as <processing class>_<ic_id>\n\n Parameters\n ----------\n msm_type : string\n Name of calibration measurement group as <processing class>_<ic_id>\n\n Returns\n -------\n out : string\n String with spectral bands found in product\n\n Updated object attributes:\n - bands : available spectral bands\n ' if (msm_type is None): if (self.msm_type is None): raise ValueError('parameter msm_type is not defined') msm_type = self.msm_type self.bands = for name in self.band_groups: for ii in '12345678': grp_path = PurePosixPath(name.replace('%', ii), msm_type) if (str(grp_path) in self.fid): if self.__verbose: print('*** INFO: found: ', grp_path) self.bands += ii if self.bands: self.__msm_path = str(PurePosixPath(name, msm_type)) break return self.bands<|docstring|>Select a calibration measurement as <processing class>_<ic_id> Parameters ---------- msm_type : string Name of calibration measurement group as <processing class>_<ic_id> Returns ------- out : string String with spectral bands found in product Updated object attributes: - bands : available spectral bands<|endoftext|>
37850a1acf4159184ef32998e8467b79d63fb3719f9c2297efc445336767f996
def sequence(self, band=None):
    """
    Returns sequence number for each unique measurement based on ICID
    and delta_time

    Parameters
    ----------
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the band present in the product
        Default is 'None' which returns the first available band

    Returns
    -------
    out : array-like
        Numpy rec-array with sequence number, ICID and delta-time
    """
    # No measurement group has been selected yet (see select()).
    if (self.__msm_path is None):
        return None
    # Fall back to the first available band for None or multi-band input.
    if ((band is None) or (len(band) > 1)):
        band = self.bands[0]
    msm_path = self.__msm_path.replace('%', band)
    # Instrument configuration: ICID per frame plus the master cycle
    # period ('_us' suffix suggests microseconds, scaled here by 1000).
    grp = self.fid[str(PurePosixPath(msm_path, 'INSTRUMENT'))]
    icid_list = np.squeeze(grp['instrument_configuration']['ic_id'])
    master_cycle = grp['instrument_settings']['master_cycle_period_us'][0]
    master_cycle /= 1000
    grp = self.fid[str(PurePosixPath(msm_path, 'OBSERVATIONS'))]
    delta_time = np.squeeze(grp['delta_time'])
    length = delta_time.size
    # One record per frame: sequence id, ICID, timestamp offset and the
    # original frame index.
    res = np.empty((length,), dtype=[('sequence', 'u2'), ('icid', 'u2'), ('delta_time', 'u4'), ('index', 'u4')])
    res['sequence'] = [0]
    res['icid'] = icid_list
    res['delta_time'] = delta_time
    res['index'] = np.arange(length, dtype=np.uint32)
    # A single frame is trivially one sequence.
    if (length == 1):
        return res
    # Pad ICID and delta_time at both ends so the boundary tests below
    # are well defined for the first and last frame.
    buff_icid = np.concatenate(([(icid_list[0] - 10)], icid_list, [(icid_list[(- 1)] + 10)]))
    dt_thres = (10 * master_cycle)
    buff_time = np.concatenate(([(delta_time[0] - (10 * dt_thres))], delta_time, [(delta_time[(- 1)] + (10 * dt_thres))]))
    # A new sequence starts wherever the time step exceeds the threshold
    # or the instrument configuration (ICID) changes.
    indx = (((buff_time[1:] - buff_time[0:(- 1)]) > dt_thres) | ((buff_icid[1:] - buff_icid[0:(- 1)]) != 0)).nonzero()[0]
    # Assign a running sequence number to every frame between two
    # consecutive boundaries.
    for ii in range((len(indx) - 1)):
        res['sequence'][indx[ii]:indx[(ii + 1)]] = ii
    return res
Returns sequence number for each unique measurement based on ICID and delta_time Parameters ---------- band : None or {'1', '2', '3', ..., '8'} Select one of the band present in the product Default is 'None' which returns the first available band Returns ------- out : array-like Numpy rec-array with sequence number, ICID and delta-time
src/pys5p/l1b_io.py
sequence
rmvanhees/pys5p
10
python
def sequence(self, band=None): "\n Returns sequence number for each unique measurement based on ICID\n and delta_time\n\n Parameters\n ----------\n band : None or {'1', '2', '3', ..., '8'}\n Select one of the band present in the product\n Default is 'None' which returns the first available band\n\n Returns\n -------\n out : array-like\n Numpy rec-array with sequence number, ICID and delta-time\n " if (self.__msm_path is None): return None if ((band is None) or (len(band) > 1)): band = self.bands[0] msm_path = self.__msm_path.replace('%', band) grp = self.fid[str(PurePosixPath(msm_path, 'INSTRUMENT'))] icid_list = np.squeeze(grp['instrument_configuration']['ic_id']) master_cycle = grp['instrument_settings']['master_cycle_period_us'][0] master_cycle /= 1000 grp = self.fid[str(PurePosixPath(msm_path, 'OBSERVATIONS'))] delta_time = np.squeeze(grp['delta_time']) length = delta_time.size res = np.empty((length,), dtype=[('sequence', 'u2'), ('icid', 'u2'), ('delta_time', 'u4'), ('index', 'u4')]) res['sequence'] = [0] res['icid'] = icid_list res['delta_time'] = delta_time res['index'] = np.arange(length, dtype=np.uint32) if (length == 1): return res buff_icid = np.concatenate(([(icid_list[0] - 10)], icid_list, [(icid_list[(- 1)] + 10)])) dt_thres = (10 * master_cycle) buff_time = np.concatenate(([(delta_time[0] - (10 * dt_thres))], delta_time, [(delta_time[(- 1)] + (10 * dt_thres))])) indx = (((buff_time[1:] - buff_time[0:(- 1)]) > dt_thres) | ((buff_icid[1:] - buff_icid[0:(- 1)]) != 0)).nonzero()[0] for ii in range((len(indx) - 1)): res['sequence'][indx[ii]:indx[(ii + 1)]] = ii return res
def sequence(self, band=None): "\n Returns sequence number for each unique measurement based on ICID\n and delta_time\n\n Parameters\n ----------\n band : None or {'1', '2', '3', ..., '8'}\n Select one of the band present in the product\n Default is 'None' which returns the first available band\n\n Returns\n -------\n out : array-like\n Numpy rec-array with sequence number, ICID and delta-time\n " if (self.__msm_path is None): return None if ((band is None) or (len(band) > 1)): band = self.bands[0] msm_path = self.__msm_path.replace('%', band) grp = self.fid[str(PurePosixPath(msm_path, 'INSTRUMENT'))] icid_list = np.squeeze(grp['instrument_configuration']['ic_id']) master_cycle = grp['instrument_settings']['master_cycle_period_us'][0] master_cycle /= 1000 grp = self.fid[str(PurePosixPath(msm_path, 'OBSERVATIONS'))] delta_time = np.squeeze(grp['delta_time']) length = delta_time.size res = np.empty((length,), dtype=[('sequence', 'u2'), ('icid', 'u2'), ('delta_time', 'u4'), ('index', 'u4')]) res['sequence'] = [0] res['icid'] = icid_list res['delta_time'] = delta_time res['index'] = np.arange(length, dtype=np.uint32) if (length == 1): return res buff_icid = np.concatenate(([(icid_list[0] - 10)], icid_list, [(icid_list[(- 1)] + 10)])) dt_thres = (10 * master_cycle) buff_time = np.concatenate(([(delta_time[0] - (10 * dt_thres))], delta_time, [(delta_time[(- 1)] + (10 * dt_thres))])) indx = (((buff_time[1:] - buff_time[0:(- 1)]) > dt_thres) | ((buff_icid[1:] - buff_icid[0:(- 1)]) != 0)).nonzero()[0] for ii in range((len(indx) - 1)): res['sequence'][indx[ii]:indx[(ii + 1)]] = ii return res<|docstring|>Returns sequence number for each unique measurement based on ICID and delta_time Parameters ---------- band : None or {'1', '2', '3', ..., '8'} Select one of the band present in the product Default is 'None' which returns the first available band Returns ------- out : array-like Numpy rec-array with sequence number, ICID and delta-time<|endoftext|>
fe0960439be4e1a41c8baa785b4e929b5ee178f30eed6103587918887de63f3b
def get_ref_time(self, band=None):
    """
    Return the reference start time of the measurements.

    Parameters
    ----------
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product.
        Default is None, which selects the first available band.
    """
    if self.__msm_path is None:
        return None
    if band is None:
        band = self.bands[0]

    obs_path = str(PurePosixPath(self.__msm_path.replace('%', band),
                                 'OBSERVATIONS'))
    grp = self.fid[obs_path]
    # 'time' holds seconds since the TROPOMI reference epoch 2010-01-01.
    ref_epoch = datetime(2010, 1, 1, 0, 0, 0)
    return ref_epoch + timedelta(seconds=int(grp['time'][0]))
Returns reference start time of measurements Parameters ---------- band : None or {'1', '2', '3', ..., '8'} Select one of the band present in the product. Default is 'None' which returns the first available band
src/pys5p/l1b_io.py
get_ref_time
rmvanhees/pys5p
10
python
def get_ref_time(self, band=None): "\n Returns reference start time of measurements\n\n Parameters\n ----------\n band : None or {'1', '2', '3', ..., '8'}\n Select one of the band present in the product.\n Default is 'None' which returns the first available band\n " if (self.__msm_path is None): return None if (band is None): band = self.bands[0] msm_path = self.__msm_path.replace('%', band) grp = self.fid[str(PurePosixPath(msm_path, 'OBSERVATIONS'))] return (datetime(2010, 1, 1, 0, 0, 0) + timedelta(seconds=int(grp['time'][0])))
def get_ref_time(self, band=None): "\n Returns reference start time of measurements\n\n Parameters\n ----------\n band : None or {'1', '2', '3', ..., '8'}\n Select one of the band present in the product.\n Default is 'None' which returns the first available band\n " if (self.__msm_path is None): return None if (band is None): band = self.bands[0] msm_path = self.__msm_path.replace('%', band) grp = self.fid[str(PurePosixPath(msm_path, 'OBSERVATIONS'))] return (datetime(2010, 1, 1, 0, 0, 0) + timedelta(seconds=int(grp['time'][0])))<|docstring|>Returns reference start time of measurements Parameters ---------- band : None or {'1', '2', '3', ..., '8'} Select one of the band present in the product. Default is 'None' which returns the first available band<|endoftext|>
efb3f5e024dd14977d5dbbe9f77eb64afba37b72fdd91a80a94a7cca64cfa4fc
def get_delta_time(self, band=None):
    """
    Return the offsets from the reference start time of the measurement.

    Parameters
    ----------
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product.
        Default is None, which selects the first available band.
    """
    if self.__msm_path is None:
        return None
    if band is None:
        band = self.bands[0]

    obs_path = str(PurePosixPath(self.__msm_path.replace('%', band),
                                 'OBSERVATIONS'))
    grp = self.fid[obs_path]
    # First row of the 2-D delta_time dataset, cast to integer.
    return grp['delta_time'][0, :].astype(int)
Returns offset from the reference start time of measurement Parameters ---------- band : None or {'1', '2', '3', ..., '8'} Select one of the band present in the product. Default is 'None' which returns the first available band
src/pys5p/l1b_io.py
get_delta_time
rmvanhees/pys5p
10
python
def get_delta_time(self, band=None): "\n Returns offset from the reference start time of measurement\n\n Parameters\n ----------\n band : None or {'1', '2', '3', ..., '8'}\n Select one of the band present in the product.\n Default is 'None' which returns the first available band\n " if (self.__msm_path is None): return None if (band is None): band = self.bands[0] msm_path = self.__msm_path.replace('%', band) grp = self.fid[str(PurePosixPath(msm_path, 'OBSERVATIONS'))] return grp['delta_time'][(0, :)].astype(int)
def get_delta_time(self, band=None): "\n Returns offset from the reference start time of measurement\n\n Parameters\n ----------\n band : None or {'1', '2', '3', ..., '8'}\n Select one of the band present in the product.\n Default is 'None' which returns the first available band\n " if (self.__msm_path is None): return None if (band is None): band = self.bands[0] msm_path = self.__msm_path.replace('%', band) grp = self.fid[str(PurePosixPath(msm_path, 'OBSERVATIONS'))] return grp['delta_time'][(0, :)].astype(int)<|docstring|>Returns offset from the reference start time of measurement Parameters ---------- band : None or {'1', '2', '3', ..., '8'} Select one of the band present in the product. Default is 'None' which returns the first available band<|endoftext|>
f79acc6afa394a1952ddc401188623f6cd0ffe54d3795162873dc110a695516b
def get_instrument_settings(self, band=None):
    """
    Return the instrument settings of the measurement.

    Parameters
    ----------
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product.
        Default is None, which selects the first available band.
    """
    if self.__msm_path is None:
        return None
    if band is None:
        band = self.bands[0]

    instr_path = str(PurePosixPath(self.__msm_path.replace('%', band),
                                   'INSTRUMENT'))
    dset = self.fid[instr_path]['instrument_settings']
    # Read the dataset directly into a pre-allocated structured array.
    buff = np.empty(dset.shape, dtype=dset.dtype)
    dset.read_direct(buff)
    return buff
Returns instrument settings of measurement Parameters ---------- band : None or {'1', '2', '3', ..., '8'} Select one of the band present in the product. Default is 'None' which returns the first available band
src/pys5p/l1b_io.py
get_instrument_settings
rmvanhees/pys5p
10
python
def get_instrument_settings(self, band=None): "\n Returns instrument settings of measurement\n\n Parameters\n ----------\n band : None or {'1', '2', '3', ..., '8'}\n Select one of the band present in the product.\n Default is 'None' which returns the first available band\n " if (self.__msm_path is None): return None if (band is None): band = self.bands[0] msm_path = self.__msm_path.replace('%', band) grp = self.fid[str(PurePosixPath(msm_path, 'INSTRUMENT'))] instr = np.empty(grp['instrument_settings'].shape, dtype=grp['instrument_settings'].dtype) grp['instrument_settings'].read_direct(instr) return instr
def get_instrument_settings(self, band=None): "\n Returns instrument settings of measurement\n\n Parameters\n ----------\n band : None or {'1', '2', '3', ..., '8'}\n Select one of the band present in the product.\n Default is 'None' which returns the first available band\n " if (self.__msm_path is None): return None if (band is None): band = self.bands[0] msm_path = self.__msm_path.replace('%', band) grp = self.fid[str(PurePosixPath(msm_path, 'INSTRUMENT'))] instr = np.empty(grp['instrument_settings'].shape, dtype=grp['instrument_settings'].dtype) grp['instrument_settings'].read_direct(instr) return instr<|docstring|>Returns instrument settings of measurement Parameters ---------- band : None or {'1', '2', '3', ..., '8'} Select one of the band present in the product. Default is 'None' which returns the first available band<|endoftext|>
69a8ac02239d33a6380b1952e4ba67af932a5c4acf6542ea391298ad47b018a8
def get_exposure_time(self, band=None):
    """
    Return the pixel exposure time of the measurements; for SWIR it is
    calculated from the parameters 'int_delay' and 'int_hold'.

    Parameters
    ----------
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product.
        Default is None, which selects the first available band.
    """
    if band is None:
        band = self.bands[0]

    settings = self.get_instrument_settings(band)
    if int(band) < 7:
        # UVN detectors store the exposure time directly.
        return [entry['exposure_time'] for entry in settings]

    # SWIR (bands 7/8): derive exposure time from detector timing.
    return [swir_exp_time(entry['int_delay'], entry['int_hold'])
            for entry in settings]
Returns pixel exposure time of the measurements, which is calculated from the parameters 'int_delay' and 'int_hold' for SWIR. Parameters ---------- band : None or {'1', '2', '3', ..., '8'} Select one of the band present in the product Default is 'None' which returns the first available band
src/pys5p/l1b_io.py
get_exposure_time
rmvanhees/pys5p
10
python
def get_exposure_time(self, band=None): "\n Returns pixel exposure time of the measurements, which is calculated\n from the parameters 'int_delay' and 'int_hold' for SWIR.\n\n Parameters\n ----------\n band : None or {'1', '2', '3', ..., '8'}\n Select one of the band present in the product\n Default is 'None' which returns the first available band\n " if (band is None): band = self.bands[0] instr_arr = self.get_instrument_settings(band) if (int(band) < 7): return [instr['exposure_time'] for instr in instr_arr] return [swir_exp_time(instr['int_delay'], instr['int_hold']) for instr in instr_arr]
def get_exposure_time(self, band=None): "\n Returns pixel exposure time of the measurements, which is calculated\n from the parameters 'int_delay' and 'int_hold' for SWIR.\n\n Parameters\n ----------\n band : None or {'1', '2', '3', ..., '8'}\n Select one of the band present in the product\n Default is 'None' which returns the first available band\n " if (band is None): band = self.bands[0] instr_arr = self.get_instrument_settings(band) if (int(band) < 7): return [instr['exposure_time'] for instr in instr_arr] return [swir_exp_time(instr['int_delay'], instr['int_hold']) for instr in instr_arr]<|docstring|>Returns pixel exposure time of the measurements, which is calculated from the parameters 'int_delay' and 'int_hold' for SWIR. Parameters ---------- band : None or {'1', '2', '3', ..., '8'} Select one of the band present in the product Default is 'None' which returns the first available band<|endoftext|>
ea8a27cbe3639e59b8eaee83bb5478a78472447a9f058113b977812466e52352
def get_housekeeping_data(self, band=None):
    """
    Return the housekeeping data of the measurements.

    Parameters
    ----------
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product.
        Default is None, which selects the first available band.
    """
    if self.__msm_path is None:
        return None
    if band is None:
        band = self.bands[0]

    instr_path = str(PurePosixPath(self.__msm_path.replace('%', band),
                                   'INSTRUMENT'))
    return np.squeeze(self.fid[instr_path]['housekeeping_data'])
Returns housekeeping data of measurements Parameters ---------- band : None or {'1', '2', '3', ..., '8'} Select one of the band present in the product Default is 'None' which returns the first available band
src/pys5p/l1b_io.py
get_housekeeping_data
rmvanhees/pys5p
10
python
def get_housekeeping_data(self, band=None): "\n Returns housekeeping data of measurements\n\n Parameters\n ----------\n band : None or {'1', '2', '3', ..., '8'}\n Select one of the band present in the product\n Default is 'None' which returns the first available band\n " if (self.__msm_path is None): return None if (band is None): band = self.bands[0] msm_path = self.__msm_path.replace('%', band) grp = self.fid[str(PurePosixPath(msm_path, 'INSTRUMENT'))] return np.squeeze(grp['housekeeping_data'])
def get_housekeeping_data(self, band=None): "\n Returns housekeeping data of measurements\n\n Parameters\n ----------\n band : None or {'1', '2', '3', ..., '8'}\n Select one of the band present in the product\n Default is 'None' which returns the first available band\n " if (self.__msm_path is None): return None if (band is None): band = self.bands[0] msm_path = self.__msm_path.replace('%', band) grp = self.fid[str(PurePosixPath(msm_path, 'INSTRUMENT'))] return np.squeeze(grp['housekeeping_data'])<|docstring|>Returns housekeeping data of measurements Parameters ---------- band : None or {'1', '2', '3', ..., '8'} Select one of the band present in the product Default is 'None' which returns the first available band<|endoftext|>