repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
raamana/pyradigm
pyradigm/pyradigm.py
cli_run
def cli_run(): """ Command line interface This interface saves you coding effort to: - display basic info (classes, sizes etc) about datasets - display meta data (class membership) for samples - perform basic arithmetic (add multiple classes or feature sets) """ path_list, meta_requested, summary_requested, add_path_list, out_path = parse_args() # printing info if requested if path_list: for ds_path in path_list: ds = MLDataset(ds_path) if summary_requested: print_info(ds, ds_path) if meta_requested: print_meta(ds, ds_path) # combining datasets if add_path_list: combine_and_save(add_path_list, out_path) return
python
def cli_run(): """ Command line interface This interface saves you coding effort to: - display basic info (classes, sizes etc) about datasets - display meta data (class membership) for samples - perform basic arithmetic (add multiple classes or feature sets) """ path_list, meta_requested, summary_requested, add_path_list, out_path = parse_args() # printing info if requested if path_list: for ds_path in path_list: ds = MLDataset(ds_path) if summary_requested: print_info(ds, ds_path) if meta_requested: print_meta(ds, ds_path) # combining datasets if add_path_list: combine_and_save(add_path_list, out_path) return
[ "def", "cli_run", "(", ")", ":", "path_list", ",", "meta_requested", ",", "summary_requested", ",", "add_path_list", ",", "out_path", "=", "parse_args", "(", ")", "# printing info if requested", "if", "path_list", ":", "for", "ds_path", "in", "path_list", ":", "...
Command line interface This interface saves you coding effort to: - display basic info (classes, sizes etc) about datasets - display meta data (class membership) for samples - perform basic arithmetic (add multiple classes or feature sets)
[ "Command", "line", "interface" ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1576-L1604
raamana/pyradigm
pyradigm/pyradigm.py
print_info
def print_info(ds, ds_path=None): "Prints basic summary of a given dataset." if ds_path is None: bname = '' else: bname = basename(ds_path) dashes = '-' * len(bname) print('\n{}\n{}\n{:full}'.format(dashes, bname, ds)) return
python
def print_info(ds, ds_path=None): "Prints basic summary of a given dataset." if ds_path is None: bname = '' else: bname = basename(ds_path) dashes = '-' * len(bname) print('\n{}\n{}\n{:full}'.format(dashes, bname, ds)) return
[ "def", "print_info", "(", "ds", ",", "ds_path", "=", "None", ")", ":", "if", "ds_path", "is", "None", ":", "bname", "=", "''", "else", ":", "bname", "=", "basename", "(", "ds_path", ")", "dashes", "=", "'-'", "*", "len", "(", "bname", ")", "print",...
Prints basic summary of a given dataset.
[ "Prints", "basic", "summary", "of", "a", "given", "dataset", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1607-L1618
raamana/pyradigm
pyradigm/pyradigm.py
print_meta
def print_meta(ds, ds_path=None): "Prints meta data for subjects in given dataset." print('\n#' + ds_path) for sub, cls in ds.classes.items(): print('{},{}'.format(sub, cls)) return
python
def print_meta(ds, ds_path=None): "Prints meta data for subjects in given dataset." print('\n#' + ds_path) for sub, cls in ds.classes.items(): print('{},{}'.format(sub, cls)) return
[ "def", "print_meta", "(", "ds", ",", "ds_path", "=", "None", ")", ":", "print", "(", "'\\n#'", "+", "ds_path", ")", "for", "sub", ",", "cls", "in", "ds", ".", "classes", ".", "items", "(", ")", ":", "print", "(", "'{},{}'", ".", "format", "(", "s...
Prints meta data for subjects in given dataset.
[ "Prints", "meta", "data", "for", "subjects", "in", "given", "dataset", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1621-L1628
raamana/pyradigm
pyradigm/pyradigm.py
combine_and_save
def combine_and_save(add_path_list, out_path): """ Combines whatever datasets that can be combined, and save the bigger dataset to a given location. """ add_path_list = list(add_path_list) # first one! first_ds_path = add_path_list[0] print('Starting with {}'.format(first_ds_path)) combined = MLDataset(first_ds_path) for ds_path in add_path_list[1:]: try: combined = combined + MLDataset(ds_path) except: print(' Failed to add {}'.format(ds_path)) traceback.print_exc() else: print('Successfully added {}'.format(ds_path)) combined.save(out_path) return
python
def combine_and_save(add_path_list, out_path): """ Combines whatever datasets that can be combined, and save the bigger dataset to a given location. """ add_path_list = list(add_path_list) # first one! first_ds_path = add_path_list[0] print('Starting with {}'.format(first_ds_path)) combined = MLDataset(first_ds_path) for ds_path in add_path_list[1:]: try: combined = combined + MLDataset(ds_path) except: print(' Failed to add {}'.format(ds_path)) traceback.print_exc() else: print('Successfully added {}'.format(ds_path)) combined.save(out_path) return
[ "def", "combine_and_save", "(", "add_path_list", ",", "out_path", ")", ":", "add_path_list", "=", "list", "(", "add_path_list", ")", "# first one!", "first_ds_path", "=", "add_path_list", "[", "0", "]", "print", "(", "'Starting with {}'", ".", "format", "(", "fi...
Combines whatever datasets that can be combined, and save the bigger dataset to a given location.
[ "Combines", "whatever", "datasets", "that", "can", "be", "combined", "and", "save", "the", "bigger", "dataset", "to", "a", "given", "location", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1631-L1653
raamana/pyradigm
pyradigm/pyradigm.py
get_parser
def get_parser(): """Argument specifier. """ parser = argparse.ArgumentParser(prog='pyradigm') parser.add_argument('path_list', nargs='*', action='store', default=None, help='List of paths to display info about.') parser.add_argument('-m', '--meta', action='store_true', dest='meta_requested', required=False, default=False, help='Prints the meta data (subject_id,class).') parser.add_argument('-i', '--info', action='store_true', dest='summary_requested', required=False, default=False, help='Prints summary info (classes, #samples, #features).') arithmetic_group = parser.add_argument_group('Options for multiple datasets') arithmetic_group.add_argument('-a', '--add', nargs='+', action='store', dest='add_path_list', required=False, default=None, help='List of MLDatasets to combine') arithmetic_group.add_argument('-o', '--out_path', action='store', dest='out_path', required=False, default=None, help='Output path to save the resulting dataset.') return parser
python
def get_parser(): """Argument specifier. """ parser = argparse.ArgumentParser(prog='pyradigm') parser.add_argument('path_list', nargs='*', action='store', default=None, help='List of paths to display info about.') parser.add_argument('-m', '--meta', action='store_true', dest='meta_requested', required=False, default=False, help='Prints the meta data (subject_id,class).') parser.add_argument('-i', '--info', action='store_true', dest='summary_requested', required=False, default=False, help='Prints summary info (classes, #samples, #features).') arithmetic_group = parser.add_argument_group('Options for multiple datasets') arithmetic_group.add_argument('-a', '--add', nargs='+', action='store', dest='add_path_list', required=False, default=None, help='List of MLDatasets to combine') arithmetic_group.add_argument('-o', '--out_path', action='store', dest='out_path', required=False, default=None, help='Output path to save the resulting dataset.') return parser
[ "def", "get_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'pyradigm'", ")", "parser", ".", "add_argument", "(", "'path_list'", ",", "nargs", "=", "'*'", ",", "action", "=", "'store'", ",", "default", "=", "N...
Argument specifier.
[ "Argument", "specifier", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1656-L1686
raamana/pyradigm
pyradigm/pyradigm.py
parse_args
def parse_args(): """Arg parser. """ parser = get_parser() if len(sys.argv) < 2: parser.print_help() logging.warning('Too few arguments!') parser.exit(1) # parsing try: params = parser.parse_args() except Exception as exc: print(exc) raise ValueError('Unable to parse command-line arguments.') path_list = list() if params.path_list is not None: for dpath in params.path_list: if pexists(dpath): path_list.append(realpath(dpath)) else: print('Below dataset does not exist. Ignoring it.\n{}'.format(dpath)) add_path_list = list() out_path = None if params.add_path_list is not None: for dpath in params.add_path_list: if pexists(dpath): add_path_list.append(realpath(dpath)) else: print('Below dataset does not exist. Ignoring it.\n{}'.format(dpath)) if params.out_path is None: raise ValueError( 'Output path must be specified to save the combined dataset to') out_path = realpath(params.out_path) parent_dir = dirname(out_path) if not pexists(parent_dir): os.mkdir(parent_dir) if len(add_path_list) < 2: raise ValueError('Need a minimum of datasets to combine!!') # removing duplicates (from regex etc) path_list = set(path_list) add_path_list = set(add_path_list) return path_list, params.meta_requested, params.summary_requested, \ add_path_list, out_path
python
def parse_args(): """Arg parser. """ parser = get_parser() if len(sys.argv) < 2: parser.print_help() logging.warning('Too few arguments!') parser.exit(1) # parsing try: params = parser.parse_args() except Exception as exc: print(exc) raise ValueError('Unable to parse command-line arguments.') path_list = list() if params.path_list is not None: for dpath in params.path_list: if pexists(dpath): path_list.append(realpath(dpath)) else: print('Below dataset does not exist. Ignoring it.\n{}'.format(dpath)) add_path_list = list() out_path = None if params.add_path_list is not None: for dpath in params.add_path_list: if pexists(dpath): add_path_list.append(realpath(dpath)) else: print('Below dataset does not exist. Ignoring it.\n{}'.format(dpath)) if params.out_path is None: raise ValueError( 'Output path must be specified to save the combined dataset to') out_path = realpath(params.out_path) parent_dir = dirname(out_path) if not pexists(parent_dir): os.mkdir(parent_dir) if len(add_path_list) < 2: raise ValueError('Need a minimum of datasets to combine!!') # removing duplicates (from regex etc) path_list = set(path_list) add_path_list = set(add_path_list) return path_list, params.meta_requested, params.summary_requested, \ add_path_list, out_path
[ "def", "parse_args", "(", ")", ":", "parser", "=", "get_parser", "(", ")", "if", "len", "(", "sys", ".", "argv", ")", "<", "2", ":", "parser", ".", "print_help", "(", ")", "logging", ".", "warning", "(", "'Too few arguments!'", ")", "parser", ".", "e...
Arg parser.
[ "Arg", "parser", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1689-L1742
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.data_and_labels
def data_and_labels(self): """ Dataset features and labels in a matrix form for learning. Also returns sample_ids in the same order. Returns ------- data_matrix : ndarray 2D array of shape [num_samples, num_features] with features corresponding row-wise to sample_ids labels : ndarray Array of numeric labels for each sample corresponding row-wise to sample_ids sample_ids : list List of sample ids """ sample_ids = np.array(self.keys) label_dict = self.labels matrix = np.full([self.num_samples, self.num_features], np.nan) labels = np.full([self.num_samples, 1], np.nan) for ix, sample in enumerate(sample_ids): matrix[ix, :] = self.__data[sample] labels[ix] = label_dict[sample] return matrix, np.ravel(labels), sample_ids
python
def data_and_labels(self): """ Dataset features and labels in a matrix form for learning. Also returns sample_ids in the same order. Returns ------- data_matrix : ndarray 2D array of shape [num_samples, num_features] with features corresponding row-wise to sample_ids labels : ndarray Array of numeric labels for each sample corresponding row-wise to sample_ids sample_ids : list List of sample ids """ sample_ids = np.array(self.keys) label_dict = self.labels matrix = np.full([self.num_samples, self.num_features], np.nan) labels = np.full([self.num_samples, 1], np.nan) for ix, sample in enumerate(sample_ids): matrix[ix, :] = self.__data[sample] labels[ix] = label_dict[sample] return matrix, np.ravel(labels), sample_ids
[ "def", "data_and_labels", "(", "self", ")", ":", "sample_ids", "=", "np", ".", "array", "(", "self", ".", "keys", ")", "label_dict", "=", "self", ".", "labels", "matrix", "=", "np", ".", "full", "(", "[", "self", ".", "num_samples", ",", "self", ".",...
Dataset features and labels in a matrix form for learning. Also returns sample_ids in the same order. Returns ------- data_matrix : ndarray 2D array of shape [num_samples, num_features] with features corresponding row-wise to sample_ids labels : ndarray Array of numeric labels for each sample corresponding row-wise to sample_ids sample_ids : list List of sample ids
[ "Dataset", "features", "and", "labels", "in", "a", "matrix", "form", "for", "learning", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L150-L176
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.data
def data(self, values, feature_names=None): """ Populates this dataset with the provided data. Usage of this method is discourage (unless you know what you are doing). Parameters ---------- values : dict dict of features keyed in by sample ids. feature_names : list of str New feature names for the new features, if available. Raises ------ ValueError If number of samples does not match the size of existing set, or If atleast one sample is not provided. """ if isinstance(values, dict): if self.__labels is not None and len(self.__labels) != len(values): raise ValueError( 'number of samples do not match the previously assigned labels') elif len(values) < 1: raise ValueError('There must be at least 1 sample in the dataset!') else: self.__data = values # update dimensionality # assuming all keys in dict have same len arrays self.__num_features = len(values[self.keys[0]]) if feature_names is None: self.__feature_names = self.__str_names(self.num_features) else: self.feature_names = feature_names else: raise ValueError('data input must be a dictionary!')
python
def data(self, values, feature_names=None): """ Populates this dataset with the provided data. Usage of this method is discourage (unless you know what you are doing). Parameters ---------- values : dict dict of features keyed in by sample ids. feature_names : list of str New feature names for the new features, if available. Raises ------ ValueError If number of samples does not match the size of existing set, or If atleast one sample is not provided. """ if isinstance(values, dict): if self.__labels is not None and len(self.__labels) != len(values): raise ValueError( 'number of samples do not match the previously assigned labels') elif len(values) < 1: raise ValueError('There must be at least 1 sample in the dataset!') else: self.__data = values # update dimensionality # assuming all keys in dict have same len arrays self.__num_features = len(values[self.keys[0]]) if feature_names is None: self.__feature_names = self.__str_names(self.num_features) else: self.feature_names = feature_names else: raise ValueError('data input must be a dictionary!')
[ "def", "data", "(", "self", ",", "values", ",", "feature_names", "=", "None", ")", ":", "if", "isinstance", "(", "values", ",", "dict", ")", ":", "if", "self", ".", "__labels", "is", "not", "None", "and", "len", "(", "self", ".", "__labels", ")", "...
Populates this dataset with the provided data. Usage of this method is discourage (unless you know what you are doing). Parameters ---------- values : dict dict of features keyed in by sample ids. feature_names : list of str New feature names for the new features, if available. Raises ------ ValueError If number of samples does not match the size of existing set, or If atleast one sample is not provided.
[ "Populates", "this", "dataset", "with", "the", "provided", "data", ".", "Usage", "of", "this", "method", "is", "discourage", "(", "unless", "you", "know", "what", "you", "are", "doing", ")", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L180-L217
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.labels
def labels(self, values): """Class labels (such as 1, 2, -1, 'A', 'B' etc.) for each sample in the dataset.""" if isinstance(values, dict): if self.__data is not None and len(self.__data) != len(values): raise ValueError( 'number of samples do not match the previously assigned data') elif set(self.keys) != set(list(values)): raise ValueError('sample ids do not match the previously assigned ids.') else: self.__labels = values else: raise ValueError('labels input must be a dictionary!')
python
def labels(self, values): """Class labels (such as 1, 2, -1, 'A', 'B' etc.) for each sample in the dataset.""" if isinstance(values, dict): if self.__data is not None and len(self.__data) != len(values): raise ValueError( 'number of samples do not match the previously assigned data') elif set(self.keys) != set(list(values)): raise ValueError('sample ids do not match the previously assigned ids.') else: self.__labels = values else: raise ValueError('labels input must be a dictionary!')
[ "def", "labels", "(", "self", ",", "values", ")", ":", "if", "isinstance", "(", "values", ",", "dict", ")", ":", "if", "self", ".", "__data", "is", "not", "None", "and", "len", "(", "self", ".", "__data", ")", "!=", "len", "(", "values", ")", ":"...
Class labels (such as 1, 2, -1, 'A', 'B' etc.) for each sample in the dataset.
[ "Class", "labels", "(", "such", "as", "1", "2", "-", "1", "A", "B", "etc", ".", ")", "for", "each", "sample", "in", "the", "dataset", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L229-L240
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.classes
def classes(self, values): """Classes setter.""" if isinstance(values, dict): if self.__data is not None and len(self.__data) != len(values): raise ValueError( 'number of samples do not match the previously assigned data') elif set(self.keys) != set(list(values)): raise ValueError('sample ids do not match the previously assigned ids.') else: self.__classes = values else: raise ValueError('classes input must be a dictionary!')
python
def classes(self, values): """Classes setter.""" if isinstance(values, dict): if self.__data is not None and len(self.__data) != len(values): raise ValueError( 'number of samples do not match the previously assigned data') elif set(self.keys) != set(list(values)): raise ValueError('sample ids do not match the previously assigned ids.') else: self.__classes = values else: raise ValueError('classes input must be a dictionary!')
[ "def", "classes", "(", "self", ",", "values", ")", ":", "if", "isinstance", "(", "values", ",", "dict", ")", ":", "if", "self", ".", "__data", "is", "not", "None", "and", "len", "(", "self", ".", "__data", ")", "!=", "len", "(", "values", ")", ":...
Classes setter.
[ "Classes", "setter", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L253-L264
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.feature_names
def feature_names(self, names): "Stores the text labels for features" if len(names) != self.num_features: raise ValueError("Number of names do not match the number of features!") if not isinstance(names, (Sequence, np.ndarray, np.generic)): raise ValueError("Input is not a sequence. " "Ensure names are in the same order " "and length as features.") self.__feature_names = np.array(names)
python
def feature_names(self, names): "Stores the text labels for features" if len(names) != self.num_features: raise ValueError("Number of names do not match the number of features!") if not isinstance(names, (Sequence, np.ndarray, np.generic)): raise ValueError("Input is not a sequence. " "Ensure names are in the same order " "and length as features.") self.__feature_names = np.array(names)
[ "def", "feature_names", "(", "self", ",", "names", ")", ":", "if", "len", "(", "names", ")", "!=", "self", ".", "num_features", ":", "raise", "ValueError", "(", "\"Number of names do not match the number of features!\"", ")", "if", "not", "isinstance", "(", "nam...
Stores the text labels for features
[ "Stores", "the", "text", "labels", "for", "features" ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L275-L285
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.glance
def glance(self, nitems=5): """Quick and partial glance of the data matrix. Parameters ---------- nitems : int Number of items to glance from the dataset. Default : 5 Returns ------- dict """ nitems = max([1, min([nitems, self.num_samples - 1])]) return self.__take(nitems, iter(self.__data.items()))
python
def glance(self, nitems=5): """Quick and partial glance of the data matrix. Parameters ---------- nitems : int Number of items to glance from the dataset. Default : 5 Returns ------- dict """ nitems = max([1, min([nitems, self.num_samples - 1])]) return self.__take(nitems, iter(self.__data.items()))
[ "def", "glance", "(", "self", ",", "nitems", "=", "5", ")", ":", "nitems", "=", "max", "(", "[", "1", ",", "min", "(", "[", "nitems", ",", "self", ".", "num_samples", "-", "1", "]", ")", "]", ")", "return", "self", ".", "__take", "(", "nitems",...
Quick and partial glance of the data matrix. Parameters ---------- nitems : int Number of items to glance from the dataset. Default : 5 Returns ------- dict
[ "Quick", "and", "partial", "glance", "of", "the", "data", "matrix", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L306-L321
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.summarize_classes
def summarize_classes(self): """ Summary of classes: names, numeric labels and sizes Returns ------- tuple : class_set, label_set, class_sizes class_set : list List of names of all the classes label_set : list Label for each class in class_set class_sizes : list Size of each class (number of samples) """ class_sizes = np.zeros(len(self.class_set)) for idx, cls in enumerate(self.class_set): class_sizes[idx] = self.class_sizes[cls] # TODO consider returning numeric label set e.g. for use in scikit-learn return self.class_set, self.label_set, class_sizes
python
def summarize_classes(self): """ Summary of classes: names, numeric labels and sizes Returns ------- tuple : class_set, label_set, class_sizes class_set : list List of names of all the classes label_set : list Label for each class in class_set class_sizes : list Size of each class (number of samples) """ class_sizes = np.zeros(len(self.class_set)) for idx, cls in enumerate(self.class_set): class_sizes[idx] = self.class_sizes[cls] # TODO consider returning numeric label set e.g. for use in scikit-learn return self.class_set, self.label_set, class_sizes
[ "def", "summarize_classes", "(", "self", ")", ":", "class_sizes", "=", "np", ".", "zeros", "(", "len", "(", "self", ".", "class_set", ")", ")", "for", "idx", ",", "cls", "in", "enumerate", "(", "self", ".", "class_set", ")", ":", "class_sizes", "[", ...
Summary of classes: names, numeric labels and sizes Returns ------- tuple : class_set, label_set, class_sizes class_set : list List of names of all the classes label_set : list Label for each class in class_set class_sizes : list Size of each class (number of samples)
[ "Summary", "of", "classes", ":", "names", "numeric", "labels", "and", "sizes" ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L324-L346
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.check_features
def check_features(self, features): """ Method to ensure data to be added is not empty and vectorized. Parameters ---------- features : iterable Any data that can be converted to a numpy array. Returns ------- features : numpy array Flattened non-empty numpy array. Raises ------ ValueError If input data is empty. """ if not isinstance(features, np.ndarray): features = np.asarray(features) if features.size <= 0: raise ValueError('provided features are empty.') if features.ndim > 1: features = np.ravel(features) return features
python
def check_features(self, features): """ Method to ensure data to be added is not empty and vectorized. Parameters ---------- features : iterable Any data that can be converted to a numpy array. Returns ------- features : numpy array Flattened non-empty numpy array. Raises ------ ValueError If input data is empty. """ if not isinstance(features, np.ndarray): features = np.asarray(features) if features.size <= 0: raise ValueError('provided features are empty.') if features.ndim > 1: features = np.ravel(features) return features
[ "def", "check_features", "(", "self", ",", "features", ")", ":", "if", "not", "isinstance", "(", "features", ",", "np", ".", "ndarray", ")", ":", "features", "=", "np", ".", "asarray", "(", "features", ")", "if", "features", ".", "size", "<=", "0", "...
Method to ensure data to be added is not empty and vectorized. Parameters ---------- features : iterable Any data that can be converted to a numpy array. Returns ------- features : numpy array Flattened non-empty numpy array. Raises ------ ValueError If input data is empty.
[ "Method", "to", "ensure", "data", "to", "be", "added", "is", "not", "empty", "and", "vectorized", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L350-L379
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.add_sample
def add_sample(self, sample_id, features, label, class_id=None, overwrite=False, feature_names=None): """Adds a new sample to the dataset with its features, label and class ID. This is the preferred way to construct the dataset. Parameters ---------- sample_id : str, int The identifier that uniquely identifies this sample. features : list, ndarray The features for this sample label : int, str The label for this sample class_id : int, str The class for this sample. If not provided, label converted to a string becomes its ID. overwrite : bool If True, allows the overwite of features for an existing subject ID. Default : False. feature_names : list The names for each feature. Assumed to be in the same order as `features` Raises ------ ValueError If `sample_id` is already in the MLDataset (and overwrite=False), or If dimensionality of the current sample does not match the current, or If `feature_names` do not match existing names TypeError If sample to be added is of different data type compared to existing samples. """ if sample_id in self.__data and not overwrite: raise ValueError('{} already exists in this dataset!'.format(sample_id)) # ensuring there is always a class name, even when not provided by the user. # this is needed, in order for __str__ method to work. # TODO consider enforcing label to be numeric and class_id to be string # so portability with other packages is more uniform e.g. 
for use in scikit-learn if class_id is None: class_id = str(label) features = self.check_features(features) if self.num_samples <= 0: self.__data[sample_id] = features self.__labels[sample_id] = label self.__classes[sample_id] = class_id self.__dtype = type(features) self.__num_features = features.size if isinstance(features, np.ndarray) else len( features) if feature_names is None: self.__feature_names = self.__str_names(self.num_features) else: if self.__num_features != features.size: raise ValueError('dimensionality of this sample ({}) ' 'does not match existing samples ({})' ''.format(features.size, self.__num_features)) if not isinstance(features, self.__dtype): raise TypeError("Mismatched dtype. Provide {}".format(self.__dtype)) self.__data[sample_id] = features self.__labels[sample_id] = label self.__classes[sample_id] = class_id if feature_names is not None: # if it was never set, allow it # class gets here when adding the first sample, # after dataset was initialized with empty constructor if self.__feature_names is None: self.__feature_names = np.array(feature_names) else: # if set already, ensure a match if not np.array_equal(self.feature_names, np.array(feature_names)): raise ValueError( "supplied feature names do not match the existing names!")
python
def add_sample(self, sample_id, features, label, class_id=None, overwrite=False, feature_names=None): """Adds a new sample to the dataset with its features, label and class ID. This is the preferred way to construct the dataset. Parameters ---------- sample_id : str, int The identifier that uniquely identifies this sample. features : list, ndarray The features for this sample label : int, str The label for this sample class_id : int, str The class for this sample. If not provided, label converted to a string becomes its ID. overwrite : bool If True, allows the overwite of features for an existing subject ID. Default : False. feature_names : list The names for each feature. Assumed to be in the same order as `features` Raises ------ ValueError If `sample_id` is already in the MLDataset (and overwrite=False), or If dimensionality of the current sample does not match the current, or If `feature_names` do not match existing names TypeError If sample to be added is of different data type compared to existing samples. """ if sample_id in self.__data and not overwrite: raise ValueError('{} already exists in this dataset!'.format(sample_id)) # ensuring there is always a class name, even when not provided by the user. # this is needed, in order for __str__ method to work. # TODO consider enforcing label to be numeric and class_id to be string # so portability with other packages is more uniform e.g. 
for use in scikit-learn if class_id is None: class_id = str(label) features = self.check_features(features) if self.num_samples <= 0: self.__data[sample_id] = features self.__labels[sample_id] = label self.__classes[sample_id] = class_id self.__dtype = type(features) self.__num_features = features.size if isinstance(features, np.ndarray) else len( features) if feature_names is None: self.__feature_names = self.__str_names(self.num_features) else: if self.__num_features != features.size: raise ValueError('dimensionality of this sample ({}) ' 'does not match existing samples ({})' ''.format(features.size, self.__num_features)) if not isinstance(features, self.__dtype): raise TypeError("Mismatched dtype. Provide {}".format(self.__dtype)) self.__data[sample_id] = features self.__labels[sample_id] = label self.__classes[sample_id] = class_id if feature_names is not None: # if it was never set, allow it # class gets here when adding the first sample, # after dataset was initialized with empty constructor if self.__feature_names is None: self.__feature_names = np.array(feature_names) else: # if set already, ensure a match if not np.array_equal(self.feature_names, np.array(feature_names)): raise ValueError( "supplied feature names do not match the existing names!")
[ "def", "add_sample", "(", "self", ",", "sample_id", ",", "features", ",", "label", ",", "class_id", "=", "None", ",", "overwrite", "=", "False", ",", "feature_names", "=", "None", ")", ":", "if", "sample_id", "in", "self", ".", "__data", "and", "not", ...
Adds a new sample to the dataset with its features, label and class ID. This is the preferred way to construct the dataset. Parameters ---------- sample_id : str, int The identifier that uniquely identifies this sample. features : list, ndarray The features for this sample label : int, str The label for this sample class_id : int, str The class for this sample. If not provided, label converted to a string becomes its ID. overwrite : bool If True, allows the overwite of features for an existing subject ID. Default : False. feature_names : list The names for each feature. Assumed to be in the same order as `features` Raises ------ ValueError If `sample_id` is already in the MLDataset (and overwrite=False), or If dimensionality of the current sample does not match the current, or If `feature_names` do not match existing names TypeError If sample to be added is of different data type compared to existing samples.
[ "Adds", "a", "new", "sample", "to", "the", "dataset", "with", "its", "features", "label", "and", "class", "ID", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L383-L461
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.del_sample
def del_sample(self, sample_id): """ Method to remove a sample from the dataset. Parameters ---------- sample_id : str sample id to be removed. Raises ------ UserWarning If sample id to delete was not found in the dataset. """ if sample_id not in self.__data: warnings.warn('Sample to delete not found in the dataset - nothing to do.') else: self.__data.pop(sample_id) self.__classes.pop(sample_id) self.__labels.pop(sample_id) print('{} removed.'.format(sample_id))
python
def del_sample(self, sample_id): """ Method to remove a sample from the dataset. Parameters ---------- sample_id : str sample id to be removed. Raises ------ UserWarning If sample id to delete was not found in the dataset. """ if sample_id not in self.__data: warnings.warn('Sample to delete not found in the dataset - nothing to do.') else: self.__data.pop(sample_id) self.__classes.pop(sample_id) self.__labels.pop(sample_id) print('{} removed.'.format(sample_id))
[ "def", "del_sample", "(", "self", ",", "sample_id", ")", ":", "if", "sample_id", "not", "in", "self", ".", "__data", ":", "warnings", ".", "warn", "(", "'Sample to delete not found in the dataset - nothing to do.'", ")", "else", ":", "self", ".", "__data", ".", ...
Method to remove a sample from the dataset. Parameters ---------- sample_id : str sample id to be removed. Raises ------ UserWarning If sample id to delete was not found in the dataset.
[ "Method", "to", "remove", "a", "sample", "from", "the", "dataset", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L464-L485
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.get_feature_subset
def get_feature_subset(self, subset_idx): """ Returns the subset of features indexed numerically. Parameters ---------- subset_idx : list, ndarray List of indices to features to be returned Returns ------- MLDataset : MLDataset with subset of features requested. Raises ------ UnboundLocalError If input indices are out of bounds for the dataset. """ subset_idx = np.asarray(subset_idx) if not (max(subset_idx) < self.__num_features) and (min(subset_idx) >= 0): raise UnboundLocalError('indices out of range for the dataset. ' 'Max index: {} Min index : 0'.format( self.__num_features)) sub_data = {sample: features[subset_idx] for sample, features in self.__data.items()} new_descr = 'Subset features derived from: \n ' + self.__description subdataset = MLDataset(data=sub_data, labels=self.__labels, classes=self.__classes, description=new_descr, feature_names=self.__feature_names[subset_idx]) return subdataset
python
def get_feature_subset(self, subset_idx): """ Returns the subset of features indexed numerically. Parameters ---------- subset_idx : list, ndarray List of indices to features to be returned Returns ------- MLDataset : MLDataset with subset of features requested. Raises ------ UnboundLocalError If input indices are out of bounds for the dataset. """ subset_idx = np.asarray(subset_idx) if not (max(subset_idx) < self.__num_features) and (min(subset_idx) >= 0): raise UnboundLocalError('indices out of range for the dataset. ' 'Max index: {} Min index : 0'.format( self.__num_features)) sub_data = {sample: features[subset_idx] for sample, features in self.__data.items()} new_descr = 'Subset features derived from: \n ' + self.__description subdataset = MLDataset(data=sub_data, labels=self.__labels, classes=self.__classes, description=new_descr, feature_names=self.__feature_names[subset_idx]) return subdataset
[ "def", "get_feature_subset", "(", "self", ",", "subset_idx", ")", ":", "subset_idx", "=", "np", ".", "asarray", "(", "subset_idx", ")", "if", "not", "(", "max", "(", "subset_idx", ")", "<", "self", ".", "__num_features", ")", "and", "(", "min", "(", "s...
Returns the subset of features indexed numerically. Parameters ---------- subset_idx : list, ndarray List of indices to features to be returned Returns ------- MLDataset : MLDataset with subset of features requested. Raises ------ UnboundLocalError If input indices are out of bounds for the dataset.
[ "Returns", "the", "subset", "of", "features", "indexed", "numerically", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L488-L523
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.keys_with_value
def keys_with_value(dictionary, value): "Returns a subset of keys from the dict with the value supplied." subset = [key for key in dictionary if dictionary[key] == value] return subset
python
def keys_with_value(dictionary, value): "Returns a subset of keys from the dict with the value supplied." subset = [key for key in dictionary if dictionary[key] == value] return subset
[ "def", "keys_with_value", "(", "dictionary", ",", "value", ")", ":", "subset", "=", "[", "key", "for", "key", "in", "dictionary", "if", "dictionary", "[", "key", "]", "==", "value", "]", "return", "subset" ]
Returns a subset of keys from the dict with the value supplied.
[ "Returns", "a", "subset", "of", "keys", "from", "the", "dict", "with", "the", "value", "supplied", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L527-L532
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.get_class
def get_class(self, class_id): """ Returns a smaller dataset belonging to the requested classes. Parameters ---------- class_id : str or list identifier(s) of the class(es) to be returned. Returns ------- MLDataset With subset of samples belonging to the given class(es). Raises ------ ValueError If one or more of the requested classes do not exist in this dataset. If the specified id is empty or None """ if class_id in [None, '']: raise ValueError("class id can not be empty or None.") if isinstance(class_id, str): class_ids = [class_id, ] else: class_ids = class_id non_existent = set(self.class_set).intersection(set(class_ids)) if len(non_existent) < 1: raise ValueError( 'These classes {} do not exist in this dataset.'.format(non_existent)) subsets = list() for class_id in class_ids: subsets_this_class = self.keys_with_value(self.__classes, class_id) subsets.extend(subsets_this_class) return self.get_subset(subsets)
python
def get_class(self, class_id): """ Returns a smaller dataset belonging to the requested classes. Parameters ---------- class_id : str or list identifier(s) of the class(es) to be returned. Returns ------- MLDataset With subset of samples belonging to the given class(es). Raises ------ ValueError If one or more of the requested classes do not exist in this dataset. If the specified id is empty or None """ if class_id in [None, '']: raise ValueError("class id can not be empty or None.") if isinstance(class_id, str): class_ids = [class_id, ] else: class_ids = class_id non_existent = set(self.class_set).intersection(set(class_ids)) if len(non_existent) < 1: raise ValueError( 'These classes {} do not exist in this dataset.'.format(non_existent)) subsets = list() for class_id in class_ids: subsets_this_class = self.keys_with_value(self.__classes, class_id) subsets.extend(subsets_this_class) return self.get_subset(subsets)
[ "def", "get_class", "(", "self", ",", "class_id", ")", ":", "if", "class_id", "in", "[", "None", ",", "''", "]", ":", "raise", "ValueError", "(", "\"class id can not be empty or None.\"", ")", "if", "isinstance", "(", "class_id", ",", "str", ")", ":", "cla...
Returns a smaller dataset belonging to the requested classes. Parameters ---------- class_id : str or list identifier(s) of the class(es) to be returned. Returns ------- MLDataset With subset of samples belonging to the given class(es). Raises ------ ValueError If one or more of the requested classes do not exist in this dataset. If the specified id is empty or None
[ "Returns", "a", "smaller", "dataset", "belonging", "to", "the", "requested", "classes", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L535-L574
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.transform
def transform(self, func, func_description=None): """ Applies a given a function to the features of each subject and returns a new dataset with other info unchanged. Parameters ---------- func : callable A valid callable that takes in a single ndarray and returns a single ndarray. Ensure the transformed dimensionality must be the same for all subjects. If your function requires more than one argument, use `functools.partial` to freeze all the arguments except the features for the subject. func_description : str, optional Human readable description of the given function. Returns ------- xfm_ds : MLDataset with features obtained from subject-wise transform Raises ------ TypeError If given func is not a callable ValueError If transformation of any of the subjects features raises an exception. Examples -------- Simple: .. code-block:: python from pyradigm import MLDataset thickness = MLDataset(in_path='ADNI_thickness.csv') pcg_thickness = thickness.apply_xfm(func=get_pcg, description = 'applying ROI mask for PCG') pcg_median = pcg_thickness.apply_xfm(func=np.median, description='median per subject') Complex example with function taking more than one argument: .. code-block:: python from pyradigm import MLDataset from functools import partial import hiwenet thickness = MLDataset(in_path='ADNI_thickness.csv') roi_membership = read_roi_membership() hw = partial(hiwenet, groups = roi_membership) thickness_hiwenet = thickness.transform(func=hw, description = 'histogram weighted networks') median_thk_hiwenet = thickness_hiwenet.transform(func=np.median, description='median per subject') """ if not callable(func): raise TypeError('Given function {} is not a callable'.format(func)) xfm_ds = MLDataset() for sample, data in self.__data.items(): try: xfm_data = func(data) except: print('Unable to transform features for {}. 
Quitting.'.format(sample)) raise xfm_ds.add_sample(sample, xfm_data, label=self.__labels[sample], class_id=self.__classes[sample]) xfm_ds.description = "{}\n{}".format(func_description, self.__description) return xfm_ds
python
def transform(self, func, func_description=None): """ Applies a given a function to the features of each subject and returns a new dataset with other info unchanged. Parameters ---------- func : callable A valid callable that takes in a single ndarray and returns a single ndarray. Ensure the transformed dimensionality must be the same for all subjects. If your function requires more than one argument, use `functools.partial` to freeze all the arguments except the features for the subject. func_description : str, optional Human readable description of the given function. Returns ------- xfm_ds : MLDataset with features obtained from subject-wise transform Raises ------ TypeError If given func is not a callable ValueError If transformation of any of the subjects features raises an exception. Examples -------- Simple: .. code-block:: python from pyradigm import MLDataset thickness = MLDataset(in_path='ADNI_thickness.csv') pcg_thickness = thickness.apply_xfm(func=get_pcg, description = 'applying ROI mask for PCG') pcg_median = pcg_thickness.apply_xfm(func=np.median, description='median per subject') Complex example with function taking more than one argument: .. code-block:: python from pyradigm import MLDataset from functools import partial import hiwenet thickness = MLDataset(in_path='ADNI_thickness.csv') roi_membership = read_roi_membership() hw = partial(hiwenet, groups = roi_membership) thickness_hiwenet = thickness.transform(func=hw, description = 'histogram weighted networks') median_thk_hiwenet = thickness_hiwenet.transform(func=np.median, description='median per subject') """ if not callable(func): raise TypeError('Given function {} is not a callable'.format(func)) xfm_ds = MLDataset() for sample, data in self.__data.items(): try: xfm_data = func(data) except: print('Unable to transform features for {}. 
Quitting.'.format(sample)) raise xfm_ds.add_sample(sample, xfm_data, label=self.__labels[sample], class_id=self.__classes[sample]) xfm_ds.description = "{}\n{}".format(func_description, self.__description) return xfm_ds
[ "def", "transform", "(", "self", ",", "func", ",", "func_description", "=", "None", ")", ":", "if", "not", "callable", "(", "func", ")", ":", "raise", "TypeError", "(", "'Given function {} is not a callable'", ".", "format", "(", "func", ")", ")", "xfm_ds", ...
Applies a given a function to the features of each subject and returns a new dataset with other info unchanged. Parameters ---------- func : callable A valid callable that takes in a single ndarray and returns a single ndarray. Ensure the transformed dimensionality must be the same for all subjects. If your function requires more than one argument, use `functools.partial` to freeze all the arguments except the features for the subject. func_description : str, optional Human readable description of the given function. Returns ------- xfm_ds : MLDataset with features obtained from subject-wise transform Raises ------ TypeError If given func is not a callable ValueError If transformation of any of the subjects features raises an exception. Examples -------- Simple: .. code-block:: python from pyradigm import MLDataset thickness = MLDataset(in_path='ADNI_thickness.csv') pcg_thickness = thickness.apply_xfm(func=get_pcg, description = 'applying ROI mask for PCG') pcg_median = pcg_thickness.apply_xfm(func=np.median, description='median per subject') Complex example with function taking more than one argument: .. code-block:: python from pyradigm import MLDataset from functools import partial import hiwenet thickness = MLDataset(in_path='ADNI_thickness.csv') roi_membership = read_roi_membership() hw = partial(hiwenet, groups = roi_membership) thickness_hiwenet = thickness.transform(func=hw, description = 'histogram weighted networks') median_thk_hiwenet = thickness_hiwenet.transform(func=np.median, description='median per subject')
[ "Applies", "a", "given", "a", "function", "to", "the", "features", "of", "each", "subject", "and", "returns", "a", "new", "dataset", "with", "other", "info", "unchanged", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L577-L654
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.random_subset_ids_by_count
def random_subset_ids_by_count(self, count_per_class=1): """ Returns a random subset of sample ids of specified size by count, within each class. Parameters ---------- count_per_class : int Exact number of samples per each class. Returns ------- subset : list Combined list of sample ids from all classes. """ class_sizes = self.class_sizes subsets = list() if count_per_class < 1: warnings.warn('Atleast one sample must be selected from each class') return list() elif count_per_class >= self.num_samples: warnings.warn('All samples requested - returning a copy!') return self.keys # seeding the random number generator # random.seed(random_seed) for class_id, class_size in class_sizes.items(): # samples belonging to the class this_class = self.keys_with_value(self.classes, class_id) # shuffling the sample order; shuffling works in-place! random.shuffle(this_class) # clipping the range to [0, class_size] subset_size_this_class = max(0, min(class_size, count_per_class)) if subset_size_this_class < 1 or this_class is None: # warning if none were selected warnings.warn('No subjects from class {} were selected.'.format(class_id)) else: subsets_this_class = this_class[0:count_per_class] subsets.extend(subsets_this_class) if len(subsets) > 0: return subsets else: warnings.warn('Zero samples were selected. Returning an empty list!') return list()
python
def random_subset_ids_by_count(self, count_per_class=1): """ Returns a random subset of sample ids of specified size by count, within each class. Parameters ---------- count_per_class : int Exact number of samples per each class. Returns ------- subset : list Combined list of sample ids from all classes. """ class_sizes = self.class_sizes subsets = list() if count_per_class < 1: warnings.warn('Atleast one sample must be selected from each class') return list() elif count_per_class >= self.num_samples: warnings.warn('All samples requested - returning a copy!') return self.keys # seeding the random number generator # random.seed(random_seed) for class_id, class_size in class_sizes.items(): # samples belonging to the class this_class = self.keys_with_value(self.classes, class_id) # shuffling the sample order; shuffling works in-place! random.shuffle(this_class) # clipping the range to [0, class_size] subset_size_this_class = max(0, min(class_size, count_per_class)) if subset_size_this_class < 1 or this_class is None: # warning if none were selected warnings.warn('No subjects from class {} were selected.'.format(class_id)) else: subsets_this_class = this_class[0:count_per_class] subsets.extend(subsets_this_class) if len(subsets) > 0: return subsets else: warnings.warn('Zero samples were selected. Returning an empty list!') return list()
[ "def", "random_subset_ids_by_count", "(", "self", ",", "count_per_class", "=", "1", ")", ":", "class_sizes", "=", "self", ".", "class_sizes", "subsets", "=", "list", "(", ")", "if", "count_per_class", "<", "1", ":", "warnings", ".", "warn", "(", "'Atleast on...
Returns a random subset of sample ids of specified size by count, within each class. Parameters ---------- count_per_class : int Exact number of samples per each class. Returns ------- subset : list Combined list of sample ids from all classes.
[ "Returns", "a", "random", "subset", "of", "sample", "ids", "of", "specified", "size", "by", "count", "within", "each", "class", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L716-L765
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.random_subset_ids
def random_subset_ids(self, perc_per_class=0.5): """ Returns a random subset of sample ids (size in percentage) within each class. Parameters ---------- perc_per_class : float Fraction of samples per class Returns ------- subset : list Combined list of sample ids from all classes. Raises ------ ValueError If no subjects from one or more classes were selected. UserWarning If an empty or full dataset is requested. """ class_sizes = self.class_sizes subsets = list() if perc_per_class <= 0.0: warnings.warn('Zero percentage requested - returning an empty dataset!') return list() elif perc_per_class >= 1.0: warnings.warn('Full or a larger dataset requested - returning a copy!') return self.keys # seeding the random number generator # random.seed(random_seed) for class_id, class_size in class_sizes.items(): # samples belonging to the class this_class = self.keys_with_value(self.classes, class_id) # shuffling the sample order; shuffling works in-place! random.shuffle(this_class) # calculating the requested number of samples subset_size_this_class = np.int64(np.floor(class_size * perc_per_class)) # clipping the range to [1, n] subset_size_this_class = max(1, min(class_size, subset_size_this_class)) if subset_size_this_class < 1 or len(this_class) < 1 or this_class is None: # warning if none were selected raise ValueError( 'No subjects from class {} were selected.'.format(class_id)) else: subsets_this_class = this_class[0:subset_size_this_class] subsets.extend(subsets_this_class) if len(subsets) > 0: return subsets else: warnings.warn('Zero samples were selected. Returning an empty list!') return list()
python
def random_subset_ids(self, perc_per_class=0.5): """ Returns a random subset of sample ids (size in percentage) within each class. Parameters ---------- perc_per_class : float Fraction of samples per class Returns ------- subset : list Combined list of sample ids from all classes. Raises ------ ValueError If no subjects from one or more classes were selected. UserWarning If an empty or full dataset is requested. """ class_sizes = self.class_sizes subsets = list() if perc_per_class <= 0.0: warnings.warn('Zero percentage requested - returning an empty dataset!') return list() elif perc_per_class >= 1.0: warnings.warn('Full or a larger dataset requested - returning a copy!') return self.keys # seeding the random number generator # random.seed(random_seed) for class_id, class_size in class_sizes.items(): # samples belonging to the class this_class = self.keys_with_value(self.classes, class_id) # shuffling the sample order; shuffling works in-place! random.shuffle(this_class) # calculating the requested number of samples subset_size_this_class = np.int64(np.floor(class_size * perc_per_class)) # clipping the range to [1, n] subset_size_this_class = max(1, min(class_size, subset_size_this_class)) if subset_size_this_class < 1 or len(this_class) < 1 or this_class is None: # warning if none were selected raise ValueError( 'No subjects from class {} were selected.'.format(class_id)) else: subsets_this_class = this_class[0:subset_size_this_class] subsets.extend(subsets_this_class) if len(subsets) > 0: return subsets else: warnings.warn('Zero samples were selected. Returning an empty list!') return list()
[ "def", "random_subset_ids", "(", "self", ",", "perc_per_class", "=", "0.5", ")", ":", "class_sizes", "=", "self", ".", "class_sizes", "subsets", "=", "list", "(", ")", "if", "perc_per_class", "<=", "0.0", ":", "warnings", ".", "warn", "(", "'Zero percentage ...
Returns a random subset of sample ids (size in percentage) within each class. Parameters ---------- perc_per_class : float Fraction of samples per class Returns ------- subset : list Combined list of sample ids from all classes. Raises ------ ValueError If no subjects from one or more classes were selected. UserWarning If an empty or full dataset is requested.
[ "Returns", "a", "random", "subset", "of", "sample", "ids", "(", "size", "in", "percentage", ")", "within", "each", "class", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L768-L825
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.random_subset
def random_subset(self, perc_in_class=0.5): """ Returns a random sub-dataset (of specified size by percentage) within each class. Parameters ---------- perc_in_class : float Fraction of samples to be taken from each class. Returns ------- subdataset : MLDataset random sub-dataset of specified size. """ subsets = self.random_subset_ids(perc_in_class) if len(subsets) > 0: return self.get_subset(subsets) else: warnings.warn('Zero samples were selected. Returning an empty dataset!') return MLDataset()
python
def random_subset(self, perc_in_class=0.5): """ Returns a random sub-dataset (of specified size by percentage) within each class. Parameters ---------- perc_in_class : float Fraction of samples to be taken from each class. Returns ------- subdataset : MLDataset random sub-dataset of specified size. """ subsets = self.random_subset_ids(perc_in_class) if len(subsets) > 0: return self.get_subset(subsets) else: warnings.warn('Zero samples were selected. Returning an empty dataset!') return MLDataset()
[ "def", "random_subset", "(", "self", ",", "perc_in_class", "=", "0.5", ")", ":", "subsets", "=", "self", ".", "random_subset_ids", "(", "perc_in_class", ")", "if", "len", "(", "subsets", ")", ">", "0", ":", "return", "self", ".", "get_subset", "(", "subs...
Returns a random sub-dataset (of specified size by percentage) within each class. Parameters ---------- perc_in_class : float Fraction of samples to be taken from each class. Returns ------- subdataset : MLDataset random sub-dataset of specified size.
[ "Returns", "a", "random", "sub", "-", "dataset", "(", "of", "specified", "size", "by", "percentage", ")", "within", "each", "class", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L828-L849
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.sample_ids_in_class
def sample_ids_in_class(self, class_id): """ Returns a list of sample ids belonging to a given class. Parameters ---------- class_id : str class id to query. Returns ------- subset_ids : list List of sample ids belonging to a given class. """ # subset_ids = [sid for sid in self.keys if self.classes[sid] == class_id] subset_ids = self.keys_with_value(self.classes, class_id) return subset_ids
python
def sample_ids_in_class(self, class_id): """ Returns a list of sample ids belonging to a given class. Parameters ---------- class_id : str class id to query. Returns ------- subset_ids : list List of sample ids belonging to a given class. """ # subset_ids = [sid for sid in self.keys if self.classes[sid] == class_id] subset_ids = self.keys_with_value(self.classes, class_id) return subset_ids
[ "def", "sample_ids_in_class", "(", "self", ",", "class_id", ")", ":", "# subset_ids = [sid for sid in self.keys if self.classes[sid] == class_id]", "subset_ids", "=", "self", ".", "keys_with_value", "(", "self", ".", "classes", ",", "class_id", ")", "return", "subset_ids"...
Returns a list of sample ids belonging to a given class. Parameters ---------- class_id : str class id to query. Returns ------- subset_ids : list List of sample ids belonging to a given class.
[ "Returns", "a", "list", "of", "sample", "ids", "belonging", "to", "a", "given", "class", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L852-L870
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.get_subset
def get_subset(self, subset_ids): """ Returns a smaller dataset identified by their keys/sample IDs. Parameters ---------- subset_ids : list List od sample IDs to extracted from the dataset. Returns ------- sub-dataset : MLDataset sub-dataset containing only requested sample IDs. """ num_existing_keys = sum([1 for key in subset_ids if key in self.__data]) if subset_ids is not None and num_existing_keys > 0: # ensure items are added to data, labels etc in the same order of sample IDs # TODO come up with a way to do this even when not using OrderedDict() # putting the access of data, labels and classes in the same loop would # ensure there is correspondence across the three attributes of the class data = self.__get_subset_from_dict(self.__data, subset_ids) labels = self.__get_subset_from_dict(self.__labels, subset_ids) if self.__classes is not None: classes = self.__get_subset_from_dict(self.__classes, subset_ids) else: classes = None subdataset = MLDataset(data=data, labels=labels, classes=classes) # Appending the history subdataset.description += '\n Subset derived from: ' + self.description subdataset.feature_names = self.__feature_names subdataset.__dtype = self.dtype return subdataset else: warnings.warn('subset of IDs requested do not exist in the dataset!') return MLDataset()
python
def get_subset(self, subset_ids): """ Returns a smaller dataset identified by their keys/sample IDs. Parameters ---------- subset_ids : list List od sample IDs to extracted from the dataset. Returns ------- sub-dataset : MLDataset sub-dataset containing only requested sample IDs. """ num_existing_keys = sum([1 for key in subset_ids if key in self.__data]) if subset_ids is not None and num_existing_keys > 0: # ensure items are added to data, labels etc in the same order of sample IDs # TODO come up with a way to do this even when not using OrderedDict() # putting the access of data, labels and classes in the same loop would # ensure there is correspondence across the three attributes of the class data = self.__get_subset_from_dict(self.__data, subset_ids) labels = self.__get_subset_from_dict(self.__labels, subset_ids) if self.__classes is not None: classes = self.__get_subset_from_dict(self.__classes, subset_ids) else: classes = None subdataset = MLDataset(data=data, labels=labels, classes=classes) # Appending the history subdataset.description += '\n Subset derived from: ' + self.description subdataset.feature_names = self.__feature_names subdataset.__dtype = self.dtype return subdataset else: warnings.warn('subset of IDs requested do not exist in the dataset!') return MLDataset()
[ "def", "get_subset", "(", "self", ",", "subset_ids", ")", ":", "num_existing_keys", "=", "sum", "(", "[", "1", "for", "key", "in", "subset_ids", "if", "key", "in", "self", ".", "__data", "]", ")", "if", "subset_ids", "is", "not", "None", "and", "num_ex...
Returns a smaller dataset identified by their keys/sample IDs. Parameters ---------- subset_ids : list List od sample IDs to extracted from the dataset. Returns ------- sub-dataset : MLDataset sub-dataset containing only requested sample IDs.
[ "Returns", "a", "smaller", "dataset", "identified", "by", "their", "keys", "/", "sample", "IDs", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L873-L909
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.get_data_matrix_in_order
def get_data_matrix_in_order(self, subset_ids): """ Returns a numpy array of features, rows in the same order as subset_ids Parameters ---------- subset_ids : list List od sample IDs to extracted from the dataset. Returns ------- matrix : ndarray Matrix of features, for each id in subset_ids, in order. """ if len(subset_ids) < 1: warnings.warn('subset must have atleast one ID - returning empty matrix!') return np.empty((0, 0)) if isinstance(subset_ids, set): raise TypeError('Input set is not ordered, hence can not guarantee order! ' 'Must provide a list or tuple.') if isinstance(subset_ids, str): subset_ids = [subset_ids, ] num_existing_keys = sum([1 for key in subset_ids if key in self.__data]) if num_existing_keys < len(subset_ids): raise ValueError('One or more IDs from subset do not exist in the dataset!') matrix = np.full((num_existing_keys, self.num_features), np.nan) for idx, sid in enumerate(subset_ids): matrix[idx, :] = self.__data[sid] return matrix
python
def get_data_matrix_in_order(self, subset_ids): """ Returns a numpy array of features, rows in the same order as subset_ids Parameters ---------- subset_ids : list List od sample IDs to extracted from the dataset. Returns ------- matrix : ndarray Matrix of features, for each id in subset_ids, in order. """ if len(subset_ids) < 1: warnings.warn('subset must have atleast one ID - returning empty matrix!') return np.empty((0, 0)) if isinstance(subset_ids, set): raise TypeError('Input set is not ordered, hence can not guarantee order! ' 'Must provide a list or tuple.') if isinstance(subset_ids, str): subset_ids = [subset_ids, ] num_existing_keys = sum([1 for key in subset_ids if key in self.__data]) if num_existing_keys < len(subset_ids): raise ValueError('One or more IDs from subset do not exist in the dataset!') matrix = np.full((num_existing_keys, self.num_features), np.nan) for idx, sid in enumerate(subset_ids): matrix[idx, :] = self.__data[sid] return matrix
[ "def", "get_data_matrix_in_order", "(", "self", ",", "subset_ids", ")", ":", "if", "len", "(", "subset_ids", ")", "<", "1", ":", "warnings", ".", "warn", "(", "'subset must have atleast one ID - returning empty matrix!'", ")", "return", "np", ".", "empty", "(", ...
Returns a numpy array of features, rows in the same order as subset_ids Parameters ---------- subset_ids : list List od sample IDs to extracted from the dataset. Returns ------- matrix : ndarray Matrix of features, for each id in subset_ids, in order.
[ "Returns", "a", "numpy", "array", "of", "features", "rows", "in", "the", "same", "order", "as", "subset_ids" ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L912-L946
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.get
def get(self, item, not_found_value=None): "Method like dict.get() which can return specified value if key not found" if item in self.keys: return self.__data[item] else: return not_found_value
python
def get(self, item, not_found_value=None): "Method like dict.get() which can return specified value if key not found" if item in self.keys: return self.__data[item] else: return not_found_value
[ "def", "get", "(", "self", ",", "item", ",", "not_found_value", "=", "None", ")", ":", "if", "item", "in", "self", ".", "keys", ":", "return", "self", ".", "__data", "[", "item", "]", "else", ":", "return", "not_found_value" ]
Method like dict.get() which can return specified value if key not found
[ "Method", "like", "dict", ".", "get", "()", "which", "can", "return", "specified", "value", "if", "key", "not", "found" ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L957-L963
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.label_set
def label_set(self): """Set of labels in the dataset corresponding to class_set.""" label_set = list() for class_ in self.class_set: samples_in_class = self.sample_ids_in_class(class_) label_set.append(self.labels[samples_in_class[0]]) return label_set
python
def label_set(self): """Set of labels in the dataset corresponding to class_set.""" label_set = list() for class_ in self.class_set: samples_in_class = self.sample_ids_in_class(class_) label_set.append(self.labels[samples_in_class[0]]) return label_set
[ "def", "label_set", "(", "self", ")", ":", "label_set", "=", "list", "(", ")", "for", "class_", "in", "self", ".", "class_set", ":", "samples_in_class", "=", "self", ".", "sample_ids_in_class", "(", "class_", ")", "label_set", ".", "append", "(", "self", ...
Set of labels in the dataset corresponding to class_set.
[ "Set", "of", "labels", "in", "the", "dataset", "corresponding", "to", "class_set", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1066-L1073
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.add_classes
def add_classes(self, classes): """ Helper to rename the classes, if provided by a dict keyed in by the orignal keys Parameters ---------- classes : dict Dict of class named keyed in by sample IDs. Raises ------ TypeError If classes is not a dict. ValueError If all samples in dataset are not present in input dict, or one of they samples in input is not recognized. """ if not isinstance(classes, dict): raise TypeError('Input classes is not a dict!') if not len(classes) == self.num_samples: raise ValueError('Too few items - need {} keys'.format(self.num_samples)) if not all([key in self.keys for key in classes]): raise ValueError('One or more unrecognized keys!') self.__classes = classes
python
def add_classes(self, classes): """ Helper to rename the classes, if provided by a dict keyed in by the orignal keys Parameters ---------- classes : dict Dict of class named keyed in by sample IDs. Raises ------ TypeError If classes is not a dict. ValueError If all samples in dataset are not present in input dict, or one of they samples in input is not recognized. """ if not isinstance(classes, dict): raise TypeError('Input classes is not a dict!') if not len(classes) == self.num_samples: raise ValueError('Too few items - need {} keys'.format(self.num_samples)) if not all([key in self.keys for key in classes]): raise ValueError('One or more unrecognized keys!') self.__classes = classes
[ "def", "add_classes", "(", "self", ",", "classes", ")", ":", "if", "not", "isinstance", "(", "classes", ",", "dict", ")", ":", "raise", "TypeError", "(", "'Input classes is not a dict!'", ")", "if", "not", "len", "(", "classes", ")", "==", "self", ".", "...
Helper to rename the classes, if provided by a dict keyed in by the orignal keys Parameters ---------- classes : dict Dict of class named keyed in by sample IDs. Raises ------ TypeError If classes is not a dict. ValueError If all samples in dataset are not present in input dict, or one of they samples in input is not recognized.
[ "Helper", "to", "rename", "the", "classes", "if", "provided", "by", "a", "dict", "keyed", "in", "by", "the", "orignal", "keys" ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1076-L1100
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.__copy
def __copy(self, other): """Copy constructor.""" self.__data = copy.deepcopy(other.data) self.__classes = copy.deepcopy(other.classes) self.__labels = copy.deepcopy(other.labels) self.__dtype = copy.deepcopy(other.dtype) self.__description = copy.deepcopy(other.description) self.__feature_names = copy.deepcopy(other.feature_names) self.__num_features = copy.deepcopy(other.num_features) return self
python
def __copy(self, other): """Copy constructor.""" self.__data = copy.deepcopy(other.data) self.__classes = copy.deepcopy(other.classes) self.__labels = copy.deepcopy(other.labels) self.__dtype = copy.deepcopy(other.dtype) self.__description = copy.deepcopy(other.description) self.__feature_names = copy.deepcopy(other.feature_names) self.__num_features = copy.deepcopy(other.num_features) return self
[ "def", "__copy", "(", "self", ",", "other", ")", ":", "self", ".", "__data", "=", "copy", ".", "deepcopy", "(", "other", ".", "data", ")", "self", ".", "__classes", "=", "copy", ".", "deepcopy", "(", "other", ".", "classes", ")", "self", ".", "__la...
Copy constructor.
[ "Copy", "constructor", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1191-L1201
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.__load
def __load(self, path): """Method to load the serialized dataset from disk.""" try: path = os.path.abspath(path) with open(path, 'rb') as df: # loaded_dataset = pickle.load(df) self.__data, self.__classes, self.__labels, \ self.__dtype, self.__description, \ self.__num_features, self.__feature_names = pickle.load(df) # ensure the loaded dataset is valid self.__validate(self.__data, self.__classes, self.__labels) except IOError as ioe: raise IOError('Unable to read the dataset from file: {}', format(ioe)) except: raise
python
def __load(self, path): """Method to load the serialized dataset from disk.""" try: path = os.path.abspath(path) with open(path, 'rb') as df: # loaded_dataset = pickle.load(df) self.__data, self.__classes, self.__labels, \ self.__dtype, self.__description, \ self.__num_features, self.__feature_names = pickle.load(df) # ensure the loaded dataset is valid self.__validate(self.__data, self.__classes, self.__labels) except IOError as ioe: raise IOError('Unable to read the dataset from file: {}', format(ioe)) except: raise
[ "def", "__load", "(", "self", ",", "path", ")", ":", "try", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "with", "open", "(", "path", ",", "'rb'", ")", "as", "df", ":", "# loaded_dataset = pickle.load(df)", "self", ".", "__d...
Method to load the serialized dataset from disk.
[ "Method", "to", "load", "the", "serialized", "dataset", "from", "disk", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1204-L1220
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.__load_arff
def __load_arff(self, arff_path, encode_nonnumeric=False): """Loads a given dataset saved in Weka's ARFF format. """ try: from scipy.io.arff import loadarff arff_data, arff_meta = loadarff(arff_path) except: raise ValueError('Error loading the ARFF dataset!') attr_names = arff_meta.names()[:-1] # last column is class attr_types = arff_meta.types()[:-1] if not encode_nonnumeric: # ensure all the attributes are numeric uniq_types = set(attr_types) if 'numeric' not in uniq_types: raise ValueError( 'Currently only numeric attributes in ARFF are supported!') non_numeric = uniq_types.difference({'numeric'}) if len(non_numeric) > 0: raise ValueError('Non-numeric features provided ({}), ' 'without requesting encoding to numeric. ' 'Try setting encode_nonnumeric=True ' 'or encode features to numeric!'.format(non_numeric)) else: raise NotImplementedError( 'encoding non-numeric features to numeric is not implemented yet! ' 'Encode features beforing to ARFF.') self.__description = arff_meta.name # to enable it as a label e.g. 
in neuropredict # initializing the key containers, before calling self.add_sample self.__data = OrderedDict() self.__labels = OrderedDict() self.__classes = OrderedDict() num_samples = len(arff_data) num_digits = len(str(num_samples)) make_id = lambda index: 'row{index:0{nd}d}'.format(index=index, nd=num_digits) sample_classes = [cls.decode('utf-8') for cls in arff_data['class']] class_set = set(sample_classes) label_dict = dict() # encoding class names to labels 1 to n for ix, cls in enumerate(class_set): label_dict[cls] = ix + 1 for index in range(num_samples): sample = arff_data.take([index])[0].tolist() sample_attrs = sample[:-1] sample_class = sample[-1].decode('utf-8') self.add_sample(sample_id=make_id(index), # ARFF rows do not have an ID features=sample_attrs, label=label_dict[sample_class], class_id=sample_class) # not necessary to set feature_names=attr_names for each sample, # as we do it globally after loop self.__feature_names = attr_names return
python
def __load_arff(self, arff_path, encode_nonnumeric=False): """Loads a given dataset saved in Weka's ARFF format. """ try: from scipy.io.arff import loadarff arff_data, arff_meta = loadarff(arff_path) except: raise ValueError('Error loading the ARFF dataset!') attr_names = arff_meta.names()[:-1] # last column is class attr_types = arff_meta.types()[:-1] if not encode_nonnumeric: # ensure all the attributes are numeric uniq_types = set(attr_types) if 'numeric' not in uniq_types: raise ValueError( 'Currently only numeric attributes in ARFF are supported!') non_numeric = uniq_types.difference({'numeric'}) if len(non_numeric) > 0: raise ValueError('Non-numeric features provided ({}), ' 'without requesting encoding to numeric. ' 'Try setting encode_nonnumeric=True ' 'or encode features to numeric!'.format(non_numeric)) else: raise NotImplementedError( 'encoding non-numeric features to numeric is not implemented yet! ' 'Encode features beforing to ARFF.') self.__description = arff_meta.name # to enable it as a label e.g. 
in neuropredict # initializing the key containers, before calling self.add_sample self.__data = OrderedDict() self.__labels = OrderedDict() self.__classes = OrderedDict() num_samples = len(arff_data) num_digits = len(str(num_samples)) make_id = lambda index: 'row{index:0{nd}d}'.format(index=index, nd=num_digits) sample_classes = [cls.decode('utf-8') for cls in arff_data['class']] class_set = set(sample_classes) label_dict = dict() # encoding class names to labels 1 to n for ix, cls in enumerate(class_set): label_dict[cls] = ix + 1 for index in range(num_samples): sample = arff_data.take([index])[0].tolist() sample_attrs = sample[:-1] sample_class = sample[-1].decode('utf-8') self.add_sample(sample_id=make_id(index), # ARFF rows do not have an ID features=sample_attrs, label=label_dict[sample_class], class_id=sample_class) # not necessary to set feature_names=attr_names for each sample, # as we do it globally after loop self.__feature_names = attr_names return
[ "def", "__load_arff", "(", "self", ",", "arff_path", ",", "encode_nonnumeric", "=", "False", ")", ":", "try", ":", "from", "scipy", ".", "io", ".", "arff", "import", "loadarff", "arff_data", ",", "arff_meta", "=", "loadarff", "(", "arff_path", ")", "except...
Loads a given dataset saved in Weka's ARFF format.
[ "Loads", "a", "given", "dataset", "saved", "in", "Weka", "s", "ARFF", "format", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1223-L1281
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.save
def save(self, file_path): """ Method to save the dataset to disk. Parameters ---------- file_path : str File path to save the current dataset to Raises ------ IOError If saving to disk is not successful. """ # TODO need a file format that is flexible and efficient to allow the following: # 1) being able to read just meta info without having to load the ENTIRE dataset # i.e. use case: compatibility check with #subjects, ids and their classes # 2) random access layout: being able to read features for a single subject! try: file_path = os.path.abspath(file_path) with open(file_path, 'wb') as df: # pickle.dump(self, df) pickle.dump((self.__data, self.__classes, self.__labels, self.__dtype, self.__description, self.__num_features, self.__feature_names), df) return except IOError as ioe: raise IOError('Unable to save the dataset to file: {}', format(ioe)) except: raise
python
def save(self, file_path): """ Method to save the dataset to disk. Parameters ---------- file_path : str File path to save the current dataset to Raises ------ IOError If saving to disk is not successful. """ # TODO need a file format that is flexible and efficient to allow the following: # 1) being able to read just meta info without having to load the ENTIRE dataset # i.e. use case: compatibility check with #subjects, ids and their classes # 2) random access layout: being able to read features for a single subject! try: file_path = os.path.abspath(file_path) with open(file_path, 'wb') as df: # pickle.dump(self, df) pickle.dump((self.__data, self.__classes, self.__labels, self.__dtype, self.__description, self.__num_features, self.__feature_names), df) return except IOError as ioe: raise IOError('Unable to save the dataset to file: {}', format(ioe)) except: raise
[ "def", "save", "(", "self", ",", "file_path", ")", ":", "# TODO need a file format that is flexible and efficient to allow the following:", "# 1) being able to read just meta info without having to load the ENTIRE dataset", "# i.e. use case: compatibility check with #subjects, ids and th...
Method to save the dataset to disk. Parameters ---------- file_path : str File path to save the current dataset to Raises ------ IOError If saving to disk is not successful.
[ "Method", "to", "save", "the", "dataset", "to", "disk", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1284-L1317
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.__validate
def __validate(data, classes, labels): "Validator of inputs." if not isinstance(data, dict): raise TypeError( 'data must be a dict! keys: sample ID or any unique identifier') if not isinstance(labels, dict): raise TypeError( 'labels must be a dict! keys: sample ID or any unique identifier') if classes is not None: if not isinstance(classes, dict): raise TypeError( 'labels must be a dict! keys: sample ID or any unique identifier') if not len(data) == len(labels) == len(classes): raise ValueError('Lengths of data, labels and classes do not match!') if not set(list(data)) == set(list(labels)) == set(list(classes)): raise ValueError( 'data, classes and labels dictionaries must have the same keys!') num_features_in_elements = np.unique([sample.size for sample in data.values()]) if len(num_features_in_elements) > 1: raise ValueError( 'different samples have different number of features - invalid!') return True
python
def __validate(data, classes, labels): "Validator of inputs." if not isinstance(data, dict): raise TypeError( 'data must be a dict! keys: sample ID or any unique identifier') if not isinstance(labels, dict): raise TypeError( 'labels must be a dict! keys: sample ID or any unique identifier') if classes is not None: if not isinstance(classes, dict): raise TypeError( 'labels must be a dict! keys: sample ID or any unique identifier') if not len(data) == len(labels) == len(classes): raise ValueError('Lengths of data, labels and classes do not match!') if not set(list(data)) == set(list(labels)) == set(list(classes)): raise ValueError( 'data, classes and labels dictionaries must have the same keys!') num_features_in_elements = np.unique([sample.size for sample in data.values()]) if len(num_features_in_elements) > 1: raise ValueError( 'different samples have different number of features - invalid!') return True
[ "def", "__validate", "(", "data", ",", "classes", ",", "labels", ")", ":", "if", "not", "isinstance", "(", "data", ",", "dict", ")", ":", "raise", "TypeError", "(", "'data must be a dict! keys: sample ID or any unique identifier'", ")", "if", "not", "isinstance", ...
Validator of inputs.
[ "Validator", "of", "inputs", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1321-L1346
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.extend
def extend(self, other): """ Method to extend the dataset vertically (add samples from anotehr dataset). Parameters ---------- other : MLDataset second dataset to be combined with the current (different samples, but same dimensionality) Raises ------ TypeError if input is not an MLDataset. """ if not isinstance(other, MLDataset): raise TypeError('Incorrect type of dataset provided!') # assert self.__dtype==other.dtype, TypeError('Incorrect data type of features!') for sample in other.keys: self.add_sample(sample, other.data[sample], other.labels[sample], other.classes[sample])
python
def extend(self, other): """ Method to extend the dataset vertically (add samples from anotehr dataset). Parameters ---------- other : MLDataset second dataset to be combined with the current (different samples, but same dimensionality) Raises ------ TypeError if input is not an MLDataset. """ if not isinstance(other, MLDataset): raise TypeError('Incorrect type of dataset provided!') # assert self.__dtype==other.dtype, TypeError('Incorrect data type of features!') for sample in other.keys: self.add_sample(sample, other.data[sample], other.labels[sample], other.classes[sample])
[ "def", "extend", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "MLDataset", ")", ":", "raise", "TypeError", "(", "'Incorrect type of dataset provided!'", ")", "# assert self.__dtype==other.dtype, TypeError('Incorrect data type of featu...
Method to extend the dataset vertically (add samples from anotehr dataset). Parameters ---------- other : MLDataset second dataset to be combined with the current (different samples, but same dimensionality) Raises ------ TypeError if input is not an MLDataset.
[ "Method", "to", "extend", "the", "dataset", "vertically", "(", "add", "samples", "from", "anotehr", "dataset", ")", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1349-L1370
michal-stuglik/django-blastplus
blastplus/forms.py
validate_word_size
def validate_word_size(word_size, BLAST_SETS): """Validate word size in blast/tblastn form. """ blast_min_int_word_size = BLAST_SETS.min_word_size blast_max_int_word_size = BLAST_SETS.max_word_size blast_word_size_error = BLAST_SETS.get_word_size_error() try: if len(word_size) <= 0: raise forms.ValidationError(blast_word_size_error) int_word_size = int(word_size) if int_word_size < blast_min_int_word_size: raise forms.ValidationError(blast_word_size_error) if int_word_size >= blast_max_int_word_size: raise forms.ValidationError(blast_word_size_error) except: raise forms.ValidationError(blast_word_size_error) return int_word_size
python
def validate_word_size(word_size, BLAST_SETS): """Validate word size in blast/tblastn form. """ blast_min_int_word_size = BLAST_SETS.min_word_size blast_max_int_word_size = BLAST_SETS.max_word_size blast_word_size_error = BLAST_SETS.get_word_size_error() try: if len(word_size) <= 0: raise forms.ValidationError(blast_word_size_error) int_word_size = int(word_size) if int_word_size < blast_min_int_word_size: raise forms.ValidationError(blast_word_size_error) if int_word_size >= blast_max_int_word_size: raise forms.ValidationError(blast_word_size_error) except: raise forms.ValidationError(blast_word_size_error) return int_word_size
[ "def", "validate_word_size", "(", "word_size", ",", "BLAST_SETS", ")", ":", "blast_min_int_word_size", "=", "BLAST_SETS", ".", "min_word_size", "blast_max_int_word_size", "=", "BLAST_SETS", ".", "max_word_size", "blast_word_size_error", "=", "BLAST_SETS", ".", "get_word_s...
Validate word size in blast/tblastn form.
[ "Validate", "word", "size", "in", "blast", "/", "tblastn", "form", "." ]
train
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/forms.py#L113-L135
michal-stuglik/django-blastplus
blastplus/forms.py
validate_sequence
def validate_sequence(sequence: str, sequence_is_as_nucleotide=True): """Validate sequence in blast/tblastn form. """ tmp_seq = tempfile.NamedTemporaryFile(mode="wb+", delete=False) if len(str(sequence).strip()) == 0: raise forms.ValidationError(blast_settings.BLAST_CORRECT_SEQ_ERROR_MSG) if str(sequence).strip()[0] != ">": tmp_seq.write(">seq1\n".encode()) tmp_seq.write(sequence.encode()) tmp_seq.close() records = SeqIO.index(tmp_seq.name, "fasta") record_count = len(records) if record_count == 0: raise forms.ValidationError(blast_settings.BLAST_CORRECT_SEQ_ERROR_MSG) if record_count > blast_settings.BLAST_MAX_NUMBER_SEQ_IN_INPUT: raise forms.ValidationError(blast_settings.BLAST_CORRECT_SEQ_MAX_SEQ_NUMB_ERROR_MSG) # read query sequence from temporary file first_sequence_list_in_file = SeqIO.parse(tmp_seq.name, "fasta") for sequence in first_sequence_list_in_file: if len(sequence.seq) <= 10: raise forms.ValidationError(blast_settings.BLAST_CORRECT_SEQ_TOO_SHORT_ERROR_MSG) if sequence_is_as_nucleotide: check_allowed_letters(str(sequence.seq), ALLOWED_NUCL) else: check_allowed_letters(str(sequence.seq), ALLOWED_AMINOACIDS) return tmp_seq
python
def validate_sequence(sequence: str, sequence_is_as_nucleotide=True): """Validate sequence in blast/tblastn form. """ tmp_seq = tempfile.NamedTemporaryFile(mode="wb+", delete=False) if len(str(sequence).strip()) == 0: raise forms.ValidationError(blast_settings.BLAST_CORRECT_SEQ_ERROR_MSG) if str(sequence).strip()[0] != ">": tmp_seq.write(">seq1\n".encode()) tmp_seq.write(sequence.encode()) tmp_seq.close() records = SeqIO.index(tmp_seq.name, "fasta") record_count = len(records) if record_count == 0: raise forms.ValidationError(blast_settings.BLAST_CORRECT_SEQ_ERROR_MSG) if record_count > blast_settings.BLAST_MAX_NUMBER_SEQ_IN_INPUT: raise forms.ValidationError(blast_settings.BLAST_CORRECT_SEQ_MAX_SEQ_NUMB_ERROR_MSG) # read query sequence from temporary file first_sequence_list_in_file = SeqIO.parse(tmp_seq.name, "fasta") for sequence in first_sequence_list_in_file: if len(sequence.seq) <= 10: raise forms.ValidationError(blast_settings.BLAST_CORRECT_SEQ_TOO_SHORT_ERROR_MSG) if sequence_is_as_nucleotide: check_allowed_letters(str(sequence.seq), ALLOWED_NUCL) else: check_allowed_letters(str(sequence.seq), ALLOWED_AMINOACIDS) return tmp_seq
[ "def", "validate_sequence", "(", "sequence", ":", "str", ",", "sequence_is_as_nucleotide", "=", "True", ")", ":", "tmp_seq", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "\"wb+\"", ",", "delete", "=", "False", ")", "if", "len", "(", "str", ...
Validate sequence in blast/tblastn form.
[ "Validate", "sequence", "in", "blast", "/", "tblastn", "form", "." ]
train
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/forms.py#L138-L174
michal-stuglik/django-blastplus
blastplus/forms.py
check_allowed_letters
def check_allowed_letters(seq, allowed_letters_as_set): """Validate sequence: Rise an error if sequence contains undesirable letters. """ # set of unique letters in sequence seq_set = set(seq) not_allowed_letters_in_seq = [x for x in seq_set if str(x).upper() not in allowed_letters_as_set] if len(not_allowed_letters_in_seq) > 0: raise forms.ValidationError( "This sequence type cannot contain letters: " + ", ".join(not_allowed_letters_in_seq))
python
def check_allowed_letters(seq, allowed_letters_as_set): """Validate sequence: Rise an error if sequence contains undesirable letters. """ # set of unique letters in sequence seq_set = set(seq) not_allowed_letters_in_seq = [x for x in seq_set if str(x).upper() not in allowed_letters_as_set] if len(not_allowed_letters_in_seq) > 0: raise forms.ValidationError( "This sequence type cannot contain letters: " + ", ".join(not_allowed_letters_in_seq))
[ "def", "check_allowed_letters", "(", "seq", ",", "allowed_letters_as_set", ")", ":", "# set of unique letters in sequence", "seq_set", "=", "set", "(", "seq", ")", "not_allowed_letters_in_seq", "=", "[", "x", "for", "x", "in", "seq_set", "if", "str", "(", "x", "...
Validate sequence: Rise an error if sequence contains undesirable letters.
[ "Validate", "sequence", ":", "Rise", "an", "error", "if", "sequence", "contains", "undesirable", "letters", "." ]
train
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/forms.py#L177-L187
tomduck/pandoc-xnos
pandocxnos/core.py
_repeat
def _repeat(func): """Repeats func(...) call until something other than None is returned.""" @functools.wraps(func) def wrapper(*args, **kwargs): """Repeats the call until True is returned.""" ret = None while ret is None: ret = func(*args, **kwargs) return ret return wrapper
python
def _repeat(func): """Repeats func(...) call until something other than None is returned.""" @functools.wraps(func) def wrapper(*args, **kwargs): """Repeats the call until True is returned.""" ret = None while ret is None: ret = func(*args, **kwargs) return ret return wrapper
[ "def", "_repeat", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Repeats the call until True is returned.\"\"\"", "ret", "=", "None", "while", "ret", "i...
Repeats func(...) call until something other than None is returned.
[ "Repeats", "func", "(", "...", ")", "call", "until", "something", "other", "than", "None", "is", "returned", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L124-L133
tomduck/pandoc-xnos
pandocxnos/core.py
init
def init(pandocversion=None, doc=None): """Sets or determines the pandoc version. This must be called. The pandoc version is needed for multi-version support. See: https://github.com/jgm/pandoc/issues/2640 Returns the pandoc version.""" # This requires some care because we can't be sure that a call to 'pandoc' # will work. It could be 'pandoc-1.17.0.2' or some other name. Try # checking the parent process first, and only make a call to 'pandoc' as # a last resort. global _PANDOCVERSION # pylint: disable=global-statement pattern = re.compile(r'^[1-2]\.[0-9]+(?:\.[0-9]+)?(?:\.[0-9]+)?$') if 'PANDOC_VERSION' in os.environ: # Available for pandoc >= 1.19.1 pandocversion = str(os.environ['PANDOC_VERSION']) if not pandocversion is None: # Test the result and if it is OK then store it in _PANDOCVERSION if pattern.match(pandocversion): _PANDOCVERSION = pandocversion return _PANDOCVERSION else: msg = 'Cannot understand pandocversion=%s'%pandocversion raise RuntimeError(msg) if not doc is None: if 'pandoc-api-version' in doc: # This could be either 1.18 or 1.19; there is no way to # distinguish them (but there isn't a use case in pandoc-fignos # and friends where it matters) _PANDOCVERSION = '1.18' return _PANDOCVERSION # Get the command try: # Get the path for the parent process if os.name == 'nt': # psutil appears to work differently for windows command = psutil.Process(os.getpid()).parent().parent().exe() else: command = psutil.Process(os.getpid()).parent().exe() if not os.path.basename(command).startswith('pandoc'): raise RuntimeError('pandoc not found') except: # pylint: disable=bare-except # Call whatever pandoc is available and hope for the best command = 'pandoc' # Make the call try: # Get the version number and confirm it conforms to expectations output = subprocess.check_output([command, '-v']) line = output.decode('utf-8').split('\n')[0] pandocversion = line.split(' ')[-1].strip() except: # pylint: disable=bare-except pandocversion = '' # Test the result and if it 
is OK then store it in _PANDOCVERSION if pattern.match(pandocversion): _PANDOCVERSION = pandocversion if _PANDOCVERSION is None: msg = """Cannot determine pandoc version. Please file an issue at https://github.com/tomduck/pandocfiltering/issues""" raise RuntimeError(textwrap.dedent(msg)) return _PANDOCVERSION
python
def init(pandocversion=None, doc=None): """Sets or determines the pandoc version. This must be called. The pandoc version is needed for multi-version support. See: https://github.com/jgm/pandoc/issues/2640 Returns the pandoc version.""" # This requires some care because we can't be sure that a call to 'pandoc' # will work. It could be 'pandoc-1.17.0.2' or some other name. Try # checking the parent process first, and only make a call to 'pandoc' as # a last resort. global _PANDOCVERSION # pylint: disable=global-statement pattern = re.compile(r'^[1-2]\.[0-9]+(?:\.[0-9]+)?(?:\.[0-9]+)?$') if 'PANDOC_VERSION' in os.environ: # Available for pandoc >= 1.19.1 pandocversion = str(os.environ['PANDOC_VERSION']) if not pandocversion is None: # Test the result and if it is OK then store it in _PANDOCVERSION if pattern.match(pandocversion): _PANDOCVERSION = pandocversion return _PANDOCVERSION else: msg = 'Cannot understand pandocversion=%s'%pandocversion raise RuntimeError(msg) if not doc is None: if 'pandoc-api-version' in doc: # This could be either 1.18 or 1.19; there is no way to # distinguish them (but there isn't a use case in pandoc-fignos # and friends where it matters) _PANDOCVERSION = '1.18' return _PANDOCVERSION # Get the command try: # Get the path for the parent process if os.name == 'nt': # psutil appears to work differently for windows command = psutil.Process(os.getpid()).parent().parent().exe() else: command = psutil.Process(os.getpid()).parent().exe() if not os.path.basename(command).startswith('pandoc'): raise RuntimeError('pandoc not found') except: # pylint: disable=bare-except # Call whatever pandoc is available and hope for the best command = 'pandoc' # Make the call try: # Get the version number and confirm it conforms to expectations output = subprocess.check_output([command, '-v']) line = output.decode('utf-8').split('\n')[0] pandocversion = line.split(' ')[-1].strip() except: # pylint: disable=bare-except pandocversion = '' # Test the result and if it 
is OK then store it in _PANDOCVERSION if pattern.match(pandocversion): _PANDOCVERSION = pandocversion if _PANDOCVERSION is None: msg = """Cannot determine pandoc version. Please file an issue at https://github.com/tomduck/pandocfiltering/issues""" raise RuntimeError(textwrap.dedent(msg)) return _PANDOCVERSION
[ "def", "init", "(", "pandocversion", "=", "None", ",", "doc", "=", "None", ")", ":", "# This requires some care because we can't be sure that a call to 'pandoc'", "# will work. It could be 'pandoc-1.17.0.2' or some other name. Try", "# checking the parent process first, and only make a...
Sets or determines the pandoc version. This must be called. The pandoc version is needed for multi-version support. See: https://github.com/jgm/pandoc/issues/2640 Returns the pandoc version.
[ "Sets", "or", "determines", "the", "pandoc", "version", ".", "This", "must", "be", "called", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L144-L212
tomduck/pandoc-xnos
pandocxnos/core.py
get_meta
def get_meta(meta, name): """Retrieves the metadata variable 'name' from the 'meta' dict.""" assert name in meta data = meta[name] if data['t'] in ['MetaString', 'MetaBool']: return data['c'] elif data['t'] == 'MetaInlines': # Handle bug in pandoc 2.2.3 and 2.2.3.1: Return boolean value rather # than strings, as appropriate. if len(data['c']) == 1 and data['c'][0]['t'] == 'Str': if data['c'][0]['c'] in ['true', 'True', 'TRUE']: return True elif data['c'][0]['c'] in ['false', 'False', 'FALSE']: return False return stringify(data['c']) elif data['t'] == 'MetaList': return [stringify(v['c']) for v in data['c']] else: raise RuntimeError("Could not understand metadata variable '%s'." % name)
python
def get_meta(meta, name): """Retrieves the metadata variable 'name' from the 'meta' dict.""" assert name in meta data = meta[name] if data['t'] in ['MetaString', 'MetaBool']: return data['c'] elif data['t'] == 'MetaInlines': # Handle bug in pandoc 2.2.3 and 2.2.3.1: Return boolean value rather # than strings, as appropriate. if len(data['c']) == 1 and data['c'][0]['t'] == 'Str': if data['c'][0]['c'] in ['true', 'True', 'TRUE']: return True elif data['c'][0]['c'] in ['false', 'False', 'FALSE']: return False return stringify(data['c']) elif data['t'] == 'MetaList': return [stringify(v['c']) for v in data['c']] else: raise RuntimeError("Could not understand metadata variable '%s'." % name)
[ "def", "get_meta", "(", "meta", ",", "name", ")", ":", "assert", "name", "in", "meta", "data", "=", "meta", "[", "name", "]", "if", "data", "[", "'t'", "]", "in", "[", "'MetaString'", ",", "'MetaBool'", "]", ":", "return", "data", "[", "'c'", "]", ...
Retrieves the metadata variable 'name' from the 'meta' dict.
[ "Retrieves", "the", "metadata", "variable", "name", "from", "the", "meta", "dict", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L236-L256
tomduck/pandoc-xnos
pandocxnos/core.py
elt
def elt(eltType, numargs): # pylint: disable=invalid-name """Returns Element(*value) to create pandoc json elements. This should be used in place of pandocfilters.elt(). This version ensures that the content is stored in a list, not a tuple. """ def Element(*value): # pylint: disable=invalid-name """Creates an element.""" el = _elt(eltType, numargs)(*value) if isinstance(el['c'], tuple): el['c'] = list(el['c']) # The content should be a list, not tuple return el return Element
python
def elt(eltType, numargs): # pylint: disable=invalid-name """Returns Element(*value) to create pandoc json elements. This should be used in place of pandocfilters.elt(). This version ensures that the content is stored in a list, not a tuple. """ def Element(*value): # pylint: disable=invalid-name """Creates an element.""" el = _elt(eltType, numargs)(*value) if isinstance(el['c'], tuple): el['c'] = list(el['c']) # The content should be a list, not tuple return el return Element
[ "def", "elt", "(", "eltType", ",", "numargs", ")", ":", "# pylint: disable=invalid-name", "def", "Element", "(", "*", "value", ")", ":", "# pylint: disable=invalid-name", "\"\"\"Creates an element.\"\"\"", "el", "=", "_elt", "(", "eltType", ",", "numargs", ")", "(...
Returns Element(*value) to create pandoc json elements. This should be used in place of pandocfilters.elt(). This version ensures that the content is stored in a list, not a tuple.
[ "Returns", "Element", "(", "*", "value", ")", "to", "create", "pandoc", "json", "elements", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L261-L273
tomduck/pandoc-xnos
pandocxnos/core.py
_getel
def _getel(key, value): """Returns an element given a key and value.""" if key in ['HorizontalRule', 'Null']: return elt(key, 0)() elif key in ['Plain', 'Para', 'BlockQuote', 'BulletList', 'DefinitionList', 'HorizontalRule', 'Null']: return elt(key, 1)(value) return elt(key, len(value))(*value)
python
def _getel(key, value): """Returns an element given a key and value.""" if key in ['HorizontalRule', 'Null']: return elt(key, 0)() elif key in ['Plain', 'Para', 'BlockQuote', 'BulletList', 'DefinitionList', 'HorizontalRule', 'Null']: return elt(key, 1)(value) return elt(key, len(value))(*value)
[ "def", "_getel", "(", "key", ",", "value", ")", ":", "if", "key", "in", "[", "'HorizontalRule'", ",", "'Null'", "]", ":", "return", "elt", "(", "key", ",", "0", ")", "(", ")", "elif", "key", "in", "[", "'Plain'", ",", "'Para'", ",", "'BlockQuote'",...
Returns an element given a key and value.
[ "Returns", "an", "element", "given", "a", "key", "and", "value", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L277-L284
tomduck/pandoc-xnos
pandocxnos/core.py
quotify
def quotify(x): """Replaces Quoted elements in element list 'x' with quoted strings. Pandoc uses the Quoted element in its json when --smart is enabled. Output to TeX/pdf automatically triggers --smart. stringify() ignores Quoted elements. Use quotify() first to replace Quoted elements in 'x' with quoted strings. 'x' should be a deep copy so that the underlying document is left untouched. Returns x.""" def _quotify(key, value, fmt, meta): # pylint: disable=unused-argument """Replaced Quoted elements with quoted strings.""" if key == 'Quoted': ret = [] quote = '"' if value[0]['t'] == 'DoubleQuote' else "'" if value[1][0]['t'] == 'Str': value[1][0]['c'] = quote + value[1][0]['c'] else: ret.append(Str(quote)) if value[1][-1]['t'] == 'Str': value[1][-1]['c'] = value[1][-1]['c'] + quote ret += value[1] else: ret += value[1] + [Str(quote)] return ret return None return walk(walk(x, _quotify, '', {}), join_strings, '', {})
python
def quotify(x): """Replaces Quoted elements in element list 'x' with quoted strings. Pandoc uses the Quoted element in its json when --smart is enabled. Output to TeX/pdf automatically triggers --smart. stringify() ignores Quoted elements. Use quotify() first to replace Quoted elements in 'x' with quoted strings. 'x' should be a deep copy so that the underlying document is left untouched. Returns x.""" def _quotify(key, value, fmt, meta): # pylint: disable=unused-argument """Replaced Quoted elements with quoted strings.""" if key == 'Quoted': ret = [] quote = '"' if value[0]['t'] == 'DoubleQuote' else "'" if value[1][0]['t'] == 'Str': value[1][0]['c'] = quote + value[1][0]['c'] else: ret.append(Str(quote)) if value[1][-1]['t'] == 'Str': value[1][-1]['c'] = value[1][-1]['c'] + quote ret += value[1] else: ret += value[1] + [Str(quote)] return ret return None return walk(walk(x, _quotify, '', {}), join_strings, '', {})
[ "def", "quotify", "(", "x", ")", ":", "def", "_quotify", "(", "key", ",", "value", ",", "fmt", ",", "meta", ")", ":", "# pylint: disable=unused-argument", "\"\"\"Replaced Quoted elements with quoted strings.\"\"\"", "if", "key", "==", "'Quoted'", ":", "ret", "=", ...
Replaces Quoted elements in element list 'x' with quoted strings. Pandoc uses the Quoted element in its json when --smart is enabled. Output to TeX/pdf automatically triggers --smart. stringify() ignores Quoted elements. Use quotify() first to replace Quoted elements in 'x' with quoted strings. 'x' should be a deep copy so that the underlying document is left untouched. Returns x.
[ "Replaces", "Quoted", "elements", "in", "element", "list", "x", "with", "quoted", "strings", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L292-L322
tomduck/pandoc-xnos
pandocxnos/core.py
dollarfy
def dollarfy(x): """Replaces Math elements in element list 'x' with a $-enclosed string. stringify() passes through TeX math. Use dollarfy(x) first to replace Math elements with math strings set in dollars. 'x' should be a deep copy so that the underlying document is left untouched. Returns 'x'.""" def _dollarfy(key, value, fmt, meta): # pylint: disable=unused-argument """Replaces Math elements""" if key == 'Math': return Str('$' + value[1] + '$') return None return walk(x, _dollarfy, '', {})
python
def dollarfy(x): """Replaces Math elements in element list 'x' with a $-enclosed string. stringify() passes through TeX math. Use dollarfy(x) first to replace Math elements with math strings set in dollars. 'x' should be a deep copy so that the underlying document is left untouched. Returns 'x'.""" def _dollarfy(key, value, fmt, meta): # pylint: disable=unused-argument """Replaces Math elements""" if key == 'Math': return Str('$' + value[1] + '$') return None return walk(x, _dollarfy, '', {})
[ "def", "dollarfy", "(", "x", ")", ":", "def", "_dollarfy", "(", "key", ",", "value", ",", "fmt", ",", "meta", ")", ":", "# pylint: disable=unused-argument", "\"\"\"Replaces Math elements\"\"\"", "if", "key", "==", "'Math'", ":", "return", "Str", "(", "'$'", ...
Replaces Math elements in element list 'x' with a $-enclosed string. stringify() passes through TeX math. Use dollarfy(x) first to replace Math elements with math strings set in dollars. 'x' should be a deep copy so that the underlying document is left untouched. Returns 'x'.
[ "Replaces", "Math", "elements", "in", "element", "list", "x", "with", "a", "$", "-", "enclosed", "string", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L327-L342
tomduck/pandoc-xnos
pandocxnos/core.py
extract_attrs
def extract_attrs(x, n): """Extracts attributes from element list 'x' beginning at index 'n'. The elements encapsulating the attributes (typically a series of Str and Space elements) are removed from 'x'. Items before index 'n' are left unchanged. Returns the attributes in pandoc format. A ValueError is raised if attributes aren't found. An IndexError is raised if the index 'n' is out of range.""" # Check for the start of the attributes string if not (x[n]['t'] == 'Str' and x[n]['c'].startswith('{')): raise ValueError('Attributes not found.') # It starts with {, so this *may* be an attributes list. Search for where # the attributes end. Do not consider } in quoted elements. seq = [] # A sequence of saved values quotechar = None # Used to keep track of quotes in strings flag = False # Flags that an attributes list was found i = 0 # Initialization for i, v in enumerate(x[n:]): # Scan through the list if v and v['t'] == 'Str': # Scan for } outside of a quote for j, c in enumerate(v['c']): if c == quotechar: # This is an end quote quotechar = None elif c in ['"', "'"]: # This is an open quote quotechar = c elif c == '}' and quotechar is None: # The attributes end here # Split the string at the } and save the pieces head, tail = v['c'][:j+1], v['c'][j+1:] x[n+i] = copy.deepcopy(v) x[n+i]['c'] = tail v['c'] = head flag = True break seq.append(v) if flag: break if flag: # Attributes string was found, so process it # Delete empty and extracted elements if x[n+i]['t'] == 'Str' and not x[n+i]['c']: del x[n+i] del x[n:n+i] # Process the attrs attrstr = stringify(dollarfy(quotify(seq))).strip() attrs = PandocAttributes(attrstr, 'markdown').to_pandoc() # Remove extranneous quotes from kvs for i, (k, v) in enumerate(attrs[2]): # pylint: disable=unused-variable if v[0] == v[-1] == '"' or v[0] == "'" == v[-1] == "'": attrs[2][i][1] = attrs[2][i][1][1:-1] # We're done return attrs # Attributes not found raise ValueError('Attributes not found.')
python
def extract_attrs(x, n): """Extracts attributes from element list 'x' beginning at index 'n'. The elements encapsulating the attributes (typically a series of Str and Space elements) are removed from 'x'. Items before index 'n' are left unchanged. Returns the attributes in pandoc format. A ValueError is raised if attributes aren't found. An IndexError is raised if the index 'n' is out of range.""" # Check for the start of the attributes string if not (x[n]['t'] == 'Str' and x[n]['c'].startswith('{')): raise ValueError('Attributes not found.') # It starts with {, so this *may* be an attributes list. Search for where # the attributes end. Do not consider } in quoted elements. seq = [] # A sequence of saved values quotechar = None # Used to keep track of quotes in strings flag = False # Flags that an attributes list was found i = 0 # Initialization for i, v in enumerate(x[n:]): # Scan through the list if v and v['t'] == 'Str': # Scan for } outside of a quote for j, c in enumerate(v['c']): if c == quotechar: # This is an end quote quotechar = None elif c in ['"', "'"]: # This is an open quote quotechar = c elif c == '}' and quotechar is None: # The attributes end here # Split the string at the } and save the pieces head, tail = v['c'][:j+1], v['c'][j+1:] x[n+i] = copy.deepcopy(v) x[n+i]['c'] = tail v['c'] = head flag = True break seq.append(v) if flag: break if flag: # Attributes string was found, so process it # Delete empty and extracted elements if x[n+i]['t'] == 'Str' and not x[n+i]['c']: del x[n+i] del x[n:n+i] # Process the attrs attrstr = stringify(dollarfy(quotify(seq))).strip() attrs = PandocAttributes(attrstr, 'markdown').to_pandoc() # Remove extranneous quotes from kvs for i, (k, v) in enumerate(attrs[2]): # pylint: disable=unused-variable if v[0] == v[-1] == '"' or v[0] == "'" == v[-1] == "'": attrs[2][i][1] = attrs[2][i][1][1:-1] # We're done return attrs # Attributes not found raise ValueError('Attributes not found.')
[ "def", "extract_attrs", "(", "x", ",", "n", ")", ":", "# Check for the start of the attributes string", "if", "not", "(", "x", "[", "n", "]", "[", "'t'", "]", "==", "'Str'", "and", "x", "[", "n", "]", "[", "'c'", "]", ".", "startswith", "(", "'{'", "...
Extracts attributes from element list 'x' beginning at index 'n'. The elements encapsulating the attributes (typically a series of Str and Space elements) are removed from 'x'. Items before index 'n' are left unchanged. Returns the attributes in pandoc format. A ValueError is raised if attributes aren't found. An IndexError is raised if the index 'n' is out of range.
[ "Extracts", "attributes", "from", "element", "list", "x", "beginning", "at", "index", "n", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L347-L410
tomduck/pandoc-xnos
pandocxnos/core.py
_join_strings
def _join_strings(x): """Joins adjacent Str elements found in the element list 'x'.""" for i in range(len(x)-1): # Process successive pairs of elements if x[i]['t'] == 'Str' and x[i+1]['t'] == 'Str': x[i]['c'] += x[i+1]['c'] del x[i+1] # In-place deletion of element from list return None # Forces processing to repeat return True
python
def _join_strings(x): """Joins adjacent Str elements found in the element list 'x'.""" for i in range(len(x)-1): # Process successive pairs of elements if x[i]['t'] == 'Str' and x[i+1]['t'] == 'Str': x[i]['c'] += x[i+1]['c'] del x[i+1] # In-place deletion of element from list return None # Forces processing to repeat return True
[ "def", "_join_strings", "(", "x", ")", ":", "for", "i", "in", "range", "(", "len", "(", "x", ")", "-", "1", ")", ":", "# Process successive pairs of elements", "if", "x", "[", "i", "]", "[", "'t'", "]", "==", "'Str'", "and", "x", "[", "i", "+", "...
Joins adjacent Str elements found in the element list 'x'.
[ "Joins", "adjacent", "Str", "elements", "found", "in", "the", "element", "list", "x", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L436-L443
tomduck/pandoc-xnos
pandocxnos/core.py
join_strings
def join_strings(key, value, fmt, meta): # pylint: disable=unused-argument """Joins adjacent Str elements in the 'value' list.""" if key in ['Para', 'Plain']: _join_strings(value) elif key == 'Image': _join_strings(value[-2]) elif key == 'Table': _join_strings(value[-5])
python
def join_strings(key, value, fmt, meta): # pylint: disable=unused-argument """Joins adjacent Str elements in the 'value' list.""" if key in ['Para', 'Plain']: _join_strings(value) elif key == 'Image': _join_strings(value[-2]) elif key == 'Table': _join_strings(value[-5])
[ "def", "join_strings", "(", "key", ",", "value", ",", "fmt", ",", "meta", ")", ":", "# pylint: disable=unused-argument", "if", "key", "in", "[", "'Para'", ",", "'Plain'", "]", ":", "_join_strings", "(", "value", ")", "elif", "key", "==", "'Image'", ":", ...
Joins adjacent Str elements in the 'value' list.
[ "Joins", "adjacent", "Str", "elements", "in", "the", "value", "list", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L445-L452
tomduck/pandoc-xnos
pandocxnos/core.py
_is_broken_ref
def _is_broken_ref(key1, value1, key2, value2): """True if this is a broken reference; False otherwise.""" # A link followed by a string may represent a broken reference if key1 != 'Link' or key2 != 'Str': return False # Assemble the parts n = 0 if _PANDOCVERSION < '1.16' else 1 if isinstance(value1[n][0]['c'], list): # Occurs when there is quoted text in an actual link. This is not # a broken link. See Issue #1. return False s = value1[n][0]['c'] + value2 # Return True if this matches the reference regex return True if _REF.match(s) else False
python
def _is_broken_ref(key1, value1, key2, value2): """True if this is a broken reference; False otherwise.""" # A link followed by a string may represent a broken reference if key1 != 'Link' or key2 != 'Str': return False # Assemble the parts n = 0 if _PANDOCVERSION < '1.16' else 1 if isinstance(value1[n][0]['c'], list): # Occurs when there is quoted text in an actual link. This is not # a broken link. See Issue #1. return False s = value1[n][0]['c'] + value2 # Return True if this matches the reference regex return True if _REF.match(s) else False
[ "def", "_is_broken_ref", "(", "key1", ",", "value1", ",", "key2", ",", "value2", ")", ":", "# A link followed by a string may represent a broken reference", "if", "key1", "!=", "'Link'", "or", "key2", "!=", "'Str'", ":", "return", "False", "# Assemble the parts", "n...
True if this is a broken reference; False otherwise.
[ "True", "if", "this", "is", "a", "broken", "reference", ";", "False", "otherwise", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L462-L477
tomduck/pandoc-xnos
pandocxnos/core.py
_repair_refs
def _repair_refs(x): """Performs the repair on the element list 'x'.""" if _PANDOCVERSION is None: raise RuntimeError('Module uninitialized. Please call init().') # Scan the element list x for i in range(len(x)-1): # Check for broken references if _is_broken_ref(x[i]['t'], x[i]['c'] if 'c' in x[i] else [], x[i+1]['t'], x[i+1]['c'] if 'c' in x[i+1] else []): # Get the reference string n = 0 if _PANDOCVERSION < '1.16' else 1 s = x[i]['c'][n][0]['c'] + x[i+1]['c'] # Chop it into pieces. Note that the prefix and suffix may be # parts of other broken references. prefix, label, suffix = _REF.match(s).groups() # Insert the suffix, label and prefix back into x. Do it in this # order so that the indexing works. if suffix: x.insert(i+2, Str(suffix)) x[i+1] = Cite( [{"citationId":label, "citationPrefix":[], "citationSuffix":[], "citationNoteNum":0, "citationMode":{"t":"AuthorInText", "c":[]}, "citationHash":0}], [Str('@' + label)]) if prefix: if i > 0 and x[i-1]['t'] == 'Str': x[i-1]['c'] = x[i-1]['c'] + prefix del x[i] else: x[i] = Str(prefix) else: del x[i] return None # Forces processing to repeat return True
python
def _repair_refs(x): """Performs the repair on the element list 'x'.""" if _PANDOCVERSION is None: raise RuntimeError('Module uninitialized. Please call init().') # Scan the element list x for i in range(len(x)-1): # Check for broken references if _is_broken_ref(x[i]['t'], x[i]['c'] if 'c' in x[i] else [], x[i+1]['t'], x[i+1]['c'] if 'c' in x[i+1] else []): # Get the reference string n = 0 if _PANDOCVERSION < '1.16' else 1 s = x[i]['c'][n][0]['c'] + x[i+1]['c'] # Chop it into pieces. Note that the prefix and suffix may be # parts of other broken references. prefix, label, suffix = _REF.match(s).groups() # Insert the suffix, label and prefix back into x. Do it in this # order so that the indexing works. if suffix: x.insert(i+2, Str(suffix)) x[i+1] = Cite( [{"citationId":label, "citationPrefix":[], "citationSuffix":[], "citationNoteNum":0, "citationMode":{"t":"AuthorInText", "c":[]}, "citationHash":0}], [Str('@' + label)]) if prefix: if i > 0 and x[i-1]['t'] == 'Str': x[i-1]['c'] = x[i-1]['c'] + prefix del x[i] else: x[i] = Str(prefix) else: del x[i] return None # Forces processing to repeat return True
[ "def", "_repair_refs", "(", "x", ")", ":", "if", "_PANDOCVERSION", "is", "None", ":", "raise", "RuntimeError", "(", "'Module uninitialized. Please call init().'", ")", "# Scan the element list x", "for", "i", "in", "range", "(", "len", "(", "x", ")", "-", "1", ...
Performs the repair on the element list 'x'.
[ "Performs", "the", "repair", "on", "the", "element", "list", "x", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L480-L524
tomduck/pandoc-xnos
pandocxnos/core.py
repair_refs
def repair_refs(key, value, fmt, meta): # pylint: disable=unused-argument """Using "-f markdown+autolink_bare_uris" with pandoc < 1.18 splits a reference like "{@fig:one}" into email Link and Str elements. This function replaces the mess with the Cite and Str elements we normally get. Call this before any reference processing.""" if _PANDOCVERSION >= '1.18': return # The problem spans multiple elements, and so can only be identified in # element lists. Element lists are encapsulated in different ways. We # must process them all. if key in ('Para', 'Plain'): _repair_refs(value) elif key == 'Image': _repair_refs(value[-2]) elif key == 'Table': _repair_refs(value[-5])
python
def repair_refs(key, value, fmt, meta): # pylint: disable=unused-argument """Using "-f markdown+autolink_bare_uris" with pandoc < 1.18 splits a reference like "{@fig:one}" into email Link and Str elements. This function replaces the mess with the Cite and Str elements we normally get. Call this before any reference processing.""" if _PANDOCVERSION >= '1.18': return # The problem spans multiple elements, and so can only be identified in # element lists. Element lists are encapsulated in different ways. We # must process them all. if key in ('Para', 'Plain'): _repair_refs(value) elif key == 'Image': _repair_refs(value[-2]) elif key == 'Table': _repair_refs(value[-5])
[ "def", "repair_refs", "(", "key", ",", "value", ",", "fmt", ",", "meta", ")", ":", "# pylint: disable=unused-argument", "if", "_PANDOCVERSION", ">=", "'1.18'", ":", "return", "# The problem spans multiple elements, and so can only be identified in", "# element lists. Element...
Using "-f markdown+autolink_bare_uris" with pandoc < 1.18 splits a reference like "{@fig:one}" into email Link and Str elements. This function replaces the mess with the Cite and Str elements we normally get. Call this before any reference processing.
[ "Using", "-", "f", "markdown", "+", "autolink_bare_uris", "with", "pandoc", "<", "1", ".", "18", "splits", "a", "reference", "like", "{" ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L526-L544
tomduck/pandoc-xnos
pandocxnos/core.py
_extract_modifier
def _extract_modifier(x, i, attrs): """Extracts the */+/! modifier in front of the Cite at index 'i' of the element list 'x'. The modifier is stored in 'attrs'. Returns the updated index 'i'.""" global _cleveref_tex_flag # pylint: disable=global-statement assert x[i]['t'] == 'Cite' assert i > 0 # Check the previous element for a modifier in the last character if x[i-1]['t'] == 'Str': modifier = x[i-1]['c'][-1] if not _cleveref_tex_flag and modifier in ['*', '+']: _cleveref_tex_flag = True if modifier in ['*', '+', '!']: attrs[2].append(['modifier', modifier]) if len(x[i-1]['c']) > 1: # Lop the modifier off of the string x[i-1]['c'] = x[i-1]['c'][:-1] else: # The element contains only the modifier; delete it del x[i-1] i -= 1 return i
python
def _extract_modifier(x, i, attrs): """Extracts the */+/! modifier in front of the Cite at index 'i' of the element list 'x'. The modifier is stored in 'attrs'. Returns the updated index 'i'.""" global _cleveref_tex_flag # pylint: disable=global-statement assert x[i]['t'] == 'Cite' assert i > 0 # Check the previous element for a modifier in the last character if x[i-1]['t'] == 'Str': modifier = x[i-1]['c'][-1] if not _cleveref_tex_flag and modifier in ['*', '+']: _cleveref_tex_flag = True if modifier in ['*', '+', '!']: attrs[2].append(['modifier', modifier]) if len(x[i-1]['c']) > 1: # Lop the modifier off of the string x[i-1]['c'] = x[i-1]['c'][:-1] else: # The element contains only the modifier; delete it del x[i-1] i -= 1 return i
[ "def", "_extract_modifier", "(", "x", ",", "i", ",", "attrs", ")", ":", "global", "_cleveref_tex_flag", "# pylint: disable=global-statement", "assert", "x", "[", "i", "]", "[", "'t'", "]", "==", "'Cite'", "assert", "i", ">", "0", "# Check the previous element fo...
Extracts the */+/! modifier in front of the Cite at index 'i' of the element list 'x'. The modifier is stored in 'attrs'. Returns the updated index 'i'.
[ "Extracts", "the", "*", "/", "+", "/", "!", "modifier", "in", "front", "of", "the", "Cite", "at", "index", "i", "of", "the", "element", "list", "x", ".", "The", "modifier", "is", "stored", "in", "attrs", ".", "Returns", "the", "updated", "index", "i"...
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L554-L577
tomduck/pandoc-xnos
pandocxnos/core.py
_remove_brackets
def _remove_brackets(x, i): """Removes curly brackets surrounding the Cite element at index 'i' in the element list 'x'. It is assumed that the modifier has been extracted. Empty strings are deleted from 'x'.""" assert x[i]['t'] == 'Cite' assert i > 0 and i < len(x) - 1 # Check if the surrounding elements are strings if not x[i-1]['t'] == x[i+1]['t'] == 'Str': return # Trim off curly brackets if x[i-1]['c'].endswith('{') and x[i+1]['c'].startswith('}'): if len(x[i+1]['c']) > 1: x[i+1]['c'] = x[i+1]['c'][1:] else: del x[i+1] if len(x[i-1]['c']) > 1: x[i-1]['c'] = x[i-1]['c'][:-1] else: del x[i-1]
python
def _remove_brackets(x, i): """Removes curly brackets surrounding the Cite element at index 'i' in the element list 'x'. It is assumed that the modifier has been extracted. Empty strings are deleted from 'x'.""" assert x[i]['t'] == 'Cite' assert i > 0 and i < len(x) - 1 # Check if the surrounding elements are strings if not x[i-1]['t'] == x[i+1]['t'] == 'Str': return # Trim off curly brackets if x[i-1]['c'].endswith('{') and x[i+1]['c'].startswith('}'): if len(x[i+1]['c']) > 1: x[i+1]['c'] = x[i+1]['c'][1:] else: del x[i+1] if len(x[i-1]['c']) > 1: x[i-1]['c'] = x[i-1]['c'][:-1] else: del x[i-1]
[ "def", "_remove_brackets", "(", "x", ",", "i", ")", ":", "assert", "x", "[", "i", "]", "[", "'t'", "]", "==", "'Cite'", "assert", "i", ">", "0", "and", "i", "<", "len", "(", "x", ")", "-", "1", "# Check if the surrounding elements are strings", "if", ...
Removes curly brackets surrounding the Cite element at index 'i' in the element list 'x'. It is assumed that the modifier has been extracted. Empty strings are deleted from 'x'.
[ "Removes", "curly", "brackets", "surrounding", "the", "Cite", "element", "at", "index", "i", "in", "the", "element", "list", "x", ".", "It", "is", "assumed", "that", "the", "modifier", "has", "been", "extracted", ".", "Empty", "strings", "are", "deleted", ...
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L579-L601
tomduck/pandoc-xnos
pandocxnos/core.py
_process_refs
def _process_refs(x, labels): """Strips surrounding curly braces and adds modifiers to the attributes of Cite elements. Only references with labels in the 'labels' list are processed. Repeats processing (via decorator) until no more broken references are found.""" # Scan the element list x for Cite elements with known labels for i, v in enumerate(x): if v['t'] == 'Cite' and len(v['c']) == 2 and \ _get_label(v['t'], v['c']) in labels: # A new reference was found; create some empty attributes for it attrs = ['', [], []] # Extract the modifiers. 'attrs' is updated in place. Element # deletion could change the index of the Cite being processed. if i > 0: i = _extract_modifier(x, i, attrs) # Attach the attributes v['c'].insert(0, attrs) # Remove surrounding brackets if i > 0 and i < len(x)-1: _remove_brackets(x, i) # The element list may be changed return None # Forces processing to repeat via _repeat decorator return True
python
def _process_refs(x, labels): """Strips surrounding curly braces and adds modifiers to the attributes of Cite elements. Only references with labels in the 'labels' list are processed. Repeats processing (via decorator) until no more broken references are found.""" # Scan the element list x for Cite elements with known labels for i, v in enumerate(x): if v['t'] == 'Cite' and len(v['c']) == 2 and \ _get_label(v['t'], v['c']) in labels: # A new reference was found; create some empty attributes for it attrs = ['', [], []] # Extract the modifiers. 'attrs' is updated in place. Element # deletion could change the index of the Cite being processed. if i > 0: i = _extract_modifier(x, i, attrs) # Attach the attributes v['c'].insert(0, attrs) # Remove surrounding brackets if i > 0 and i < len(x)-1: _remove_brackets(x, i) # The element list may be changed return None # Forces processing to repeat via _repeat decorator return True
[ "def", "_process_refs", "(", "x", ",", "labels", ")", ":", "# Scan the element list x for Cite elements with known labels", "for", "i", ",", "v", "in", "enumerate", "(", "x", ")", ":", "if", "v", "[", "'t'", "]", "==", "'Cite'", "and", "len", "(", "v", "["...
Strips surrounding curly braces and adds modifiers to the attributes of Cite elements. Only references with labels in the 'labels' list are processed. Repeats processing (via decorator) until no more broken references are found.
[ "Strips", "surrounding", "curly", "braces", "and", "adds", "modifiers", "to", "the", "attributes", "of", "Cite", "elements", ".", "Only", "references", "with", "labels", "in", "the", "labels", "list", "are", "processed", ".", "Repeats", "processing", "(", "via...
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L604-L633
tomduck/pandoc-xnos
pandocxnos/core.py
process_refs_factory
def process_refs_factory(labels): """Returns process_refs(key, value, fmt, meta) action that processes text around a reference. Only references with labels found in the 'labels' list are processed. Consider the markdown "{+@fig:1}", which represents a reference to a figure. "@" denotes a reference, "fig:1" is the reference's label, and "+" is a modifier. Valid modifiers are '+, '*' and '!'. This function strips curly braces and adds the modifiers to the attributes of Cite elements. Cite attributes must be detached before the document is written to STDOUT because pandoc doesn't recognize them. Alternatively, use an action from replace_refs_factory() to replace the references altogether. """ # pylint: disable=unused-argument def process_refs(key, value, fmt, meta): """Instates Ref elements.""" # References may occur in a variety of places; we must process them # all. if key in ['Para', 'Plain']: _process_refs(value, labels) elif key == 'Image': _process_refs(value[-2], labels) elif key == 'Table': _process_refs(value[-5], labels) elif key == 'Span': _process_refs(value[-1], labels) elif key == 'Emph': _process_refs(value, labels) elif key == 'Strong': _process_refs(value, labels) return process_refs
python
def process_refs_factory(labels): """Returns process_refs(key, value, fmt, meta) action that processes text around a reference. Only references with labels found in the 'labels' list are processed. Consider the markdown "{+@fig:1}", which represents a reference to a figure. "@" denotes a reference, "fig:1" is the reference's label, and "+" is a modifier. Valid modifiers are '+, '*' and '!'. This function strips curly braces and adds the modifiers to the attributes of Cite elements. Cite attributes must be detached before the document is written to STDOUT because pandoc doesn't recognize them. Alternatively, use an action from replace_refs_factory() to replace the references altogether. """ # pylint: disable=unused-argument def process_refs(key, value, fmt, meta): """Instates Ref elements.""" # References may occur in a variety of places; we must process them # all. if key in ['Para', 'Plain']: _process_refs(value, labels) elif key == 'Image': _process_refs(value[-2], labels) elif key == 'Table': _process_refs(value[-5], labels) elif key == 'Span': _process_refs(value[-1], labels) elif key == 'Emph': _process_refs(value, labels) elif key == 'Strong': _process_refs(value, labels) return process_refs
[ "def", "process_refs_factory", "(", "labels", ")", ":", "# pylint: disable=unused-argument", "def", "process_refs", "(", "key", ",", "value", ",", "fmt", ",", "meta", ")", ":", "\"\"\"Instates Ref elements.\"\"\"", "# References may occur in a variety of places; we must proce...
Returns process_refs(key, value, fmt, meta) action that processes text around a reference. Only references with labels found in the 'labels' list are processed. Consider the markdown "{+@fig:1}", which represents a reference to a figure. "@" denotes a reference, "fig:1" is the reference's label, and "+" is a modifier. Valid modifiers are '+, '*' and '!'. This function strips curly braces and adds the modifiers to the attributes of Cite elements. Cite attributes must be detached before the document is written to STDOUT because pandoc doesn't recognize them. Alternatively, use an action from replace_refs_factory() to replace the references altogether.
[ "Returns", "process_refs", "(", "key", "value", "fmt", "meta", ")", "action", "that", "processes", "text", "around", "a", "reference", ".", "Only", "references", "with", "labels", "found", "in", "the", "labels", "list", "are", "processed", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L635-L670
tomduck/pandoc-xnos
pandocxnos/core.py
replace_refs_factory
def replace_refs_factory(references, use_cleveref_default, use_eqref, plusname, starname, target): """Returns replace_refs(key, value, fmt, meta) action that replaces references with format-specific content. The content is determined using the 'references' dict, which associates reference labels with numbers or string tags (e.g., { 'fig:1':1, 'fig:2':2, ...}). If 'use_cleveref_default' is True, or if "modifier" in the reference's attributes is "+" or "*", then clever referencing is used; i.e., a name is placed in front of the number or string tag. The 'plusname' and 'starname' lists give the singular and plural names for "+" and "*" clever references, respectively. The 'target' is the LaTeX type for clever referencing (e.g., "figure", "equation", "table", ...).""" global _cleveref_tex_flag # pylint: disable=global-statement # Update global if clever referencing is required by default _cleveref_tex_flag = _cleveref_tex_flag or use_cleveref_default def _insert_cleveref_fakery(key, value, meta): r"""Inserts TeX to support clever referencing in LaTeX documents if the key isn't a RawBlock. If the key is a RawBlock, then check the value to see if the TeX was already inserted. The \providecommand macro is used to fake the cleveref package's behaviour if it is not provided in the template via \usepackage{cleveref}. TeX is inserted into the value. Replacement elements are returned. 
""" global _cleveref_tex_flag # pylint: disable=global-statement comment1 = '% pandoc-xnos: cleveref formatting' tex1 = [comment1, r'\crefformat{%s}{%s~#2#1#3}'%(target, plusname[0]), r'\Crefformat{%s}{%s~#2#1#3}'%(target, starname[0])] if key == 'RawBlock': # Check for existing cleveref TeX if value[1].startswith(comment1): # Append the new portion value[1] = value[1] + '\n' + '\n'.join(tex1[1:]) _cleveref_tex_flag = False # Cleveref fakery already installed elif key != 'RawBlock': # Write the cleveref TeX _cleveref_tex_flag = False # Cancels further attempts ret = [] # Check first to see if fakery is turned off if not 'xnos-cleveref-fake' in meta or \ check_bool(get_meta(meta, 'xnos-cleveref-fake')): # Cleveref fakery tex2 = [ r'% pandoc-xnos: cleveref fakery', r'\newcommand{\plusnamesingular}{}', r'\newcommand{\starnamesingular}{}', r'\newcommand{\xrefname}[1]{'\ r'\protect\renewcommand{\plusnamesingular}{#1}}', r'\newcommand{\Xrefname}[1]{'\ r'\protect\renewcommand{\starnamesingular}{#1}}', r'\providecommand{\cref}{\plusnamesingular~\ref}', r'\providecommand{\Cref}{\starnamesingular~\ref}', r'\providecommand{\crefformat}[2]{}', r'\providecommand{\Crefformat}[2]{}'] ret.append(RawBlock('tex', '\n'.join(tex2))) ret.append(RawBlock('tex', '\n'.join(tex1))) return ret return None def _cite_replacement(key, value, fmt, meta): """Returns context-dependent content to replace a Cite element.""" assert key == 'Cite' attrs, label = value[0], _get_label(key, value) attrs = PandocAttributes(attrs, 'pandoc') assert label in references # Get the replacement value text = str(references[label]) # Choose between \Cref, \cref and \ref use_cleveref = attrs['modifier'] in ['*', '+'] \ if 'modifier' in attrs.kvs else use_cleveref_default plus = attrs['modifier'] == '+' if 'modifier' in attrs.kvs \ else use_cleveref_default name = plusname[0] if plus else starname[0] # Name used by cref # The replacement depends on the output format if fmt == 'latex': if use_cleveref: # Renew 
commands needed for cleveref fakery if not 'xnos-cleveref-fake' in meta or \ check_bool(get_meta(meta, 'xnos-cleveref-fake')): faketex = (r'\xrefname' if plus else r'\Xrefname') + \ '{%s}' % name else: faketex = '' macro = r'\cref' if plus else r'\Cref' ret = RawInline('tex', r'%s%s{%s}'%(faketex, macro, label)) elif use_eqref: ret = RawInline('tex', r'\eqref{%s}'%label) else: ret = RawInline('tex', r'\ref{%s}'%label) else: if use_eqref: text = '(' + text + ')' linktext = [Math({"t":"InlineMath", "c":[]}, text[1:-1]) \ if text.startswith('$') and text.endswith('$') \ else Str(text)] link = elt('Link', 2)(linktext, ['#%s' % label, '']) \ if _PANDOCVERSION < '1.16' else \ Link(['', [], []], linktext, ['#%s' % label, '']) ret = ([Str(name), Space()] if use_cleveref else []) + [link] return ret def replace_refs(key, value, fmt, meta): # pylint: disable=unused-argument """Replaces references with format-specific content.""" if fmt == 'latex' and _cleveref_tex_flag: # Put the cleveref TeX fakery in front of the first block element # that isn't a RawBlock. if not key in ['Plain', 'Para', 'CodeBlock', 'RawBlock', 'BlockQuote', 'OrderedList', 'BulletList', 'DefinitionList', 'Header', 'HorizontalRule', 'Table', 'Div', 'Null']: return None # Reconstruct the block element el = _getel(key, value) # Insert cleveref TeX in front of the block element tex = _insert_cleveref_fakery(key, value, meta) if tex: return tex + [el] elif key == 'Cite' and len(value) == 3: # Replace the reference return _cite_replacement(key, value, fmt, meta) return None return replace_refs
python
def replace_refs_factory(references, use_cleveref_default, use_eqref, plusname, starname, target): """Returns replace_refs(key, value, fmt, meta) action that replaces references with format-specific content. The content is determined using the 'references' dict, which associates reference labels with numbers or string tags (e.g., { 'fig:1':1, 'fig:2':2, ...}). If 'use_cleveref_default' is True, or if "modifier" in the reference's attributes is "+" or "*", then clever referencing is used; i.e., a name is placed in front of the number or string tag. The 'plusname' and 'starname' lists give the singular and plural names for "+" and "*" clever references, respectively. The 'target' is the LaTeX type for clever referencing (e.g., "figure", "equation", "table", ...).""" global _cleveref_tex_flag # pylint: disable=global-statement # Update global if clever referencing is required by default _cleveref_tex_flag = _cleveref_tex_flag or use_cleveref_default def _insert_cleveref_fakery(key, value, meta): r"""Inserts TeX to support clever referencing in LaTeX documents if the key isn't a RawBlock. If the key is a RawBlock, then check the value to see if the TeX was already inserted. The \providecommand macro is used to fake the cleveref package's behaviour if it is not provided in the template via \usepackage{cleveref}. TeX is inserted into the value. Replacement elements are returned. 
""" global _cleveref_tex_flag # pylint: disable=global-statement comment1 = '% pandoc-xnos: cleveref formatting' tex1 = [comment1, r'\crefformat{%s}{%s~#2#1#3}'%(target, plusname[0]), r'\Crefformat{%s}{%s~#2#1#3}'%(target, starname[0])] if key == 'RawBlock': # Check for existing cleveref TeX if value[1].startswith(comment1): # Append the new portion value[1] = value[1] + '\n' + '\n'.join(tex1[1:]) _cleveref_tex_flag = False # Cleveref fakery already installed elif key != 'RawBlock': # Write the cleveref TeX _cleveref_tex_flag = False # Cancels further attempts ret = [] # Check first to see if fakery is turned off if not 'xnos-cleveref-fake' in meta or \ check_bool(get_meta(meta, 'xnos-cleveref-fake')): # Cleveref fakery tex2 = [ r'% pandoc-xnos: cleveref fakery', r'\newcommand{\plusnamesingular}{}', r'\newcommand{\starnamesingular}{}', r'\newcommand{\xrefname}[1]{'\ r'\protect\renewcommand{\plusnamesingular}{#1}}', r'\newcommand{\Xrefname}[1]{'\ r'\protect\renewcommand{\starnamesingular}{#1}}', r'\providecommand{\cref}{\plusnamesingular~\ref}', r'\providecommand{\Cref}{\starnamesingular~\ref}', r'\providecommand{\crefformat}[2]{}', r'\providecommand{\Crefformat}[2]{}'] ret.append(RawBlock('tex', '\n'.join(tex2))) ret.append(RawBlock('tex', '\n'.join(tex1))) return ret return None def _cite_replacement(key, value, fmt, meta): """Returns context-dependent content to replace a Cite element.""" assert key == 'Cite' attrs, label = value[0], _get_label(key, value) attrs = PandocAttributes(attrs, 'pandoc') assert label in references # Get the replacement value text = str(references[label]) # Choose between \Cref, \cref and \ref use_cleveref = attrs['modifier'] in ['*', '+'] \ if 'modifier' in attrs.kvs else use_cleveref_default plus = attrs['modifier'] == '+' if 'modifier' in attrs.kvs \ else use_cleveref_default name = plusname[0] if plus else starname[0] # Name used by cref # The replacement depends on the output format if fmt == 'latex': if use_cleveref: # Renew 
commands needed for cleveref fakery if not 'xnos-cleveref-fake' in meta or \ check_bool(get_meta(meta, 'xnos-cleveref-fake')): faketex = (r'\xrefname' if plus else r'\Xrefname') + \ '{%s}' % name else: faketex = '' macro = r'\cref' if plus else r'\Cref' ret = RawInline('tex', r'%s%s{%s}'%(faketex, macro, label)) elif use_eqref: ret = RawInline('tex', r'\eqref{%s}'%label) else: ret = RawInline('tex', r'\ref{%s}'%label) else: if use_eqref: text = '(' + text + ')' linktext = [Math({"t":"InlineMath", "c":[]}, text[1:-1]) \ if text.startswith('$') and text.endswith('$') \ else Str(text)] link = elt('Link', 2)(linktext, ['#%s' % label, '']) \ if _PANDOCVERSION < '1.16' else \ Link(['', [], []], linktext, ['#%s' % label, '']) ret = ([Str(name), Space()] if use_cleveref else []) + [link] return ret def replace_refs(key, value, fmt, meta): # pylint: disable=unused-argument """Replaces references with format-specific content.""" if fmt == 'latex' and _cleveref_tex_flag: # Put the cleveref TeX fakery in front of the first block element # that isn't a RawBlock. if not key in ['Plain', 'Para', 'CodeBlock', 'RawBlock', 'BlockQuote', 'OrderedList', 'BulletList', 'DefinitionList', 'Header', 'HorizontalRule', 'Table', 'Div', 'Null']: return None # Reconstruct the block element el = _getel(key, value) # Insert cleveref TeX in front of the block element tex = _insert_cleveref_fakery(key, value, meta) if tex: return tex + [el] elif key == 'Cite' and len(value) == 3: # Replace the reference return _cite_replacement(key, value, fmt, meta) return None return replace_refs
[ "def", "replace_refs_factory", "(", "references", ",", "use_cleveref_default", ",", "use_eqref", ",", "plusname", ",", "starname", ",", "target", ")", ":", "global", "_cleveref_tex_flag", "# pylint: disable=global-statement", "# Update global if clever referencing is required b...
Returns replace_refs(key, value, fmt, meta) action that replaces references with format-specific content. The content is determined using the 'references' dict, which associates reference labels with numbers or string tags (e.g., { 'fig:1':1, 'fig:2':2, ...}). If 'use_cleveref_default' is True, or if "modifier" in the reference's attributes is "+" or "*", then clever referencing is used; i.e., a name is placed in front of the number or string tag. The 'plusname' and 'starname' lists give the singular and plural names for "+" and "*" clever references, respectively. The 'target' is the LaTeX type for clever referencing (e.g., "figure", "equation", "table", ...).
[ "Returns", "replace_refs", "(", "key", "value", "fmt", "meta", ")", "action", "that", "replaces", "references", "with", "format", "-", "specific", "content", ".", "The", "content", "is", "determined", "using", "the", "references", "dict", "which", "associates", ...
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L676-L823
tomduck/pandoc-xnos
pandocxnos/core.py
attach_attrs_factory
def attach_attrs_factory(f, extract_attrs=extract_attrs, allow_space=False): """Returns attach_attrs(key, value, fmt, meta) action that reads and attaches attributes to unattributed elements generated by the pandocfilters function f (e.g. pandocfilters.Math, etc). The extract_attrs() function should read the attributes and raise a ValueError or IndexError if attributes are not found. """ # Get the name name = f.__closure__[0].cell_contents def _attach_attrs(x): """Extracts and attaches the attributes.""" for i, v in enumerate(x): if v and v['t'] == name: # Find where the attributes start n = i+1 if allow_space and n < len(x) and x[n]['t'] == 'Space': n += 1 try: # Extract the attributes attrs = extract_attrs(x, n) x[i]['c'].insert(0, attrs) except (ValueError, IndexError): pass def attach_attrs(key, value, fmt, meta): # pylint: disable=unused-argument """Attaches attributes to an element.""" if key in ['Para', 'Plain']: _attach_attrs(value) # Image: Add pandoc's figure marker if warranted if len(value) == 1 and value[0]['t'] == 'Image': value[0]['c'][-1][1] = 'fig:' return attach_attrs
python
def attach_attrs_factory(f, extract_attrs=extract_attrs, allow_space=False): """Returns attach_attrs(key, value, fmt, meta) action that reads and attaches attributes to unattributed elements generated by the pandocfilters function f (e.g. pandocfilters.Math, etc). The extract_attrs() function should read the attributes and raise a ValueError or IndexError if attributes are not found. """ # Get the name name = f.__closure__[0].cell_contents def _attach_attrs(x): """Extracts and attaches the attributes.""" for i, v in enumerate(x): if v and v['t'] == name: # Find where the attributes start n = i+1 if allow_space and n < len(x) and x[n]['t'] == 'Space': n += 1 try: # Extract the attributes attrs = extract_attrs(x, n) x[i]['c'].insert(0, attrs) except (ValueError, IndexError): pass def attach_attrs(key, value, fmt, meta): # pylint: disable=unused-argument """Attaches attributes to an element.""" if key in ['Para', 'Plain']: _attach_attrs(value) # Image: Add pandoc's figure marker if warranted if len(value) == 1 and value[0]['t'] == 'Image': value[0]['c'][-1][1] = 'fig:' return attach_attrs
[ "def", "attach_attrs_factory", "(", "f", ",", "extract_attrs", "=", "extract_attrs", ",", "allow_space", "=", "False", ")", ":", "# Get the name", "name", "=", "f", ".", "__closure__", "[", "0", "]", ".", "cell_contents", "def", "_attach_attrs", "(", "x", ")...
Returns attach_attrs(key, value, fmt, meta) action that reads and attaches attributes to unattributed elements generated by the pandocfilters function f (e.g. pandocfilters.Math, etc). The extract_attrs() function should read the attributes and raise a ValueError or IndexError if attributes are not found.
[ "Returns", "attach_attrs", "(", "key", "value", "fmt", "meta", ")", "action", "that", "reads", "and", "attaches", "attributes", "to", "unattributed", "elements", "generated", "by", "the", "pandocfilters", "function", "f", "(", "e", ".", "g", ".", "pandocfilter...
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L829-L863
tomduck/pandoc-xnos
pandocxnos/core.py
detach_attrs_factory
def detach_attrs_factory(f): """Returns detach_attrs(key, value, fmt, meta) action that detaches attributes attached to elements of type f (e.g. pandocfilters.Math, etc). Attributes provided natively by pandoc will be left as is.""" # Get the name and standard length name = f.__closure__[0].cell_contents n = f.__closure__[1].cell_contents def detach_attrs(key, value, fmt, meta): # pylint: disable=unused-argument """Detaches the attributes.""" if key == name: assert len(value) <= n+1 if len(value) == n+1: # Make sure value[0] represents attributes then delete assert len(value[0]) == 3 assert isinstance(value[0][0], STRTYPES) assert isinstance(value[0][1], list) assert isinstance(value[0][2], list) del value[0] return detach_attrs
python
def detach_attrs_factory(f): """Returns detach_attrs(key, value, fmt, meta) action that detaches attributes attached to elements of type f (e.g. pandocfilters.Math, etc). Attributes provided natively by pandoc will be left as is.""" # Get the name and standard length name = f.__closure__[0].cell_contents n = f.__closure__[1].cell_contents def detach_attrs(key, value, fmt, meta): # pylint: disable=unused-argument """Detaches the attributes.""" if key == name: assert len(value) <= n+1 if len(value) == n+1: # Make sure value[0] represents attributes then delete assert len(value[0]) == 3 assert isinstance(value[0][0], STRTYPES) assert isinstance(value[0][1], list) assert isinstance(value[0][2], list) del value[0] return detach_attrs
[ "def", "detach_attrs_factory", "(", "f", ")", ":", "# Get the name and standard length", "name", "=", "f", ".", "__closure__", "[", "0", "]", ".", "cell_contents", "n", "=", "f", ".", "__closure__", "[", "1", "]", ".", "cell_contents", "def", "detach_attrs", ...
Returns detach_attrs(key, value, fmt, meta) action that detaches attributes attached to elements of type f (e.g. pandocfilters.Math, etc). Attributes provided natively by pandoc will be left as is.
[ "Returns", "detach_attrs", "(", "key", "value", "fmt", "meta", ")", "action", "that", "detaches", "attributes", "attached", "to", "elements", "of", "type", "f", "(", "e", ".", "g", ".", "pandocfilters", ".", "Math", "etc", ")", ".", "Attributes", "provided...
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L868-L889
tomduck/pandoc-xnos
pandocxnos/core.py
insert_secnos_factory
def insert_secnos_factory(f): """Returns insert_secnos(key, value, fmt, meta) action that inserts section numbers into the attributes of elements of type f. """ # Get the name and standard length name = f.__closure__[0].cell_contents n = f.__closure__[1].cell_contents def insert_secnos(key, value, fmt, meta): # pylint: disable=unused-argument """Inserts section numbers into elements attributes.""" global sec # pylint: disable=global-statement if 'xnos-number-sections' in meta and \ check_bool(get_meta(meta, 'xnos-number-sections')) and \ fmt in ['html', 'html5']: if key == 'Header': if 'unnumbered' in value[1][1]: return level = value[0] m = level - len(sec) if m > 0: sec.extend([0]*m) sec[level-1] += 1 sec = sec[:MAXLEVEL] if key == name: # Only insert if attributes are attached. Images always have # attributes. assert len(value) <= n+1 if name == 'Image' or len(value) == n+1: # Make sure value[0] represents attributes assert len(value[0]) == 3 assert isinstance(value[0][0], STRTYPES) assert isinstance(value[0][1], list) assert isinstance(value[0][2], list) # Insert the section number into the attributes s = '.'.join([str(m) for m in sec]) value[0][2].insert(0, ['secno', s]) return insert_secnos
python
def insert_secnos_factory(f): """Returns insert_secnos(key, value, fmt, meta) action that inserts section numbers into the attributes of elements of type f. """ # Get the name and standard length name = f.__closure__[0].cell_contents n = f.__closure__[1].cell_contents def insert_secnos(key, value, fmt, meta): # pylint: disable=unused-argument """Inserts section numbers into elements attributes.""" global sec # pylint: disable=global-statement if 'xnos-number-sections' in meta and \ check_bool(get_meta(meta, 'xnos-number-sections')) and \ fmt in ['html', 'html5']: if key == 'Header': if 'unnumbered' in value[1][1]: return level = value[0] m = level - len(sec) if m > 0: sec.extend([0]*m) sec[level-1] += 1 sec = sec[:MAXLEVEL] if key == name: # Only insert if attributes are attached. Images always have # attributes. assert len(value) <= n+1 if name == 'Image' or len(value) == n+1: # Make sure value[0] represents attributes assert len(value[0]) == 3 assert isinstance(value[0][0], STRTYPES) assert isinstance(value[0][1], list) assert isinstance(value[0][2], list) # Insert the section number into the attributes s = '.'.join([str(m) for m in sec]) value[0][2].insert(0, ['secno', s]) return insert_secnos
[ "def", "insert_secnos_factory", "(", "f", ")", ":", "# Get the name and standard length", "name", "=", "f", ".", "__closure__", "[", "0", "]", ".", "cell_contents", "n", "=", "f", ".", "__closure__", "[", "1", "]", ".", "cell_contents", "def", "insert_secnos",...
Returns insert_secnos(key, value, fmt, meta) action that inserts section numbers into the attributes of elements of type f.
[ "Returns", "insert_secnos", "(", "key", "value", "fmt", "meta", ")", "action", "that", "inserts", "section", "numbers", "into", "the", "attributes", "of", "elements", "of", "type", "f", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L895-L938
tomduck/pandoc-xnos
pandocxnos/core.py
delete_secnos_factory
def delete_secnos_factory(f): """Returns delete_secnos(key, value, fmt, meta) action that deletes section numbers from the attributes of elements of type f. """ # Get the name and standard length name = f.__closure__[0].cell_contents n = f.__closure__[1].cell_contents def delete_secnos(key, value, fmt, meta): # pylint: disable=unused-argument """Deletes section numbers from elements attributes.""" if 'xnos-number-sections' in meta and \ check_bool(get_meta(meta, 'xnos-number-sections')) and \ fmt in ['html', 'html5']: # Only delete if attributes are attached. Images always have # attributes. if key == name: assert len(value) <= n+1 if name == 'Image' or len(value) == n+1: # Make sure value[0] represents attributes assert len(value[0]) == 3 assert isinstance(value[0][0], STRTYPES) assert isinstance(value[0][1], list) assert isinstance(value[0][2], list) # Remove the secno attribute if value[0][2] and value[0][2][0][0] == 'secno': del value[0][2][0] return delete_secnos
python
def delete_secnos_factory(f): """Returns delete_secnos(key, value, fmt, meta) action that deletes section numbers from the attributes of elements of type f. """ # Get the name and standard length name = f.__closure__[0].cell_contents n = f.__closure__[1].cell_contents def delete_secnos(key, value, fmt, meta): # pylint: disable=unused-argument """Deletes section numbers from elements attributes.""" if 'xnos-number-sections' in meta and \ check_bool(get_meta(meta, 'xnos-number-sections')) and \ fmt in ['html', 'html5']: # Only delete if attributes are attached. Images always have # attributes. if key == name: assert len(value) <= n+1 if name == 'Image' or len(value) == n+1: # Make sure value[0] represents attributes assert len(value[0]) == 3 assert isinstance(value[0][0], STRTYPES) assert isinstance(value[0][1], list) assert isinstance(value[0][2], list) # Remove the secno attribute if value[0][2] and value[0][2][0][0] == 'secno': del value[0][2][0] return delete_secnos
[ "def", "delete_secnos_factory", "(", "f", ")", ":", "# Get the name and standard length", "name", "=", "f", ".", "__closure__", "[", "0", "]", ".", "cell_contents", "n", "=", "f", ".", "__closure__", "[", "1", "]", ".", "cell_contents", "def", "delete_secnos",...
Returns delete_secnos(key, value, fmt, meta) action that deletes section numbers from the attributes of elements of type f.
[ "Returns", "delete_secnos", "(", "key", "value", "fmt", "meta", ")", "action", "that", "deletes", "section", "numbers", "from", "the", "attributes", "of", "elements", "of", "type", "f", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L944-L975
tomduck/pandoc-xnos
pandocxnos/core.py
insert_rawblocks_factory
def insert_rawblocks_factory(rawblocks): r"""Returns insert_rawblocks(key, value, fmt, meta) action that inserts non-duplicate RawBlock elements. """ # pylint: disable=unused-argument def insert_rawblocks(key, value, fmt, meta): """Inserts non-duplicate RawBlock elements.""" if not rawblocks: return None # Put the RawBlock elements in front of the first block element that # isn't also a RawBlock. if not key in ['Plain', 'Para', 'CodeBlock', 'RawBlock', 'BlockQuote', 'OrderedList', 'BulletList', 'DefinitionList', 'Header', 'HorizontalRule', 'Table', 'Div', 'Null']: return None if key == 'RawBlock': # Remove duplicates rawblock = RawBlock(*value) if rawblock in rawblocks: rawblocks.remove(rawblock) return None if rawblocks: # Insert blocks el = _getel(key, value) return [rawblocks.pop(0) for i in range(len(rawblocks))] + [el] return None return insert_rawblocks
python
def insert_rawblocks_factory(rawblocks): r"""Returns insert_rawblocks(key, value, fmt, meta) action that inserts non-duplicate RawBlock elements. """ # pylint: disable=unused-argument def insert_rawblocks(key, value, fmt, meta): """Inserts non-duplicate RawBlock elements.""" if not rawblocks: return None # Put the RawBlock elements in front of the first block element that # isn't also a RawBlock. if not key in ['Plain', 'Para', 'CodeBlock', 'RawBlock', 'BlockQuote', 'OrderedList', 'BulletList', 'DefinitionList', 'Header', 'HorizontalRule', 'Table', 'Div', 'Null']: return None if key == 'RawBlock': # Remove duplicates rawblock = RawBlock(*value) if rawblock in rawblocks: rawblocks.remove(rawblock) return None if rawblocks: # Insert blocks el = _getel(key, value) return [rawblocks.pop(0) for i in range(len(rawblocks))] + [el] return None return insert_rawblocks
[ "def", "insert_rawblocks_factory", "(", "rawblocks", ")", ":", "# pylint: disable=unused-argument", "def", "insert_rawblocks", "(", "key", ",", "value", ",", "fmt", ",", "meta", ")", ":", "\"\"\"Inserts non-duplicate RawBlock elements.\"\"\"", "if", "not", "rawblocks", ...
r"""Returns insert_rawblocks(key, value, fmt, meta) action that inserts non-duplicate RawBlock elements.
[ "r", "Returns", "insert_rawblocks", "(", "key", "value", "fmt", "meta", ")", "action", "that", "inserts", "non", "-", "duplicate", "RawBlock", "elements", "." ]
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L980-L1013
celiao/rtsimple
rtsimple/movies.py
Movies.search
def search(self, **kwargs):
    """Get movies that match the search query string from the API.

    Args:
        q (optional): plain text search query; remember to URI encode
        page_limit (optional): number of search results to show per page,
                               default=30
        page (optional): results page number, default=1

    Returns:
        A dict representation of the JSON returned from the API.
    """
    # Fetch, mirror the payload onto instance attributes, then return it.
    response = self._GET(self._get_path('search'), kwargs)
    self._set_attrs_to_values(response)
    return response
python
def search(self, **kwargs): """Get movies that match the search query string from the API. Args: q (optional): plain text search query; remember to URI encode page_limit (optional): number of search results to show per page, default=30 page (optional): results page number, default=1 Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('search') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "search", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'search'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "response", ...
Get movies that match the search query string from the API. Args: q (optional): plain text search query; remember to URI encode page_limit (optional): number of search results to show per page, default=30 page (optional): results page number, default=1 Returns: A dict representation of the JSON returned from the API.
[ "Get", "movies", "that", "match", "the", "search", "query", "string", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/movies.py#L30-L46
celiao/rtsimple
rtsimple/movies.py
Movies.cast
def cast(self, **kwargs):
    """Get the cast for a movie specified by id from the API.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    # Fetch, mirror the payload onto instance attributes, then return it.
    response = self._GET(self._get_id_path('cast'), kwargs)
    self._set_attrs_to_values(response)
    return response
python
def cast(self, **kwargs): """Get the cast for a movie specified by id from the API. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_id_path('cast') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "cast", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_id_path", "(", "'cast'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "response", ...
Get the cast for a movie specified by id from the API. Returns: A dict representation of the JSON returned from the API.
[ "Get", "the", "cast", "for", "a", "movie", "specified", "by", "id", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/movies.py#L60-L70
celiao/rtsimple
rtsimple/movies.py
Movies.clips
def clips(self, **kwargs):
    """Get related clips and trailers for a movie specified by id from
    the API.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    # Fetch, mirror the payload onto instance attributes, then return it.
    response = self._GET(self._get_id_path('clips'), kwargs)
    self._set_attrs_to_values(response)
    return response
python
def clips(self, **kwargs): """Get related clips and trailers for a movie specified by id from the API. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_id_path('clips') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "clips", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_id_path", "(", "'clips'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "response", ...
Get related clips and trailers for a movie specified by id from the API. Returns: A dict representation of the JSON returned from the API.
[ "Get", "related", "clips", "and", "trailers", "for", "a", "movie", "specified", "by", "id", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/movies.py#L72-L83
kunitoki/django-custard
custard/templatetags/custard_tags.py
debug
def debug(value):
    """
    Simple tag to debug output a variable;

    Usage: {% debug request %}
    """
    # Dump type+value, the attribute listing, then a blank separator.
    for chunk in ("%s %s: " % (type(value), value), dir(value), '\n\n'):
        print(chunk)
    return ''
python
def debug(value): """ Simple tag to debug output a variable; Usage: {% debug request %} """ print("%s %s: " % (type(value), value)) print(dir(value)) print('\n\n') return ''
[ "def", "debug", "(", "value", ")", ":", "print", "(", "\"%s %s: \"", "%", "(", "type", "(", "value", ")", ",", "value", ")", ")", "print", "(", "dir", "(", "value", ")", ")", "print", "(", "'\\n\\n'", ")", "return", "''" ]
Simple tag to debug output a variable; Usage: {% debug request %}
[ "Simple", "tag", "to", "debug", "output", "a", "variable", ";" ]
train
https://github.com/kunitoki/django-custard/blob/3cf3aa5acf84de2f653e96469e2f9c42813df50a/custard/templatetags/custard_tags.py#L14-L24
michal-stuglik/django-blastplus
blastplus/utils.py
get_sample_data
def get_sample_data(sample_file):
    """Read and returns sample data to fill form with default sample sequence.

    :param sample_file: path to the file holding the default sample sequence
    :return: the file's full contents as a string

    Removed the dead ``sequence_sample_in_fasta = None`` pre-initialization
    and return directly from the ``with`` block (the context manager still
    closes the handle).
    NOTE: reads with the platform default text encoding, as before.
    """
    with open(sample_file) as handle:
        return handle.read()
python
def get_sample_data(sample_file): """Read and returns sample data to fill form with default sample sequence. """ sequence_sample_in_fasta = None with open(sample_file) as handle: sequence_sample_in_fasta = handle.read() return sequence_sample_in_fasta
[ "def", "get_sample_data", "(", "sample_file", ")", ":", "sequence_sample_in_fasta", "=", "None", "with", "open", "(", "sample_file", ")", "as", "handle", ":", "sequence_sample_in_fasta", "=", "handle", ".", "read", "(", ")", "return", "sequence_sample_in_fasta" ]
Read and returns sample data to fill form with default sample sequence.
[ "Read", "and", "returns", "sample", "data", "to", "fill", "form", "with", "default", "sample", "sequence", "." ]
train
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/utils.py#L11-L18
michal-stuglik/django-blastplus
blastplus/utils.py
blast_records_to_object
def blast_records_to_object(blast_records):
    """Transforms biopython's blast record into blast object defined in
    django-blastplus app.
    """
    # Accumulates one BlastRecord wrapper per parsed biopython record.
    blast_objects = []

    for record in blast_records:
        record_object = BlastRecord(**{
            'query': record.query,
            'version': record.version,
            'expect': record.expect,
            'application': record.application,
            'reference': record.reference,
        })

        for alignment in record.alignments:
            alignment_object = Alignment(**{
                'hit_def': alignment.hit_def,
                'title': alignment.title,
                'length': alignment.length,
            })

            # Copy every HSP field verbatim; 'str' keeps a printable form.
            for hsp in alignment.hsps:
                alignment_object.hsp_list.append(Hsp(**{
                    'align_length': hsp.align_length,
                    'bits': hsp.bits,
                    'expect': hsp.expect,
                    'frame': hsp.frame,
                    'gaps': hsp.gaps,
                    'identities': hsp.identities,
                    'match': hsp.match,
                    'num_alignments': hsp.num_alignments,
                    'positives': hsp.positives,
                    'query': hsp.query,
                    'query_end': hsp.query_end,
                    'query_start': hsp.query_start,
                    'sbjct': hsp.sbjct,
                    'sbjct_end': hsp.sbjct_end,
                    'sbjct_start': hsp.sbjct_start,
                    'score': hsp.score,
                    'strand': hsp.strand,
                    'str': str(hsp),
                }))

            record_object.alignments.append(alignment_object)

        blast_objects.append(record_object)

    return blast_objects
python
def blast_records_to_object(blast_records): """Transforms biopython's blast record into blast object defined in django-blastplus app. """ # container for transformed objects blast_objects_list = [] for blast_record in blast_records: br = BlastRecord(**{'query': blast_record.query, 'version': blast_record.version, 'expect': blast_record.expect, 'application': blast_record.application, 'reference': blast_record.reference}) for alignment in blast_record.alignments: al = Alignment(**{ 'hit_def': alignment.hit_def, 'title': alignment.title, 'length': alignment.length, }) for hsp in alignment.hsps: h = Hsp(**{ 'align_length': hsp.align_length, 'bits': hsp.bits, 'expect': hsp.expect, 'frame': hsp.frame, 'gaps': hsp.gaps, 'identities': hsp.identities, 'match': hsp.match, 'num_alignments': hsp.num_alignments, 'positives': hsp.positives, 'query': hsp.query, 'query_end': hsp.query_end, 'query_start': hsp.query_start, 'sbjct': hsp.sbjct, 'sbjct_end': hsp.sbjct_end, 'sbjct_start': hsp.sbjct_start, 'score': hsp.score, 'strand': hsp.strand, 'str': str(hsp), }) al.hsp_list.append(h) br.alignments.append(al) blast_objects_list.append(br) return blast_objects_list
[ "def", "blast_records_to_object", "(", "blast_records", ")", ":", "# container for transformed objects", "blast_objects_list", "=", "[", "]", "for", "blast_record", "in", "blast_records", ":", "br", "=", "BlastRecord", "(", "*", "*", "{", "'query'", ":", "blast_reco...
Transforms biopython's blast record into blast object defined in django-blastplus app.
[ "Transforms", "biopython", "s", "blast", "record", "into", "blast", "object", "defined", "in", "django", "-", "blastplus", "app", "." ]
train
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/utils.py#L21-L68
michal-stuglik/django-blastplus
blastplus/utils.py
run_blast_commands
def run_blast_commands(ncbicommandline_method, **keywords):
    """Runs blastplus/tblastn search, collects result and pass as a xml
    temporary file.
    """
    # Redirect the blast output into a named temporary file on disk.
    output_tmp = tempfile.NamedTemporaryFile(mode="w+", delete=False)
    keywords['out'] = output_tmp.name

    # The caller hands in a temp-file object; the command line needs its path.
    query_tmp = keywords['query']
    keywords['query'] = query_tmp.name

    stderr = ''
    error_string = ''
    try:
        # Build and execute the blast+ command line.
        command = ncbicommandline_method(**keywords)
        stdout, stderr = command()
    except ApplicationError as e:
        error_string = "Runtime error: " + stderr + "\n" + e.cmd

    # The query temp file is no longer needed after the run.
    os.unlink(query_tmp.name)
    # os.remove(query_tmp.name)

    return output_tmp, error_string
python
def run_blast_commands(ncbicommandline_method, **keywords): """Runs blastplus/tblastn search, collects result and pass as a xml temporary file. """ # temporary files for output blast_out_tmp = tempfile.NamedTemporaryFile(mode="w+",delete=False) keywords['out'] = blast_out_tmp.name # unpack query temp file object query_file_object_tmp = keywords['query'] keywords['query'] = query_file_object_tmp.name stderr = '' error_string = '' try: # formating blastplus command blastplusx_cline = ncbicommandline_method(**keywords) stdout, stderr = blastplusx_cline() except ApplicationError as e: error_string = "Runtime error: " + stderr + "\n" + e.cmd # remove query temp file os.unlink(query_file_object_tmp.name) # os.remove(query_file_object_tmp.name) return blast_out_tmp, error_string
[ "def", "run_blast_commands", "(", "ncbicommandline_method", ",", "*", "*", "keywords", ")", ":", "# temporary files for output", "blast_out_tmp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "\"w+\"", ",", "delete", "=", "False", ")", "keywords", ...
Runs blastplus/tblastn search, collects result and pass as a xml temporary file.
[ "Runs", "blastplus", "/", "tblastn", "search", "collects", "result", "and", "pass", "as", "a", "xml", "temporary", "file", "." ]
train
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/utils.py#L71-L96
michal-stuglik/django-blastplus
blastplus/utils.py
get_annotation
def get_annotation(db_path, db_list):
    """Checks if database is set as annotated.

    Returns the 'annotated' flag of the first entry in db_list whose
    'path' equals db_path, or False when no entry matches.
    """
    return next((db["annotated"] for db in db_list if db["path"] == db_path),
                False)
python
def get_annotation(db_path, db_list): """ Checks if database is set as annotated. """ annotated = False for db in db_list: if db["path"] == db_path: annotated = db["annotated"] break return annotated
[ "def", "get_annotation", "(", "db_path", ",", "db_list", ")", ":", "annotated", "=", "False", "for", "db", "in", "db_list", ":", "if", "db", "[", "\"path\"", "]", "==", "db_path", ":", "annotated", "=", "db", "[", "\"annotated\"", "]", "break", "return",...
Checks if database is set as annotated.
[ "Checks", "if", "database", "is", "set", "as", "annotated", "." ]
train
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/utils.py#L99-L108
compbrain/dexcom_reader
dexcom_reader/util.py
find_usbserial
def find_usbserial(vendor, product):
    """Find the tty device for a given usbserial devices identifiers.

    Args:
      vendor: (int) something like 0x0000
      product: (int) something like 0x0000

    Returns:
      String, like /dev/ttyACM0 or /dev/tty.usb...
    """
    system = platform.system()
    if system == 'Linux':
        # Linux lookup keys off zero-padded 4-digit hex id strings.
        hex_ids = [('%04x' % (ident)).strip() for ident in (vendor, product)]
        return linux_find_usbserial(hex_ids[0], hex_ids[1])
    if system == 'Darwin':
        return osx_find_usbserial(vendor, product)
    raise NotImplementedError('Cannot find serial ports on %s' % system)
python
def find_usbserial(vendor, product): """Find the tty device for a given usbserial devices identifiers. Args: vendor: (int) something like 0x0000 product: (int) something like 0x0000 Returns: String, like /dev/ttyACM0 or /dev/tty.usb... """ if platform.system() == 'Linux': vendor, product = [('%04x' % (x)).strip() for x in (vendor, product)] return linux_find_usbserial(vendor, product) elif platform.system() == 'Darwin': return osx_find_usbserial(vendor, product) else: raise NotImplementedError('Cannot find serial ports on %s' % platform.system())
[ "def", "find_usbserial", "(", "vendor", ",", "product", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "'Linux'", ":", "vendor", ",", "product", "=", "[", "(", "'%04x'", "%", "(", "x", ")", ")", ".", "strip", "(", ")", "for", "x", "in...
Find the tty device for a given usbserial devices identifiers. Args: vendor: (int) something like 0x0000 product: (int) something like 0x0000 Returns: String, like /dev/ttyACM0 or /dev/tty.usb...
[ "Find", "the", "tty", "device", "for", "a", "given", "usbserial", "devices", "identifiers", "." ]
train
https://github.com/compbrain/dexcom_reader/blob/70f639bd755efe5271f033919e13ff276e406b19/dexcom_reader/util.py#L65-L82
kunitoki/django-custard
custard/builder.py
CustomFieldsBuilder.create_fields
def create_fields(self, base_model=models.Model, base_manager=models.Manager):
    """
    This method will create a model which will hold field types defined
    at runtime for each ContentType.

    :param base_model: base model class to inherit from
    :param base_manager: base manager class used for the model's default
        ``objects`` manager
    :return: the CustomContentTypeField model class (abstract, per its Meta)
    """
    # Restrict the content_type FK choices to the builder's configured query.
    CONTENT_TYPES = self.content_types_query

    class CustomContentTypeFieldManager(base_manager):
        pass

    @python_2_unicode_compatible
    class CustomContentTypeField(base_model):
        # Data types a runtime-defined custom field may take.
        DATATYPE_CHOICES = (
            (CUSTOM_TYPE_TEXT, _('text')),
            (CUSTOM_TYPE_INTEGER, _('integer')),
            (CUSTOM_TYPE_FLOAT, _('float')),
            (CUSTOM_TYPE_TIME, _('time')),
            (CUSTOM_TYPE_DATE, _('date')),
            (CUSTOM_TYPE_DATETIME, _('datetime')),
            (CUSTOM_TYPE_BOOLEAN, _('boolean')),
        )

        # The model this custom field is attached to.
        content_type = models.ForeignKey(ContentType,
                                         verbose_name=_('content type'),
                                         related_name='+',
                                         limit_choices_to=CONTENT_TYPES)
        name = models.CharField(_('name'), max_length=100, db_index=True)
        label = models.CharField(_('label'), max_length=100)
        data_type = models.CharField(_('data type'), max_length=8,
                                     choices=DATATYPE_CHOICES, db_index=True)
        help_text = models.CharField(_('help text'), max_length=200,
                                     blank=True, null=True)
        required = models.BooleanField(_('required'), default=False)
        searchable = models.BooleanField(_('searchable'), default=True)
        initial = models.CharField(_('initial'), max_length=200,
                                   blank=True, null=True)
        # Optional validation bounds for the stored values.
        min_length = models.PositiveIntegerField(_('min length'), blank=True, null=True)
        max_length = models.PositiveIntegerField(_('max length'), blank=True, null=True)
        min_value = models.FloatField(_('min value'), blank=True, null=True)
        max_value = models.FloatField(_('max value'), blank=True, null=True)

        objects = CustomContentTypeFieldManager()

        class Meta:
            verbose_name = _('custom field')
            verbose_name_plural = _('custom fields')
            abstract = True

        def save(self, *args, **kwargs):
            super(CustomContentTypeField, self).save(*args, **kwargs)

        def clean(self):
            # if field is required must issue a initial value
            if self.required:
                # TODO - must create values for all instances that have not
                #print model.objects.values_list('pk', flat=True)
                #print self.field.filter(content_type=self.content_type)
                #objs = self.field.filter(content_type=self.content_type) \
                #    .exclude(object_id__in=model.objects.values_list('pk', flat=True))
                #for obj in objs:
                #    print obj
                pass

        # NOTE(review): the two _check_validate_* helpers below are not
        # invoked anywhere in this block — confirm callers wire them up.
        def _check_validate_already_defined_in_model(self):
            # Reject a custom field whose name clashes with a real model field.
            model = self.content_type.model_class()
            if self.name in [f.name for f in model._meta.fields]:
                raise ValidationError({
                    'name': (_('Custom field already defined as model field for content type %(model_name)s') % {'model_name': model.__name__},)
                })

        def _check_validate_already_defined_in_custom_fields(self):
            # Reject a duplicate (content_type, name) custom-field definition,
            # excluding this instance itself when it is being updated.
            model = self.content_type.model_class()
            qs = self.__class__._default_manager.filter(
                content_type=self.content_type,
                name=self.name,
            )
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                raise ValidationError({
                    'name': (_('Custom field already defined for content type %(model_name)s') % {'model_name': model.__name__},)
                })

        def __str__(self):
            return "%s" % self.name

    return CustomContentTypeField
python
def create_fields(self, base_model=models.Model, base_manager=models.Manager): """ This method will create a model which will hold field types defined at runtime for each ContentType. :param base_model: base model class to inherit from :return: """ CONTENT_TYPES = self.content_types_query class CustomContentTypeFieldManager(base_manager): pass @python_2_unicode_compatible class CustomContentTypeField(base_model): DATATYPE_CHOICES = ( (CUSTOM_TYPE_TEXT, _('text')), (CUSTOM_TYPE_INTEGER, _('integer')), (CUSTOM_TYPE_FLOAT, _('float')), (CUSTOM_TYPE_TIME, _('time')), (CUSTOM_TYPE_DATE, _('date')), (CUSTOM_TYPE_DATETIME, _('datetime')), (CUSTOM_TYPE_BOOLEAN, _('boolean')), ) content_type = models.ForeignKey(ContentType, verbose_name=_('content type'), related_name='+', limit_choices_to=CONTENT_TYPES) name = models.CharField(_('name'), max_length=100, db_index=True) label = models.CharField(_('label'), max_length=100) data_type = models.CharField(_('data type'), max_length=8, choices=DATATYPE_CHOICES, db_index=True) help_text = models.CharField(_('help text'), max_length=200, blank=True, null=True) required = models.BooleanField(_('required'), default=False) searchable = models.BooleanField(_('searchable'), default=True) initial = models.CharField(_('initial'), max_length=200, blank=True, null=True) min_length = models.PositiveIntegerField(_('min length'), blank=True, null=True) max_length = models.PositiveIntegerField(_('max length'), blank=True, null=True) min_value = models.FloatField(_('min value'), blank=True, null=True) max_value = models.FloatField(_('max value'), blank=True, null=True) objects = CustomContentTypeFieldManager() class Meta: verbose_name = _('custom field') verbose_name_plural = _('custom fields') abstract = True def save(self, *args, **kwargs): super(CustomContentTypeField, self).save(*args, **kwargs) def clean(self): # if field is required must issue a initial value if self.required: # TODO - must create values for all instances that have not 
#print model.objects.values_list('pk', flat=True) #print self.field.filter(content_type=self.content_type) #objs = self.field.filter(content_type=self.content_type) \ # .exclude(object_id__in=model.objects.values_list('pk', flat=True)) #for obj in objs: # print obj pass def _check_validate_already_defined_in_model(self): model = self.content_type.model_class() if self.name in [f.name for f in model._meta.fields]: raise ValidationError({ 'name': (_('Custom field already defined as model field for content type %(model_name)s') % {'model_name': model.__name__},) }) def _check_validate_already_defined_in_custom_fields(self): model = self.content_type.model_class() qs = self.__class__._default_manager.filter( content_type=self.content_type, name=self.name, ) if not self._state.adding and self.pk is not None: qs = qs.exclude(pk=self.pk) if qs.exists(): raise ValidationError({ 'name': (_('Custom field already defined for content type %(model_name)s') % {'model_name': model.__name__},) }) def __str__(self): return "%s" % self.name return CustomContentTypeField
[ "def", "create_fields", "(", "self", ",", "base_model", "=", "models", ".", "Model", ",", "base_manager", "=", "models", ".", "Manager", ")", ":", "CONTENT_TYPES", "=", "self", ".", "content_types_query", "class", "CustomContentTypeFieldManager", "(", "base_manage...
This method will create a model which will hold field types defined at runtime for each ContentType. :param base_model: base model class to inherit from :return:
[ "This", "method", "will", "create", "a", "model", "which", "will", "hold", "field", "types", "defined", "at", "runtime", "for", "each", "ContentType", "." ]
train
https://github.com/kunitoki/django-custard/blob/3cf3aa5acf84de2f653e96469e2f9c42813df50a/custard/builder.py#L65-L148
kunitoki/django-custard
custard/builder.py
CustomFieldsBuilder.create_values
def create_values(self, base_model=models.Model, base_manager=models.Manager):
    """
    This method will create a model which will hold field values for field
    types of custom_field_model.

    :param base_model: base model class to inherit from
    :param base_manager: base manager class used for the model's default
        ``objects`` manager
    :return: the CustomContentTypeFieldValue model class (abstract, per its
        Meta)
    """
    # Captured so the inner classes can reach the builder's configuration.
    _builder = self

    class CustomContentTypeFieldValueManager(base_manager):

        def create(self, **kwargs):
            """
            Subclass create in order to be able to use "value" in kwargs
            instead of using "value_%s" passing also type directly
            """
            if 'value' in kwargs:
                # Route the generic 'value' through the property so it lands
                # in the right value_<data_type> column.
                value = kwargs.pop('value')
                created_object = super(CustomContentTypeFieldValueManager, self).create(**kwargs)
                created_object.value = value
                return created_object
            else:
                return super(CustomContentTypeFieldValueManager, self).create(**kwargs)

    @python_2_unicode_compatible
    class CustomContentTypeFieldValue(base_model):
        # The runtime field definition this value belongs to.
        custom_field = models.ForeignKey('.'.join(_builder.fields_model),
                                         verbose_name=_('custom field'),
                                         related_name='+')
        # Generic FK to the model instance owning this value.
        content_type = models.ForeignKey(ContentType, editable=False,
                                         verbose_name=_('content type'),
                                         limit_choices_to=_builder.content_types_query)
        object_id = models.PositiveIntegerField(_('object id'), db_index=True)
        content_object = GenericForeignKey('content_type', 'object_id')

        # One column per supported data type; only the one matching
        # custom_field.data_type is used for a given row.
        value_text = models.TextField(blank=True, null=True)
        value_integer = models.IntegerField(blank=True, null=True)
        value_float = models.FloatField(blank=True, null=True)
        value_time = models.TimeField(blank=True, null=True)
        value_date = models.DateField(blank=True, null=True)
        value_datetime = models.DateTimeField(blank=True, null=True)
        value_boolean = models.NullBooleanField(blank=True)

        objects = CustomContentTypeFieldValueManager()

        def _get_value(self):
            # Dispatch to the value_<data_type> column for this field.
            return getattr(self, 'value_%s' % self.custom_field.data_type)

        def _set_value(self, new_value):
            setattr(self, 'value_%s' % self.custom_field.data_type, new_value)

        # Generic accessor that hides the per-type columns.
        value = property(_get_value, _set_value)

        class Meta:
            unique_together = ('custom_field', 'content_type', 'object_id')
            verbose_name = _('custom field value')
            verbose_name_plural = _('custom field values')
            abstract = True

        def save(self, *args, **kwargs):
            # save content type as user shouldn't be able to change it
            self.content_type = self.custom_field.content_type
            super(CustomContentTypeFieldValue, self).save(*args, **kwargs)

        def validate_unique(self, exclude=None):
            # Enforce one value per (custom_field, content_type, object_id),
            # excluding this instance itself when it is being updated.
            qs = self.__class__._default_manager.filter(
                custom_field=self.custom_field,
                content_type=self.custom_field.content_type,
                object_id=self.object_id,
            )
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                raise ValidationError({
                    NON_FIELD_ERRORS: (_('A value for this custom field already exists'),)
                })

        def __str__(self):
            return "%s: %s" % (self.custom_field.name, self.value)

    return CustomContentTypeFieldValue
python
def create_values(self, base_model=models.Model, base_manager=models.Manager): """ This method will create a model which will hold field values for field types of custom_field_model. :param base_model: :param base_manager: :return: """ _builder = self class CustomContentTypeFieldValueManager(base_manager): def create(self, **kwargs): """ Subclass create in order to be able to use "value" in kwargs instead of using "value_%s" passing also type directly """ if 'value' in kwargs: value = kwargs.pop('value') created_object = super(CustomContentTypeFieldValueManager, self).create(**kwargs) created_object.value = value return created_object else: return super(CustomContentTypeFieldValueManager, self).create(**kwargs) @python_2_unicode_compatible class CustomContentTypeFieldValue(base_model): custom_field = models.ForeignKey('.'.join(_builder.fields_model), verbose_name=_('custom field'), related_name='+') content_type = models.ForeignKey(ContentType, editable=False, verbose_name=_('content type'), limit_choices_to=_builder.content_types_query) object_id = models.PositiveIntegerField(_('object id'), db_index=True) content_object = GenericForeignKey('content_type', 'object_id') value_text = models.TextField(blank=True, null=True) value_integer = models.IntegerField(blank=True, null=True) value_float = models.FloatField(blank=True, null=True) value_time = models.TimeField(blank=True, null=True) value_date = models.DateField(blank=True, null=True) value_datetime = models.DateTimeField(blank=True, null=True) value_boolean = models.NullBooleanField(blank=True) objects = CustomContentTypeFieldValueManager() def _get_value(self): return getattr(self, 'value_%s' % self.custom_field.data_type) def _set_value(self, new_value): setattr(self, 'value_%s' % self.custom_field.data_type, new_value) value = property(_get_value, _set_value) class Meta: unique_together = ('custom_field', 'content_type', 'object_id') verbose_name = _('custom field value') verbose_name_plural = _('custom 
field values') abstract = True def save(self, *args, **kwargs): # save content type as user shouldn't be able to change it self.content_type = self.custom_field.content_type super(CustomContentTypeFieldValue, self).save(*args, **kwargs) def validate_unique(self, exclude=None): qs = self.__class__._default_manager.filter( custom_field=self.custom_field, content_type=self.custom_field.content_type, object_id=self.object_id, ) if not self._state.adding and self.pk is not None: qs = qs.exclude(pk=self.pk) if qs.exists(): raise ValidationError({ NON_FIELD_ERRORS: (_('A value for this custom field already exists'),) }) def __str__(self): return "%s: %s" % (self.custom_field.name, self.value) return CustomContentTypeFieldValue
[ "def", "create_values", "(", "self", ",", "base_model", "=", "models", ".", "Model", ",", "base_manager", "=", "models", ".", "Manager", ")", ":", "_builder", "=", "self", "class", "CustomContentTypeFieldValueManager", "(", "base_manager", ")", ":", "def", "cr...
This method will create a model which will hold field values for field types of custom_field_model. :param base_model: :param base_manager: :return:
[ "This", "method", "will", "create", "a", "model", "which", "will", "hold", "field", "values", "for", "field", "types", "of", "custom_field_model", "." ]
train
https://github.com/kunitoki/django-custard/blob/3cf3aa5acf84de2f653e96469e2f9c42813df50a/custard/builder.py#L151-L231
kunitoki/django-custard
custard/builder.py
CustomFieldsBuilder.create_manager
def create_manager(self, base_manager=models.Manager):
    """
    This will create the custom Manager that will use the fields_model
    and values_model respectively.

    :param base_manager: the base manager class to inherit from
    :return: the CustomManager class
    """
    # Captured so the inner manager can reach the builder's model classes.
    _builder = self

    class CustomManager(base_manager):

        def search(self, search_data, custom_args=None):
            """
            Search inside the custom fields for this model for any match of
            search_data and returns existing model instances

            :param search_data: text matched (icontains) against stored values
            :param custom_args: extra filters applied when selecting which
                custom fields to search in
            :return: queryset of matching model instances (empty when nothing
                matches)
            """
            # Fixed: mutable default argument ({}) replaced with None;
            # passing an explicit dict keeps working as before.
            if custom_args is None:
                custom_args = {}

            query = None

            # Value-column lookups to try against the stored custom values.
            lookups = (
                '%s__%s' % ('value_text', 'icontains'),
            )

            content_type = ContentType.objects.get_for_model(self.model)

            # Only searchable fields of this content type, plus caller filters.
            custom_args = dict({
                'content_type': content_type,
                'searchable': True
            }, **custom_args)

            # Keyed by name so duplicate field names collapse to one entry.
            custom_fields = dict((f.name, f) for f in
                                 _builder.fields_model_class.objects.filter(**custom_args))

            for value_lookup in lookups:
                for f in custom_fields.values():
                    found = _builder.values_model_class.objects.filter(**{
                        'custom_field': f,
                        'content_type': content_type,
                        value_lookup: search_data
                    })
                    if found.count() > 0:
                        if query is None:
                            query = Q()
                        # AND together the pk constraints per matching field.
                        query = query & Q(**{
                            str('%s__in' % self.model._meta.pk.name):
                                [obj.object_id for obj in found]
                        })

            if query is None:
                return self.get_queryset().none()

            return self.get_queryset().filter(query)

    return CustomManager
python
def create_manager(self, base_manager=models.Manager): """ This will create the custom Manager that will use the fields_model and values_model respectively. :param base_manager: the base manager class to inherit from :return: """ _builder = self class CustomManager(base_manager): def search(self, search_data, custom_args={}): """ Search inside the custom fields for this model for any match of search_data and returns existing model instances :param search_data: :param custom_args: :return: """ query = None lookups = ( '%s__%s' % ('value_text', 'icontains'), ) content_type = ContentType.objects.get_for_model(self.model) custom_args = dict({ 'content_type': content_type, 'searchable': True }, **custom_args) custom_fields = dict((f.name, f) for f in _builder.fields_model_class.objects.filter(**custom_args)) for value_lookup in lookups: for key, f in custom_fields.items(): found = _builder.values_model_class.objects.filter(**{ 'custom_field': f, 'content_type': content_type, value_lookup: search_data }) if found.count() > 0: if query is None: query = Q() query = query & Q(**{ str('%s__in' % self.model._meta.pk.name): [obj.object_id for obj in found] }) if query is None: return self.get_queryset().none() return self.get_queryset().filter(query) return CustomManager
[ "def", "create_manager", "(", "self", ",", "base_manager", "=", "models", ".", "Manager", ")", ":", "_builder", "=", "self", "class", "CustomManager", "(", "base_manager", ")", ":", "def", "search", "(", "self", ",", "search_data", ",", "custom_args", "=", ...
This will create the custom Manager that will use the fields_model and values_model respectively. :param base_manager: the base manager class to inherit from :return:
[ "This", "will", "create", "the", "custom", "Manager", "that", "will", "use", "the", "fields_model", "and", "values_model", "respectively", "." ]
train
https://github.com/kunitoki/django-custard/blob/3cf3aa5acf84de2f653e96469e2f9c42813df50a/custard/builder.py#L234-L276
kunitoki/django-custard
custard/builder.py
CustomFieldsBuilder.create_mixin
def create_mixin(self): """ This will create the custom Model Mixin to attach to your custom field enabled model. :return: """ _builder = self class CustomModelMixin(object): @cached_property def _content_type(self): return ContentType.objects.get_for_model(self) @classmethod def get_model_custom_fields(cls): """ Return a list of custom fields for this model, callable at model level """ return _builder.fields_model_class.objects.filter(content_type=ContentType.objects.get_for_model(cls)) def get_custom_fields(self): """ Return a list of custom fields for this model """ return _builder.fields_model_class.objects.filter(content_type=self._content_type) def get_custom_value(self, field): """ Get a value for a specified custom field """ return _builder.values_model_class.objects.get(custom_field=field, content_type=self._content_type, object_id=self.pk) def set_custom_value(self, field, value): """ Set a value for a specified custom field """ custom_value, created = \ _builder.values_model_class.objects.get_or_create(custom_field=field, content_type=self._content_type, object_id=self.pk) custom_value.value = value custom_value.full_clean() custom_value.save() return custom_value #def __getattr__(self, name): # """ Get a value for a specified custom field """ # try: # obj = _builder.values_model_class.objects.get(custom_field__name=name, # content_type=self._content_type, # object_id=self.pk) # return obj.value # except ObjectDoesNotExist: # pass # return super(CustomModelMixin, self).__getattr__(name) return CustomModelMixin
python
def create_mixin(self): """ This will create the custom Model Mixin to attach to your custom field enabled model. :return: """ _builder = self class CustomModelMixin(object): @cached_property def _content_type(self): return ContentType.objects.get_for_model(self) @classmethod def get_model_custom_fields(cls): """ Return a list of custom fields for this model, callable at model level """ return _builder.fields_model_class.objects.filter(content_type=ContentType.objects.get_for_model(cls)) def get_custom_fields(self): """ Return a list of custom fields for this model """ return _builder.fields_model_class.objects.filter(content_type=self._content_type) def get_custom_value(self, field): """ Get a value for a specified custom field """ return _builder.values_model_class.objects.get(custom_field=field, content_type=self._content_type, object_id=self.pk) def set_custom_value(self, field, value): """ Set a value for a specified custom field """ custom_value, created = \ _builder.values_model_class.objects.get_or_create(custom_field=field, content_type=self._content_type, object_id=self.pk) custom_value.value = value custom_value.full_clean() custom_value.save() return custom_value #def __getattr__(self, name): # """ Get a value for a specified custom field """ # try: # obj = _builder.values_model_class.objects.get(custom_field__name=name, # content_type=self._content_type, # object_id=self.pk) # return obj.value # except ObjectDoesNotExist: # pass # return super(CustomModelMixin, self).__getattr__(name) return CustomModelMixin
[ "def", "create_mixin", "(", "self", ")", ":", "_builder", "=", "self", "class", "CustomModelMixin", "(", "object", ")", ":", "@", "cached_property", "def", "_content_type", "(", "self", ")", ":", "return", "ContentType", ".", "objects", ".", "get_for_model", ...
This will create the custom Model Mixin to attach to your custom field enabled model. :return:
[ "This", "will", "create", "the", "custom", "Model", "Mixin", "to", "attach", "to", "your", "custom", "field", "enabled", "model", "." ]
train
https://github.com/kunitoki/django-custard/blob/3cf3aa5acf84de2f653e96469e2f9c42813df50a/custard/builder.py#L279-L331
kunitoki/django-custard
custard/builder.py
CustomFieldsBuilder.create_modelform
def create_modelform(self, base_form=forms.ModelForm, field_types=settings.CUSTOM_FIELD_TYPES, widget_types=settings.CUSTOM_WIDGET_TYPES): """ This creates the class that implements a ModelForm that knows about the custom fields :param base_form: :param field_types: :param widget_types: :return: """ _builder = self class CustomFieldModelBaseForm(base_form): def __init__(self, *args, **kwargs): """ Constructor """ # additional form variables self.custom_classes = None self.is_custom_form = True self.instance = None # construct the form super(CustomFieldModelBaseForm, self).__init__(*args, **kwargs) # init custom fields from model in the form self.init_custom_fields() def clean(self): """ Clean the form """ cleaned_data = super(CustomFieldModelBaseForm, self).clean() return cleaned_data def save(self, commit=True): """ Save the form """ self.instance = super(CustomFieldModelBaseForm, self).save(commit=commit) if self.instance and commit: self.instance.save() self.save_custom_fields() return self.instance def init_custom_fields(self): """ Populate the ``form.fields[]`` with the additional fields coming from the custom fields models. """ content_type = self.get_content_type() fields = self.get_fields_for_content_type(content_type) for f in fields: name = str(f.name) initial = f.initial self.fields[name] = self.get_formfield_for_field(f) self.fields[name].is_custom = True self.fields[name].label = f.label self.fields[name].required = f.required self.fields[name].widget = self.get_widget_for_field(f) if self.instance and self.instance.pk: value = self.search_value_for_field(f, content_type, self.instance.pk) if len(value) > 0: initial = value[0].value self.fields[name].initial = self.initial[name] = initial def save_custom_fields(self): """ Perform save and validation over the custom fields """ if not self.instance.pk: raise Exception("The model instance has not been saved. 
Have you called instance.save() ?") content_type = self.get_content_type() fields = self.get_fields_for_content_type(content_type) for f in fields: name = str(f.name) fv = self.search_value_for_field(f, content_type, self.instance.pk) if len(fv) > 0: value = fv[0] value.value = self.cleaned_data[name] else: value = self.create_value_for_field(f, self.instance.pk, self.cleaned_data[name]) value.save() def get_model(self): """ Returns the actual model this ``ModelForm`` is referring to """ return self._meta.model def get_content_type(self): """ Returns the content type instance of the model this ``ModelForm`` is referring to """ return ContentType.objects.get_for_model(self.get_model()) def get_formfield_for_field(self, field): """ Returns the defined formfield instance built from the type of the field :param field: custom field instance :return: the formfield instance """ field_attrs = { 'label': field.label, 'help_text': field.help_text, 'required': field.required, } if field.data_type == CUSTOM_TYPE_TEXT: #widget_attrs = {} if field.min_length: field_attrs['min_length'] = field.min_length if field.max_length: field_attrs['max_length'] = field.max_length # widget_attrs['maxlength'] = field.max_length #field_attrs['widget'] = widgets.AdminTextInputWidget(attrs=widget_attrs) elif field.data_type == CUSTOM_TYPE_INTEGER: if field.min_value: field_attrs['min_value'] = int(float(field.min_value)) if field.max_value: field_attrs['max_value'] = int(float(field.max_value)) #field_attrs['widget'] = spinner.IntegerSpinnerWidget(attrs=field_attrs) elif field.data_type == CUSTOM_TYPE_FLOAT: if field.min_value: field_attrs['min_value'] = float(field.min_value) if field.max_value: field_attrs['max_value'] = float(field.max_value) #field_attrs['widget'] = spinner.SpinnerWidget(attrs=field_attrs) elif field.data_type == CUSTOM_TYPE_TIME: #field_attrs['widget'] = date.TimePickerWidget() pass elif field.data_type == CUSTOM_TYPE_DATE: #field_attrs['widget'] = date.DatePickerWidget() 
pass elif field.data_type == CUSTOM_TYPE_DATETIME: #field_attrs['widget'] = date.DateTimePickerWidget() pass elif field.data_type == CUSTOM_TYPE_BOOLEAN: pass field_type = import_class(field_types[field.data_type]) return field_type(**field_attrs) def get_widget_for_field(self, field, attrs={}): """ Returns the defined widget type instance built from the type of the field :param field: custom field instance :param attrs: attributes of widgets :return: the widget instance """ return import_class(widget_types[field.data_type])(**attrs) def get_fields_for_content_type(self, content_type): """ Returns all fields for a given content type Example implementation: return MyCustomField.objects.filter(content_type=content_type) :param content_type: content type to search :return: the custom field instances """ return _builder.fields_model_class.objects.filter(content_type=content_type) def search_value_for_field(self, field, content_type, object_id): """ This function will return the CustomFieldValue instance for a given field of an object that has the given content_type Example implementation: return MyCustomFieldValue.objects.filter(custom_field=field, content_type=content_type, object_id=object_id) :param field: the custom field instance :param content_type: the content type instance :param object_id: the object id this value is referring to :return: CustomFieldValue queryset """ return _builder.values_model_class.objects.filter(custom_field=field, content_type=content_type, object_id=object_id) def create_value_for_field(self, field, object_id, value): """ Create a value for a given field of an object Example implementation: return MyCustomFieldValue(custom_field=field, object_id=object_id, value=value) :param field: the custom field instance :param object_id: the object id this value is referring to :param value: the value to set :return: the value instance (not saved!) 
""" return _builder.values_model_class(custom_field=field, object_id=object_id, value=value) return CustomFieldModelBaseForm
python
def create_modelform(self, base_form=forms.ModelForm, field_types=settings.CUSTOM_FIELD_TYPES, widget_types=settings.CUSTOM_WIDGET_TYPES): """ This creates the class that implements a ModelForm that knows about the custom fields :param base_form: :param field_types: :param widget_types: :return: """ _builder = self class CustomFieldModelBaseForm(base_form): def __init__(self, *args, **kwargs): """ Constructor """ # additional form variables self.custom_classes = None self.is_custom_form = True self.instance = None # construct the form super(CustomFieldModelBaseForm, self).__init__(*args, **kwargs) # init custom fields from model in the form self.init_custom_fields() def clean(self): """ Clean the form """ cleaned_data = super(CustomFieldModelBaseForm, self).clean() return cleaned_data def save(self, commit=True): """ Save the form """ self.instance = super(CustomFieldModelBaseForm, self).save(commit=commit) if self.instance and commit: self.instance.save() self.save_custom_fields() return self.instance def init_custom_fields(self): """ Populate the ``form.fields[]`` with the additional fields coming from the custom fields models. """ content_type = self.get_content_type() fields = self.get_fields_for_content_type(content_type) for f in fields: name = str(f.name) initial = f.initial self.fields[name] = self.get_formfield_for_field(f) self.fields[name].is_custom = True self.fields[name].label = f.label self.fields[name].required = f.required self.fields[name].widget = self.get_widget_for_field(f) if self.instance and self.instance.pk: value = self.search_value_for_field(f, content_type, self.instance.pk) if len(value) > 0: initial = value[0].value self.fields[name].initial = self.initial[name] = initial def save_custom_fields(self): """ Perform save and validation over the custom fields """ if not self.instance.pk: raise Exception("The model instance has not been saved. 
Have you called instance.save() ?") content_type = self.get_content_type() fields = self.get_fields_for_content_type(content_type) for f in fields: name = str(f.name) fv = self.search_value_for_field(f, content_type, self.instance.pk) if len(fv) > 0: value = fv[0] value.value = self.cleaned_data[name] else: value = self.create_value_for_field(f, self.instance.pk, self.cleaned_data[name]) value.save() def get_model(self): """ Returns the actual model this ``ModelForm`` is referring to """ return self._meta.model def get_content_type(self): """ Returns the content type instance of the model this ``ModelForm`` is referring to """ return ContentType.objects.get_for_model(self.get_model()) def get_formfield_for_field(self, field): """ Returns the defined formfield instance built from the type of the field :param field: custom field instance :return: the formfield instance """ field_attrs = { 'label': field.label, 'help_text': field.help_text, 'required': field.required, } if field.data_type == CUSTOM_TYPE_TEXT: #widget_attrs = {} if field.min_length: field_attrs['min_length'] = field.min_length if field.max_length: field_attrs['max_length'] = field.max_length # widget_attrs['maxlength'] = field.max_length #field_attrs['widget'] = widgets.AdminTextInputWidget(attrs=widget_attrs) elif field.data_type == CUSTOM_TYPE_INTEGER: if field.min_value: field_attrs['min_value'] = int(float(field.min_value)) if field.max_value: field_attrs['max_value'] = int(float(field.max_value)) #field_attrs['widget'] = spinner.IntegerSpinnerWidget(attrs=field_attrs) elif field.data_type == CUSTOM_TYPE_FLOAT: if field.min_value: field_attrs['min_value'] = float(field.min_value) if field.max_value: field_attrs['max_value'] = float(field.max_value) #field_attrs['widget'] = spinner.SpinnerWidget(attrs=field_attrs) elif field.data_type == CUSTOM_TYPE_TIME: #field_attrs['widget'] = date.TimePickerWidget() pass elif field.data_type == CUSTOM_TYPE_DATE: #field_attrs['widget'] = date.DatePickerWidget() 
pass elif field.data_type == CUSTOM_TYPE_DATETIME: #field_attrs['widget'] = date.DateTimePickerWidget() pass elif field.data_type == CUSTOM_TYPE_BOOLEAN: pass field_type = import_class(field_types[field.data_type]) return field_type(**field_attrs) def get_widget_for_field(self, field, attrs={}): """ Returns the defined widget type instance built from the type of the field :param field: custom field instance :param attrs: attributes of widgets :return: the widget instance """ return import_class(widget_types[field.data_type])(**attrs) def get_fields_for_content_type(self, content_type): """ Returns all fields for a given content type Example implementation: return MyCustomField.objects.filter(content_type=content_type) :param content_type: content type to search :return: the custom field instances """ return _builder.fields_model_class.objects.filter(content_type=content_type) def search_value_for_field(self, field, content_type, object_id): """ This function will return the CustomFieldValue instance for a given field of an object that has the given content_type Example implementation: return MyCustomFieldValue.objects.filter(custom_field=field, content_type=content_type, object_id=object_id) :param field: the custom field instance :param content_type: the content type instance :param object_id: the object id this value is referring to :return: CustomFieldValue queryset """ return _builder.values_model_class.objects.filter(custom_field=field, content_type=content_type, object_id=object_id) def create_value_for_field(self, field, object_id, value): """ Create a value for a given field of an object Example implementation: return MyCustomFieldValue(custom_field=field, object_id=object_id, value=value) :param field: the custom field instance :param object_id: the object id this value is referring to :param value: the value to set :return: the value instance (not saved!) 
""" return _builder.values_model_class(custom_field=field, object_id=object_id, value=value) return CustomFieldModelBaseForm
[ "def", "create_modelform", "(", "self", ",", "base_form", "=", "forms", ".", "ModelForm", ",", "field_types", "=", "settings", ".", "CUSTOM_FIELD_TYPES", ",", "widget_types", "=", "settings", ".", "CUSTOM_WIDGET_TYPES", ")", ":", "_builder", "=", "self", "class"...
This creates the class that implements a ModelForm that knows about the custom fields :param base_form: :param field_types: :param widget_types: :return:
[ "This", "creates", "the", "class", "that", "implements", "a", "ModelForm", "that", "knows", "about", "the", "custom", "fields" ]
train
https://github.com/kunitoki/django-custard/blob/3cf3aa5acf84de2f653e96469e2f9c42813df50a/custard/builder.py#L334-L544
kunitoki/django-custard
custard/builder.py
CustomFieldsBuilder.create_modeladmin
def create_modeladmin(self, base_admin=admin.ModelAdmin): """ This creates the class that implements a ModelForm that knows about the custom fields :param base_admin: :return: """ _builder = self class CustomFieldModelBaseAdmin(base_admin): def __init__(self, *args, **kwargs): super(CustomFieldModelBaseAdmin, self).__init__(*args, **kwargs) def save_model(self, request, obj, form, change): obj.save() if hasattr(form, 'save_custom_fields'): form.save_custom_fields() return CustomFieldModelBaseAdmin
python
def create_modeladmin(self, base_admin=admin.ModelAdmin): """ This creates the class that implements a ModelForm that knows about the custom fields :param base_admin: :return: """ _builder = self class CustomFieldModelBaseAdmin(base_admin): def __init__(self, *args, **kwargs): super(CustomFieldModelBaseAdmin, self).__init__(*args, **kwargs) def save_model(self, request, obj, form, change): obj.save() if hasattr(form, 'save_custom_fields'): form.save_custom_fields() return CustomFieldModelBaseAdmin
[ "def", "create_modeladmin", "(", "self", ",", "base_admin", "=", "admin", ".", "ModelAdmin", ")", ":", "_builder", "=", "self", "class", "CustomFieldModelBaseAdmin", "(", "base_admin", ")", ":", "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "...
This creates the class that implements a ModelForm that knows about the custom fields :param base_admin: :return:
[ "This", "creates", "the", "class", "that", "implements", "a", "ModelForm", "that", "knows", "about", "the", "custom", "fields" ]
train
https://github.com/kunitoki/django-custard/blob/3cf3aa5acf84de2f653e96469e2f9c42813df50a/custard/builder.py#L547-L567
pudo/banal
banal/cache.py
bytes_iter
def bytes_iter(obj): """Turn a complex object into an iterator of byte strings. The resulting iterator can be used for caching. """ if obj is None: return elif isinstance(obj, six.binary_type): yield obj elif isinstance(obj, six.string_types): yield obj elif isinstance(obj, (date, datetime)): yield obj.isoformat() elif is_mapping(obj): for key in sorted(obj.keys()): for out in chain(bytes_iter(key), bytes_iter(obj[key])): yield out elif is_sequence(obj): if isinstance(obj, (list, set)): try: obj = sorted(obj) except Exception: pass for item in obj: for out in bytes_iter(item): yield out elif isinstance(obj, (types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType)): yield getattr(obj, 'func_name', '') else: yield six.text_type(obj)
python
def bytes_iter(obj): """Turn a complex object into an iterator of byte strings. The resulting iterator can be used for caching. """ if obj is None: return elif isinstance(obj, six.binary_type): yield obj elif isinstance(obj, six.string_types): yield obj elif isinstance(obj, (date, datetime)): yield obj.isoformat() elif is_mapping(obj): for key in sorted(obj.keys()): for out in chain(bytes_iter(key), bytes_iter(obj[key])): yield out elif is_sequence(obj): if isinstance(obj, (list, set)): try: obj = sorted(obj) except Exception: pass for item in obj: for out in bytes_iter(item): yield out elif isinstance(obj, (types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType)): yield getattr(obj, 'func_name', '') else: yield six.text_type(obj)
[ "def", "bytes_iter", "(", "obj", ")", ":", "if", "obj", "is", "None", ":", "return", "elif", "isinstance", "(", "obj", ",", "six", ".", "binary_type", ")", ":", "yield", "obj", "elif", "isinstance", "(", "obj", ",", "six", ".", "string_types", ")", "...
Turn a complex object into an iterator of byte strings. The resulting iterator can be used for caching.
[ "Turn", "a", "complex", "object", "into", "an", "iterator", "of", "byte", "strings", ".", "The", "resulting", "iterator", "can", "be", "used", "for", "caching", "." ]
train
https://github.com/pudo/banal/blob/528c339be5138458e387a058581cf7d261285447/banal/cache.py#L11-L40
pudo/banal
banal/cache.py
hash_data
def hash_data(obj): """Generate a SHA1 from a complex object.""" collect = sha1() for text in bytes_iter(obj): if isinstance(text, six.text_type): text = text.encode('utf-8') collect.update(text) return collect.hexdigest()
python
def hash_data(obj): """Generate a SHA1 from a complex object.""" collect = sha1() for text in bytes_iter(obj): if isinstance(text, six.text_type): text = text.encode('utf-8') collect.update(text) return collect.hexdigest()
[ "def", "hash_data", "(", "obj", ")", ":", "collect", "=", "sha1", "(", ")", "for", "text", "in", "bytes_iter", "(", "obj", ")", ":", "if", "isinstance", "(", "text", ",", "six", ".", "text_type", ")", ":", "text", "=", "text", ".", "encode", "(", ...
Generate a SHA1 from a complex object.
[ "Generate", "a", "SHA1", "from", "a", "complex", "object", "." ]
train
https://github.com/pudo/banal/blob/528c339be5138458e387a058581cf7d261285447/banal/cache.py#L43-L50
ProgVal/markovgen
markovgen/markovgen.py
Markov.triples
def triples(self, words): """ Generates triples from the given data string. So if our string were "What a lovely day", we'd generate (What, a, lovely) and then (a, lovely, day). """ if len(words) < 3: return for i in range(len(words) - 2): yield (words[i], words[i+1], words[i+2])
python
def triples(self, words): """ Generates triples from the given data string. So if our string were "What a lovely day", we'd generate (What, a, lovely) and then (a, lovely, day). """ if len(words) < 3: return for i in range(len(words) - 2): yield (words[i], words[i+1], words[i+2])
[ "def", "triples", "(", "self", ",", "words", ")", ":", "if", "len", "(", "words", ")", "<", "3", ":", "return", "for", "i", "in", "range", "(", "len", "(", "words", ")", "-", "2", ")", ":", "yield", "(", "words", "[", "i", "]", ",", "words", ...
Generates triples from the given data string. So if our string were "What a lovely day", we'd generate (What, a, lovely) and then (a, lovely, day).
[ "Generates", "triples", "from", "the", "given", "data", "string", ".", "So", "if", "our", "string", "were", "What", "a", "lovely", "day", "we", "d", "generate", "(", "What", "a", "lovely", ")", "and", "then", "(", "a", "lovely", "day", ")", "." ]
train
https://github.com/ProgVal/markovgen/blob/d3124f211c2eacfe086f16399c7f521d80e20764/markovgen/markovgen.py#L34-L44
schettino72/import-deps
import_deps/__init__.py
ast_imports
def ast_imports(file_path): """get list of import from python module :return: (list - tuple) (module, name, asname, level) """ with open(file_path, 'r') as fp: text = fp.read() mod_ast = ast.parse(text, file_path) finder = _ImportsFinder() finder.visit(mod_ast) return finder.imports
python
def ast_imports(file_path): """get list of import from python module :return: (list - tuple) (module, name, asname, level) """ with open(file_path, 'r') as fp: text = fp.read() mod_ast = ast.parse(text, file_path) finder = _ImportsFinder() finder.visit(mod_ast) return finder.imports
[ "def", "ast_imports", "(", "file_path", ")", ":", "with", "open", "(", "file_path", ",", "'r'", ")", "as", "fp", ":", "text", "=", "fp", ".", "read", "(", ")", "mod_ast", "=", "ast", ".", "parse", "(", "text", ",", "file_path", ")", "finder", "=", ...
get list of import from python module :return: (list - tuple) (module, name, asname, level)
[ "get", "list", "of", "import", "from", "python", "module", ":", "return", ":", "(", "list", "-", "tuple", ")", "(", "module", "name", "asname", "level", ")" ]
train
https://github.com/schettino72/import-deps/blob/311f2badd2c93f743d09664397f21e7eaa16e1f1/import_deps/__init__.py#L24-L33
schettino72/import-deps
import_deps/__init__.py
_ImportsFinder.visit_Import
def visit_Import(self, node): """callback for 'import' statement""" self.imports.extend((None, n.name, n.asname, None) for n in node.names) ast.NodeVisitor.generic_visit(self, node)
python
def visit_Import(self, node): """callback for 'import' statement""" self.imports.extend((None, n.name, n.asname, None) for n in node.names) ast.NodeVisitor.generic_visit(self, node)
[ "def", "visit_Import", "(", "self", ",", "node", ")", ":", "self", ".", "imports", ".", "extend", "(", "(", "None", ",", "n", ".", "name", ",", "n", ".", "asname", ",", "None", ")", "for", "n", "in", "node", ".", "names", ")", "ast", ".", "Node...
callback for 'import' statement
[ "callback", "for", "import", "statement" ]
train
https://github.com/schettino72/import-deps/blob/311f2badd2c93f743d09664397f21e7eaa16e1f1/import_deps/__init__.py#L12-L16
schettino72/import-deps
import_deps/__init__.py
_ImportsFinder.visit_ImportFrom
def visit_ImportFrom(self, node): """callback for 'import from' statement""" self.imports.extend((node.module, n.name, n.asname, node.level) for n in node.names) ast.NodeVisitor.generic_visit(self, node)
python
def visit_ImportFrom(self, node): """callback for 'import from' statement""" self.imports.extend((node.module, n.name, n.asname, node.level) for n in node.names) ast.NodeVisitor.generic_visit(self, node)
[ "def", "visit_ImportFrom", "(", "self", ",", "node", ")", ":", "self", ".", "imports", ".", "extend", "(", "(", "node", ".", "module", ",", "n", ".", "name", ",", "n", ".", "asname", ",", "node", ".", "level", ")", "for", "n", "in", "node", ".", ...
callback for 'import from' statement
[ "callback", "for", "import", "from", "statement" ]
train
https://github.com/schettino72/import-deps/blob/311f2badd2c93f743d09664397f21e7eaa16e1f1/import_deps/__init__.py#L18-L22
schettino72/import-deps
import_deps/__init__.py
PyModule._get_fqn
def _get_fqn(cls, path): """get full qualified name as list of strings :return: (list - str) of path segments from top package to given path """ name_list = [path.stem] current_path = path # move to parent path until parent path is a python package while cls.is_pkg(current_path.parent): name_list.append(current_path.parent.name) current_path = current_path.parent return list(reversed(name_list))
python
def _get_fqn(cls, path): """get full qualified name as list of strings :return: (list - str) of path segments from top package to given path """ name_list = [path.stem] current_path = path # move to parent path until parent path is a python package while cls.is_pkg(current_path.parent): name_list.append(current_path.parent.name) current_path = current_path.parent return list(reversed(name_list))
[ "def", "_get_fqn", "(", "cls", ",", "path", ")", ":", "name_list", "=", "[", "path", ".", "stem", "]", "current_path", "=", "path", "# move to parent path until parent path is a python package", "while", "cls", ".", "is_pkg", "(", "current_path", ".", "parent", ...
get full qualified name as list of strings :return: (list - str) of path segments from top package to given path
[ "get", "full", "qualified", "name", "as", "list", "of", "strings", ":", "return", ":", "(", "list", "-", "str", ")", "of", "path", "segments", "from", "top", "package", "to", "given", "path" ]
train
https://github.com/schettino72/import-deps/blob/311f2badd2c93f743d09664397f21e7eaa16e1f1/import_deps/__init__.py#L65-L75
schettino72/import-deps
import_deps/__init__.py
ModuleSet._get_imported_module
def _get_imported_module(self, module_name): """try to get imported module reference by its name""" # if imported module on module_set add to list imp_mod = self.by_name.get(module_name) if imp_mod: return imp_mod # last part of import section might not be a module # remove last section no_obj = module_name.rsplit('.', 1)[0] imp_mod2 = self.by_name.get(no_obj) if imp_mod2: return imp_mod2 # special case for __init__ if module_name in self.pkgs: pkg_name = module_name + ".__init__" return self.by_name[pkg_name] if no_obj in self.pkgs: pkg_name = no_obj + ".__init__" return self.by_name[pkg_name]
python
def _get_imported_module(self, module_name): """try to get imported module reference by its name""" # if imported module on module_set add to list imp_mod = self.by_name.get(module_name) if imp_mod: return imp_mod # last part of import section might not be a module # remove last section no_obj = module_name.rsplit('.', 1)[0] imp_mod2 = self.by_name.get(no_obj) if imp_mod2: return imp_mod2 # special case for __init__ if module_name in self.pkgs: pkg_name = module_name + ".__init__" return self.by_name[pkg_name] if no_obj in self.pkgs: pkg_name = no_obj + ".__init__" return self.by_name[pkg_name]
[ "def", "_get_imported_module", "(", "self", ",", "module_name", ")", ":", "# if imported module on module_set add to list", "imp_mod", "=", "self", ".", "by_name", ".", "get", "(", "module_name", ")", "if", "imp_mod", ":", "return", "imp_mod", "# last part of import s...
try to get imported module reference by its name
[ "try", "to", "get", "imported", "module", "reference", "by", "its", "name" ]
train
https://github.com/schettino72/import-deps/blob/311f2badd2c93f743d09664397f21e7eaa16e1f1/import_deps/__init__.py#L95-L116
schettino72/import-deps
import_deps/__init__.py
ModuleSet.get_imports
def get_imports(self, module, return_fqn=False): """return set of imported modules that are in self :param module: PyModule :return: (set - str) of path names """ # print('####', module.fqn) # print(self.by_name.keys(), '\n\n') imports = set() raw_imports = ast_imports(module.path) for import_entry in raw_imports: # join 'from' and 'import' part of import statement full = ".".join(s for s in import_entry[:2] if s) import_level = import_entry[3] if import_level: # intra package imports intra = '.'.join(module.fqn[:-import_level] + [full]) imported = self._get_imported_module(intra) else: imported = self._get_imported_module(full) if imported: if return_fqn: imports.add('.'.join(imported.fqn)) else: imports.add(imported.path) return imports
python
def get_imports(self, module, return_fqn=False): """return set of imported modules that are in self :param module: PyModule :return: (set - str) of path names """ # print('####', module.fqn) # print(self.by_name.keys(), '\n\n') imports = set() raw_imports = ast_imports(module.path) for import_entry in raw_imports: # join 'from' and 'import' part of import statement full = ".".join(s for s in import_entry[:2] if s) import_level = import_entry[3] if import_level: # intra package imports intra = '.'.join(module.fqn[:-import_level] + [full]) imported = self._get_imported_module(intra) else: imported = self._get_imported_module(full) if imported: if return_fqn: imports.add('.'.join(imported.fqn)) else: imports.add(imported.path) return imports
[ "def", "get_imports", "(", "self", ",", "module", ",", "return_fqn", "=", "False", ")", ":", "# print('####', module.fqn)", "# print(self.by_name.keys(), '\\n\\n')", "imports", "=", "set", "(", ")", "raw_imports", "=", "ast_imports", "(", "module", ".", "path", ")...
return set of imported modules that are in self :param module: PyModule :return: (set - str) of path names
[ "return", "set", "of", "imported", "modules", "that", "are", "in", "self", ":", "param", "module", ":", "PyModule", ":", "return", ":", "(", "set", "-", "str", ")", "of", "path", "names" ]
train
https://github.com/schettino72/import-deps/blob/311f2badd2c93f743d09664397f21e7eaa16e1f1/import_deps/__init__.py#L119-L145
rapidpro/dash
dash/orgs/models.py
Invitation.generate_random_string
def generate_random_string(cls, length): """ Generatesa a [length] characters alpha numeric secret """ # avoid things that could be mistaken ex: 'I' and '1' letters = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ" return "".join([random.choice(letters) for _ in range(length)])
python
def generate_random_string(cls, length): """ Generatesa a [length] characters alpha numeric secret """ # avoid things that could be mistaken ex: 'I' and '1' letters = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ" return "".join([random.choice(letters) for _ in range(length)])
[ "def", "generate_random_string", "(", "cls", ",", "length", ")", ":", "# avoid things that could be mistaken ex: 'I' and '1'", "letters", "=", "\"23456789ABCDEFGHJKLMNPQRSTUVWXYZ\"", "return", "\"\"", ".", "join", "(", "[", "random", ".", "choice", "(", "letters", ")", ...
Generatesa a [length] characters alpha numeric secret
[ "Generatesa", "a", "[", "length", "]", "characters", "alpha", "numeric", "secret" ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/orgs/models.py#L292-L298
HDI-Project/RDT
examples/airbnb.py
run_airbnb_demo
def run_airbnb_demo(data_dir): """HyperTransfomer will transform back and forth data airbnb data.""" # Setup meta_file = os.path.join(data_dir, 'Airbnb_demo_meta.json') transformer_list = ['NumberTransformer', 'DTTransformer', 'CatTransformer'] ht = HyperTransformer(meta_file) # Run transformed = ht.fit_transform(transformer_list=transformer_list) result = ht.reverse_transform(tables=transformed) # Check assert result.keys() == ht.table_dict.keys() for name, table in result.items(): assert not result[name].isnull().all().all()
python
def run_airbnb_demo(data_dir): """HyperTransfomer will transform back and forth data airbnb data.""" # Setup meta_file = os.path.join(data_dir, 'Airbnb_demo_meta.json') transformer_list = ['NumberTransformer', 'DTTransformer', 'CatTransformer'] ht = HyperTransformer(meta_file) # Run transformed = ht.fit_transform(transformer_list=transformer_list) result = ht.reverse_transform(tables=transformed) # Check assert result.keys() == ht.table_dict.keys() for name, table in result.items(): assert not result[name].isnull().all().all()
[ "def", "run_airbnb_demo", "(", "data_dir", ")", ":", "# Setup", "meta_file", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "'Airbnb_demo_meta.json'", ")", "transformer_list", "=", "[", "'NumberTransformer'", ",", "'DTTransformer'", ",", "'CatTransform...
HyperTransfomer will transform back and forth data airbnb data.
[ "HyperTransfomer", "will", "transform", "back", "and", "forth", "data", "airbnb", "data", "." ]
train
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/examples/airbnb.py#L28-L44
noxdafox/vminspect
vminspect/winevtx.py
WinEventLog.eventlog
def eventlog(self, path): """Iterates over the Events contained within the log at the given path. For each Event, yields a XML string. """ self.logger.debug("Parsing Event log file %s.", path) with NamedTemporaryFile(buffering=0) as tempfile: self._filesystem.download(path, tempfile.name) file_header = FileHeader(tempfile.read(), 0) for xml_string, _ in evtx_file_xml_view(file_header): yield xml_string
python
def eventlog(self, path): """Iterates over the Events contained within the log at the given path. For each Event, yields a XML string. """ self.logger.debug("Parsing Event log file %s.", path) with NamedTemporaryFile(buffering=0) as tempfile: self._filesystem.download(path, tempfile.name) file_header = FileHeader(tempfile.read(), 0) for xml_string, _ in evtx_file_xml_view(file_header): yield xml_string
[ "def", "eventlog", "(", "self", ",", "path", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Parsing Event log file %s.\"", ",", "path", ")", "with", "NamedTemporaryFile", "(", "buffering", "=", "0", ")", "as", "tempfile", ":", "self", ".", "_filesy...
Iterates over the Events contained within the log at the given path. For each Event, yields a XML string.
[ "Iterates", "over", "the", "Events", "contained", "within", "the", "log", "at", "the", "given", "path", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/winevtx.py#L66-L80
raphaelgyory/django-rest-messaging-centrifugo
rest_messaging_centrifugo/signals.py
publish_message_to_centrifugo
def publish_message_to_centrifugo(sender, instance, created, **kwargs): """ Publishes each saved message to Centrifugo. """ if created is True: client = Client("{0}api/".format(getattr(settings, "CENTRIFUGE_ADDRESS")), getattr(settings, "CENTRIFUGE_SECRET")) # we ensure the client is still in the thread (he may have left or have been removed) active_participants = [participation.participant.id for participation in Participation.objects.filter(thread=instance.thread, date_left__isnull=True).select_related('participant')] client.publish( build_channel(settings.CENTRIFUGO_MESSAGE_NAMESPACE, instance.thread.id, active_participants), { "id": instance.id, "body": instance.body, "sender": instance.sender.id, "thread": instance.thread.id, "sent_at": str(instance.sent_at), "is_notification": True, # ATTENTION: check against sender too to be sure to not notify him his message } )
python
def publish_message_to_centrifugo(sender, instance, created, **kwargs): """ Publishes each saved message to Centrifugo. """ if created is True: client = Client("{0}api/".format(getattr(settings, "CENTRIFUGE_ADDRESS")), getattr(settings, "CENTRIFUGE_SECRET")) # we ensure the client is still in the thread (he may have left or have been removed) active_participants = [participation.participant.id for participation in Participation.objects.filter(thread=instance.thread, date_left__isnull=True).select_related('participant')] client.publish( build_channel(settings.CENTRIFUGO_MESSAGE_NAMESPACE, instance.thread.id, active_participants), { "id": instance.id, "body": instance.body, "sender": instance.sender.id, "thread": instance.thread.id, "sent_at": str(instance.sent_at), "is_notification": True, # ATTENTION: check against sender too to be sure to not notify him his message } )
[ "def", "publish_message_to_centrifugo", "(", "sender", ",", "instance", ",", "created", ",", "*", "*", "kwargs", ")", ":", "if", "created", "is", "True", ":", "client", "=", "Client", "(", "\"{0}api/\"", ".", "format", "(", "getattr", "(", "settings", ",",...
Publishes each saved message to Centrifugo.
[ "Publishes", "each", "saved", "message", "to", "Centrifugo", "." ]
train
https://github.com/raphaelgyory/django-rest-messaging-centrifugo/blob/f44022cd9fc83e84ab573fe8a8385c85f6e77380/rest_messaging_centrifugo/signals.py#L17-L33
raphaelgyory/django-rest-messaging-centrifugo
rest_messaging_centrifugo/signals.py
publish_participation_to_thread
def publish_participation_to_thread(sender, instance, created, **kwargs): """ Warns users everytime a thread including them is published. This is done via channel subscription. """ if kwargs.get('created_and_add_participants') is True: request_participant_id = kwargs.get('request_participant_id') if request_participant_id is not None: client = Client("{0}api/".format(getattr(settings, "CENTRIFUGE_ADDRESS")), getattr(settings, "CENTRIFUGE_SECRET")) active_participants = [participation.participant for participation in Participation.objects.filter(thread=instance, date_left__isnull=True).select_related('participant')] for participant in active_participants: client.publish( build_channel(settings.CENTRIFUGO_THREAD_NAMESPACE, participant.id, [participant.id]), { "message_channel_to_connect_to": build_channel(settings.CENTRIFUGO_MESSAGE_NAMESPACE, instance.id, [p.id for p in active_participants]) } )
python
def publish_participation_to_thread(sender, instance, created, **kwargs): """ Warns users everytime a thread including them is published. This is done via channel subscription. """ if kwargs.get('created_and_add_participants') is True: request_participant_id = kwargs.get('request_participant_id') if request_participant_id is not None: client = Client("{0}api/".format(getattr(settings, "CENTRIFUGE_ADDRESS")), getattr(settings, "CENTRIFUGE_SECRET")) active_participants = [participation.participant for participation in Participation.objects.filter(thread=instance, date_left__isnull=True).select_related('participant')] for participant in active_participants: client.publish( build_channel(settings.CENTRIFUGO_THREAD_NAMESPACE, participant.id, [participant.id]), { "message_channel_to_connect_to": build_channel(settings.CENTRIFUGO_MESSAGE_NAMESPACE, instance.id, [p.id for p in active_participants]) } )
[ "def", "publish_participation_to_thread", "(", "sender", ",", "instance", ",", "created", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "'created_and_add_participants'", ")", "is", "True", ":", "request_participant_id", "=", "kwargs", ".", ...
Warns users everytime a thread including them is published. This is done via channel subscription.
[ "Warns", "users", "everytime", "a", "thread", "including", "them", "is", "published", ".", "This", "is", "done", "via", "channel", "subscription", "." ]
train
https://github.com/raphaelgyory/django-rest-messaging-centrifugo/blob/f44022cd9fc83e84ab573fe8a8385c85f6e77380/rest_messaging_centrifugo/signals.py#L38-L51
fake-name/WebRequest
WebRequest/WebRequestClass.py
WebGetRobust.getFileAndName
def getFileAndName(self, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL and the filename for the target content as a 2-tuple (pgctnt, hName) for the content at the target URL. The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' pgctnt, hName, mime = self.getFileNameMime(*args, **kwargs) return pgctnt, hName
python
def getFileAndName(self, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL and the filename for the target content as a 2-tuple (pgctnt, hName) for the content at the target URL. The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' pgctnt, hName, mime = self.getFileNameMime(*args, **kwargs) return pgctnt, hName
[ "def", "getFileAndName", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pgctnt", ",", "hName", ",", "mime", "=", "self", ".", "getFileNameMime", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "pgctnt", ",", "hName" ]
Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL and the filename for the target content as a 2-tuple (pgctnt, hName) for the content at the target URL. The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename.
[ "Give", "a", "requested", "page", "(", "note", ":", "the", "arguments", "for", "this", "call", "are", "forwarded", "to", "getpage", "()", ")", "return", "the", "content", "at", "the", "target", "URL", "and", "the", "filename", "for", "the", "target", "co...
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L279-L290
fake-name/WebRequest
WebRequest/WebRequestClass.py
WebGetRobust.getFileNameMime
def getFileNameMime(self, requestedUrl, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL, the filename for the target content, and the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime). The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getFileAndName cannot be called with 'returnMultiple'", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getFileAndName contradicts the 'soup' directive!", requestedUrl) kwargs["returnMultiple"] = True pgctnt, pghandle = self.getpage(requestedUrl, *args, **kwargs) info = pghandle.info() if not 'Content-Disposition' in info: hName = '' elif not 'filename=' in info['Content-Disposition']: hName = '' else: hName = info['Content-Disposition'].split('filename=')[1] # Unquote filename if it's quoted. if ((hName.startswith("'") and hName.endswith("'")) or hName.startswith('"') and hName.endswith('"')) and len(hName) >= 2: hName = hName[1:-1] mime = info.get_content_type() if not hName.strip(): requestedUrl = pghandle.geturl() hName = urllib.parse.urlsplit(requestedUrl).path.split("/")[-1].strip() if "/" in hName: hName = hName.split("/")[-1] return pgctnt, hName, mime
python
def getFileNameMime(self, requestedUrl, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL, the filename for the target content, and the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime). The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getFileAndName cannot be called with 'returnMultiple'", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getFileAndName contradicts the 'soup' directive!", requestedUrl) kwargs["returnMultiple"] = True pgctnt, pghandle = self.getpage(requestedUrl, *args, **kwargs) info = pghandle.info() if not 'Content-Disposition' in info: hName = '' elif not 'filename=' in info['Content-Disposition']: hName = '' else: hName = info['Content-Disposition'].split('filename=')[1] # Unquote filename if it's quoted. if ((hName.startswith("'") and hName.endswith("'")) or hName.startswith('"') and hName.endswith('"')) and len(hName) >= 2: hName = hName[1:-1] mime = info.get_content_type() if not hName.strip(): requestedUrl = pghandle.geturl() hName = urllib.parse.urlsplit(requestedUrl).path.split("/")[-1].strip() if "/" in hName: hName = hName.split("/")[-1] return pgctnt, hName, mime
[ "def", "getFileNameMime", "(", "self", ",", "requestedUrl", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'returnMultiple'", "in", "kwargs", ":", "raise", "Exceptions", ".", "ArgumentError", "(", "\"getFileAndName cannot be called with 'returnMultiple'...
Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL, the filename for the target content, and the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime). The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename.
[ "Give", "a", "requested", "page", "(", "note", ":", "the", "arguments", "for", "this", "call", "are", "forwarded", "to", "getpage", "()", ")", "return", "the", "content", "at", "the", "target", "URL", "the", "filename", "for", "the", "target", "content", ...
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L292-L334
fake-name/WebRequest
WebRequest/WebRequestClass.py
WebGetRobust.__check_suc_cookie
def __check_suc_cookie(self, components): ''' This is only called if we're on a known sucuri-"protected" site. As such, if we do *not* have a sucuri cloudproxy cookie, we can assume we need to do the normal WAF step-through. ''' netloc = components.netloc.lower() for cookie in self.cj: if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) or (cookie.domain.lower().endswith("127.0.0.1") and ( components.path == "/sucuri_shit_3" or components.path == "/sucuri_shit_2" ))): # Allow testing if "sucuri_cloudproxy_uuid_" in cookie.name: return self.log.info("Missing cloudproxy cookie for known sucuri wrapped site. Doing a pre-emptive chromium fetch.") raise Exceptions.SucuriWrapper("WAF Shit", str(components))
python
def __check_suc_cookie(self, components): ''' This is only called if we're on a known sucuri-"protected" site. As such, if we do *not* have a sucuri cloudproxy cookie, we can assume we need to do the normal WAF step-through. ''' netloc = components.netloc.lower() for cookie in self.cj: if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) or (cookie.domain.lower().endswith("127.0.0.1") and ( components.path == "/sucuri_shit_3" or components.path == "/sucuri_shit_2" ))): # Allow testing if "sucuri_cloudproxy_uuid_" in cookie.name: return self.log.info("Missing cloudproxy cookie for known sucuri wrapped site. Doing a pre-emptive chromium fetch.") raise Exceptions.SucuriWrapper("WAF Shit", str(components))
[ "def", "__check_suc_cookie", "(", "self", ",", "components", ")", ":", "netloc", "=", "components", ".", "netloc", ".", "lower", "(", ")", "for", "cookie", "in", "self", ".", "cj", ":", "if", "cookie", ".", "domain_specified", "and", "(", "cookie", ".", ...
This is only called if we're on a known sucuri-"protected" site. As such, if we do *not* have a sucuri cloudproxy cookie, we can assume we need to do the normal WAF step-through.
[ "This", "is", "only", "called", "if", "we", "re", "on", "a", "known", "sucuri", "-", "protected", "site", ".", "As", "such", "if", "we", "do", "*", "not", "*", "have", "a", "sucuri", "cloudproxy", "cookie", "we", "can", "assume", "we", "need", "to", ...
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L398-L413
fake-name/WebRequest
WebRequest/WebRequestClass.py
WebGetRobust.__pre_check
def __pre_check(self, requestedUrl): ''' Allow the pre-emptive fetching of sites with a full browser if they're known to be dick hosters. ''' components = urllib.parse.urlsplit(requestedUrl) netloc_l = components.netloc.lower() if netloc_l in Domain_Constants.SUCURI_GARBAGE_SITE_NETLOCS: self.__check_suc_cookie(components) elif netloc_l in Domain_Constants.CF_GARBAGE_SITE_NETLOCS: self.__check_cf_cookie(components) elif components.path == '/sucuri_shit_2': self.__check_suc_cookie(components) elif components.path == '/sucuri_shit_3': self.__check_suc_cookie(components) elif components.path == '/cloudflare_under_attack_shit_2': self.__check_cf_cookie(components) elif components.path == '/cloudflare_under_attack_shit_3': self.__check_cf_cookie(components)
python
def __pre_check(self, requestedUrl): ''' Allow the pre-emptive fetching of sites with a full browser if they're known to be dick hosters. ''' components = urllib.parse.urlsplit(requestedUrl) netloc_l = components.netloc.lower() if netloc_l in Domain_Constants.SUCURI_GARBAGE_SITE_NETLOCS: self.__check_suc_cookie(components) elif netloc_l in Domain_Constants.CF_GARBAGE_SITE_NETLOCS: self.__check_cf_cookie(components) elif components.path == '/sucuri_shit_2': self.__check_suc_cookie(components) elif components.path == '/sucuri_shit_3': self.__check_suc_cookie(components) elif components.path == '/cloudflare_under_attack_shit_2': self.__check_cf_cookie(components) elif components.path == '/cloudflare_under_attack_shit_3': self.__check_cf_cookie(components)
[ "def", "__pre_check", "(", "self", ",", "requestedUrl", ")", ":", "components", "=", "urllib", ".", "parse", ".", "urlsplit", "(", "requestedUrl", ")", "netloc_l", "=", "components", ".", "netloc", ".", "lower", "(", ")", "if", "netloc_l", "in", "Domain_Co...
Allow the pre-emptive fetching of sites with a full browser if they're known to be dick hosters.
[ "Allow", "the", "pre", "-", "emptive", "fetching", "of", "sites", "with", "a", "full", "browser", "if", "they", "re", "known", "to", "be", "dick", "hosters", "." ]
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L435-L454
fake-name/WebRequest
WebRequest/WebRequestClass.py
WebGetRobust.__decompressContent
def __decompressContent(self, coding, pgctnt): """ This is really obnoxious """ #preLen = len(pgctnt) if coding == 'deflate': compType = "deflate" bits_opts = [ -zlib.MAX_WBITS, # deflate zlib.MAX_WBITS, # zlib zlib.MAX_WBITS | 16, # gzip zlib.MAX_WBITS | 32, # "automatic header detection" 0, # Try to guess from header # Try all the raw window options. -8, -9, -10, -11, -12, -13, -14, -15, # Stream with zlib headers 8, 9, 10, 11, 12, 13, 14, 15, # With gzip header+trailer 8+16, 9+16, 10+16, 11+16, 12+16, 13+16, 14+16, 15+16, # Automatic detection 8+32, 9+32, 10+32, 11+32, 12+32, 13+32, 14+32, 15+32, ] err = None for wbits_val in bits_opts: try: pgctnt = zlib.decompress(pgctnt, wbits_val) return compType, pgctnt except zlib.error as e: err = e # We can't get here without err having thrown. raise err elif coding == 'gzip': compType = "gzip" buf = io.BytesIO(pgctnt) f = gzip.GzipFile(fileobj=buf) pgctnt = f.read() elif coding == "sdch": raise Exceptions.ContentTypeError("Wait, someone other then google actually supports SDCH compression (%s)?" % pgreq) else: compType = "none" return compType, pgctnt
python
def __decompressContent(self, coding, pgctnt): """ This is really obnoxious """ #preLen = len(pgctnt) if coding == 'deflate': compType = "deflate" bits_opts = [ -zlib.MAX_WBITS, # deflate zlib.MAX_WBITS, # zlib zlib.MAX_WBITS | 16, # gzip zlib.MAX_WBITS | 32, # "automatic header detection" 0, # Try to guess from header # Try all the raw window options. -8, -9, -10, -11, -12, -13, -14, -15, # Stream with zlib headers 8, 9, 10, 11, 12, 13, 14, 15, # With gzip header+trailer 8+16, 9+16, 10+16, 11+16, 12+16, 13+16, 14+16, 15+16, # Automatic detection 8+32, 9+32, 10+32, 11+32, 12+32, 13+32, 14+32, 15+32, ] err = None for wbits_val in bits_opts: try: pgctnt = zlib.decompress(pgctnt, wbits_val) return compType, pgctnt except zlib.error as e: err = e # We can't get here without err having thrown. raise err elif coding == 'gzip': compType = "gzip" buf = io.BytesIO(pgctnt) f = gzip.GzipFile(fileobj=buf) pgctnt = f.read() elif coding == "sdch": raise Exceptions.ContentTypeError("Wait, someone other then google actually supports SDCH compression (%s)?" % pgreq) else: compType = "none" return compType, pgctnt
[ "def", "__decompressContent", "(", "self", ",", "coding", ",", "pgctnt", ")", ":", "#preLen = len(pgctnt)", "if", "coding", "==", "'deflate'", ":", "compType", "=", "\"deflate\"", "bits_opts", "=", "[", "-", "zlib", ".", "MAX_WBITS", ",", "# deflate", "zlib", ...
This is really obnoxious
[ "This", "is", "really", "obnoxious" ]
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L716-L769
fake-name/WebRequest
WebRequest/WebRequestClass.py
WebGetRobust.addSeleniumCookie
def addSeleniumCookie(self, cookieDict): ''' Install a cookie exported from a selenium webdriver into the active opener ''' # print cookieDict cookie = http.cookiejar.Cookie( version = 0, name = cookieDict['name'], value = cookieDict['value'], port = None, port_specified = False, domain = cookieDict['domain'], domain_specified = True, domain_initial_dot = False, path = cookieDict['path'], path_specified = False, secure = cookieDict['secure'], expires = cookieDict['expiry'] if 'expiry' in cookieDict else None, discard = False, comment = None, comment_url = None, rest = {"httponly":"%s" % cookieDict['httponly'] if 'httponly' in cookieDict else False}, rfc2109 = False ) self.addCookie(cookie)
python
def addSeleniumCookie(self, cookieDict): ''' Install a cookie exported from a selenium webdriver into the active opener ''' # print cookieDict cookie = http.cookiejar.Cookie( version = 0, name = cookieDict['name'], value = cookieDict['value'], port = None, port_specified = False, domain = cookieDict['domain'], domain_specified = True, domain_initial_dot = False, path = cookieDict['path'], path_specified = False, secure = cookieDict['secure'], expires = cookieDict['expiry'] if 'expiry' in cookieDict else None, discard = False, comment = None, comment_url = None, rest = {"httponly":"%s" % cookieDict['httponly'] if 'httponly' in cookieDict else False}, rfc2109 = False ) self.addCookie(cookie)
[ "def", "addSeleniumCookie", "(", "self", ",", "cookieDict", ")", ":", "# print cookieDict", "cookie", "=", "http", ".", "cookiejar", ".", "Cookie", "(", "version", "=", "0", ",", "name", "=", "cookieDict", "[", "'name'", "]", ",", "value", "=", "cookieDict...
Install a cookie exported from a selenium webdriver into the active opener
[ "Install", "a", "cookie", "exported", "from", "a", "selenium", "webdriver", "into", "the", "active", "opener" ]
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L959-L985
jasonwyatt/Flask-ErrorMail
flask_errormail/__init__.py
mail_on_500
def mail_on_500(app, recipients, sender='noreply@localhost'): '''Main function for setting up Flask-ErrorMail to send e-mails when 500 errors occur. :param app: Flask Application Object :type app: flask.Flask :param recipients: List of recipient email addresses. :type recipients: list or tuple :param sender: Email address that should be listed as the sender. Defaults to 'noreply@localhost' :type sender: string ''' #importing locally, so that the dependencies are only required if # mail_on_500 is used. from flask import request as __request from flask_mail import Mail as __Mail from flask_mail import Message as __Message mail = __Mail(app) # create a closure to track the sender and recipients def email_exception(exception): '''Handles the exception message from Flask by sending an email to the recipients defined in the call to mail_on_500. ''' msg = __Message("[Flask|ErrorMail] Exception Detected", sender=sender, recipients=recipients) msg_contents = [ 'Traceback:', '='*80, traceback.format_exc(), ] msg_contents.append('\n') msg_contents.append('Request Information:') msg_contents.append('='*80) environ = __request.environ environkeys = sorted(environ.keys()) for key in environkeys: msg_contents.append('%s: %s' % (key, environ.get(key))) msg.body = '\n'.join(msg_contents) + '\n' mail.send(msg) app.register_error_handler(500, email_exception)
python
def mail_on_500(app, recipients, sender='noreply@localhost'): '''Main function for setting up Flask-ErrorMail to send e-mails when 500 errors occur. :param app: Flask Application Object :type app: flask.Flask :param recipients: List of recipient email addresses. :type recipients: list or tuple :param sender: Email address that should be listed as the sender. Defaults to 'noreply@localhost' :type sender: string ''' #importing locally, so that the dependencies are only required if # mail_on_500 is used. from flask import request as __request from flask_mail import Mail as __Mail from flask_mail import Message as __Message mail = __Mail(app) # create a closure to track the sender and recipients def email_exception(exception): '''Handles the exception message from Flask by sending an email to the recipients defined in the call to mail_on_500. ''' msg = __Message("[Flask|ErrorMail] Exception Detected", sender=sender, recipients=recipients) msg_contents = [ 'Traceback:', '='*80, traceback.format_exc(), ] msg_contents.append('\n') msg_contents.append('Request Information:') msg_contents.append('='*80) environ = __request.environ environkeys = sorted(environ.keys()) for key in environkeys: msg_contents.append('%s: %s' % (key, environ.get(key))) msg.body = '\n'.join(msg_contents) + '\n' mail.send(msg) app.register_error_handler(500, email_exception)
[ "def", "mail_on_500", "(", "app", ",", "recipients", ",", "sender", "=", "'noreply@localhost'", ")", ":", "#importing locally, so that the dependencies are only required if ", "# mail_on_500 is used.", "from", "flask", "import", "request", "as", "__request", "from", "flask_...
Main function for setting up Flask-ErrorMail to send e-mails when 500 errors occur. :param app: Flask Application Object :type app: flask.Flask :param recipients: List of recipient email addresses. :type recipients: list or tuple :param sender: Email address that should be listed as the sender. Defaults to 'noreply@localhost' :type sender: string
[ "Main", "function", "for", "setting", "up", "Flask", "-", "ErrorMail", "to", "send", "e", "-", "mails", "when", "500", "errors", "occur", "." ]
train
https://github.com/jasonwyatt/Flask-ErrorMail/blob/d4ec1c591ac10eac2a033c9c5a4a2ced01fd2c99/flask_errormail/__init__.py#L14-L63
brbsix/pip-utils
pip_utils/dependents.py
command_dependents
def command_dependents(options): """Command launched by CLI.""" dependents = dependencies(options.package, options.recursive, options.info) if dependents: print(*dependents, sep='\n')
python
def command_dependents(options): """Command launched by CLI.""" dependents = dependencies(options.package, options.recursive, options.info) if dependents: print(*dependents, sep='\n')
[ "def", "command_dependents", "(", "options", ")", ":", "dependents", "=", "dependencies", "(", "options", ".", "package", ",", "options", ".", "recursive", ",", "options", ".", "info", ")", "if", "dependents", ":", "print", "(", "*", "dependents", ",", "se...
Command launched by CLI.
[ "Command", "launched", "by", "CLI", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/dependents.py#L11-L16
brbsix/pip-utils
pip_utils/dependents.py
dependencies
def dependencies(dist, recursive=False, info=False): """Yield distribution's dependencies.""" def case_sorted(items): """Return unique list sorted in case-insensitive order.""" return sorted(set(items), key=lambda i: i.lower()) def requires(distribution): """Return the requirements for a distribution.""" if recursive: req = set(pkg_resources.require(distribution.project_name)) req.remove(distribution) return {r.as_requirement() for r in req} return distribution.requires() def modifier(distribution): """Return project's name or full requirement string.""" return str(distribution) if info else distribution.project_name return case_sorted(modifier(r) for r in requires(dist))
python
def dependencies(dist, recursive=False, info=False): """Yield distribution's dependencies.""" def case_sorted(items): """Return unique list sorted in case-insensitive order.""" return sorted(set(items), key=lambda i: i.lower()) def requires(distribution): """Return the requirements for a distribution.""" if recursive: req = set(pkg_resources.require(distribution.project_name)) req.remove(distribution) return {r.as_requirement() for r in req} return distribution.requires() def modifier(distribution): """Return project's name or full requirement string.""" return str(distribution) if info else distribution.project_name return case_sorted(modifier(r) for r in requires(dist))
[ "def", "dependencies", "(", "dist", ",", "recursive", "=", "False", ",", "info", "=", "False", ")", ":", "def", "case_sorted", "(", "items", ")", ":", "\"\"\"Return unique list sorted in case-insensitive order.\"\"\"", "return", "sorted", "(", "set", "(", "items",...
Yield distribution's dependencies.
[ "Yield", "distribution", "s", "dependencies", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/dependents.py#L19-L38
rapidpro/dash
dash/orgs/context_processors.py
user_group_perms_processor
def user_group_perms_processor(request): """ return context variables with org permissions to the user. """ org = None group = None if hasattr(request, "user"): if request.user.is_anonymous: group = None else: group = request.user.get_org_group() org = request.user.get_org() if group: context = dict(org_perms=GroupPermWrapper(group)) else: context = dict() # make sure user_org is set on our request based on their session context["user_org"] = org return context
python
def user_group_perms_processor(request): """ return context variables with org permissions to the user. """ org = None group = None if hasattr(request, "user"): if request.user.is_anonymous: group = None else: group = request.user.get_org_group() org = request.user.get_org() if group: context = dict(org_perms=GroupPermWrapper(group)) else: context = dict() # make sure user_org is set on our request based on their session context["user_org"] = org return context
[ "def", "user_group_perms_processor", "(", "request", ")", ":", "org", "=", "None", "group", "=", "None", "if", "hasattr", "(", "request", ",", "\"user\"", ")", ":", "if", "request", ".", "user", ".", "is_anonymous", ":", "group", "=", "None", "else", ":"...
return context variables with org permissions to the user.
[ "return", "context", "variables", "with", "org", "permissions", "to", "the", "user", "." ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/orgs/context_processors.py#L43-L65