Takes an input path to the images folder of an experiment and generates automatically the category - filenumber list needed to construct an appropriate _categories object. Parameters : loader : Loader object which contains impath : string path to the input, i.e. image-, files of the experiment. All subfolders in that path will be treated as categories. If no subfolders are present, category 1 will be assigned and all files in the folder are considered input images. Images have to end in '.png'. ftrpath : string path to the feature folder. It is expected that the folder structure corresponds to the structure in impath, i.e. ftrpath/category/featurefolder/featuremap.mat Furthermore, features are assumed to be the same for all categories. def DirectoryStimuliFactory(loader): """ Takes an input path to the images folder of an experiment and generates automatically the category - filenumber list needed to construct an appropriate _categories object. Parameters : loader : Loader object which contains impath : string path to the input, i.e. image-, files of the experiment. All subfolders in that path will be treated as categories. If no subfolders are present, category 1 will be assigned and all files in the folder are considered input images. Images have to end in '.png'. ftrpath : string path to the feature folder. It is expected that the folder structure corresponds to the structure in impath, i.e. ftrpath/category/featurefolder/featuremap.mat Furthermore, features are assumed to be the same for all categories. """ impath = loader.impath ftrpath = loader.ftrpath # checks whether user has reading permission for the path assert os.access(impath, os.R_OK) assert os.access(ftrpath, os.R_OK) # EXTRACTING IMAGE NAMES img_per_cat = {} # extract only directories in the given folder subfolders = [name for name in os.listdir(impath) if os.path.isdir( os.path.join(impath, name))] # if there are no subfolders, walk through files. Take 1 as key for the # categories object if not subfolders: [_, _, files] = next(os.walk(os.path.join(impath))) # this only takes entries that end with '.png' entries = {1: [int(cur_file[cur_file.find('_')+1:-4]) for cur_file in files if cur_file.endswith('.png')]} img_per_cat.update(entries) subfolders = [''] # if there are subfolders, walk through them else: for directory in subfolders: [_, _, files] = next(os.walk(os.path.join(impath, directory))) # this only takes entries that end with '.png'. Strips ending and # considers everything after the first '_' as the imagenumber imagenumbers = [int(cur_file[cur_file.find('_')+1:-4]) for cur_file in files if (cur_file.endswith('.png') & (len(cur_file) > 4))] entries = {int(directory): imagenumbers} img_per_cat.update(entries) del directory del imagenumbers # in case subfolders do not exist, '' is appended here. _, features, files = next(os.walk(os.path.join(ftrpath, subfolders[0]))) return Categories(loader, img_per_cat = img_per_cat, features = features)
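As a usage illustration, here is a minimal sketch of calling the factory. The directory layout and the stand-in loader class are hypothetical; only the two path attributes the factory actually reads are modelled.

    # Hypothetical stand-in: any Loader-like object exposing .impath and .ftrpath
    # is enough for the factory itself (a real loader must also be able to load
    # images and features for the resulting Categories object).
    class _DiskLoader(object):
        def __init__(self, impath, ftrpath):
            self.impath = impath    # e.g. /data/exp/images/<category>/<cat>_<num>.png
            self.ftrpath = ftrpath  # e.g. /data/exp/features/<category>/<feature>/map.mat

    stimuli = DirectoryStimuliFactory(_DiskLoader('/data/exp/images',
                                                  '/data/exp/features'))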
Filter the fixmat such that it only contains fixations on images in categories that are also in the categories object def fixations(self): ''' Filter the fixmat such that it only contains fixations on images in categories that are also in the categories object''' if not self._fixations: raise RuntimeError('This Images object does not have' +' an associated fixmat') if len(list(self._categories.keys())) == 0: return None else: idx = np.zeros(self._fixations.x.shape, dtype='bool') for (cat, _) in list(self._categories.items()): idx = idx | ((self._fixations.category == cat)) return self._fixations[idx]
Saves a new image to disk def data(self, value): """ Saves a new image to disk """ self.loader.save_image(self.category, self.image, value)
Returns all fixations that are on this image. A precondition for this to work is that a fixmat is associated with this Image object. def fixations(self): """ Returns all fixations that are on this image. A precondition for this to work is that a fixmat is associated with this Image object. """ if not self._fixations: raise RuntimeError('This Images object does not have' +' an associated fixmat') return self._fixations[(self._fixations.category == self.category) & (self._fixations.filenumber == self.image)]
Generator for creating the cross-validation slices. Returns A tuple of that contains two fixmats (training and test) and two Category objects (test and train). def generate(self): """ Generator for creating the cross-validation slices. Returns A tuple of that contains two fixmats (training and test) and two Category objects (test and train). """ for _ in range(0, self.num_slices): #1. separate fixmat into test and training fixmat subjects = np.unique(self.fm.SUBJECTINDEX) test_subs = randsample(subjects, self.subject_hold_out*len(subjects)) train_subs = [x for x in subjects if x not in test_subs] test_fm = self.fm[ismember(self.fm.SUBJECTINDEX, test_subs)] train_fm = self.fm[ismember(self.fm.SUBJECTINDEX, train_subs)] #2. distribute images test_imgs = {} train_imgs = {} id_test = (test_fm.x <1) & False id_train = (train_fm.x <1) & False for cat in self.categories: imgs = cat.images() test_imgs.update({cat.category:randsample(imgs, self.image_hold_out*len(imgs)).tolist()}) train_imgs.update({cat.category:[x for x in imgs if not ismember(x, test_imgs[cat.category])]}) id_test = id_test | ((ismember(test_fm.filenumber, test_imgs[cat.category])) & (test_fm.category == cat.category)) id_train = id_train | ((ismember(train_fm.filenumber, train_imgs[cat.category])) & (train_fm.category == cat.category)) #3. Create categories objects and yield result test_stimuli = Categories(self.categories.loader, test_imgs, features=self.categories._features, fixations=test_fm) train_stimuli = Categories(self.categories.loader, train_imgs, features=self.categories._features, fixations=train_fm) yield (train_fm[id_train], train_stimuli, test_fm[id_test], test_stimuli)
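A sketch of how the generator might be driven. The constructor arguments are inferred from the attributes used in generate() (fm, categories, num_slices, subject_hold_out, image_hold_out); the class name CrossValidation and the fit_model/evaluate helpers are placeholders, not names from this module.

    cv = CrossValidation(fm, categories,             # hypothetical class name
                         num_slices=10,
                         subject_hold_out=0.3,
                         image_hold_out=0.3)
    for train_fm, train_stim, test_fm, test_stim in cv.generate():
        model = fit_model(train_fm, train_stim)      # placeholder training step
        print(evaluate(model, test_fm, test_stim))   # placeholder evaluation step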
def prepare_data(fm, max_back, dur_cap=700):
    '''
    Computes angle and length differences up to given order and deletes
    suspiciously long fixations.

    Input
        fm: Fixmat
            Fixmat for which to compute angle and length differences
        max_back: Int
            Computes delta angle and amplitude up to order max_back.
        dur_cap: Int
            Longest allowed fixation duration
    Output
        fm: Fixmat
            Filtered fixmat that aligns to the other outputs.
        durations: ndarray
            Duration for each fixation in fm
        forward_angle:
            Angle between previous and next saccade.
    '''
    durations = np.roll(fm.end - fm.start, 1).astype(float)
    angles, lengths, ads, lds = anglendiff(fm, roll=max_back, return_abs=True)
    # durations and ads are aligned in a way that an entry in ads
    # encodes the angle of the saccade away from a fixation in durations
    forward_angle = abs(reshift(ads[0])).astype(float)
    ads = [abs(reshift(a)) for a in ads]
    # Now filter out weird fixation durations
    id_in = durations > dur_cap
    durations[id_in] = np.nan
    forward_angle[id_in] = np.nan
    return fm, durations, forward_angle, ads, lds
def saccadic_momentum_effect(durations, forward_angle, summary_stat=nanmean):
    """
    Computes the mean fixation duration at forward angles.
    """
    # e_angle holds the module-level forward-angle bin edges
    durations_per_da = np.nan * np.ones((len(e_angle) - 1,))
    for i, (bo, b1) in enumerate(zip(e_angle[:-1], e_angle[1:])):
        idx = ((bo <= forward_angle) &
               (forward_angle < b1) &
               (~np.isnan(durations)))
        durations_per_da[i] = summary_stat(durations[idx])
    return durations_per_da
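e_angle is a module-level array of forward-angle bin edges that is not shown in this excerpt. The sketch below reproduces the same binning on synthetic data, with assumed 10-degree bins:

    import numpy as np

    e_angle = np.linspace(0, 180, 19)          # assumed bin edges, 10 degrees each

    rng = np.random.RandomState(0)
    forward_angle = rng.uniform(0, 180, 5000)
    durations = 200 + 0.5 * forward_angle + rng.randn(5000) * 20

    # mean fixation duration per forward-angle bin, as in saccadic_momentum_effect
    means = np.array([np.nanmean(durations[(lo <= forward_angle) & (forward_angle < hi)])
                      for lo, hi in zip(e_angle[:-1], e_angle[1:])])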
Computes a measure of fixation durations at delta angle and delta length combinations. def ior_effect(durations, angle_diffs, length_diffs, summary_stat=np.mean, parallel=True, min_samples=20): """ Computes a measure of fixation durations at delta angle and delta length combinations. """ raster = np.empty((len(e_dist) - 1, len(e_angle) - 1), dtype=object) for a, (a_low, a_upp) in enumerate(zip(e_angle[:-1], e_angle[1:])): for d, (d_low, d_upp) in enumerate(zip(e_dist[:-1], e_dist[1:])): idx = ((d_low <= length_diffs) & (length_diffs < d_upp) & (a_low <= angle_diffs) & (angle_diffs < a_upp)) if sum(idx) < min_samples: raster[d, a] = np.array([np.nan]) else: raster[d, a] = durations[idx] if parallel: p = pool.Pool(3) result = p.map(summary_stat, list(raster.flatten())) p.terminate() else: result = list(map(summary_stat, list(raster.flatten()))) for idx, value in enumerate(result): i, j = np.unravel_index(idx, raster.shape) raster[i, j] = value return raster
def predict_fixation_duration(durations, angles, length_diffs,
                              dataset=None, params=None):
    """
    Fits a non-linear piecewise regression to fixation durations for a fixmat.

    Returns corrected fixation durations.
    """
    if dataset is None:
        dataset = np.ones(durations.shape)
    corrected_durations = np.nan * np.ones(durations.shape)
    for i, ds in enumerate(np.unique(dataset)):
        e = lambda v, x, y, z: (leastsq_dual_model(x, z, *v) - y)
        v0 = [120, 220.0, -.1, 0.5, .1, .1]
        id_ds = dataset == ds
        idnan = (~np.isnan(angles)) & (~np.isnan(durations)) & (~np.isnan(length_diffs))
        v, s = leastsq(e, v0,
                       args=(angles[idnan & id_ds],
                             durations[idnan & id_ds],
                             length_diffs[idnan & id_ds]),
                       maxfev=10000)
        corrected_durations[id_ds] = (durations[id_ds] -
                                      leastsq_dual_model(angles[id_ds],
                                                         length_diffs[id_ds], *v))
        if params is not None:
            params['v' + str(i)] = v
            params['s' + str(i)] = s
    return corrected_durations
Calculates the saccadic momentum effect for individual subjects. Removes any effect of amplitude differences. The parameters are fitted on unbinned data. The effects are computed on binned data. See e_dist and e_angle for the binning parameter. def subject_predictions(fm, field='SUBJECTINDEX', method=predict_fixation_duration, data=None): ''' Calculates the saccadic momentum effect for individual subjects. Removes any effect of amplitude differences. The parameters are fitted on unbinned data. The effects are computed on binned data. See e_dist and e_angle for the binning parameter. ''' if data is None: fma, dura, faa, adsa, ldsa = prepare_data(fm, dur_cap=700, max_back=5) adsa = adsa[0] ldsa = ldsa[0] else: fma, dura, faa, adsa, ldsa = data fma = fma.copy() # [ones(fm.x.shape)] sub_effects = [] sub_predictions = [] parameters = [] for i, fmsub in enumerate(np.unique(fma.field(field))): id = fma.field(field) == fmsub #_, dur, fa, ads, lds = prepare_data(fmsub, dur_cap = 700, max_back=5) dur, fa, ads, lds = dura[id], faa[id], adsa[id], ldsa[id] params = {} _ = method(dur, fa, lds, params=params) ps = params['v0'] ld_corrected = leastsq_only_dist(lds, ps[4], ps[5]) prediction = leastsq_only_angle(fa, ps[0], ps[1], ps[2], ps[3]) sub_predictions += [saccadic_momentum_effect(prediction, fa)] sub_effects += [saccadic_momentum_effect(dur - ld_corrected, fa)] parameters += [ps] return np.array(sub_effects), np.array(sub_predictions), parameters
Calculates how well the fixations from a set of subjects on a set of images can be predicted with the fixations from another set of subjects on another set of images. The prediction is carried out by computing a fixation density map from fixations of predicting_subjects subjects on predicting_images images. Prediction accuracy is assessed by measures.prediction_scores. Parameters fm : fixmat instance category : int Category from which the fixations are taken. predicting_filenumbers : list List of filenumbers used for prediction, i.e. images where fixations for the prediction are taken from. predicting_subjects : list List of subjects whose fixations on images in predicting_filenumbers are used for the prediction. predicted_filenumnbers : list List of images from which the to be predicted fixations are taken. predicted_subjects : list List of subjects used for evaluation, i.e subjects whose fixations on images in predicted_filenumbers are taken for evaluation. controls : bool, optional If True (default), n_predict subjects are chosen from the fixmat. If False, 1000 fixations are randomly generated and used for testing. scale_factor : int, optional specifies the scaling of the fdm. Default is 1. Returns auc : area under the roc curve for sets of actuals and controls true_pos_rate : ndarray Rate of true positives for every given threshold value. All values appearing in actuals are taken as thresholds. Uses lower sum interpolation. false_pos_rate : ndarray See true_pos_rate but for false positives. def intersubject_scores(fm, category, predicting_filenumbers, predicting_subjects, predicted_filenumbers, predicted_subjects, controls = True, scale_factor = 1): """ Calculates how well the fixations from a set of subjects on a set of images can be predicted with the fixations from another set of subjects on another set of images. The prediction is carried out by computing a fixation density map from fixations of predicting_subjects subjects on predicting_images images. Prediction accuracy is assessed by measures.prediction_scores. Parameters fm : fixmat instance category : int Category from which the fixations are taken. predicting_filenumbers : list List of filenumbers used for prediction, i.e. images where fixations for the prediction are taken from. predicting_subjects : list List of subjects whose fixations on images in predicting_filenumbers are used for the prediction. predicted_filenumnbers : list List of images from which the to be predicted fixations are taken. predicted_subjects : list List of subjects used for evaluation, i.e subjects whose fixations on images in predicted_filenumbers are taken for evaluation. controls : bool, optional If True (default), n_predict subjects are chosen from the fixmat. If False, 1000 fixations are randomly generated and used for testing. scale_factor : int, optional specifies the scaling of the fdm. Default is 1. Returns auc : area under the roc curve for sets of actuals and controls true_pos_rate : ndarray Rate of true positives for every given threshold value. All values appearing in actuals are taken as thresholds. Uses lower sum interpolation. false_pos_rate : ndarray See true_pos_rate but for false positives. 
""" predicting_fm = fm[ (ismember(fm.SUBJECTINDEX, predicting_subjects)) & (ismember(fm.filenumber, predicting_filenumbers)) & (fm.category == category)] predicted_fm = fm[ (ismember(fm.SUBJECTINDEX,predicted_subjects)) & (ismember(fm.filenumber,predicted_filenumbers))& (fm.category == category)] try: predicting_fdm = compute_fdm(predicting_fm, scale_factor = scale_factor) except RuntimeError: predicting_fdm = None if controls == True: fm_controls = fm[ (ismember(fm.SUBJECTINDEX, predicted_subjects)) & ((ismember(fm.filenumber, predicted_filenumbers)) != True) & (fm.category == category)] return measures.prediction_scores(predicting_fdm, predicted_fm, controls = (fm_controls.y, fm_controls.x)) return measures.prediction_scores(predicting_fdm, predicted_fm, controls = None)
Calculates how well the fixations of n random subjects on one image can be predicted with the fixations of m other random subjects. Notes Function that uses intersubject_auc for computing auc. Parameters fm : fixmat instance category : int Category from which the fixations are taken. filnumber : int Image from which fixations are taken. n_train : int The number of subjects which are used for prediction. n_predict : int The number of subjects to predict controls : bool, optional If True (default), n_predict subjects are chosen from the fixmat. If False, 1000 fixations are randomly generated and used for testing. scale_factor : int, optional specifies the scaling of the fdm. Default is 1. Returns tuple : prediction scores def intersubject_scores_random_subjects(fm, category, filenumber, n_train, n_predict, controls=True, scale_factor = 1): """ Calculates how well the fixations of n random subjects on one image can be predicted with the fixations of m other random subjects. Notes Function that uses intersubject_auc for computing auc. Parameters fm : fixmat instance category : int Category from which the fixations are taken. filnumber : int Image from which fixations are taken. n_train : int The number of subjects which are used for prediction. n_predict : int The number of subjects to predict controls : bool, optional If True (default), n_predict subjects are chosen from the fixmat. If False, 1000 fixations are randomly generated and used for testing. scale_factor : int, optional specifies the scaling of the fdm. Default is 1. Returns tuple : prediction scores """ subjects = np.unique(fm.SUBJECTINDEX) if len(subjects) < n_train + n_predict: raise ValueError("""Not enough subjects in fixmat""") # draw a random sample of subjects for testing and evaluation, according # to the specified set sizes (n_train, n_predict) np.random.shuffle(subjects) predicted_subjects = subjects[0 : n_predict] predicting_subjects = subjects[n_predict : n_predict + n_train] assert len(predicting_subjects) == n_train assert len(predicted_subjects) == n_predict assert [x not in predicting_subjects for x in predicted_subjects] return intersubject_scores(fm, category, [filenumber], predicting_subjects, [filenumber], predicted_subjects, controls, scale_factor)
compute the inter-subject consistency upper bound for a fixmat. Input: fm : a fixmat instance nr_subs : the number of subjects used for the prediction. Defaults to the total number of subjects in the fixmat minus 1 scale_factor : the scale factor of the FDMs. Default is 1. Returns: A list of scores; the list contains one dictionary for each measure. Each dictionary contains one key for each category and corresponding values is an array with scores for each subject. def upper_bound(fm, nr_subs = None, scale_factor = 1): """ compute the inter-subject consistency upper bound for a fixmat. Input: fm : a fixmat instance nr_subs : the number of subjects used for the prediction. Defaults to the total number of subjects in the fixmat minus 1 scale_factor : the scale factor of the FDMs. Default is 1. Returns: A list of scores; the list contains one dictionary for each measure. Each dictionary contains one key for each category and corresponding values is an array with scores for each subject. """ nr_subs_total = len(np.unique(fm.SUBJECTINDEX)) if not nr_subs: nr_subs = nr_subs_total - 1 assert (nr_subs < nr_subs_total) # initialize output structure; every measure gets one dict with # category numbers as keys and numpy-arrays as values intersub_scores = [] for measure in range(len(measures.scores)): res_dict = {} result_vectors = [np.empty(nr_subs_total) + np.nan for _ in np.unique(fm.category)] res_dict.update(list(zip(np.unique(fm.category), result_vectors))) intersub_scores.append(res_dict) #compute inter-subject scores for every stimulus, with leave-one-out #over subjects for fm_cat in fm.by_field('category'): cat = fm_cat.category[0] for (sub_counter, sub) in enumerate(np.unique(fm_cat.SUBJECTINDEX)): image_scores = [] for fm_single in fm_cat.by_field('filenumber'): predicting_subs = (np.setdiff1d(np.unique( fm_single.SUBJECTINDEX),[sub])) np.random.shuffle(predicting_subs) predicting_subs = predicting_subs[0:nr_subs] predicting_fm = fm_single[ (ismember(fm_single.SUBJECTINDEX, predicting_subs))] predicted_fm = fm_single[fm_single.SUBJECTINDEX == sub] try: predicting_fdm = compute_fdm(predicting_fm, scale_factor = scale_factor) except RuntimeError: predicting_fdm = None image_scores.append(measures.prediction_scores( predicting_fdm, predicted_fm)) for (measure, score) in enumerate(nanmean(image_scores, 0)): intersub_scores[measure][cat][sub_counter] = score return intersub_scores
Compute the spatial bias lower bound for a fixmat. Input: fm : a fixmat instance nr_subs : the number of subjects used for the prediction. Defaults to the total number of subjects in the fixmat minus 1 nr_imgs : the number of images used for prediction. If given, the same number will be used for every category. If not given, leave-one-out will be used in all categories. scale_factor : the scale factor of the FDMs. Default is 1. Returns: A list of spatial bias scores; the list contains one dictionary for each measure. Each dictionary contains one key for each category and corresponding values is an array with scores for each subject. def lower_bound(fm, nr_subs = None, nr_imgs = None, scale_factor = 1): """ Compute the spatial bias lower bound for a fixmat. Input: fm : a fixmat instance nr_subs : the number of subjects used for the prediction. Defaults to the total number of subjects in the fixmat minus 1 nr_imgs : the number of images used for prediction. If given, the same number will be used for every category. If not given, leave-one-out will be used in all categories. scale_factor : the scale factor of the FDMs. Default is 1. Returns: A list of spatial bias scores; the list contains one dictionary for each measure. Each dictionary contains one key for each category and corresponding values is an array with scores for each subject. """ nr_subs_total = len(np.unique(fm.SUBJECTINDEX)) if nr_subs is None: nr_subs = nr_subs_total - 1 assert (nr_subs < nr_subs_total) # initialize output structure; every measure gets one dict with # category numbers as keys and numpy-arrays as values sb_scores = [] for measure in range(len(measures.scores)): res_dict = {} result_vectors = [np.empty(nr_subs_total) + np.nan for _ in np.unique(fm.category)] res_dict.update(list(zip(np.unique(fm.category),result_vectors))) sb_scores.append(res_dict) # compute mean spatial bias predictive power for all subjects in all # categories for fm_cat in fm.by_field('category'): cat = fm_cat.category[0] nr_imgs_cat = len(np.unique(fm_cat.filenumber)) if not nr_imgs: nr_imgs_current = nr_imgs_cat - 1 else: nr_imgs_current = nr_imgs assert(nr_imgs_current < nr_imgs_cat) for (sub_counter, sub) in enumerate(np.unique(fm.SUBJECTINDEX)): image_scores = [] for fm_single in fm_cat.by_field('filenumber'): # Iterating by field filenumber makes filenumbers # in fm_single unique: Just take the first one to get the # filenumber for this fixmat fn = fm_single.filenumber[0] predicting_subs = (np.setdiff1d(np.unique( fm_cat.SUBJECTINDEX), [sub])) np.random.shuffle(predicting_subs) predicting_subs = predicting_subs[0:nr_subs] predicting_fns = (np.setdiff1d(np.unique( fm_cat.filenumber), [fn])) np.random.shuffle(predicting_fns) predicting_fns = predicting_fns[0:nr_imgs_current] predicting_fm = fm_cat[ (ismember(fm_cat.SUBJECTINDEX, predicting_subs)) & (ismember(fm_cat.filenumber, predicting_fns))] predicted_fm = fm_single[fm_single.SUBJECTINDEX == sub] try: predicting_fdm = compute_fdm(predicting_fm, scale_factor = scale_factor) except RuntimeError: predicting_fdm = None image_scores.append(measures.prediction_scores(predicting_fdm, predicted_fm)) for (measure, score) in enumerate(nanmean(image_scores, 0)): sb_scores[measure][cat][sub_counter] = score return sb_scores
def ind2sub(ind, dimensions):
    """
    Calculates subscripts for indices into regularly spaced matrixes.
    """
    # check that the index is within range
    if ind >= np.prod(dimensions):
        raise RuntimeError("ind2sub: index exceeds array size")
    cum_dims = list(dimensions)
    cum_dims.reverse()
    m = 1
    mult = []
    for d in cum_dims:
        m = m * d
        mult.append(m)
    mult.pop()
    mult.reverse()
    mult.append(1)
    indices = []
    for d in mult:
        # integer division keeps the subscripts integral (MATLAB-style, one-based)
        indices.append((ind // d) + 1)
        ind = ind - (ind // d) * d
    return indices
An exemplary sub2ind implementation to create randomization scripts. This function calculates indices from subscripts into regularly spaced matrixes. def sub2ind(indices, dimensions): """ An exemplary sub2ind implementation to create randomization scripts. This function calculates indices from subscripts into regularly spaced matrixes. """ # check that none of the indices exceeds the size of the array if any([i > j for i, j in zip(indices, dimensions)]): raise RuntimeError("sub2ind:an index exceeds its dimension's size") dims = list(dimensions) dims.append(1) dims.remove(dims[0]) dims.reverse() ind = list(indices) ind.reverse() idx = 0 mult = 1 for (cnt, dim) in zip(ind, dims): mult = dim*mult idx = idx + (cnt-1)*mult return idx
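Both helpers use MATLAB-style one-based subscripts and zero-based linear indices; a quick round-trip check:

    dims = (3, 4)
    for linear_index in range(3 * 4):
        subs = ind2sub(linear_index, dims)        # e.g. 5 -> [2, 2]
        assert sub2ind(subs, dims) == linear_index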
Restores a task store from file. def RestoreTaskStoreFactory(store_class, chunk_size, restore_file, save_file): """ Restores a task store from file. """ intm_results = np.load(restore_file) intm = intm_results[intm_results.files[0]] idx = np.isnan(intm).flatten().nonzero()[0] partitions = math.ceil(len(idx) / float(chunk_size)) task_store = store_class(partitions, idx.tolist(), save_file) task_store.num_tasks = len(idx) # Also set up matrices for saving results for f in intm_results.files: task_store.__dict__[f] = intm_results[f] return task_store
Reschedule all running tasks. def xmlrpc_reschedule(self): """ Reschedule all running tasks. """ if not len(self.scheduled_tasks) == 0: self.reschedule = list(self.scheduled_tasks.items()) self.scheduled_tasks = {} return True
Return a new task description: ID and necessary parameters, all are given in a dictionary def xmlrpc_get_task(self): """ Return a new task description: ID and necessary parameters, all are given in a dictionary """ try: if len(self.reschedule) == 0: (task_id, cur_task) = next(self.task_iterator) else: (task_id, cur_task) = self.reschedule.pop() self.scheduled_tasks.update({task_id: cur_task}) return (task_id, cur_task.to_dict()) except StopIteration: print('StopIteration: No more tasks') return False except Exception as err: print('Some other error') print(err) return False
Take the results of a computation and put it into the results list. def xmlrpc_task_done(self, result): """ Take the results of a computation and put it into the results list. """ (task_id, task_results) = result del self.scheduled_tasks[task_id] self.task_store.update_results(task_id, task_results) self.results += 1 return True
def xmlrpc_status(self):
    """
    Return a status message
    """
    return ("""
    %i Jobs are still waiting for execution
    %i Jobs are being processed
    %i Jobs are done
    """ % (self.task_store.partitions - self.results - len(self.scheduled_tasks),
           len(self.scheduled_tasks),
           self.results))
Save results and own state into file. def xmlrpc_save2file(self, filename): """ Save results and own state into file. """ savefile = open(filename,'wb') try: pickle.dump({'scheduled':self.scheduled_tasks, 'reschedule':self.reschedule},savefile) except pickle.PicklingError: return -1 savefile.close() return 1
This function needs to be called to start the computation. def run(self): """This function needs to be called to start the computation.""" (task_id, tasks) = self.server.get_task() self.task_store.from_dict(tasks) for (index, task) in self.task_store: result = self.compute(index, task) self.results.append(result) self.server.task_done((task_id, self.results))
Configures the task store to be the task_store described in description def from_dict(self, description): """Configures the task store to be the task_store described in description""" assert(self.ident == description['ident']) self.partitions = description['partitions'] self.indices = description['indices']
Partitions all tasks into groups of tasks. A group is represented by a task_store object that indexes a sub- set of tasks. def partition(self): """Partitions all tasks into groups of tasks. A group is represented by a task_store object that indexes a sub- set of tasks.""" step = int(math.ceil(self.num_tasks / float(self.partitions))) if self.indices == None: slice_ind = list(range(0, self.num_tasks, step)) for start in slice_ind: yield self.__class__(self.partitions, list(range(start, start + step))) else: slice_ind = list(range(0, len(self.indices), step)) for start in slice_ind: if start + step <= len(self.indices): yield self.__class__(self.partitions, self.indices[start: start + step]) else: yield self.__class__(self.partitions, self.indices[start:])
Fits a 3D distribution with splines. Input: samples: Array Array of samples from a probability distribution e_x: Array Edges that define the events in the probability distribution along the x direction. For example, e_x[0] < samples[0] <= e_x[1] picks out all samples that are associated with the first event. e_y: Array See e_x, but for the y direction. remove_zeros: Bool If True, events that are not observed will not be part of the fitting process. If False, those events will be modelled as finfo('float').eps **kw: Arguments that are passed on to spline_bse1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Tuple of arrays Sequence of knots that were used for the spline basis (x,y) def fit3d(samples, e_x, e_y, e_z, remove_zeros = False, **kw): """Fits a 3D distribution with splines. Input: samples: Array Array of samples from a probability distribution e_x: Array Edges that define the events in the probability distribution along the x direction. For example, e_x[0] < samples[0] <= e_x[1] picks out all samples that are associated with the first event. e_y: Array See e_x, but for the y direction. remove_zeros: Bool If True, events that are not observed will not be part of the fitting process. If False, those events will be modelled as finfo('float').eps **kw: Arguments that are passed on to spline_bse1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Tuple of arrays Sequence of knots that were used for the spline basis (x,y) """ height, width, depth = len(e_y)-1, len(e_x)-1, len(e_z)-1 (p_est, _) = np.histogramdd(samples, (e_x, e_y, e_z)) p_est = p_est/sum(p_est.flat) p_est = p_est.flatten() if remove_zeros: non_zero = ~(p_est == 0) else: non_zero = (p_est >= 0) basis = spline_base3d(width,height, depth, **kw) model = linear_model.BayesianRidge() model.fit(basis[:, non_zero].T, p_est[:,np.newaxis][non_zero,:]) return (model.predict(basis.T).reshape((width, height, depth)), p_est.reshape((width, height, depth)))
Fits a 2D distribution with splines. Input: samples: Matrix or list of arrays If matrix, it must be of size Nx2, where N is the number of observations. If list, it must contain two arrays of length N. e_x: Array Edges that define the events in the probability distribution along the x direction. For example, e_x[0] < samples[0] <= e_x[1] picks out all samples that are associated with the first event. e_y: Array See e_x, but for the y direction. remove_zeros: Bool If True, events that are not observed will not be part of the fitting process. If False, those events will be modelled as finfo('float').eps **kw: Arguments that are passed on to spline_bse1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Tuple of arrays Sequence of knots that were used for the spline basis (x,y) def fit2d(samples,e_x, e_y, remove_zeros = False, p_est = None, **kw): """Fits a 2D distribution with splines. Input: samples: Matrix or list of arrays If matrix, it must be of size Nx2, where N is the number of observations. If list, it must contain two arrays of length N. e_x: Array Edges that define the events in the probability distribution along the x direction. For example, e_x[0] < samples[0] <= e_x[1] picks out all samples that are associated with the first event. e_y: Array See e_x, but for the y direction. remove_zeros: Bool If True, events that are not observed will not be part of the fitting process. If False, those events will be modelled as finfo('float').eps **kw: Arguments that are passed on to spline_bse1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Tuple of arrays Sequence of knots that were used for the spline basis (x,y) """ if p_est is None: height = len(e_y)-1 width = len(e_x)-1 (p_est, _) = np.histogramdd(samples, (e_x, e_y)) else: p_est = p_est.T width, height = p_est.shape # p_est contains x in dim 1 and y in dim 0 shape = p_est.shape p_est = (p_est/sum(p_est.flat)).reshape(shape) mx = p_est.sum(1) my = p_est.sum(0) # Transpose hist to have x in dim 0 p_est = p_est.T.flatten() basis, knots = spline_base2d(width, height, marginal_x = mx, marginal_y = my, **kw) model = linear_model.BayesianRidge() if remove_zeros: non_zero = ~(p_est == 0) model.fit(basis[:, non_zero].T, p_est[non_zero]) else: non_zero = (p_est >= 0) p_est[~non_zero,:] = np.finfo(float).eps model.fit(basis.T, p_est) return (model.predict(basis.T).reshape((height, width)), p_est.reshape((height, width)), knots)
Fits a 1D distribution with splines. Input: samples: Array Array of samples from a probability distribution e: Array Edges that define the events in the probability distribution. For example, e[0] < x <= e[1] is the range of values that are associated with the first event. **kw: Arguments that are passed on to spline_bse1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Array Sequence of knots that were used for the spline basis def fit1d(samples, e, remove_zeros = False, **kw): """Fits a 1D distribution with splines. Input: samples: Array Array of samples from a probability distribution e: Array Edges that define the events in the probability distribution. For example, e[0] < x <= e[1] is the range of values that are associated with the first event. **kw: Arguments that are passed on to spline_bse1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Array Sequence of knots that were used for the spline basis """ samples = samples[~np.isnan(samples)] length = len(e)-1 hist,_ = np.histogramdd(samples, (e,)) hist = hist/sum(hist) basis, knots = spline_base1d(length, marginal = hist, **kw) non_zero = hist>0 model = linear_model.BayesianRidge() if remove_zeros: model.fit(basis[non_zero, :], hist[:,np.newaxis][non_zero,:]) else: hist[~non_zero] = np.finfo(float).eps model.fit(basis, hist[:,np.newaxis]) return model.predict(basis), hist, knots
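A usage sketch for fit1d on synthetic data; the sample size and bin edges are arbitrary choices.

    import numpy as np

    rng = np.random.RandomState(42)
    samples = rng.randn(10000)             # draws from a standard normal
    edges = np.linspace(-4, 4, 51)         # 50 bins/events

    smoothed, hist, knots = fit1d(samples, edges)
    # 'smoothed' is the spline-regularised estimate of the binned distribution,
    # 'hist' the raw normalised histogram and 'knots' the knot sequence used.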
def knots_from_marginal(marginal, nr_knots, spline_order):
    """
    Determines knot placement based on a marginal distribution.

    It places knots such that each knot covers the same amount of
    probability mass. Two of the knots are reserved for the borders,
    which are treated separately.

    For example, a uniform distribution with 5 knots will cause the knots
    to be equally spaced, with 25% of the probability mass between each
    two knots.

    Input:
        marginal: Array
            Estimate of the marginal distribution used to estimate knot placement.
        nr_knots: int
            Number of knots to be placed.
        spline_order: int
            Order of the splines
    Returns:
        knots: Array
            Sequence of knot positions
    """
    cumsum = np.cumsum(marginal)
    cumsum = cumsum / cumsum.max()
    borders = np.linspace(0, 1, nr_knots)
    knot_placement = ([0] +
                      np.unique([np.where(cumsum >= b)[0][0]
                                 for b in borders[1:-1]]).tolist() +
                      [len(marginal) - 1])
    knots = augknt(knot_placement, spline_order)
    return knots
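For a flat marginal the equal-mass rule reduces to (roughly) equally spaced knots, as this small check illustrates:

    import numpy as np

    marginal = np.ones(100)                               # uniform mass over 100 bins
    knots = knots_from_marginal(marginal, nr_knots=5, spline_order=3)
    # interior placements land at bins 0, 24, 49, 74, 99 (quartiles of the
    # cumulative mass); augknt then repeats the first and last knot 3 extra times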
Computes a 1D spline basis Input: length: int length of each basis nr_knots: int Number of knots, i.e. number of basis functions. spline_order: int Order of the splines. marginal: array, optional Estimate of the marginal distribution of the input to be fitted. If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced. def spline_base1d(length, nr_knots = 20, spline_order = 5, marginal = None): """Computes a 1D spline basis Input: length: int length of each basis nr_knots: int Number of knots, i.e. number of basis functions. spline_order: int Order of the splines. marginal: array, optional Estimate of the marginal distribution of the input to be fitted. If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced. """ if marginal is None: knots = augknt(np.linspace(0,length+1, nr_knots), spline_order) else: knots = knots_from_marginal(marginal, nr_knots, spline_order) x_eval = np.arange(1,length+1).astype(float) Bsplines = spcol(x_eval,knots,spline_order) return Bsplines, knots
Computes a set of 2D spline basis functions. The basis functions cover the entire space in height*width and can for example be used to create fixation density maps. Input: width: int width of each basis height: int height of each basis nr_knots_x: int of knots in x (width) direction. nr_knots_y: int of knots in y (height) direction. spline_order: int Order of the spline. marginal_x: array, optional Estimate of marginal distribution of the input to be fitted along the x-direction (width). If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced. marginal_y: array, optional Marginal distribution along the y-direction (height). If given, it is used to determine the positioning of knots. Each knot will cover the same amount of probability mass. Output: basis: Matrix Matrix of size n*(width*height) that contains in each row one vectorized basis. knots: Tuple (x,y) are knot arrays that show the placement of knots. def spline_base2d(width, height, nr_knots_x = 20.0, nr_knots_y = 20.0, spline_order = 5, marginal_x = None, marginal_y = None): """Computes a set of 2D spline basis functions. The basis functions cover the entire space in height*width and can for example be used to create fixation density maps. Input: width: int width of each basis height: int height of each basis nr_knots_x: int of knots in x (width) direction. nr_knots_y: int of knots in y (height) direction. spline_order: int Order of the spline. marginal_x: array, optional Estimate of marginal distribution of the input to be fitted along the x-direction (width). If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced. marginal_y: array, optional Marginal distribution along the y-direction (height). If given, it is used to determine the positioning of knots. Each knot will cover the same amount of probability mass. Output: basis: Matrix Matrix of size n*(width*height) that contains in each row one vectorized basis. knots: Tuple (x,y) are knot arrays that show the placement of knots. """ if not (nr_knots_x<width and nr_knots_y<height): raise RuntimeError("Too many knots for size of the base") if marginal_x is None: knots_x = augknt(np.linspace(0,width+1,nr_knots_x), spline_order) else: knots_x = knots_from_marginal(marginal_x, nr_knots_x, spline_order) if marginal_y is None: knots_y = augknt(np.linspace(0,height+1, nr_knots_y), spline_order) else: knots_y = knots_from_marginal(marginal_y, nr_knots_y, spline_order) x_eval = np.arange(1,width+1).astype(float) y_eval = np.arange(1,height+1).astype(float) spline_setx = spcol(x_eval, knots_x, spline_order) spline_sety = spcol(y_eval, knots_y, spline_order) nr_coeff = [spline_sety.shape[1], spline_setx.shape[1]] dim_bspline = [nr_coeff[0]*nr_coeff[1], len(x_eval)*len(y_eval)] # construct 2D B-splines nr_basis = 0 bspline = np.zeros(dim_bspline) for IDX1 in range(0,nr_coeff[0]): for IDX2 in range(0, nr_coeff[1]): rand_coeff = np.zeros((nr_coeff[0] , nr_coeff[1])) rand_coeff[IDX1,IDX2] = 1 tmp = np.dot(spline_sety,rand_coeff) bspline[nr_basis,:] = np.dot(tmp,spline_setx.T).reshape((1,-1)) nr_basis = nr_basis+1 return bspline, (knots_x, knots_y)
Computes a set of 3D spline basis functions. For a description of the parameters see spline_base2d. def spline_base3d( width, height, depth, nr_knots_x = 10.0, nr_knots_y = 10.0, nr_knots_z=10, spline_order = 3, marginal_x = None, marginal_y = None, marginal_z = None): """Computes a set of 3D spline basis functions. For a description of the parameters see spline_base2d. """ if not nr_knots_z < depth: raise RuntimeError("Too many knots for size of the base") basis2d, (knots_x, knots_y) = spline_base2d(height, width, nr_knots_x, nr_knots_y, spline_order, marginal_x, marginal_y) if marginal_z is not None: knots_z = knots_from_marginal(marginal_z, nr_knots_z, spline_order) else: knots_z = augknt(np.linspace(0,depth+1, nr_knots_z), spline_order) z_eval = np.arange(1,depth+1).astype(float) spline_setz = spcol(z_eval, knots_z, spline_order) bspline = np.zeros((basis2d.shape[0]*len(z_eval), height*width*depth)) basis_nr = 0 for spline_a in spline_setz.T: for spline_b in basis2d: spline_b = spline_b.reshape((height, width)) bspline[basis_nr, :] = (spline_b[:,:,np.newaxis] * spline_a[:]).flat basis_nr +=1 return bspline, (knots_x, knots_y, knots_z)
Evaluates the ith spline basis given by knots on points in x def spline(x,knots,p,i=0.0): """Evaluates the ith spline basis given by knots on points in x""" assert(p+1<len(knots)) return np.array([N(float(u),float(i),float(p),knots) for u in x])
def spcol(x, knots, spline_order):
    """
    Computes the spline collocation matrix for knots in x.

    The spline collocation matrix contains all m-p-1 bases defined by knots.
    Specifically it contains the ith basis in the ith column.

    Input:
        x: vector to evaluate the bases on
        knots: vector of knots
        spline_order: order of the spline
    Output:
        colmat: n x (len(knots) - spline_order - 1) matrix
            The collocation matrix has one row per evaluation point in x and
            one column per basis function; the ith column contains the ith
            basis of knots evaluated on x.
    """
    colmat = np.nan * np.ones((len(x), len(knots) - spline_order - 1))
    for i in range(0, len(knots) - spline_order - 1):
        colmat[:, i] = spline(x, knots, spline_order, i)
    return colmat
Augment knot sequence such that some boundary conditions are met. def augknt(knots,order): """Augment knot sequence such that some boundary conditions are met.""" a = [] [a.append(knots[0]) for t in range(0,order)] [a.append(k) for k in knots] [a.append(knots[-1]) for t in range(0,order)] return np.array(a)
Compute Spline Basis Evaluates the spline basis of order p defined by knots at knot i and point u. def N(u,i,p,knots): """Compute Spline Basis Evaluates the spline basis of order p defined by knots at knot i and point u. """ if p == 0: if knots[i] < u and u <=knots[i+1]: return 1.0 else: return 0.0 else: try: k = (( float((u-knots[i]))/float((knots[i+p] - knots[i]) )) * N(u,i,p-1,knots)) except ZeroDivisionError: k = 0.0 try: q = (( float((knots[i+p+1] - u))/float((knots[i+p+1] - knots[i+1]))) * N(u,i+1,p-1,knots)) except ZeroDivisionError: q = 0.0 return float(k + q)
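As a sanity check of the basis construction, the rows of the collocation matrix should sum to roughly one at interior evaluation points (partition of unity); the knot choice below is arbitrary.

    import numpy as np

    order = 3
    knots = augknt(np.linspace(0, 11, 6), order)   # clamped knot vector over [0, 11]
    x = np.arange(1, 11).astype(float)             # interior evaluation points
    basis = spcol(x, knots, order)                 # one basis function per column

    row_sums = basis.sum(axis=1)                   # should all be close to 1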
def prediction_scores(prediction, fm, **kw):
    """
    Evaluates a prediction against fixations in a fixmat with different measures.

    The default measures which are used are AUC, NSS and KL-divergence. This
    can be changed by setting the list of measures with set_scores.

    As different measures need potentially different parameters, the kw
    dictionary can be used to pass arguments to measures. Every named argument
    (except fm and prediction) of a measure that is included in kw.keys()
    will be filled with the value stored in kw.

    Example:

    >>> prediction_scores(P, FM, ctr_loc = (y,x))

    In this case the AUC will be computed with control points (y,x), because
    the measure 'roc_model' has 'ctr_loc' as named argument.

    Input:
        prediction : 2D numpy array
            The prediction that should be evaluated
        fm : Fixmat
            The eyetracking data to evaluate against
    Output:
        Tuple of prediction scores. The order of the scores is determined
        by order of measures.scores.
    """
    if prediction is None:
        return [np.NaN for measure in scores]
    results = []
    for measure in scores:
        # getargspec was removed in Python 3.11; getfullargspec provides
        # the same 'args' list
        args = inspect.getfullargspec(measure).args
        if len(args) > 2:
            # Filter the dictionary, such that only the keys that are
            # expected by the measure are in it
            mdict = {key: value for (key, value) in kw.items() if key in args}
            score = measure(prediction, fm, **mdict)
        else:
            score = measure(prediction, fm)
        results.append(score)
    return results
wraps kldiv functionality for model evaluation input: prediction: 2D matrix the model salience map fm : fixmat Should be filtered for the image corresponding to the prediction def kldiv_model(prediction, fm): """ wraps kldiv functionality for model evaluation input: prediction: 2D matrix the model salience map fm : fixmat Should be filtered for the image corresponding to the prediction """ (_, r_x) = calc_resize_factor(prediction, fm.image_size) q = np.array(prediction, copy=True) q -= np.min(q.flatten()) q /= np.sum(q.flatten()) return kldiv(None, q, distp = fm, scale_factor = r_x)
def kldiv(p, q, distp=None, distq=None, scale_factor=1):
    """
    Computes the Kullback-Leibler divergence between two distributions.

    Parameters
        p : Matrix
            The first probability distribution
        q : Matrix
            The second probability distribution
        distp : fixmat
            If p is None, distp is used to compute a FDM which is then taken
            as 1st probability distribution.
        distq : fixmat
            If q is None, distq is used to compute a FDM which is then taken
            as 2nd probability distribution.
        scale_factor : double
            Determines the size of FDM computed from distq or distp.
    """
    assert q is not None or distq is not None, "Either q or distq have to be given"
    assert p is not None or distp is not None, "Either p or distp have to be given"
    try:
        if p is None:
            p = compute_fdm(distp, scale_factor=scale_factor)
        if q is None:
            q = compute_fdm(distq, scale_factor=scale_factor)
    except RuntimeError:
        return np.NaN
    q += np.finfo(q.dtype).eps
    p += np.finfo(p.dtype).eps
    kl = np.sum(p * (np.log2(p / q)))
    return kl
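A tiny numeric check of the divergence with two hand-picked two-event distributions:

    import numpy as np

    p = np.array([0.5, 0.5])
    q = np.array([0.25, 0.75])
    # KL(p || q) = 0.5*log2(0.5/0.25) + 0.5*log2(0.5/0.75) ~= 0.2075 bits
    print(kldiv(p, q))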
Computes Chao-Shen corrected KL-divergence between prediction and fdm made from fixations in fm. Parameters : prediction : np.ndarray a fixation density map fm : FixMat object def kldiv_cs_model(prediction, fm): """ Computes Chao-Shen corrected KL-divergence between prediction and fdm made from fixations in fm. Parameters : prediction : np.ndarray a fixation density map fm : FixMat object """ # compute histogram of fixations needed for ChaoShen corrected kl-div # image category must exist (>-1) and image_size must be non-empty assert(len(fm.image_size) == 2 and (fm.image_size[0] > 0) and (fm.image_size[1] > 0)) assert(-1 not in fm.category) # check whether fixmat contains fixations if len(fm.x) == 0: return np.NaN (scale_factor, _) = calc_resize_factor(prediction, fm.image_size) # this specifies left edges of the histogram bins, i.e. fixations between # ]0 binedge[0]] are included. --> fixations are ceiled e_y = np.arange(0, np.round(scale_factor*fm.image_size[0]+1)) e_x = np.arange(0, np.round(scale_factor*fm.image_size[1]+1)) samples = np.array(list(zip((scale_factor*fm.y), (scale_factor*fm.x)))) (fdm, _) = np.histogramdd(samples, (e_y, e_x)) # compute ChaoShen corrected kl-div q = np.array(prediction, copy = True) q[q == 0] = np.finfo(q.dtype).eps q /= np.sum(q) (H, pa, la) = chao_shen(fdm) q = q[fdm > 0] cross_entropy = -np.sum((pa * np.log2(q)) / la) return (cross_entropy - H)
Computes some terms needed for the Chao-Shen KL correction. def chao_shen(q): """ Computes some terms needed for the Chao-Shen KL correction. """ yx = q[q > 0] # remove bins with zero counts n = np.sum(yx) p = yx.astype(float)/n f1 = np.sum(yx == 1) # number of singletons in the sample if f1 == n: # avoid C == 0 f1 -= 1 C = 1 - (f1/n) # estimated coverage of the sample pa = C * p # coverage adjusted empirical frequencies la = (1 - (1 - pa) ** n) # probability to see a bin (species) in the sample H = -np.sum((pa * np.log2(pa)) / la) return (H, pa, la)
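To make the correction terms concrete, a small worked example with a hand-picked count histogram:

    import numpy as np

    counts = np.array([3, 1, 1])     # three observed bins, two of them singletons
    H, pa, la = chao_shen(counts)
    # n = 5, f1 = 2, coverage C = 1 - 2/5 = 0.6
    # pa = 0.6 * [0.6, 0.2, 0.2] = [0.36, 0.12, 0.12]
    # la = 1 - (1 - pa)**5; H is the coverage-adjusted entropy estimate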
wraps numpy.corrcoef functionality for model evaluation input: prediction: 2D Matrix the model salience map fm: fixmat Used to compute a FDM to which the prediction is compared. def correlation_model(prediction, fm): """ wraps numpy.corrcoef functionality for model evaluation input: prediction: 2D Matrix the model salience map fm: fixmat Used to compute a FDM to which the prediction is compared. """ (_, r_x) = calc_resize_factor(prediction, fm.image_size) fdm = compute_fdm(fm, scale_factor = r_x) return np.corrcoef(fdm.flatten(), prediction.flatten())[0,1]
wraps nss functionality for model evaluation input: prediction: 2D matrix the model salience map fm : fixmat Fixations that define the actuals def nss_model(prediction, fm): """ wraps nss functionality for model evaluation input: prediction: 2D matrix the model salience map fm : fixmat Fixations that define the actuals """ (r_y, r_x) = calc_resize_factor(prediction, fm.image_size) fix = ((np.array(fm.y-1)*r_y).astype(int), (np.array(fm.x-1)*r_x).astype(int)) return nss(prediction, fix)
Compute the normalized scanpath salience input: fix : list, l[0] contains y, l[1] contains x def nss(prediction, fix): """ Compute the normalized scanpath salience input: fix : list, l[0] contains y, l[1] contains x """ prediction = prediction - np.mean(prediction) prediction = prediction / np.std(prediction) return np.mean(prediction[fix[0], fix[1]])
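A small hand-computed example of the NSS score:

    import numpy as np

    prediction = np.array([[0., 1.],
                           [2., 5.]])
    fix = ([1], [1])                 # one fixation at row 1, column 1 (value 5)
    # the map has mean 2 and std ~1.87, so nss = (5 - 2) / 1.87 ~= 1.60
    print(nss(prediction, fix))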
wraps roc functionality for model evaluation Parameters: prediction: 2D array the model salience map fm : fixmat Fixations that define locations of the actuals ctr_loc : tuple of (y.x) coordinates, optional Allows to specify control points for spatial bias correction ctr_size : two element tuple, optional Specifies the assumed image size of the control locations, defaults to fm.image_size def roc_model(prediction, fm, ctr_loc = None, ctr_size = None): """ wraps roc functionality for model evaluation Parameters: prediction: 2D array the model salience map fm : fixmat Fixations that define locations of the actuals ctr_loc : tuple of (y.x) coordinates, optional Allows to specify control points for spatial bias correction ctr_size : two element tuple, optional Specifies the assumed image size of the control locations, defaults to fm.image_size """ # check if prediction is a valid numpy array assert type(prediction) == np.ndarray # check whether scaling preserved aspect ratio (r_y, r_x) = calc_resize_factor(prediction, fm.image_size) # read out values in the fdm at actual fixation locations # .astype(int) floors numbers in np.array y_index = (r_y * np.array(fm.y-1)).astype(int) x_index = (r_x * np.array(fm.x-1)).astype(int) actuals = prediction[y_index, x_index] if not ctr_loc: xc = np.random.randint(0, prediction.shape[1], 1000) yc = np.random.randint(0, prediction.shape[0], 1000) ctr_loc = (yc.astype(int), xc.astype(int)) else: if not ctr_size: ctr_size = fm.image_size else: (r_y, r_x) = calc_resize_factor(prediction, ctr_size) ctr_loc = ((r_y * np.array(ctr_loc[0])).astype(int), (r_x * np.array(ctr_loc[1])).astype(int)) controls = prediction[ctr_loc[0], ctr_loc[1]] return fast_roc(actuals, controls)[0]
approximates the area under the roc curve for sets of actuals and controls. Uses all values appearing in actuals as thresholds and lower sum interpolation. Also returns arrays of the true positive rate and the false positive rate that can be used for plotting the roc curve. Parameters: actuals : list A list of numeric values for positive observations. controls : list A list of numeric values for negative observations. def fast_roc(actuals, controls): """ approximates the area under the roc curve for sets of actuals and controls. Uses all values appearing in actuals as thresholds and lower sum interpolation. Also returns arrays of the true positive rate and the false positive rate that can be used for plotting the roc curve. Parameters: actuals : list A list of numeric values for positive observations. controls : list A list of numeric values for negative observations. """ assert(type(actuals) is np.ndarray) assert(type(controls) is np.ndarray) actuals = np.ravel(actuals) controls = np.ravel(controls) if np.isnan(actuals).any(): raise RuntimeError('NaN found in actuals') if np.isnan(controls).any(): raise RuntimeError('NaN found in controls') thresholds = np.hstack([-np.inf, np.unique(actuals), np.inf])[::-1] true_pos_rate = np.empty(thresholds.size) false_pos_rate = np.empty(thresholds.size) num_act = float(len(actuals)) num_ctr = float(len(controls)) for i, value in enumerate(thresholds): true_pos_rate[i] = (actuals >= value).sum() / num_act false_pos_rate[i] = (controls >= value).sum() / num_ctr auc = np.dot(np.diff(false_pos_rate), true_pos_rate[0:-1]) # treat cases where TPR of one is not reached before FPR of one # by using trapezoidal integration for the last segment # (add the missing triangle) if false_pos_rate[-2] == 1: auc += ((1-true_pos_rate[-3])*.5*(1-false_pos_rate[-3])) return (auc, true_pos_rate, false_pos_rate)
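A quick sanity check of fast_roc on perfectly separated data, which should yield an AUC of 1.0:

    import numpy as np

    actuals = np.array([2.0, 3.0, 4.0])    # salience values at fixated locations
    controls = np.array([0.0, 1.0])        # salience values at control locations
    auc, tpr, fpr = fast_roc(actuals, controls)
    # every actual exceeds every control, so auc == 1.0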
def faster_roc(actuals, controls):
    """
    Histogram based implementation of AUC under the ROC curve.

    Parameters:
        actuals : list
            A list of numeric values for positive observations.
        controls : list
            A list of numeric values for negative observations.
    """
    assert(type(actuals) is np.ndarray)
    assert(type(controls) is np.ndarray)
    if len(actuals) < 500:
        raise RuntimeError('This method might be incorrect when ' +
                           'not enough actuals are present. Needs to be checked before ' +
                           'proceeding. Stopping here for you to do so.')
    actuals = np.ravel(actuals)
    controls = np.ravel(controls)
    if np.isnan(actuals).any():
        raise RuntimeError('NaN found in actuals')
    if np.isnan(controls).any():
        raise RuntimeError('NaN found in controls')
    thresholds = np.hstack([-np.inf, np.unique(actuals), np.inf]) + np.finfo(float).eps
    num_act = float(len(actuals))
    num_ctr = float(len(controls))
    # survival functions of actuals and controls over the thresholds give the
    # true and false positive rates
    true_pos_rate = 1 - (np.cumsum(np.histogram(actuals, thresholds)[0]) / num_act)
    false_pos_rate = 1 - (np.cumsum(np.histogram(controls, thresholds)[0]) / num_ctr)
    auc = -1 * np.dot(np.diff(false_pos_rate), true_pos_rate[0:-1])
    # treat cases where TPR of one is not reached before FPR of one
    # by using trapezoidal integration for the last segment
    # (add the missing triangle)
    if false_pos_rate[-2] == 1:
        auc += ((1 - true_pos_rate[-3]) * .5 * (1 - false_pos_rate[-3]))
    return (auc, true_pos_rate, false_pos_rate)
wraps emd functionality for model evaluation requires: OpenCV python bindings input: prediction: the model salience map fm : fixmat filtered for the image corresponding to the prediction def emd_model(prediction, fm): """ wraps emd functionality for model evaluation requires: OpenCV python bindings input: prediction: the model salience map fm : fixmat filtered for the image corresponding to the prediction """ (_, r_x) = calc_resize_factor(prediction, fm.image_size) gt = fixmat.compute_fdm(fm, scale_factor = r_x) return emd(prediction, gt)
Compute the Earth Mover's Distance between prediction and ground truth. This implementation uses opencv for doing the actual work. Unfortunately, at the time of implementation only the SWIG bindings were available and the numpy arrays have to be converted by hand. This changes with opencv 2.1. def emd(prediction, ground_truth): """ Compute the Earth Mover's Distance between prediction and ground truth. This implementation uses opencv for doing the actual work. Unfortunately, at the time of implementation only the SWIG bindings were available and the numpy arrays have to be converted by hand. This changes with opencv 2.1. """ import opencv
    if not (prediction.shape == ground_truth.shape):
        raise RuntimeError('Shapes of prediction and ground truth have' +
            ' to be equal. They are: %s, %s'
            %(str(prediction.shape), str(ground_truth.shape)))
    (x, y) = np.meshgrid(list(range(0, prediction.shape[1])),
                        list(range(0, prediction.shape[0])))
    s1 = np.array([x.flatten(), y.flatten(), prediction.flatten()]).T
    s2 = np.array([x.flatten(), y.flatten(), ground_truth.flatten()]).T
    s1m = opencv.cvCreateMat(s1.shape[0], s2.shape[1], opencv.CV_32FC1)
    s2m = opencv.cvCreateMat(s1.shape[0], s2.shape[1], opencv.CV_32FC1)
    for r in range(0, s1.shape[0]):
        for c in range(0, s1.shape[1]):
            s1m[r, c] = float(s1[r, c])
            s2m[r, c] = float(s2[r, c])
    d = opencv.cvCalcEMD2(s1m, s2m, opencv.CV_DIST_L2)
    return d
Parse RFC 822 dates and times http://tools.ietf.org/html/rfc822#section-5 There are some formatting differences that are accounted for: 1. Years may be two or four digits. 2. The month and day can be swapped. 3. Additional timezone names are supported. 4. A default time and timezone are assumed if only a date is present. 5. def _rfc822(date): """Parse RFC 822 dates and times http://tools.ietf.org/html/rfc822#section-5 There are some formatting differences that are accounted for: 1. Years may be two or four digits. 2. The month and day can be swapped. 3. Additional timezone names are supported. 4. A default time and timezone are assumed if only a date is present. 5. """ daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']) months = { 'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12, } timezonenames = { 'ut': 0, 'gmt': 0, 'z': 0, 'adt': -3, 'ast': -4, 'at': -4, 'edt': -4, 'est': -5, 'et': -5, 'cdt': -5, 'cst': -6, 'ct': -6, 'mdt': -6, 'mst': -7, 'mt': -7, 'pdt': -7, 'pst': -8, 'pt': -8, 'a': -1, 'n': 1, 'm': -12, 'y': 12, } parts = date.lower().split() if len(parts) < 5: # Assume that the time and timezone are missing parts.extend(('00:00:00', '0000')) # Remove the day name if parts[0][:3] in daynames: parts = parts[1:] if len(parts) < 5: # If there are still fewer than five parts, there's not enough # information to interpret this return None try: day = int(parts[0]) except ValueError: # Check if the day and month are swapped if months.get(parts[0][:3]): try: day = int(parts[1]) except ValueError: return None else: parts[1] = parts[0] else: return None month = months.get(parts[1][:3]) if not month: return None try: year = int(parts[2]) except ValueError: return None # Normalize two-digit years: # Anything in the 90's is interpreted as 1990 and on # Anything 89 or less is interpreted as 2089 or before if len(parts[2]) <= 2: year += (1900, 2000)[year < 90] timeparts = parts[3].split(':') timeparts = timeparts + ([0] * (3 - len(timeparts))) try: (hour, minute, second) = map(int, timeparts) except ValueError: return None tzhour = 0 tzmin = 0 # Strip 'Etc/' from the timezone if parts[4].startswith('etc/'): parts[4] = parts[4][4:] # Normalize timezones that start with 'gmt': # GMT-05:00 => -0500 # GMT => GMT if parts[4].startswith('gmt'): parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt' # Handle timezones like '-0500', '+0500', and 'EST' if parts[4] and parts[4][0] in ('-', '+'): try: tzhour = int(parts[4][1:3]) tzmin = int(parts[4][3:]) except ValueError: return None if parts[4].startswith('-'): tzhour = tzhour * -1 tzmin = tzmin * -1 else: tzhour = timezonenames.get(parts[4], 0) # Create the datetime object and timezone delta objects try: stamp = datetime.datetime(year, month, day, hour, minute, second) except ValueError: return None delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour) # Return the date and timestamp in a UTC 9-tuple try: return stamp - delta except OverflowError: return None
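Two hedged example calls; as written, the function returns a naive datetime already shifted to UTC, or None when the date cannot be parsed.

_rfc822('Mon, 02 Mar 2020 10:30:00 -0500')
# -> datetime.datetime(2020, 3, 2, 15, 30)  (the -0500 offset is folded into UTC)

_rfc822('02 Mar 20')
# the missing time and timezone default to 00:00:00 UTC,
# and the two-digit year is normalized to 2020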
_to_rfc822(datetime.datetime) -> str The datetime `strftime` method is subject to locale-specific day and month names, so this function hardcodes the conversion. def _to_rfc822(date): """_to_rfc822(datetime.datetime) -> str The datetime `strftime` method is subject to locale-specific day and month names, so this function hardcodes the conversion.""" months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] fmt = '{day}, {d:02} {month} {y:04} {h:02}:{m:02}:{s:02} GMT' return fmt.format( day=days[date.weekday()], d=date.day, month=months[date.month - 1], y=date.year, h=date.hour, m=date.minute, s=date.second, )
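For example, assuming the function is in scope:

import datetime

_to_rfc822(datetime.datetime(2020, 3, 1, 12, 30, 5))
# -> 'Sun, 01 Mar 2020 12:30:05 GMT'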
Formats the SQL query to use ordinal parameters instead of named parameters. *sql* (|string|) is the SQL query. *params* (|dict|) maps each named parameter (|string|) to value (|object|). If |self.named| is "numeric", then *params* can be simply a |sequence| of values mapped by index. Returns a 2-|tuple| containing: the formatted SQL query (|string|), and the ordinal parameters (|list|). def format(self, sql, params): """ Formats the SQL query to use ordinal parameters instead of named parameters. *sql* (|string|) is the SQL query. *params* (|dict|) maps each named parameter (|string|) to value (|object|). If |self.named| is "numeric", then *params* can be simply a |sequence| of values mapped by index. Returns a 2-|tuple| containing: the formatted SQL query (|string|), and the ordinal parameters (|list|). """ if isinstance(sql, unicode): string_type = unicode elif isinstance(sql, bytes): string_type = bytes sql = sql.decode(_BYTES_ENCODING) else: raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql)) if self.named == 'numeric': if isinstance(params, collections.Mapping): params = {string_type(idx): val for idx, val in iteritems(params)} elif isinstance(params, collections.Sequence) and not isinstance(params, (unicode, bytes)): params = {string_type(idx): val for idx, val in enumerate(params, 1)} if not isinstance(params, collections.Mapping): raise TypeError("params:{!r} is not a dict.".format(params)) # Find named parameters. names = self.match.findall(sql) # Map named parameters to ordinals. ord_params = [] name_to_ords = {} for name in names: value = params[name] if isinstance(value, tuple): ord_params.extend(value) if name not in name_to_ords: name_to_ords[name] = '(' + ','.join((self.replace,) * len(value)) + ')' else: ord_params.append(value) if name not in name_to_ords: name_to_ords[name] = self.replace # Replace named parameters with ordinals. sql = self.match.sub(lambda m: name_to_ords[m.group(1)], sql) # Make sure the query is returned as the proper string type. if string_type is bytes: sql = sql.encode(_BYTES_ENCODING) # Return formatted SQL and new ordinal parameters. return sql, ord_params
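A sketch of a typical call; here 'formatter' stands for an instance of the enclosing class, assumed to be configured so that ':name'-style parameters are rewritten to '?' placeholders (the actual named and ordinal styles depend on how the instance was constructed).

sql, params = formatter.format(
    "SELECT * FROM users WHERE id = :id AND role IN :roles",
    {'id': 7, 'roles': ('admin', 'staff')})
# sql    -> "SELECT * FROM users WHERE id = ? AND role IN (?,?)"
# params -> [7, 'admin', 'staff']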
Formats the SQL query to use ordinal parameters instead of named parameters. *sql* (|string|) is the SQL query. *many_params* (|iterable|) contains each *params* to format. - *params* (|dict|) maps each named parameter (|string|) to value (|object|). If |self.named| is "numeric", then *params* can be simply a |sequence| of values mapped by index. Returns a 2-|tuple| containing: the formatted SQL query (|string|), and a |list| containing each ordinal parameters (|list|). def formatmany(self, sql, many_params): """ Formats the SQL query to use ordinal parameters instead of named parameters. *sql* (|string|) is the SQL query. *many_params* (|iterable|) contains each *params* to format. - *params* (|dict|) maps each named parameter (|string|) to value (|object|). If |self.named| is "numeric", then *params* can be simply a |sequence| of values mapped by index. Returns a 2-|tuple| containing: the formatted SQL query (|string|), and a |list| containing each ordinal parameters (|list|). """ if isinstance(sql, unicode): string_type = unicode elif isinstance(sql, bytes): string_type = bytes sql = sql.decode(_BYTES_ENCODING) else: raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql)) if not isinstance(many_params, collections.Iterable) or isinstance(many_params, (unicode, bytes)): raise TypeError("many_params:{!r} is not iterable.".format(many_params)) # Find named parameters. names = self.match.findall(sql) name_set = set(names) # Map named parameters to ordinals. many_ord_params = [] name_to_ords = {} name_to_len = {} repl_str = self.replace repl_tuple = (repl_str,) for i, params in enumerate(many_params): if self.named == 'numeric': if isinstance(params, collections.Mapping): params = {string_type(idx): val for idx, val in iteritems(params)} elif isinstance(params, collections.Sequence) and not isinstance(params, (unicode, bytes)): params = {string_type(idx): val for idx, val in enumerate(params, 1)} if not isinstance(params, collections.Mapping): raise TypeError("many_params[{}]:{!r} is not a dict.".format(i, params)) if not i: # first # Map names to ordinals, and determine what names are tuples and # what their lengths are. for name in name_set: value = params[name] if isinstance(value, tuple): tuple_len = len(value) name_to_ords[name] = '(' + ','.join(repl_tuple * tuple_len) + ')' name_to_len[name] = tuple_len else: name_to_ords[name] = repl_str name_to_len[name] = None # Make sure tuples match up and collapse tuples into ordinals. ord_params = [] for name in names: value = params[name] tuple_len = name_to_len[name] if tuple_len is not None: if not isinstance(value, tuple): raise TypeError("many_params[{}][{!r}]:{!r} was expected to be a tuple.".format(i, name, value)) elif len(value) != tuple_len: raise ValueError("many_params[{}][{!r}]:{!r} length was expected to be {}.".format(i, name, value, tuple_len)) ord_params.extend(value) else: ord_params.append(value) many_ord_params.append(ord_params) # Replace named parameters with ordinals. sql = self.match.sub(lambda m: name_to_ords[m.group(1)], sql) # Make sure the query is returned as the proper string type. if string_type is bytes: sql = sql.encode(_BYTES_ENCODING) # Return formatted SQL and new ordinal parameters. return sql, many_ord_params
Gets the parser for the command f; if it does not exist yet, a new one is created. def _get_parser(f): """ Gets the parser for the command f; if it does not exist yet, a new one is created. """ _COMMAND_GROUPS[f.__module__].load()
    if f.__name__ not in _COMMAND_GROUPS[f.__module__].parsers:
        parser = _COMMAND_GROUPS[f.__module__].parser_generator.add_parser(f.__name__,
                                                                           help=f.__doc__,
                                                                           description=f.__doc__)
        parser.set_defaults(func=f)
        _COMMAND_GROUPS[f.__module__].parsers[f.__name__] = parser
    return _COMMAND_GROUPS[f.__module__].parsers[f.__name__]
Find all <a /> elements in the given html for a post. Only scan html element matching all criteria in look_in. optionally the content to be scanned can be given as an argument. If any have an href attribute that is not from the one of the items in exclude_domains, append it to our lists. :param sourceURL: the URL for the post we are scanning :param exclude_domains: a list of domains to exclude from the search :type exclude_domains: list :param content: the content to be scanned for mentions :param look_in: dictionary with name, id and class_. only element matching all of these will be scanned :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers: dict :param timeout: optional timeout for web requests :type timeout float :rtype: dictionary of Mentions def findMentions(sourceURL, targetURL=None, exclude_domains=[], content=None, test_urls=True, headers={}, timeout=None): """Find all <a /> elements in the given html for a post. Only scan html element matching all criteria in look_in. optionally the content to be scanned can be given as an argument. If any have an href attribute that is not from the one of the items in exclude_domains, append it to our lists. :param sourceURL: the URL for the post we are scanning :param exclude_domains: a list of domains to exclude from the search :type exclude_domains: list :param content: the content to be scanned for mentions :param look_in: dictionary with name, id and class_. only element matching all of these will be scanned :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers: dict :param timeout: optional timeout for web requests :type timeout float :rtype: dictionary of Mentions """ __doc__ = None if test_urls: URLValidator(message='invalid source URL')(sourceURL) if content: result = {'status': requests.codes.ok, 'headers': None, } else: r = requests.get(sourceURL, verify=True, headers=headers, timeout=timeout) result = {'status': r.status_code, 'headers': r.headers } # Check for character encodings and use 'correct' data if 'charset' in r.headers.get('content-type', ''): content = r.text else: content = r.content result.update({'refs': set(), 'post-url': sourceURL}) if result['status'] == requests.codes.ok: # Allow passing BS doc as content if isinstance(content, BeautifulSoup): __doc__ = content # result.update({'content': unicode(__doc__)}) result.update({'content': str(__doc__)}) else: __doc__ = BeautifulSoup(content, _html_parser) result.update({'content': content}) # try to find first h-entry else use full document entry = __doc__.find(class_="h-entry") or __doc__ # Allow finding particular URL if targetURL: # find only targetURL all_links = entry.find_all('a', href=targetURL) else: # find all links with a href all_links = entry.find_all('a', href=True) for link in all_links: href = link.get('href', None) if href: url = urlparse(href) if url.scheme in ('http', 'https'): if url.hostname and url.hostname not in exclude_domains: result['refs'].add(href) return result
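A small illustration, assuming findMentions is imported from this module; passing content directly avoids the HTTP request, and test_urls=False skips validation of the made-up source URL.

html = '<div class="h-entry"><p>Reply to <a href="https://example.org/post">a post</a></p></div>'
result = findMentions('https://example.com/my-reply', content=html, test_urls=False)
# result['refs']     -> {'https://example.org/post'}
# result['post-url'] -> 'https://example.com/my-reply'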
Search the given html content for all <link /> elements and return any discovered WebMention URL. :param html: html content :rtype: WebMention URL def findEndpoint(html): """Search the given html content for all <link /> elements and return any discovered WebMention URL. :param html: html content :rtype: WebMention URL """ poss_rels = ['webmention', 'http://webmention.org', 'http://webmention.org/', 'https://webmention.org', 'https://webmention.org/'] # find elements with correct rels and a href value all_links = BeautifulSoup(html, _html_parser).find_all(rel=poss_rels, href=True) for link in all_links: s = link.get('href', None) if s is not None: return s return None
Discover any WebMention endpoint for a given URL. :param link: URL to discover WebMention endpoint :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers dict :param timeout: optional timeout for web requests :type timeout float :param request: optional Requests request object to avoid another GET :rtype: tuple (status_code, URL, [debug]) def discoverEndpoint(url, test_urls=True, headers={}, timeout=None, request=None, debug=False): """Discover any WebMention endpoint for a given URL. :param link: URL to discover WebMention endpoint :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers dict :param timeout: optional timeout for web requests :type timeout float :param request: optional Requests request object to avoid another GET :rtype: tuple (status_code, URL, [debug]) """ if test_urls: URLValidator(message='invalid URL')(url) # status, webmention endpointURL = None debugOutput = [] try: if request is not None: targetRequest = request else: targetRequest = requests.get(url, verify=False, headers=headers, timeout=timeout) returnCode = targetRequest.status_code debugOutput.append('%s %s' % (returnCode, url)) if returnCode == requests.codes.ok: try: linkHeader = parse_link_header(targetRequest.headers['link']) endpointURL = linkHeader.get('webmention', '') or \ linkHeader.get('http://webmention.org', '') or \ linkHeader.get('http://webmention.org/', '') or \ linkHeader.get('https://webmention.org', '') or \ linkHeader.get('https://webmention.org/', '') # force searching in the HTML if not found if not endpointURL: raise AttributeError debugOutput.append('found in link headers') except (KeyError, AttributeError): endpointURL = findEndpoint(targetRequest.text) debugOutput.append('found in body') if endpointURL is not None: endpointURL = urljoin(url, endpointURL) except (requests.exceptions.RequestException, requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.URLRequired, requests.exceptions.TooManyRedirects, requests.exceptions.Timeout): debugOutput.append('exception during GET request') returnCode = 500 debugOutput.append('endpointURL: %s %s' % (returnCode, endpointURL)) if debug: return (returnCode, endpointURL, debugOutput) else: return (returnCode, endpointURL)
Send to the :targetURL: a WebMention for the :sourceURL: The WebMention will be discovered if not given in the :webmention: parameter. :param sourceURL: URL that is referencing :targetURL: :param targetURL: URL of mentioned post :param webmention: optional WebMention endpoint :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers dict :param timeout: optional timeout for web requests :type timeout float :rtype: HTTPrequest object if WebMention endpoint was valid def sendWebmention(sourceURL, targetURL, webmention=None, test_urls=True, vouchDomain=None, headers={}, timeout=None, debug=False): """Send to the :targetURL: a WebMention for the :sourceURL: The WebMention will be discovered if not given in the :webmention: parameter. :param sourceURL: URL that is referencing :targetURL: :param targetURL: URL of mentioned post :param webmention: optional WebMention endpoint :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers dict :param timeout: optional timeout for web requests :type timeout float :rtype: HTTPrequest object if WebMention endpoint was valid """ if test_urls: v = URLValidator() v(sourceURL) v(targetURL) debugOutput = [] originalURL = targetURL try: targetRequest = requests.get(targetURL) if targetRequest.status_code == requests.codes.ok: if len(targetRequest.history) > 0: redirect = targetRequest.history[-1] if (redirect.status_code == 301 or redirect.status_code == 302) and 'Location' in redirect.headers: targetURL = urljoin(targetURL, redirect.headers['Location']) debugOutput.append('targetURL redirected: %s' % targetURL) if webmention is None: wStatus, wUrl = discoverEndpoint(targetURL, headers=headers, timeout=timeout, request=targetRequest) else: wStatus = 200 wUrl = webmention debugOutput.append('endpointURL: %s %s' % (wStatus, wUrl)) if wStatus == requests.codes.ok and wUrl is not None: if test_urls: v(wUrl) payload = {'source': sourceURL, 'target': originalURL} if vouchDomain is not None: payload['vouch'] = vouchDomain try: result = requests.post(wUrl, data=payload, headers=headers, timeout=timeout) debugOutput.append('POST %s -- %s' % (wUrl, result.status_code)) if result.status_code == 405 and len(result.history) > 0: redirect = result.history[-1] if redirect.status_code == 301 and 'Location' in redirect.headers: result = requests.post(redirect.headers['Location'], data=payload, headers=headers, timeout=timeout) debugOutput.append('redirected POST %s -- %s' % (redirect.headers['Location'], result.status_code)) except Exception as e: result = None except (requests.exceptions.RequestException, requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.URLRequired, requests.exceptions.TooManyRedirects, requests.exceptions.Timeout): debugOutput.append('exception during GET request') result = None return result
takes the link header as a string and returns a dictionary with rel values as keys and urls as values :param link: link header as a string :rtype: dictionary {rel_name: rel_value} def parse_link_header(link): """takes the link header as a string and returns a dictionary with rel values as keys and urls as values :param link: link header as a string :rtype: dictionary {rel_name: rel_value} """ rel_dict = {} for rels in link.split(','): rel_break = quoted_split(rels, ';') try: rel_url = re.search('<(.+?)>', rel_break[0]).group(1) rel_names = quoted_split(rel_break[1], '=')[-1] if rel_names.startswith('"') and rel_names.endswith('"'): rel_names = rel_names[1:-1] for name in rel_names.split(): rel_dict[name] = rel_url except (AttributeError, IndexError): pass return rel_dict
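For instance, a typical Link header advertising a webmention endpoint (quoted_split is the helper assumed elsewhere in this module):

link = '<https://example.com/webmention>; rel="webmention", <https://example.com/>; rel="self"'
parse_link_header(link)
# -> {'webmention': 'https://example.com/webmention', 'self': 'https://example.com/'}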
Find all <a /> elements in the given html for a post. If any have an href attribute that is rel="me" then include it in the result. :param sourceURL: the URL for the post we are scanning :rtype: dictionary of RelMe references def findRelMe(sourceURL): """Find all <a /> elements in the given html for a post. If any have an href attribute that is rel="me" then include it in the result. :param sourceURL: the URL for the post we are scanning :rtype: dictionary of RelMe references """ r = requests.get(sourceURL) result = {'status': r.status_code, 'headers': r.headers, 'history': r.history, 'content': r.text, 'relme': [], 'url': sourceURL } if r.status_code == requests.codes.ok: dom = BeautifulSoup(r.text, _html_parser) for link in dom.find_all('a', rel='me'): rel = link.get('rel') href = link.get('href') if rel is not None and href is not None: url = urlparse(href) if url is not None and url.scheme in ('http', 'https'): result['relme'].append(cleanURL(href)) return result
Determine if a given :resourceURL: is authoritative for the :profileURL: TODO add https/http filtering for those who wish to limit/restrict urls to match fully TODO add code to ensure that each item in the redirect chain is authoritative :param profileURL: URL of the user :param resourceURL: URL of the resource to validate :param profileRelMes: optional list of rel="me" links within the profile URL :param resourceRelMes: optional list of rel="me" links found within resource URL :rtype: True if confirmed def confirmRelMe(profileURL, resourceURL, profileRelMes=None, resourceRelMes=None): """Determine if a given :resourceURL: is authoritative for the :profileURL: TODO add https/http filtering for those who wish to limit/restrict urls to match fully TODO add code to ensure that each item in the redirect chain is authoritative :param profileURL: URL of the user :param resourceURL: URL of the resource to validate :param profileRelMes: optional list of rel="me" links within the profile URL :param resourceRelMes: optional list of rel="me" links found within resource URL :rtype: True if confirmed """ result = False profile = normalizeURL(profileURL) if profileRelMes is None: profileRelMe = findRelMe(profileURL) profileRelMes = profileRelMe['relme'] if resourceRelMes is None: resourceRelMe = findRelMe(resourceURL) resourceRelMes = resourceRelMe['relme'] for url in resourceRelMes: if profile in (url, normalizeURL(url)): result = True break return result
Indent every line of text in a newline-delimited string def indent_text(string, indent_level=2): """Indent every line of text in a newline-delimited string""" indented_lines = [] indent_spaces = ' ' * indent_level for line in string.split('\n'): indented_lines.append(indent_spaces + line) return '\n'.join(indented_lines)
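For example:

indent_text('first line\nsecond line', indent_level=4)
# -> '    first line\n    second line'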
Download a file using requests. This is like urllib.request.urlretrieve, but: - requests validates SSL certificates by default - you can pass tracker objects to e.g. display a progress bar or calculate a file hash. def download(url, target, headers=None, trackers=()): """Download a file using requests. This is like urllib.request.urlretrieve, but: - requests validates SSL certificates by default - you can pass tracker objects to e.g. display a progress bar or calculate a file hash. """ if headers is None: headers = {} headers.setdefault('user-agent', 'requests_download/'+__version__) r = requests.get(url, headers=headers, stream=True) r.raise_for_status() for t in trackers: t.on_start(r) with open(target, 'wb') as f: for chunk in r.iter_content(chunk_size=8192): if chunk: f.write(chunk) for t in trackers: t.on_chunk(chunk) for t in trackers: t.on_finish()
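A hedged sketch of a tracker object: any object with on_start, on_chunk and on_finish methods works, since those are the only hooks download calls. The URL below is a placeholder.

class PrintTracker:
    """Minimal tracker that reports progress on stdout."""

    def on_start(self, response):
        self.received = 0
        print('downloading', response.url)

    def on_chunk(self, chunk):
        self.received += len(chunk)

    def on_finish(self):
        print('done,', self.received, 'bytes')

download('https://example.com/archive.tar.gz', 'archive.tar.gz',
         trackers=[PrintTracker()])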
Writes an object created by `parse` to either a file or a bytearray. If the object doesn't end on a byte boundary, zeroes are appended to it until it does. def write(parsed_obj, spec=None, filename=None): """Writes an object created by `parse` to either a file or a bytearray. If the object doesn't end on a byte boundary, zeroes are appended to it until it does. """ if not isinstance(parsed_obj, BreadStruct): raise ValueError( 'Object to write must be a structure created ' 'by bread.parse') if filename is not None: with open(filename, 'wb') as fp: parsed_obj._data_bits[:parsed_obj._length].tofile(fp) else: return bytearray(parsed_obj._data_bits[:parsed_obj._length].tobytes())
Uploads a file to an S3 bucket, as a public file. def deploy_file(file_path, bucket): """ Uploads a file to an S3 bucket, as a public file. """ # Paths look like: # index.html # css/bootstrap.min.css logger.info("Deploying {0}".format(file_path)) # Upload the actual file to file_path k = Key(bucket) k.key = file_path try: k.set_contents_from_filename(file_path) k.set_acl('public-read') except socket.error: logger.warning("Caught socket.error while trying to upload {0}".format( file_path)) msg = "Please file an issue with alotofeffort if you see this," logger.warning(msg) logger.warning("providing as much info as you can.")
Deploy to the configured S3 bucket. def deploy(www_dir, bucket_name): """ Deploy to the configured S3 bucket. """ # Set up the connection to an S3 bucket. conn = boto.connect_s3() bucket = conn.get_bucket(bucket_name) # Deploy each changed file in www_dir os.chdir(www_dir) for root, dirs, files in os.walk('.'): for f in files: # Use full relative path. Normalize to remove dot. file_path = os.path.normpath(os.path.join(root, f)) if has_changed_since_last_deploy(file_path, bucket): deploy_file(file_path, bucket) else: logger.info("Skipping {0}".format(file_path)) # Make the whole bucket public bucket.set_acl('public-read') # Configure it to be a website bucket.configure_website('index.html', 'error.html') # Print the endpoint, so you know the URL msg = "Your website is now live at {0}".format( bucket.get_website_endpoint()) logger.info(msg) logger.info("If you haven't done so yet, point your domain name there!")
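Called programmatically (rather than through the command-line entry point below), and assuming boto can find your AWS credentials, a deploy is a single call:

deploy('www', 'my-static-site-bucket')  # uploads changed files and enables website hosting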
Checks if a file has changed since the last time it was deployed. :param file_path: Path to file which should be checked. Should be relative from root of bucket. :param bucket_name: Name of S3 bucket to check against. :returns: True if the file has changed, else False. def has_changed_since_last_deploy(file_path, bucket): """ Checks if a file has changed since the last time it was deployed. :param file_path: Path to file which should be checked. Should be relative from root of bucket. :param bucket_name: Name of S3 bucket to check against. :returns: True if the file has changed, else False. """ msg = "Checking if {0} has changed since last deploy.".format(file_path) logger.debug(msg) with open(file_path) as f: data = f.read() file_md5 = hashlib.md5(data.encode('utf-8')).hexdigest() logger.debug("file_md5 is {0}".format(file_md5)) key = bucket.get_key(file_path) # HACK: Boto's md5 property does not work when the file hasn't been # downloaded. The etag works but will break for multi-part uploaded files. # http://stackoverflow.com/questions/16872679/how-to-programmatically- # get-the-md5-checksum-of-amazon-s3-file-using-boto/17607096#17607096 # Also the double quotes around it must be stripped. Sketchy...boto's fault if key: key_md5 = key.etag.replace('"', '').strip() logger.debug("key_md5 is {0}".format(key_md5)) else: logger.debug("File does not exist in bucket") return True if file_md5 == key_md5: logger.debug("File has not changed.") return False logger.debug("File has changed.") return True
Entry point for the package, as defined in setup.py. def main(): """ Entry point for the package, as defined in setup.py. """ # Log info and above to console logging.basicConfig( format='%(levelname)s: %(message)s', level=logging.INFO) # Get command line input/output arguments msg = 'Instantly deploy static HTML sites to S3 at the command line.' parser = argparse.ArgumentParser(description=msg) parser.add_argument( 'www_dir', help='Directory containing the HTML files for your website.' ) parser.add_argument( 'bucket_name', help='Name of S3 bucket to deploy to, e.g. mybucket.' ) args = parser.parse_args() # Deploy the site to S3! deploy(args.www_dir, args.bucket_name)
This keyword is used to start sikuli java process. If library is inited with mode "OLD", sikuli java process is started automatically. If library is inited with mode "NEW", this keyword should be used. :param port: port of sikuli java process, if value is None or 0, a random free port will be used :return: None def start_sikuli_process(self, port=None): """ This keyword is used to start sikuli java process. If library is inited with mode "OLD", sikuli java process is started automatically. If library is inited with mode "NEW", this keyword should be used. :param port: port of sikuli java process, if value is None or 0, a random free port will be used :return: None """ if port is None or int(port) == 0: port = self._get_free_tcp_port() self.port = port start_retries = 0 started = False while start_retries < 5: try: self._start_sikuli_java_process() except RuntimeError as err: print('error........%s' % err) if self.process: self.process.terminate_process() self.port = self._get_free_tcp_port() start_retries += 1 continue started = True break if not started: raise RuntimeError('Start sikuli java process failed!') self.remote = self._connect_remote_library()
Respond to POSTed username/password with token. def post(self, request): """Respond to POSTed username/password with token.""" serializer = AuthTokenSerializer(data=request.data) if serializer.is_valid(): token, _ = ExpiringToken.objects.get_or_create( user=serializer.validated_data['user'] ) if token.expired(): # If the token is expired, generate a new one. token.delete() token = ExpiringToken.objects.create( user=serializer.validated_data['user'] ) data = {'token': token.key} return Response(data) return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
Return the allowed lifespan of a token as a TimeDelta object. Defaults to 30 days. def EXPIRING_TOKEN_LIFESPAN(self): """ Return the allowed lifespan of a token as a TimeDelta object. Defaults to 30 days. """ try: val = settings.EXPIRING_TOKEN_LIFESPAN except AttributeError: val = timedelta(days=30) return val
Return boolean indicating token expiration. def expired(self): """Return boolean indicating token expiration.""" now = timezone.now() if self.created < now - token_settings.EXPIRING_TOKEN_LIFESPAN: return True return False
Test if a token is made entirely of Unicode characters of the following classes: - P: punctuation - S: symbols - Z: separators - M: combining marks - C: control characters >>> unicode_is_punctuation('word') False >>> unicode_is_punctuation('。') True >>> unicode_is_punctuation('-') True >>> unicode_is_punctuation('-3') False >>> unicode_is_punctuation('あ') False def unicode_is_punctuation(text): """ Test if a token is made entirely of Unicode characters of the following classes: - P: punctuation - S: symbols - Z: separators - M: combining marks - C: control characters >>> unicode_is_punctuation('word') False >>> unicode_is_punctuation('。') True >>> unicode_is_punctuation('-') True >>> unicode_is_punctuation('-3') False >>> unicode_is_punctuation('あ') False """ for char in str_func(text): category = unicodedata.category(char)[0] if category not in 'PSZMC': return False return True
Store the actual process in _process. If it doesn't exist yet, create it. def process(self): """ Store the actual process in _process. If it doesn't exist yet, create it. """ if hasattr(self, '_process'): return self._process else: self._process = self._get_process() return self._process
Create the process by running the specified command. def _get_process(self): """ Create the process by running the specified command. """ command = self._get_command() return subprocess.Popen(command, bufsize=-1, close_fds=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
Split a text into separate words. def tokenize_list(self, text): """ Split a text into separate words. """ return [self.get_record_token(record) for record in self.analyze(text)]
Determine whether a single word is a stopword, or whether a short phrase is made entirely of stopwords, disregarding context. Use of this function should be avoided; it's better to give the text in context and let the process determine which words are the stopwords. def is_stopword(self, text): """ Determine whether a single word is a stopword, or whether a short phrase is made entirely of stopwords, disregarding context. Use of this function should be avoided; it's better to give the text in context and let the process determine which words are the stopwords. """ found_content_word = False for record in self.analyze(text): if not self.is_stopword_record(record): found_content_word = True break return not found_content_word
Get a canonical list representation of text, with words separated and reduced to their base forms. TODO: use the cache. def normalize_list(self, text, cache=None): """ Get a canonical list representation of text, with words separated and reduced to their base forms. TODO: use the cache. """ words = [] analysis = self.analyze(text) for record in analysis: if not self.is_stopword_record(record): words.append(self.get_record_root(record)) if not words: # Don't discard stopwords if that's all you've got words = [self.get_record_token(record) for record in analysis] return words
Given some text, return a sequence of (stem, pos, text) triples as appropriate for the reader. `pos` can be as general or specific as necessary (for example, it might label all parts of speech, or it might only distinguish function words from others). Twitter-style hashtags and at-mentions have the stem and pos they would have without the leading # or @. For instance, if the reader's triple for "thing" is ('thing', 'NN', 'things'), then "#things" would come out as ('thing', 'NN', '#things'). def tag_and_stem(self, text, cache=None): """ Given some text, return a sequence of (stem, pos, text) triples as appropriate for the reader. `pos` can be as general or specific as necessary (for example, it might label all parts of speech, or it might only distinguish function words from others). Twitter-style hashtags and at-mentions have the stem and pos they would have without the leading # or @. For instance, if the reader's triple for "thing" is ('thing', 'NN', 'things'), then "#things" would come out as ('thing', 'NN', '#things'). """ analysis = self.analyze(text) triples = [] for record in analysis: root = self.get_record_root(record) token = self.get_record_token(record) if token: if unicode_is_punctuation(token): triples.append((token, '.', token)) else: pos = self.get_record_pos(record) triples.append((root, pos, token)) return triples
Given some text, extract phrases of up to 2 content words, and map their normalized form to the complete phrase. def extract_phrases(self, text): """ Given some text, extract phrases of up to 2 content words, and map their normalized form to the complete phrase. """ analysis = self.analyze(text) for pos1 in range(len(analysis)): rec1 = analysis[pos1] if not self.is_stopword_record(rec1): yield self.get_record_root(rec1), rec1[0] for pos2 in range(pos1 + 1, len(analysis)): rec2 = analysis[pos2] if not self.is_stopword_record(rec2): roots = [self.get_record_root(rec1), self.get_record_root(rec2)] pieces = [analysis[i][0] for i in range(pos1, pos2+1)] term = ' '.join(roots) phrase = ''.join(pieces) yield term, phrase break
Use MeCab to turn any text into its phonetic spelling, as katakana separated by spaces. def to_kana(text): """ Use MeCab to turn any text into its phonetic spelling, as katakana separated by spaces. """ records = MECAB.analyze(text) kana = [] for record in records: if record.pronunciation: kana.append(record.pronunciation) elif record.reading: kana.append(record.reading) else: kana.append(record.surface) return ' '.join(k for k in kana if k)
Return two things about each character: - Its transliterated value (in Roman characters, if it's a kana) - A class of characters indicating how it affects the romanization def get_kana_info(char): """ Return two things about each character: - Its transliterated value (in Roman characters, if it's a kana) - A class of characters indicating how it affects the romanization """ try: name = unicodedata.name(char) except ValueError: return char, NOT_KANA # The names we're dealing with will probably look like # "KATAKANA CHARACTER ZI". if (name.startswith('HIRAGANA LETTER') or name.startswith('KATAKANA LETTER') or name.startswith('KATAKANA-HIRAGANA')): names = name.split() syllable = str_func(names[-1].lower()) if name.endswith('SMALL TU'): # The small tsu (っ) doubles the following consonant. # It'll show up as 't' on its own. return 't', SMALL_TSU elif names[-1] == 'N': return 'n', NN elif names[1] == 'PROLONGED': # The prolongation marker doubles the previous vowel. # It'll show up as '_' on its own. return '_', PROLONG elif names[-2] == 'SMALL': # Small characters tend to modify the sound of the previous # kana. If they can't modify anything, they're appended to # the letter 'x' instead. if syllable.startswith('y'): return 'x' + syllable, SMALL_Y else: return 'x' + syllable, SMALL return syllable, KANA else: if char in ROMAN_PUNCTUATION_TABLE: char = ROMAN_PUNCTUATION_TABLE[char] return char, NOT_KANA
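A few illustrative calls; KANA, SMALL_TSU and NOT_KANA are the character-class constants this module defines:

get_kana_info('か')   # -> ('ka', KANA)
get_kana_info('ッ')   # -> ('t', SMALL_TSU), the consonant-doubling marker
get_kana_info('A')    # -> ('A', NOT_KANA)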
Runs a line of text through MeCab, and returns the results as a list of lists ("records") that contain the MeCab analysis of each word. def analyze(self, text): """ Runs a line of text through MeCab, and returns the results as a list of lists ("records") that contain the MeCab analysis of each word. """ try: self.process # make sure things are loaded text = render_safe(text).replace('\n', ' ').lower() results = [] for chunk in string_pieces(text): self.send_input((chunk + '\n').encode('utf-8')) while True: out_line = self.receive_output_line().decode('utf-8') if out_line == 'EOS\n': break word, info = out_line.strip('\n').split('\t') record_parts = [word] + info.split(',') # Pad the record out to have 10 parts if it doesn't record_parts += [None] * (10 - len(record_parts)) record = MeCabRecord(*record_parts) # special case for detecting nai -> n if (record.surface == 'ん' and record.conjugation == '不変化型'): # rebuild the record so that record.root is 'nai' record_parts[MeCabRecord._fields.index('root')] = 'ない' record = MeCabRecord(*record_parts) results.append(record) return results except ProcessError: self.restart_process() return self.analyze(text)
Determine whether a single MeCab record represents a stopword. This mostly determines words to strip based on their parts of speech. If common_words is set to True (default), it will also strip common verbs and nouns such as くる and よう. If more_stopwords is True, it will look at the sub-part of speech to remove more categories. def is_stopword_record(self, record): """ Determine whether a single MeCab record represents a stopword. This mostly determines words to strip based on their parts of speech. If common_words is set to True (default), it will also strip common verbs and nouns such as くる and よう. If more_stopwords is True, it will look at the sub-part of speech to remove more categories. """ # preserve negations if record.root == 'ない': return False return ( record.pos in STOPWORD_CATEGORIES or record.subclass1 in STOPWORD_CATEGORIES or record.root in STOPWORD_ROOTS )
Given a record, get the word's part of speech. Here we're going to return MeCab's part of speech (written in Japanese), though if it's a stopword we prefix the part of speech with '~'. def get_record_pos(self, record): """ Given a record, get the word's part of speech. Here we're going to return MeCab's part of speech (written in Japanese), though if it's a stopword we prefix the part of speech with '~'. """ if self.is_stopword_record(record): return '~' + record.pos else: return record.pos
Run text through the external process, and get a list of lists ("records") that contain the analysis of each word. def analyze(self, text): """ Run text through the external process, and get a list of lists ("records") that contain the analysis of each word. """ try: text = render_safe(text).strip() if not text: return [] chunks = text.split('\n') results = [] for chunk_text in chunks: if chunk_text.strip(): textbytes = (chunk_text + '\n').encode('utf-8') self.send_input(textbytes) out_line = '' while True: out_line = self.receive_output_line() out_line = out_line.decode('utf-8') if out_line == '\n': break record = out_line.strip('\n').split(' ') results.append(record) return results except ProcessError: self.restart_process() return self.analyze(text)
Untokenizing a text undoes the tokenizing operation, restoring punctuation and spaces to the places that people expect them to be. Ideally, `untokenize(tokenize(text))` should be identical to `text`, except for line breaks. def untokenize(words): """ Untokenizing a text undoes the tokenizing operation, restoring punctuation and spaces to the places that people expect them to be. Ideally, `untokenize(tokenize(text))` should be identical to `text`, except for line breaks. """ text = ' '.join(words) step1 = text.replace("`` ", '"').replace(" ''", '"').replace('. . .', '...') step2 = step1.replace(" ( ", " (").replace(" ) ", ") ") step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2) step4 = re.sub(r' ([.,:;?!%]+)$', r"\1", step3) step5 = step4.replace(" '", "'").replace(" n't", "n't").replace( "can not", "cannot") step6 = step5.replace(" ` ", " '") return step6.strip()
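For example:

untokenize(['She', 'said', ',', '``', 'hello', "''", '.'])
# -> 'She said, "hello".'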
r""" Splits apart words that are written in CamelCase. Bugs: - Non-ASCII characters are treated as lowercase letters, even if they are actually capital letters. Examples: >>> un_camel_case('1984ZXSpectrumGames') '1984 ZX Spectrum Games' >>> un_camel_case('aaAa aaAaA 0aA AAAa!AAA') 'aa Aa aa Aa A 0a A AA Aa! AAA' >>> un_camel_case('MotörHead') 'Mot\xf6r Head' >>> un_camel_case('MSWindows3.11ForWorkgroups') 'MS Windows 3.11 For Workgroups' This should not significantly affect text that is not camel-cased: >>> un_camel_case('ACM_Computing_Classification_System') 'ACM Computing Classification System' >>> un_camel_case('Anne_Blunt,_15th_Baroness_Wentworth') 'Anne Blunt, 15th Baroness Wentworth' >>> un_camel_case('Hindi-Urdu') 'Hindi-Urdu' def un_camel_case(text): r""" Splits apart words that are written in CamelCase. Bugs: - Non-ASCII characters are treated as lowercase letters, even if they are actually capital letters. Examples: >>> un_camel_case('1984ZXSpectrumGames') '1984 ZX Spectrum Games' >>> un_camel_case('aaAa aaAaA 0aA AAAa!AAA') 'aa Aa aa Aa A 0a A AA Aa! AAA' >>> un_camel_case('MotörHead') 'Mot\xf6r Head' >>> un_camel_case('MSWindows3.11ForWorkgroups') 'MS Windows 3.11 For Workgroups' This should not significantly affect text that is not camel-cased: >>> un_camel_case('ACM_Computing_Classification_System') 'ACM Computing Classification System' >>> un_camel_case('Anne_Blunt,_15th_Baroness_Wentworth') 'Anne Blunt, 15th Baroness Wentworth' >>> un_camel_case('Hindi-Urdu') 'Hindi-Urdu' """ revtext = text[::-1] pieces = [] while revtext: match = CAMEL_RE.match(revtext) if match: pieces.append(match.group(1)) revtext = revtext[match.end():] else: pieces.append(revtext) revtext = '' revstr = ' '.join(piece.strip(' _') for piece in pieces if piece.strip(' _')) return revstr[::-1].replace('- ', '-')
Takes a (unicode) string and yields pieces of it that are at most `maxlen` characters, trying to break it at punctuation/whitespace. This is an important step before using a tokenizer with a maximum buffer size. def string_pieces(s, maxlen=1024): """ Takes a (unicode) string and yields pieces of it that are at most `maxlen` characters, trying to break it at punctuation/whitespace. This is an important step before using a tokenizer with a maximum buffer size. """ if not s: return i = 0 while True: j = i + maxlen if j >= len(s): yield s[i:] return # Using "j - 1" keeps boundary characters with the left chunk while unicodedata.category(s[j - 1]) not in BOUNDARY_CATEGORIES: j -= 1 if j == i: # No boundary available; oh well. j = i + maxlen break yield s[i:j] i = j
Assign a heuristic to possible outputs from Morphy. Minimizing this heuristic avoids incorrect stems. def _word_badness(word): """ Assign a heuristic to possible outputs from Morphy. Minimizing this heuristic avoids incorrect stems. """ if word.endswith('e'): return len(word) - 2 elif word.endswith('ess'): return len(word) - 10 elif word.endswith('ss'): return len(word) - 4 else: return len(word)
Get the most likely stem for a word using Morphy, once the input has been pre-processed by morphy_stem(). def _morphy_best(word, pos=None): """ Get the most likely stem for a word using Morphy, once the input has been pre-processed by morphy_stem(). """ results = [] if pos is None: pos = 'nvar' for pos_item in pos: results.extend(morphy(word, pos_item)) if not results: return None results.sort(key=lambda x: _word_badness(x)) return results[0]
Get the most likely stem for a word. If a part of speech is supplied, the stem will be more accurate. Valid parts of speech are: - 'n' or 'NN' for nouns - 'v' or 'VB' for verbs - 'a' or 'JJ' for adjectives - 'r' or 'RB' for adverbs Any other part of speech will be treated as unknown. def morphy_stem(word, pos=None): """ Get the most likely stem for a word. If a part of speech is supplied, the stem will be more accurate. Valid parts of speech are: - 'n' or 'NN' for nouns - 'v' or 'VB' for verbs - 'a' or 'JJ' for adjectives - 'r' or 'RB' for adverbs Any other part of speech will be treated as unknown. """ word = word.lower() if pos is not None: if pos.startswith('NN'): pos = 'n' elif pos.startswith('VB'): pos = 'v' elif pos.startswith('JJ'): pos = 'a' elif pos.startswith('RB'): pos = 'r' if pos is None and word.endswith('ing') or word.endswith('ed'): pos = 'v' if pos is not None and pos not in 'nvar': pos = None if word in EXCEPTIONS: return EXCEPTIONS[word] if pos is None: if word in AMBIGUOUS_EXCEPTIONS: return AMBIGUOUS_EXCEPTIONS[word] return _morphy_best(word, pos) or word
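Typical behaviour, assuming NLTK's WordNet data is installed (stems come from Morphy, so exact results can vary with the WordNet version):

morphy_stem('dogs', 'NN')     # -> 'dog'
morphy_stem('running', 'VB')  # -> 'run'
morphy_stem('leaves')         # ambiguous without a POS; the badness heuristic picks among WordNet's candidates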
Returns a list of (stem, tag, token) triples: - stem: the word's uninflected form - tag: the word's part of speech - token: the original word, so we can reconstruct it later def tag_and_stem(text): """ Returns a list of (stem, tag, token) triples: - stem: the word's uninflected form - tag: the word's part of speech - token: the original word, so we can reconstruct it later """ tokens = tokenize(text) tagged = nltk.pos_tag(tokens) out = [] for token, tag in tagged: stem = morphy_stem(token, tag) out.append((stem, tag, token)) return out
Get a list of word stems that appear in the text. Stopwords and an initial 'to' will be stripped, unless this leaves nothing in the stem. >>> normalize_list('the dog') ['dog'] >>> normalize_list('big dogs') ['big', 'dog'] >>> normalize_list('the') ['the'] def normalize_list(text): """ Get a list of word stems that appear in the text. Stopwords and an initial 'to' will be stripped, unless this leaves nothing in the stem. >>> normalize_list('the dog') ['dog'] >>> normalize_list('big dogs') ['big', 'dog'] >>> normalize_list('the') ['the'] """ pieces = [morphy_stem(word) for word in tokenize(text)] pieces = [piece for piece in pieces if good_lemma(piece)] if not pieces: return [text] if pieces[0] == 'to': pieces = pieces[1:] return pieces
Get a canonical representation of a Wikipedia topic, which may include a disambiguation string in parentheses. Returns (name, disambig), where "name" is the normalized topic name, and "disambig" is a string corresponding to the disambiguation text or None. def normalize_topic(topic): """ Get a canonical representation of a Wikipedia topic, which may include a disambiguation string in parentheses. Returns (name, disambig), where "name" is the normalized topic name, and "disambig" is a string corresponding to the disambiguation text or None. """ # find titles of the form Foo (bar) topic = topic.replace('_', ' ') match = re.match(r'([^(]+) \(([^)]+)\)', topic) if not match: return normalize(topic), None else: return normalize(match.group(1)), 'n/' + match.group(2).strip(' _')
Split a key into its elements. def key2elements(key): """Split a key into its elements.""" words = key.split('.')
    first2words = words[:2]
    lastword = words[-1]
    namewords = words[2:-1]
    # the object name may itself contain dots, so re-join it and strip any quotes
    namephrase = '.'.join(namewords)
    if namephrase.startswith("'") and namephrase.endswith("'"):
        namephrase = namephrase[1:-1]
    return first2words + [namephrase] + [lastword]
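Two illustrative keys, the second showing how a quoted object name containing a dot is kept intact:

key2elements("idf.BUILDING.'My Building'.North_Axis")
# -> ['idf', 'BUILDING', 'My Building', 'North_Axis']

key2elements("idf.BUILDING.'Bldg. One'.North_Axis")
# -> ['idf', 'BUILDING', 'Bldg. One', 'North_Axis']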
update idf using dct def updateidf(idf, dct): """update idf using dct""" for key in list(dct.keys()):
        if key.startswith('idf.'):
            idftag, objkey, objname, field = key2elements(key)
            if objname == '':
                try:
                    idfobj = idf.idfobjects[objkey.upper()][0]
                except IndexError:
                    idfobj = idf.newidfobject(objkey.upper())
            else:
                idfobj = idf.getobject(objkey.upper(), objname)
                if idfobj is None:
                    idfobj = idf.newidfobject(objkey.upper(), Name=objname)
            idfobj[field] = dct[key]
return the fan power in bhp given fan efficiency, Pressure rise (Pa) and flow (m3/s) def fan_bhp(fan_tot_eff, pascal, m3s): """return the fan power in bhp given fan efficiency, Pressure rise (Pa) and flow (m3/s)""" # from discussion in # http://energy-models.com/forum/baseline-fan-power-calculation inh2o = pascal2inh2o(pascal) cfm = m3s2cfm(m3s) return (cfm * inh2o * 1.0) / (6356.0 * fan_tot_eff)
return inputs for E+ in pascal and m3/s def bhp2pascal(bhp, cfm, fan_tot_eff): """return inputs for E+ in pascal and m3/s""" inh2o = bhp * 6356.0 * fan_tot_eff / cfm pascal = inh2o2pascal(inh2o) m3s = cfm2m3s(cfm) return pascal, m3s
return the fan power in watts given fan efficiency, Pressure rise (Pa) and flow (m3/s) def fan_watts(fan_tot_eff, pascal, m3s): """return the fan power in watts given fan efficiency, Pressure rise (Pa) and flow (m3/s)""" # got this from a google search bhp = fan_bhp(fan_tot_eff, pascal, m3s) return bhp2watts(bhp)
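A short sketch tying the three fan helpers together; pascal2inh2o, m3s2cfm, inh2o2pascal, cfm2m3s and bhp2watts are the unit-conversion functions these helpers rely on elsewhere in this module, and the numbers are arbitrary.

fan_tot_eff = 0.6      # total fan efficiency
pressure_pa = 500.0    # pressure rise in Pa
flow_m3s = 2.5         # flow in m3/s

bhp = fan_bhp(fan_tot_eff, pressure_pa, flow_m3s)
watts = fan_watts(fan_tot_eff, pressure_pa, flow_m3s)

# and back: recover the E+ inputs (Pa, m3/s) from brake horsepower and cfm
pascal, m3s = bhp2pascal(bhp, m3s2cfm(flow_m3s), fan_tot_eff)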