Dataset schema:
    _id: string (2 to 7 chars)
    title: string (1 to 88 chars)
    partition: string (3 classes)
    text: string (75 to 19.8k chars)
    language: string (1 class)
    meta_information: dict
q272900
XMeans.compute_bic
test
def compute_bic(self, D, means, labels, K, R):
    """Computes the Bayesian Information Criterion."""
    D = vq.whiten(D)
    Rn = D.shape[0]
    M = D.shape[1]

    if R == K:
        return 1

    # Maximum likelihood estimate (MLE)
    mle_var = 0
    for k in range(len(means)):
        X = D[np.argwhere(labels == k)]
        X = X.reshape((X.shape[0], X.shape[-1]))
        for x in X:
            mle_var += distance.euclidean(x, means[k])
    mle_var /= float(R - K)

    # Log-likelihood of the data
    l_D = - Rn / 2. * np.log(2 * np.pi) - (Rn * M) / 2. * np.log(mle_var) - \
        (Rn - K) / 2. + Rn * np.log(Rn) - Rn * np.log(R)

    # Params of BIC
    p = (K - 1) + M * K + mle_var

    # Return the BIC
    return l_D - p / 2. * np.log(R)
python
{ "resource": "" }
q272901
magnitude
test
def magnitude(X):
    """Magnitude of a complex matrix."""
    r = np.real(X)
    i = np.imag(X)
    return np.sqrt(r * r + i * i)
python
{ "resource": "" }
q272902
json_to_bounds
test
def json_to_bounds(segments_json):
    """Extracts the boundaries from a json file and puts them into an np
    array."""
    f = open(segments_json)
    segments = json.load(f)["segments"]
    bounds = []
    for segment in segments:
        bounds.append(segment["start"])
    bounds.append(bounds[-1] + segments[-1]["duration"])  # Add last boundary
    f.close()
    return np.asarray(bounds)
python
{ "resource": "" }
q272903
json_bounds_to_bounds
test
def json_bounds_to_bounds(bounds_json):
    """Extracts the boundaries from a bounds json file and puts them into an
    np array."""
    f = open(bounds_json)
    segments = json.load(f)["bounds"]
    bounds = []
    for segment in segments:
        bounds.append(segment["start"])
    f.close()
    return np.asarray(bounds)
python
{ "resource": "" }
q272904
json_to_labels
test
def json_to_labels(segments_json):
    """Extracts the labels from a json file and puts them into an np
    array."""
    f = open(segments_json)
    segments = json.load(f)["segments"]
    labels = []
    str_labels = []
    for segment in segments:
        if segment["label"] not in str_labels:
            str_labels.append(segment["label"])
            labels.append(len(str_labels) - 1)
        else:
            label_idx = np.where(
                np.asarray(str_labels) == segment["label"])[0][0]
            labels.append(label_idx)
    f.close()
    return np.asarray(labels)
python
{ "resource": "" }
q272905
json_to_beats
test
def json_to_beats(beats_json_file):
    """Extracts the beats from the beats_json_file and puts them into an np
    array."""
    f = open(beats_json_file, "r")
    beats_json = json.load(f)
    beats = []
    for beat in beats_json["beats"]:
        beats.append(beat["start"])
    f.close()
    return np.asarray(beats)
python
{ "resource": "" }
q272906
compute_ffmc2d
test
def compute_ffmc2d(X):
    """Computes the 2D-Fourier Magnitude Coefficients."""
    # 2D-FFT
    fft2 = scipy.fftpack.fft2(X)

    # Magnitude
    fft2m = magnitude(fft2)

    # FFT-shift and flatten
    fftshift = scipy.fftpack.fftshift(fft2m).flatten()

    # Take out redundant components
    return fftshift[:fftshift.shape[0] // 2 + 1]
python
{ "resource": "" }
q272907
compute_labels
test
def compute_labels(X, rank, R, bound_idxs, niter=300):
    """Computes the labels using the bounds."""
    try:
        F, G = cnmf(X, rank, niter=niter, hull=False)
    except Exception:
        return [1]

    label_frames = filter_activation_matrix(G.T, R)
    label_frames = np.asarray(label_frames, dtype=int)

    labels = []
    bound_inters = zip(bound_idxs[:-1], bound_idxs[1:])
    for bound_inter in bound_inters:
        if bound_inter[1] - bound_inter[0] <= 0:
            labels.append(np.max(label_frames) + 1)
        else:
            labels.append(most_frequent(
                label_frames[bound_inter[0]:bound_inter[1]]))

    return labels
python
{ "resource": "" }
q272908
filter_activation_matrix
test
def filter_activation_matrix(G, R):
    """Filters the activation matrix G, and returns a flattened copy."""
    idx = np.argmax(G, axis=1)
    max_idx = np.arange(G.shape[0])
    max_idx = (max_idx, idx.flatten())
    G[:, :] = 0
    G[max_idx] = idx + 1  # TODO: Order matters?

    G = np.sum(G, axis=1)
    G = median_filter(G[:, np.newaxis], R)

    return G.flatten()
python
{ "resource": "" }
q272909
get_boundaries_module
test
def get_boundaries_module(boundaries_id):
    """Obtains the boundaries module given a boundary algorithm identifier.

    Parameters
    ----------
    boundaries_id: str
        Boundary algorithm identifier (e.g., foote, sf).

    Returns
    -------
    module: object
        Object containing the selected boundary module.
        None for "ground truth".
    """
    if boundaries_id == "gt":
        return None
    try:
        module = eval(algorithms.__name__ + "." + boundaries_id)
    except AttributeError:
        raise RuntimeError("Algorithm %s cannot be found in msaf!" %
                           boundaries_id)
    if not module.is_boundary_type:
        raise RuntimeError("Algorithm %s cannot identify boundaries!" %
                           boundaries_id)
    return module
python
{ "resource": "" }
q272910
get_labels_module
test
def get_labels_module(labels_id):
    """Obtains the label module given a label algorithm identifier.

    Parameters
    ----------
    labels_id: str
        Label algorithm identifier (e.g., fmc2d, cnmf).

    Returns
    -------
    module: object
        Object containing the selected label module.
        None for not computing the labeling part of music segmentation.
    """
    if labels_id is None:
        return None
    try:
        module = eval(algorithms.__name__ + "." + labels_id)
    except AttributeError:
        raise RuntimeError("Algorithm %s cannot be found in msaf!" %
                           labels_id)
    if not module.is_label_type:
        raise RuntimeError("Algorithm %s cannot label segments!" %
                           labels_id)
    return module
python
{ "resource": "" }
q272911
run_hierarchical
test
def run_hierarchical(audio_file, bounds_module, labels_module, frame_times,
                     config, annotator_id=0):
    """Runs hierarchical algorithms with the specified identifiers on the
    audio_file. See run_algorithm for more information.
    """
    # Sanity check
    if bounds_module is None:
        raise NoHierBoundaryError("A boundary algorithm is needed when using "
                                  "hierarchical segmentation.")

    # Get features to make code nicer
    features = config["features"].features

    # Compute boundaries
    S = bounds_module.Segmenter(audio_file, **config)
    est_idxs, est_labels = S.processHierarchical()

    # Compute labels if needed
    if labels_module is not None and \
            bounds_module.__name__ != labels_module.__name__:
        # Compute labels for each level in the hierarchy
        flat_config = deepcopy(config)
        flat_config["hier"] = False
        for i, level_idxs in enumerate(est_idxs):
            S = labels_module.Segmenter(audio_file, in_bound_idxs=level_idxs,
                                        **flat_config)
            est_labels[i] = S.processFlat()[1]

    # Make sure the first and last boundaries are included for each
    # level in the hierarchy
    est_times = []
    cleaned_est_labels = []
    for level in range(len(est_idxs)):
        est_level_times, est_level_labels = \
            utils.process_segmentation_level(
                est_idxs[level], est_labels[level], features.shape[0],
                frame_times, config["features"].dur)
        est_times.append(est_level_times)
        cleaned_est_labels.append(est_level_labels)
    est_labels = cleaned_est_labels

    return est_times, est_labels
python
{ "resource": "" }
q272912
run_flat
test
def run_flat(file_struct, bounds_module, labels_module, frame_times, config,
             annotator_id):
    """Runs the flat algorithms with the specified identifiers on the
    audio_file. See run_algorithm for more information.
    """
    # Get features to make code nicer
    features = config["features"].features

    # Segment using the specified boundaries and labels
    # Case when boundaries and labels algorithms are the same
    if bounds_module is not None and labels_module is not None and \
            bounds_module.__name__ == labels_module.__name__:
        S = bounds_module.Segmenter(file_struct, **config)
        est_idxs, est_labels = S.processFlat()
    # Different boundary and label algorithms
    else:
        # Identify segment boundaries
        if bounds_module is not None:
            S = bounds_module.Segmenter(file_struct, in_labels=[], **config)
            est_idxs, est_labels = S.processFlat()
        else:
            try:
                # Ground-truth boundaries
                est_times, est_labels = io.read_references(
                    file_struct.audio_file, annotator_id=annotator_id)
                est_idxs = io.align_times(est_times, frame_times)
                if est_idxs[0] != 0:
                    est_idxs = np.concatenate(([0], est_idxs))
            except IOError:
                logging.warning("No references found for file: %s" %
                                file_struct.audio_file)
                return [], []

        # Label segments
        if labels_module is not None:
            if len(est_idxs) == 2:
                est_labels = np.array([0])
            else:
                S = labels_module.Segmenter(file_struct,
                                            in_bound_idxs=est_idxs, **config)
                est_labels = S.processFlat()[1]

    # Make sure the first and last boundaries are included
    est_times, est_labels = utils.process_segmentation_level(
        est_idxs, est_labels, features.shape[0], frame_times,
        config["features"].dur)

    return est_times, est_labels
python
{ "resource": "" }
q272913
run_algorithms
test
def run_algorithms(file_struct, boundaries_id, labels_id, config,
                   annotator_id=0):
    """Runs the algorithms with the specified identifiers on the audio_file.

    Parameters
    ----------
    file_struct: `msaf.io.FileStruct`
        Object with the file paths.
    boundaries_id: str
        Identifier of the boundaries algorithm to use ("gt" for ground
        truth).
    labels_id: str
        Identifier of the labels algorithm to use (None for not labeling).
    config: dict
        Dictionary containing the custom parameters of the algorithms to use.
    annotator_id: int
        Annotator identifier in the ground truth.

    Returns
    -------
    est_times: np.array or list
        List of estimated times for the segment boundaries.
        If `list`, it will be a list of np.arrays, sorted by segmentation
        layer.
    est_labels: np.array or list
        List of all the labels associated with the estimated segments.
        If `list`, it will be a list of np.arrays, sorted by segmentation
        layer.
    """
    # Check that there are enough audio frames
    if config["features"].features.shape[0] <= msaf.config.minimum_frames:
        logging.warning("Audio file too short, or too few beats estimated. "
                        "Returning empty estimations.")
        return np.asarray([0, config["features"].dur]), \
            np.asarray([0], dtype=int)

    # Get the corresponding modules
    bounds_module = get_boundaries_module(boundaries_id)
    labels_module = get_labels_module(labels_id)

    # Get the correct frame times
    frame_times = config["features"].frame_times

    # Segment audio based on type of segmentation
    run_fun = run_hierarchical if config["hier"] else run_flat
    est_times, est_labels = run_fun(file_struct, bounds_module, labels_module,
                                    frame_times, config, annotator_id)

    return est_times, est_labels
python
{ "resource": "" }
q272914
process_track
test
def process_track(file_struct, boundaries_id, labels_id, config,
                  annotator_id=0):
    """Prepares the parameters, runs the algorithms, and saves results.

    Parameters
    ----------
    file_struct: `msaf.io.FileStruct`
        FileStruct containing the paths of the input files (audio file,
        features file, reference file, output estimation file).
    boundaries_id: str
        Identifier of the boundaries algorithm to use ("gt" for ground
        truth).
    labels_id: str
        Identifier of the labels algorithm to use (None for not labeling).
    config: dict
        Dictionary containing the custom parameters of the algorithms to use.
    annotator_id: int
        Annotator identifier in the ground truth.

    Returns
    -------
    est_times: np.array
        List of estimated times for the segment boundaries.
    est_labels: np.array
        List of all the labels associated with the estimated segments.
    """
    logging.info("Segmenting %s" % file_struct.audio_file)

    # Get features
    config["features"] = Features.select_features(
        config["feature"], file_struct, config["annot_beats"],
        config["framesync"])

    # Get estimations
    est_times, est_labels = run_algorithms(file_struct, boundaries_id,
                                           labels_id, config,
                                           annotator_id=annotator_id)

    # Save
    logging.info("Writing results in: %s" % file_struct.est_file)
    io.save_estimations(file_struct, est_times, est_labels, boundaries_id,
                        labels_id, **config)

    return est_times, est_labels
python
{ "resource": "" }
q272915
process
test
def process(in_path, annot_beats=False, feature="pcp", framesync=False,
            boundaries_id=msaf.config.default_bound_id,
            labels_id=msaf.config.default_label_id, hier=False,
            sonify_bounds=False, plot=False, n_jobs=4, annotator_id=0,
            config=None, out_bounds="out_bounds.wav", out_sr=22050):
    """Main process to segment a file or a collection of files.

    Parameters
    ----------
    in_path: str
        Input path. If a directory, MSAF will function in collection mode.
        If an audio file, MSAF will be in single file mode.
    annot_beats: bool
        Whether to use annotated beats or not.
    feature: str
        String representing the feature to be used (e.g. pcp, mfcc, tonnetz)
    framesync: str
        Whether to use framesync features or not (default: False -> beatsync)
    boundaries_id: str
        Identifier of the boundaries algorithm (use "gt" for ground truth)
    labels_id: str
        Identifier of the labels algorithm (use None to not compute labels)
    hier : bool
        Whether to compute a hierarchical or flat segmentation.
    sonify_bounds: bool
        Whether to write an output audio file with the annotated boundaries
        or not (only available in Single File Mode).
    plot: bool
        Whether to plot the boundaries and labels against the ground truth.
    n_jobs: int
        Number of processes to run in parallel. Only available in collection
        mode.
    annotator_id: int
        Annotator identifier in the ground truth.
    config: dict
        Dictionary containing custom configuration parameters for the
        algorithms. If None, the default parameters are used.
    out_bounds: str
        Path to the output for the sonified boundaries (only in single file
        mode, when sonify_bounds is True).
    out_sr : int
        Sampling rate for the sonified bounds.

    Returns
    -------
    results : list
        List containing tuples of (est_times, est_labels) of estimated
        boundary times and estimated labels. If labels_id is None,
        est_labels will be a list of -1.
    """
    # Seed random to reproduce results
    np.random.seed(123)

    # Set up configuration based on algorithms parameters
    if config is None:
        config = io.get_configuration(feature, annot_beats, framesync,
                                      boundaries_id, labels_id)
        config["features"] = None

    # Save multi-segment (hierarchical) configuration
    config["hier"] = hier

    if not os.path.exists(in_path):
        raise NoAudioFileError("File or directory does not exist: %s" %
                               in_path)

    if os.path.isfile(in_path):
        # Single file mode
        # Get (if they exist) or compute features
        file_struct = msaf.io.FileStruct(in_path)

        # Use temporary file in single mode
        file_struct.features_file = msaf.config.features_tmp_file

        # Get features
        config["features"] = Features.select_features(
            feature, file_struct, annot_beats, framesync)

        # And run the algorithms
        est_times, est_labels = run_algorithms(file_struct, boundaries_id,
                                               labels_id, config,
                                               annotator_id=annotator_id)

        if sonify_bounds:
            logging.info("Sonifying boundaries in %s..." % out_bounds)
            audio_hq, sr = librosa.load(in_path, sr=out_sr)
            utils.sonify_clicks(audio_hq, est_times, out_bounds, out_sr)

        if plot:
            plotting.plot_one_track(file_struct, est_times, est_labels,
                                    boundaries_id, labels_id)

        # TODO: Only save if needed
        # Save estimations
        msaf.utils.ensure_dir(os.path.dirname(file_struct.est_file))
        io.save_estimations(file_struct, est_times, est_labels,
                            boundaries_id, labels_id, **config)

        return est_times, est_labels
    else:
        # Collection mode
        file_structs = io.get_dataset_files(in_path)
        return Parallel(n_jobs=n_jobs)(delayed(process_track)(
            file_struct, boundaries_id, labels_id, config,
            annotator_id=annotator_id) for file_struct in file_structs[:])
python
{ "resource": "" }
q272916
AA.update_w
test
def update_w(self):
    """ alternating least squares step, update W under the convexity
    constraint """
    def update_single_w(i):
        """ compute single W[:,i] """
        # optimize beta using qp solver from cvxopt
        FB = base.matrix(np.float64(np.dot(-self.data.T, W_hat[:, i])))
        be = solvers.qp(HB, FB, INQa, INQb, EQa, EQb)
        self.beta[i, :] = np.array(be['x']).reshape((1, self._num_samples))

    # float64 required for cvxopt
    HB = base.matrix(np.float64(np.dot(self.data[:, :].T, self.data[:, :])))
    EQb = base.matrix(1.0, (1, 1))
    W_hat = np.dot(self.data, pinv(self.H))
    INQa = base.matrix(-np.eye(self._num_samples))
    INQb = base.matrix(0.0, (self._num_samples, 1))
    EQa = base.matrix(1.0, (1, self._num_samples))

    for i in range(self._num_bases):
        update_single_w(i)

    self.W = np.dot(self.beta, self.data.T).T
python
{ "resource": "" }
q272917
main
test
def main():
    '''
    Main Entry point for translator and argument parser
    '''
    args = command_line()

    translate = partial(translator, args.source, args.dest,
                        version=' '.join([__version__, __build__]))

    return source(spool(set_task(translate, translit=args.translit)),
                  args.text)
python
{ "resource": "" }
q272918
coroutine
test
def coroutine(func):
    """
    Initializes a coroutine, essentially priming it to the yield statement.
    Used as a decorator over functions that generate coroutines.

    .. code-block:: python

        # Basic coroutine producer/consumer pattern
        from translate import coroutine

        @coroutine
        def coroutine_foo(bar):
            try:
                while True:
                    baz = (yield)
                    bar.send(baz)
            except GeneratorExit:
                bar.close()

    :param func: Unprimed Generator
    :type func: Function

    :return: Initialized Coroutine
    :rtype: Function
    """
    @wraps(func)
    def initialization(*args, **kwargs):
        start = func(*args, **kwargs)
        next(start)
        return start

    return initialization
python
{ "resource": "" }
q272919
accumulator
test
def accumulator(init, update):
    """
    Generic accumulator function.

    .. code-block:: python

        # Simplest Form
        >>> a = 'this' + ' '
        >>> b = 'that'
        >>> c = functools.reduce(accumulator, [b], a)
        >>> c
        'this that'

        # The type of the initial value determines output type.
        >>> a = 5
        >>> b = 'Hello'
        >>> c = functools.reduce(accumulator, [b], a)
        >>> c
        10

    :param init: Initial Value
    :param update: Value to accumulate

    :return: Combined Values
    """
    return (
        init + len(update) if isinstance(init, int) else init + update
    )
python
{ "resource": "" }
q272920
set_task
test
def set_task(translator, translit=False):
    """
    Task Setter Coroutine

    End point destination coroutine of a purely consumer type.
    Delegates Text IO to the `write_stream` function.

    :param translator: Translator
    :type translator: Function

    :param translit: Transliteration Switch
    :type translit: Boolean
    """
    # Initialize Task Queue
    task = str()
    queue = list()

    # Function Partial
    output = ('translit' if translit else 'trans')
    stream = partial(write_stream, output=output)

    workers = ThreadPoolExecutor(max_workers=8)
    try:
        while True:
            task = yield
            queue.append(task)
    except GeneratorExit:
        list(map(stream, workers.map(translator, queue)))
python
{ "resource": "" }
q272921
spool
test
def spool(iterable, maxlen=1250):
    """
    Consumes text streams and spools them together for more io efficient
    processes.

    :param iterable: Sends text stream for further processing
    :type iterable: Coroutine

    :param maxlen: Maximum query string size
    :type maxlen: Integer
    """
    words = int()
    text = str()

    try:
        while True:
            while words < maxlen:
                stream = yield
                text = reduce(accumulator, stream, text)
                words = reduce(accumulator, stream, words)
            iterable.send(text)
            words = int()
            text = str()
    except GeneratorExit:
        iterable.send(text)
        iterable.close()
python
{ "resource": "" }
q272922
source
test
def source(target, inputstream=sys.stdin):
    """
    Coroutine starting point. Produces text stream and forwards to consumers.

    :param target: Target coroutine consumer
    :type target: Coroutine

    :param inputstream: Input Source
    :type inputstream: BufferedTextIO Object
    """
    for line in inputstream:
        while len(line) > 600:
            init, sep, line = line.partition(' ')
            assert len(init) <= 600
            target.send(''.join([init, sep]))
        target.send(line)

    inputstream.close()

    return target.close()
python
{ "resource": "" }
q272923
push_url
test
def push_url(interface):
    '''
    Decorates a function returning the url of translation API.
    Creates and maintains HTTP connection state.

    Returns a dict response object from the server containing the
    translated text and metadata of the request body.

    :param interface: Callable Request Interface
    :type interface: Function
    '''
    @functools.wraps(interface)
    def connection(*args, **kwargs):
        """
        Extends and wraps a HTTP interface.

        :return: Response Content
        :rtype: Dictionary
        """
        session = Session()
        session.mount('http://', HTTPAdapter(max_retries=2))
        session.mount('https://', HTTPAdapter(max_retries=2))

        request = Request(**interface(*args, **kwargs))
        prepare = session.prepare_request(request)
        response = session.send(prepare, verify=True)

        if response.status_code != requests.codes.ok:
            response.raise_for_status()

        cleanup = re.subn(r',(?=,)', '', response.content.decode('utf-8'))[0]

        # NOTE: json.loads no longer accepts an `encoding` keyword;
        # the payload is already decoded above.
        return json.loads(cleanup.replace(r'\xA0', r' ').replace('[,', '[1,'))

    return connection
python
{ "resource": "" }
q272924
translator
test
def translator(source, target, phrase, version='0.0 test', charset='utf-8'):
    """
    Returns the request interface that will be pushed to the translation
    server for parsing.

    List of acceptable language codes for source and target languages can be
    found as a JSON file in the etc directory. Some source languages are
    limited in scope of the possible target languages that are available.

    .. code-block:: python

        >>> from translate import translator
        >>> translator('en', 'zh-TW', 'Hello World!')
            '你好世界!'

    :param source: Language code for translation source
    :type source: String

    :param target: Language code that source will be translated into
    :type target: String

    :param phrase: Text body string that will be url encoded and translated
    :type phrase: String

    :return: Request Interface
    :rtype: Dictionary
    """
    url = 'https://translate.google.com/translate_a/single'
    agent = 'User-Agent', 'py-translate v{}'.format(version)
    content = 'Content-Type', 'application/json; charset={}'.format(charset)
    params = {'client': 'a', 'ie': charset, 'oe': charset, 'dt': 't',
              'sl': source, 'tl': target, 'q': phrase}

    request = {'method': 'GET',
               'url': url,
               'params': params,
               'headers': dict([agent, content])}

    return request
python
{ "resource": "" }
q272925
translation_table
test
def translation_table(language, filepath='supported_translations.json'):
    '''
    Opens up the file located under the etc directory containing language
    codes and prints them out.

    :param filepath: Path to location of json file
    :type filepath: str

    :return: language codes
    :rtype: dict
    '''
    fullpath = abspath(join(dirname(__file__), 'etc', filepath))
    if not isfile(fullpath):
        raise IOError('File does not exist at {0}'.format(fullpath))

    with open(fullpath, 'rt') as fp:
        raw_data = json.load(fp).get(language, None)

    assert raw_data is not None

    return dict((code['language'], code['name']) for code in raw_data)
python
{ "resource": "" }
q272926
print_table
test
def print_table(language):
    '''
    Generates a formatted table of language codes
    '''
    table = translation_table(language)

    for code, name in sorted(table.items(), key=operator.itemgetter(0)):
        print(u'{language:<8} {name:\u3000<20}'.format(name=name,
                                                       language=code))

    return None
python
{ "resource": "" }
q272927
remove_nodes
test
def remove_nodes(network, rm_nodes):
    """
    Create DataFrames of nodes and edges that do not include specified
    nodes.

    Parameters
    ----------
    network : pandana.Network
    rm_nodes : array_like
        A list, array, Index, or Series of node IDs that should *not*
        be saved as part of the Network.

    Returns
    -------
    nodes, edges : pandas.DataFrame
    """
    rm_nodes = set(rm_nodes)
    ndf = network.nodes_df
    edf = network.edges_df

    nodes_to_keep = ~ndf.index.isin(rm_nodes)
    edges_to_keep = ~(edf['from'].isin(rm_nodes) | edf['to'].isin(rm_nodes))

    return ndf.loc[nodes_to_keep], edf.loc[edges_to_keep]
python
{ "resource": "" }
q272928
network_to_pandas_hdf5
test
def network_to_pandas_hdf5(network, filename, rm_nodes=None):
    """
    Save a Network's data to a Pandas HDFStore.

    Parameters
    ----------
    network : pandana.Network
    filename : str
    rm_nodes : array_like
        A list, array, Index, or Series of node IDs that should *not*
        be saved as part of the Network.
    """
    if rm_nodes is not None:
        nodes, edges = remove_nodes(network, rm_nodes)
    else:
        nodes, edges = network.nodes_df, network.edges_df

    with pd.HDFStore(filename, mode='w') as store:
        store['nodes'] = nodes
        store['edges'] = edges
        store['two_way'] = pd.Series([network._twoway])
        store['impedance_names'] = pd.Series(network.impedance_names)
python
{ "resource": "" }
q272929
network_from_pandas_hdf5
test
def network_from_pandas_hdf5(cls, filename):
    """
    Build a Network from data in a Pandas HDFStore.

    Parameters
    ----------
    cls : class
        Class to instantiate, usually pandana.Network.
    filename : str

    Returns
    -------
    network : pandana.Network
    """
    with pd.HDFStore(filename) as store:
        nodes = store['nodes']
        edges = store['edges']
        two_way = store['two_way'][0]
        imp_names = store['impedance_names'].tolist()

    return cls(
        nodes['x'], nodes['y'], edges['from'], edges['to'],
        edges[imp_names], twoway=two_way)
python
{ "resource": "" }
q272930
Network.set
test
def set(self, node_ids, variable=None, name="tmp"):
    """
    Characterize urban space with a variable that is related to nodes in
    the network.

    Parameters
    ----------
    node_ids : Pandas Series, int
        A series of node_ids which are usually computed using get_node_ids
        on this object.
    variable : Pandas Series, numeric, optional
        A series which represents some variable defined in urban space. It
        could be the location of buildings, or the income of all households -
        just about anything can be aggregated using the network queries
        provided here and this provides the api to set the variable at its
        disaggregate locations. Note that node_id and variable should have
        the same index (although the index is not actually used). If
        variable is not set, then it is assumed that the variable is all
        "ones" at the location specified by node_ids. This could be, for
        instance, the location of all coffee shops which don't really have
        a variable to aggregate. The variable is connected to the closest
        node in the Pandana network which assumes no impedance between the
        location of the variable and the location of the closest network
        node.
    name : string, optional
        Name the variable. This is optional in the sense that if you don't
        specify it, the default name will be used. Since the same default
        name is used by aggregate on this object, you can alternate between
        characterize and aggregate calls without setting names.

    Returns
    -------
    Nothing
    """
    if variable is None:
        variable = pd.Series(np.ones(len(node_ids)), index=node_ids.index)

    df = pd.DataFrame({name: variable,
                       "node_idx": self._node_indexes(node_ids)})

    length = len(df)
    df = df.dropna(how="any")
    newl = len(df)
    if length - newl > 0:
        print("Removed %d rows because they contain missing values" %
              (length - newl))

    self.variable_names.add(name)
    self.net.initialize_access_var(name.encode('utf-8'),
                                   df.node_idx.values.astype('int'),
                                   df[name].values.astype('double'))
python
{ "resource": "" }
q272931
Network.aggregate
test
def aggregate(self, distance, type="sum", decay="linear", imp_name=None,
              name="tmp"):
    """
    Aggregate information for every source node in the network - this is
    really the main purpose of this library. This allows you to touch the
    data specified by calling set and perform some aggregation on it within
    the specified distance. For instance, summing the population within
    1000 meters.

    Parameters
    ----------
    distance : float
        The maximum distance to aggregate data within. 'distance' can
        represent any impedance unit that you have set as your edge weight.
        This will usually be a distance unit in meters however if you have
        customized the impedance this could be in other units such as
        utility or time etc.
    type : string
        The type of aggregation, can be one of "ave", "sum", "std",
        "count", and now "min", "25pct", "median", "75pct", and "max" will
        compute the associated quantiles. (Quantiles are computed by
        sorting so might be slower than the others.)
    decay : string
        The type of decay to apply, which makes things that are further
        away count less in the aggregation - must be one of "linear",
        "exponential" or "flat" (which means no decay). Linear is the
        fastest computation to perform. When performing an "ave", the decay
        is typically "flat".
    imp_name : string, optional
        The impedance name to use for the aggregation on this network.
        Must be one of the impedance names passed in the constructor of
        this object. If not specified, there must be only one impedance
        passed in the constructor, which will be used.
    name : string, optional
        The variable to aggregate. This variable will have been created
        and named by a call to set. If not specified, the default variable
        name will be used so that the most recent call to set without
        giving a name will be the variable used.

    Returns
    -------
    agg : Pandas Series
        Returns a Pandas Series for every origin node in the network, with
        the index which is the same as the node_ids passed to the init
        method and the values are the aggregations for each source node in
        the network.
    """
    imp_num = self._imp_name_to_num(imp_name)
    type = type.lower()

    if type == "ave":
        type = "mean"  # changed generic ave to mean

    assert name in self.variable_names, "A variable with that name " \
        "has not yet been initialized"

    res = self.net.get_all_aggregate_accessibility_variables(
        distance, name.encode('utf-8'), type.encode('utf-8'),
        decay.encode('utf-8'), imp_num)

    return pd.Series(res, index=self.node_ids)
python
{ "resource": "" }
q272932
Network.get_node_ids
test
def get_node_ids(self, x_col, y_col, mapping_distance=None):
    """
    Assign node_ids to data specified by x_col and y_col.

    Parameters
    ----------
    x_col : Pandas series (float)
        A Pandas Series where values specify the x (e.g. longitude)
        location of dataset.
    y_col : Pandas series (float)
        A Pandas Series where values specify the y (e.g. latitude)
        location of dataset. x_col and y_col should use the same index.
    mapping_distance : float, optional
        The maximum distance that will be considered a match between the
        x, y data and the nearest node in the network. This will usually
        be a distance unit in meters however if you have customized the
        impedance this could be in other units such as utility or time
        etc. If not specified, every x, y coordinate will be mapped to
        the nearest node.

    Returns
    -------
    node_ids : Pandas series (int)
        Returns a Pandas Series of node_ids for each x, y in the input
        data. The index is the same as the indexes of the x, y input data,
        and the values are the mapped node_ids. If mapping distance is not
        passed and if there are no nans in the x, y data, this will be the
        same length as the x, y data. If the mapping is imperfect, this
        function returns all the input x, y's that were successfully
        mapped to node_ids.
    """
    xys = pd.DataFrame({'x': x_col, 'y': y_col})

    # Note: DataFrame.as_matrix() was removed from pandas;
    # .values is the equivalent call.
    distances, indexes = self.kdtree.query(xys.values)
    indexes = np.transpose(indexes)[0]
    distances = np.transpose(distances)[0]

    node_ids = self.nodes_df.iloc[indexes].index

    df = pd.DataFrame({"node_id": node_ids, "distance": distances},
                      index=xys.index)

    if mapping_distance is not None:
        df = df[df.distance <= mapping_distance]

    return df.node_id
python
{ "resource": "" }
q272933
Network.plot
test
def plot(self, data, bbox=None, plot_type='scatter', fig_kwargs=None,
         bmap_kwargs=None, plot_kwargs=None, cbar_kwargs=None):
    """
    Plot an array of data on a map using matplotlib and Basemap,
    automatically matching the data to the Pandana network node positions.

    Keyword arguments are passed to the plotting routine.

    Parameters
    ----------
    data : pandas.Series
        Numeric data with the same length and index as the nodes in the
        network.
    bbox : tuple, optional
        (lat_min, lng_min, lat_max, lng_max)
    plot_type : {'hexbin', 'scatter'}, optional
    fig_kwargs : dict, optional
        Keyword arguments that will be passed to
        matplotlib.pyplot.subplots. Use this to specify things like figure
        size or background color.
    bmap_kwargs : dict, optional
        Keyword arguments that will be passed to the Basemap constructor.
        This can be used to specify a projection or coastline resolution.
    plot_kwargs : dict, optional
        Keyword arguments that will be passed to the matplotlib plotting
        command used. Use this to control plot styles and color maps used.
    cbar_kwargs : dict, optional
        Keyword arguments passed to the Basemap.colorbar method. Use this
        to control color bar location and label.

    Returns
    -------
    bmap : Basemap
    fig : matplotlib.Figure
    ax : matplotlib.Axes
    """
    from mpl_toolkits.basemap import Basemap

    fig_kwargs = fig_kwargs or {}
    bmap_kwargs = bmap_kwargs or {}
    plot_kwargs = plot_kwargs or {}
    cbar_kwargs = cbar_kwargs or {}

    if not bbox:
        bbox = (
            self.nodes_df.y.min(),
            self.nodes_df.x.min(),
            self.nodes_df.y.max(),
            self.nodes_df.x.max())

    fig, ax = plt.subplots(**fig_kwargs)

    bmap = Basemap(bbox[1], bbox[0], bbox[3], bbox[2], ax=ax, **bmap_kwargs)
    bmap.drawcoastlines()
    bmap.drawmapboundary()

    x, y = bmap(self.nodes_df.x.values, self.nodes_df.y.values)

    if plot_type == 'scatter':
        plot = bmap.scatter(x, y, c=data.values, **plot_kwargs)
    elif plot_type == 'hexbin':
        plot = bmap.hexbin(x, y, C=data.values, **plot_kwargs)

    bmap.colorbar(plot, **cbar_kwargs)

    return bmap, fig, ax
python
{ "resource": "" }
q272934
Network.set_pois
test
def set_pois(self, category, maxdist, maxitems, x_col, y_col):
    """
    Set the location of all the pois of this category. The pois are
    connected to the closest node in the Pandana network which assumes no
    impedance between the location of the variable and the location of the
    closest network node.

    Parameters
    ----------
    category : string
        The name of the category for this set of pois
    maxdist : float
        The maximum distance that will later be used in
        find_all_nearest_pois
    maxitems : int
        The maximum number of items that will later be requested in
        find_all_nearest_pois
    x_col : Pandas Series (float)
        The x location (longitude) of pois in this category
    y_col : Pandas Series (float)
        The y location (latitude) of pois in this category

    Returns
    -------
    Nothing
    """
    if category not in self.poi_category_names:
        self.poi_category_names.append(category)

    self.max_pois = maxitems

    node_ids = self.get_node_ids(x_col, y_col)

    self.poi_category_indexes[category] = node_ids.index

    node_idx = self._node_indexes(node_ids)

    self.net.initialize_category(maxdist, maxitems,
                                 category.encode('utf-8'), node_idx.values)
python
{ "resource": "" }
q272935
Network.nearest_pois
test
def nearest_pois(self, distance, category, num_pois=1, max_distance=None,
                 imp_name=None, include_poi_ids=False):
    """
    Find the distance to the nearest pois from each source node. The
    bigger values in this case mean less accessibility.

    Parameters
    ----------
    distance : float
        The maximum distance to look for pois. This will usually be a
        distance unit in meters however if you have customized the
        impedance this could be in other units such as utility or time etc.
    category : string
        The name of the category of poi to look for
    num_pois : int
        The number of pois to look for, this also sets the number of
        columns in the DataFrame that gets returned
    max_distance : float, optional
        The value to set the distance to if there is NO poi within the
        specified distance - if not specified, gets set to distance. This
        will usually be a distance unit in meters however if you have
        customized the impedance this could be in other units such as
        utility or time etc.
    imp_name : string, optional
        The impedance name to use for the aggregation on this network.
        Must be one of the impedance names passed in the constructor of
        this object. If not specified, there must be only one impedance
        passed in the constructor, which will be used.
    include_poi_ids : bool, optional
        If this flag is set to true, the call will add columns to the
        return DataFrame - instead of just returning the distance for the
        nth POI, it will also return the id of that POI. The names of the
        columns with the poi ids will be poi1, poi2, etc - it will take
        roughly twice as long to include these ids as to not include them.

    Returns
    -------
    d : Pandas DataFrame
        Like aggregate, this series has an index of all the node ids for
        the network. Unlike aggregate, this method returns a dataframe
        with the number of columns equal to the distances to the Nth
        closest poi. For instance, if you ask for the 10 closest poi to
        each node, column d[1] will be the distance to the 1st closest poi
        of that category while column d[2] will be the distance to the 2nd
        closest poi, and so on.
    """
    if max_distance is None:
        max_distance = distance

    if category not in self.poi_category_names:
        assert 0, "Need to call set_pois for this category"

    if num_pois > self.max_pois:
        assert 0, "Asking for more pois than set in init_pois"

    imp_num = self._imp_name_to_num(imp_name)

    dists, poi_ids = self.net.find_all_nearest_pois(
        distance, num_pois, category.encode('utf-8'), imp_num)
    dists[dists == -1] = max_distance

    df = pd.DataFrame(dists, index=self.node_ids)
    df.columns = list(range(1, num_pois + 1))

    if include_poi_ids:
        df2 = pd.DataFrame(poi_ids, index=self.node_ids)
        df2.columns = ["poi%d" % i for i in range(1, num_pois + 1)]
        for col in df2.columns:
            # if this is still all working according to plan at this point
            # the great magic trick is now to turn the integer position of
            # the poi, which is painstakingly returned from the c++ code,
            # and turn it into the actual index that was used when it was
            # initialized as a pandas series - this really is pandas-like
            # thinking. it's complicated on the inside, but quite
            # intuitive to the user I think
            s = df2[col].astype('int')
            df2[col] = self.poi_category_indexes[category].values[s]
            df2.loc[s == -1, col] = np.nan

        df = pd.concat([df, df2], axis=1)

    return df
python
{ "resource": "" }
q272936
Network.low_connectivity_nodes
test
def low_connectivity_nodes(self, impedance, count, imp_name=None):
    """
    Identify nodes that are connected to fewer than some threshold of
    other nodes within a given distance.

    Parameters
    ----------
    impedance : float
        Distance within which to search for other connected nodes. This
        will usually be a distance unit in meters however if you have
        customized the impedance this could be in other units such as
        utility or time etc.
    count : int
        Threshold for connectivity. If a node is connected to fewer than
        this many nodes within `impedance` it will be identified as "low
        connectivity".
    imp_name : string, optional
        The impedance name to use for the aggregation on this network.
        Must be one of the impedance names passed in the constructor of
        this object. If not specified, there must be only one impedance
        passed in the constructor, which will be used.

    Returns
    -------
    node_ids : array
        List of "low connectivity" node IDs.
    """
    # set a counter variable on all nodes
    self.set(self.node_ids.to_series(), name='counter')

    # count nodes within impedance range
    agg = self.aggregate(impedance, type='count', imp_name=imp_name,
                         name='counter')

    return np.array(agg[agg < count].index)
python
{ "resource": "" }
q272937
process_node
test
def process_node(e):
    """
    Process a node element entry into a dict suitable for going into a
    Pandas DataFrame.

    Parameters
    ----------
    e : dict

    Returns
    -------
    node : dict
    """
    uninteresting_tags = {
        'source',
        'source_ref',
        'source:ref',
        'history',
        'attribution',
        'created_by',
        'tiger:tlid',
        'tiger:upload_uuid',
    }

    node = {
        'id': e['id'],
        'lat': e['lat'],
        'lon': e['lon']
    }

    if 'tags' in e:
        for t, v in list(e['tags'].items()):
            if t not in uninteresting_tags:
                node[t] = v

    return node
python
{ "resource": "" }
q272938
make_osm_query
test
def make_osm_query(query):
    """
    Make a request to OSM and return the parsed JSON.

    Parameters
    ----------
    query : str
        A string in the Overpass QL format.

    Returns
    -------
    data : dict
    """
    osm_url = 'http://www.overpass-api.de/api/interpreter'
    req = requests.get(osm_url, params={'data': query})
    req.raise_for_status()

    return req.json()
python
{ "resource": "" }
q272939
build_node_query
test
def build_node_query(lat_min, lng_min, lat_max, lng_max, tags=None):
    """
    Build the string for a node-based OSM query.

    Parameters
    ----------
    lat_min, lng_min, lat_max, lng_max : float
    tags : str or list of str, optional
        Node tags that will be used to filter the search. See
        http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
        for information about OSM Overpass queries and
        http://wiki.openstreetmap.org/wiki/Map_Features for a list of tags.

    Returns
    -------
    query : str
    """
    if tags is not None:
        if isinstance(tags, str):
            tags = [tags]
        tags = ''.join('[{}]'.format(t) for t in tags)
    else:
        tags = ''

    query_fmt = (
        '[out:json];'
        '('
        '  node'
        '  {tags}'
        '  ({lat_min},{lng_min},{lat_max},{lng_max});'
        ');'
        'out;')

    return query_fmt.format(
        lat_min=lat_min, lng_min=lng_min,
        lat_max=lat_max, lng_max=lng_max,
        tags=tags)
python
{ "resource": "" }
q272940
node_query
test
def node_query(lat_min, lng_min, lat_max, lng_max, tags=None):
    """
    Search for OSM nodes within a bounding box that match given tags.

    Parameters
    ----------
    lat_min, lng_min, lat_max, lng_max : float
    tags : str or list of str, optional
        Node tags that will be used to filter the search. See
        http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
        for information about OSM Overpass queries and
        http://wiki.openstreetmap.org/wiki/Map_Features for a list of tags.

    Returns
    -------
    nodes : pandas.DataFrame
        Will have 'lat' and 'lon' columns, plus other columns for the tags
        associated with the node (these will vary based on the query).
        Index will be the OSM node IDs.
    """
    node_data = make_osm_query(build_node_query(
        lat_min, lng_min, lat_max, lng_max, tags=tags))

    if len(node_data['elements']) == 0:
        raise RuntimeError('OSM query results contain no data.')

    nodes = [process_node(n) for n in node_data['elements']]
    return pd.DataFrame.from_records(nodes, index='id')
python
{ "resource": "" }
q272941
isregex
test
def isregex(value):
    """
    Returns ``True`` if the input argument object is a native regular
    expression object, otherwise ``False``.

    Arguments:
        value (mixed): input value to test.

    Returns:
        bool
    """
    if not value:
        return False
    return any((isregex_expr(value), isinstance(value, retype)))
python
{ "resource": "" }
q272942
BaseMatcher.compare
test
def compare(self, value, expectation, regex_expr=False):
    """
    Compares two values with regular expression matching support.

    Arguments:
        value (mixed): value to compare.
        expectation (mixed): value to match.
        regex_expr (bool, optional): enables string based regex matching.

    Returns:
        bool
    """
    return compare(value, expectation, regex_expr=regex_expr)
python
{ "resource": "" }
q272943
fluent
test
def fluent(fn):
    """
    Simple function decorator allowing easy method chaining.

    Arguments:
        fn (function): target function to decorate.
    """
    @functools.wraps(fn)
    def wrapper(self, *args, **kw):
        # Trigger method proxy
        result = fn(self, *args, **kw)
        # Return self instance or method result
        return self if result is None else result

    return wrapper
python
{ "resource": "" }
q272944
compare
test
def compare(expr, value, regex_expr=False):
    """
    Compares a string or regular expression against a given value.

    Arguments:
        expr (str|regex): string or regular expression value to compare.
        value (str): value to compare against to.
        regex_expr (bool, optional): enables string based regex matching.

    Raises:
        AssertionError: in case of assertion error.

    Returns:
        bool
    """
    # Strict equality comparison
    if expr == value:
        return True

    # Infer negate expression to match, if needed
    negate = False
    if isinstance(expr, str):
        negate = expr.startswith(NEGATE)
        expr = strip_negate(expr) if negate else expr

    try:
        # RegExp or strict equality comparison
        test(expr, value, regex_expr=regex_expr)
    except Exception as err:
        if negate:
            return True
        else:
            raise err

    return True
python
{ "resource": "" }
q272945
trigger_methods
test
def trigger_methods(instance, args):
    """
    Triggers specific class methods using a simple reflection mechanism
    based on the given input dictionary params.

    Arguments:
        instance (object): target instance to dynamically trigger methods.
        args (iterable): input arguments used to trigger the methods.

    Returns:
        None
    """
    # Start the magic
    for name in sorted(args):
        value = args[name]
        target = instance

        # If response attributes
        if name.startswith('response_') or name.startswith('reply_'):
            name = name.replace('response_', '').replace('reply_', '')
            # If instance has response attribute, use it
            if hasattr(instance, '_response'):
                target = instance._response

        # Retrieve class member for inspection and future use
        member = getattr(target, name, None)

        # Is attribute
        isattr = name in dir(target)
        iscallable = ismethod(member) and not isfunction(member)

        if not iscallable and not isattr:
            raise PookInvalidArgument('Unsupported argument: {}'.format(name))

        # Set attribute or trigger method
        if iscallable:
            member(value)
        else:
            setattr(target, name, value)
python
{ "resource": "" }
q272946
MatcherEngine.match
test
def match(self, request):
    """
    Match the given HTTP request instance against the registered matcher
    functions in the current engine.

    Arguments:
        request (pook.Request): outgoing request to match.

    Returns:
        tuple(bool, list[Exception]): ``True`` if all matcher tests passes,
            otherwise ``False``. Also returns an optional list of error
            exceptions.
    """
    errors = []

    def match(matcher):
        try:
            return matcher.match(request)
        except Exception as err:
            err = '{}: {}'.format(type(matcher).__name__, err)
            errors.append(err)
            return False

    return all([match(matcher) for matcher in self]), errors
python
{ "resource": "" }
q272947
get
test
def get(name):
    """
    Returns a matcher instance by class or alias name.

    Arguments:
        name (str): matcher class name or alias.

    Returns:
        matcher: found matcher instance, otherwise ``None``.
    """
    for matcher in matchers:
        if matcher.__name__ == name or getattr(matcher, 'name', None) == name:
            return matcher
python
{ "resource": "" }
q272948
init
test
def init(name, *args):
    """
    Initializes a matcher instance passing variadic arguments to its
    constructor. Acts as a delegator proxy.

    Arguments:
        name (str): matcher class name or alias to execute.
        *args (mixed): variadic arguments passed to the matcher constructor.

    Returns:
        matcher: matcher instance.

    Raises:
        ValueError: if matcher was not found.
    """
    matcher = get(name)
    if not matcher:
        raise ValueError('Cannot find matcher: {}'.format(name))
    return matcher(*args)
python
{ "resource": "" }
q272949
Response.body
test
def body(self, body):
    """
    Defines response body data.

    Arguments:
        body (str|bytes): response body to use.

    Returns:
        self: ``pook.Response`` current instance.
    """
    if isinstance(body, bytes):
        body = body.decode('utf-8')

    self._body = body
python
{ "resource": "" }
q272950
Response.json
test
def json(self, data):
    """
    Defines the mock response JSON body.

    Arguments:
        data (dict|list|str): JSON body data.

    Returns:
        self: ``pook.Response`` current instance.
    """
    self._headers['Content-Type'] = 'application/json'

    if not isinstance(data, str):
        data = json.dumps(data, indent=4)

    self._body = data
python
{ "resource": "" }
q272951
HTTPHeaderDict.set
test
def set(self, key, val):
    """
    Sets a header field with the given value, removing previous values.

    Usage::

        headers = HTTPHeaderDict(foo='bar')
        headers.set('Foo', 'baz')
        headers['foo']
        > 'baz'
    """
    key_lower = key.lower()
    new_vals = key, val
    # Keep the common case aka no item present as fast as possible
    vals = self._container.setdefault(key_lower, new_vals)
    if new_vals is not vals:
        self._container[key_lower] = [vals[0], vals[1], val]
python
{ "resource": "" }
q272952
_append_funcs
test
def _append_funcs(target, items):
    """
    Helper function to append functions into a given list.

    Arguments:
        target (list): receptor list to append functions.
        items (iterable): iterable that yields elements to append.
    """
    for item in items:
        if isfunction(item) or ismethod(item):
            target.append(item)
python
{ "resource": "" }
q272953
_trigger_request
test
def _trigger_request(instance, request):
    """
    Triggers request mock definition methods dynamically based on input
    keyword arguments passed to `pook.Mock` constructor.

    This is used to provide a more Pythonic interface vs chainable API
    approach.
    """
    if not isinstance(request, Request):
        raise TypeError('request must be instance of pook.Request')

    # Register request matchers
    for key in request.keys:
        if hasattr(instance, key):
            getattr(instance, key)(getattr(request, key))
python
{ "resource": "" }
q272954
Mock.url
test
def url(self, url):
    """
    Defines the mock URL to match. It can be a full URL with path and
    query params. Protocol schema is optional, defaults to ``http://``.

    Arguments:
        url (str): mock URL to match. E.g: ``server.com/api``.

    Returns:
        self: current Mock instance.
    """
    self._request.url = url
    self.add_matcher(matcher('URLMatcher', url))
python
{ "resource": "" }
q272955
Mock.headers
test
def headers(self, headers=None, **kw):
    """
    Defines a dictionary of headers to match. Header keys are case
    insensitive.

    Arguments:
        headers (dict): headers to match.
        **kw (dict): headers to match as variadic keyword arguments.

    Returns:
        self: current Mock instance.
    """
    headers = kw if kw else headers
    self._request.headers = headers
    self.add_matcher(matcher('HeadersMatcher', headers))
python
{ "resource": "" }
q272956
Mock.header_present
test
def header_present(self, *names):
    """
    Defines a new header matcher expectation that must be present in the
    outgoing request in order to be satisfied, no matter what value it
    hosts. Header keys are case insensitive.

    Arguments:
        *names (str): header or headers names to match.

    Returns:
        self: current Mock instance.

    Example::

        (pook.get('server.com/api')
            .header_present('content-type'))
    """
    for name in names:
        headers = {name: re.compile('(.*)')}
        self.add_matcher(matcher('HeadersMatcher', headers))
python
{ "resource": "" }
q272957
Mock.headers_present
test
def headers_present(self, headers):
    """
    Defines a list of headers that must be present in the outgoing request
    in order to satisfy the matcher, no matter what value the headers
    hosts. Header keys are case insensitive.

    Arguments:
        headers (list|tuple): header keys to match.

    Returns:
        self: current Mock instance.

    Example::

        (pook.get('server.com/api')
            .headers_present(['content-type', 'Authorization']))
    """
    headers = {name: re.compile('(.*)') for name in headers}
    self.add_matcher(matcher('HeadersMatcher', headers))
python
{ "resource": "" }
q272958
Mock.content
test
def content(self, value):
    """
    Defines the ``Content-Type`` outgoing header value to match.

    You can pass one of the following type aliases instead of the full
    MIME type representation:

    - ``json`` = ``application/json``
    - ``xml`` = ``application/xml``
    - ``html`` = ``text/html``
    - ``text`` = ``text/plain``
    - ``urlencoded`` = ``application/x-www-form-urlencoded``
    - ``form`` = ``application/x-www-form-urlencoded``
    - ``form-data`` = ``application/x-www-form-urlencoded``

    Arguments:
        value (str): type alias or header value to match.

    Returns:
        self: current Mock instance.
    """
    header = {'Content-Type': TYPES.get(value, value)}
    self._request.headers = header
    self.add_matcher(matcher('HeadersMatcher', header))
python
{ "resource": "" }
q272959
Mock.params
test
def params(self, params):
    """
    Defines a set of URL query params to match.

    Arguments:
        params (dict): set of params to match.

    Returns:
        self: current Mock instance.
    """
    url = furl(self._request.rawurl)
    url = url.add(params)
    self._request.url = url.url
    self.add_matcher(matcher('QueryMatcher', params))
python
{ "resource": "" }
q272960
Mock.body
test
def body(self, body):
    """
    Defines the body data to match.

    ``body`` argument can be a ``str``, ``binary`` or a regular expression.

    Arguments:
        body (str|binary|regex): body data to match.

    Returns:
        self: current Mock instance.
    """
    self._request.body = body
    self.add_matcher(matcher('BodyMatcher', body))
python
{ "resource": "" }
q272961
Mock.json
test
def json(self, json):
    """
    Defines the JSON body to match.

    ``json`` argument can be a JSON string, a JSON serializable Python
    structure, such as a ``dict`` or ``list``, or it can be a regular
    expression used to match the body.

    Arguments:
        json (str|dict|list|regex): body JSON to match.

    Returns:
        self: current Mock instance.
    """
    self._request.json = json
    self.add_matcher(matcher('JSONMatcher', json))
python
{ "resource": "" }
q272962
Mock.xml
test
def xml(self, xml):
    """
    Defines an XML body value to match.

    Arguments:
        xml (str|regex): body XML to match.

    Returns:
        self: current Mock instance.
    """
    self._request.xml = xml
    self.add_matcher(matcher('XMLMatcher', xml))
python
{ "resource": "" }
q272963
Mock.file
test
def file(self, path):
    """
    Reads the body to match from a disk file.

    Arguments:
        path (str): relative or absolute path to file to read from.

    Returns:
        self: current Mock instance.
    """
    with open(path, 'r') as f:
        self.body(str(f.read()))
python
{ "resource": "" }
q272964
Mock.persist
test
def persist(self, status=None):
    """
    Enables persistent mode for the current mock.

    Arguments:
        status (bool, optional): enable or disable persist mode.
            Defaults to ``True``.

    Returns:
        self: current Mock instance.
    """
    self._persist = status if type(status) is bool else True
python
{ "resource": "" }
q272965
Mock.error
test
def error(self, error):
    """
    Defines a simulated exception error that will be raised.

    Arguments:
        error (str|Exception): error to raise.

    Returns:
        self: current Mock instance.
    """
    self._error = RuntimeError(error) if isinstance(error, str) else error
python
{ "resource": "" }
q272966
Mock.reply
test
def reply(self, status=200, new_response=False, **kw):
    """
    Defines the mock response.

    Arguments:
        status (int, optional): response status code. Defaults to ``200``.
        **kw (dict): optional keyword arguments passed to ``pook.Response``
            constructor.

    Returns:
        pook.Response: mock response definition instance.
    """
    # Use or create a Response mock instance
    res = Response(**kw) if new_response else self._response
    # Define HTTP mandatory response status
    res.status(status or res._status)
    # Expose current mock instance in response for self-reference
    res.mock = self
    # Define mock response
    self._response = res
    # Return response
    return res
python
{ "resource": "" }
q272967
Mock.match
test
def match(self, request):
    """
    Matches an outgoing HTTP request against the current mock matchers.

    This method acts like a delegator to `pook.MatcherEngine`.

    Arguments:
        request (pook.Request): request instance to match.

    Raises:
        Exception: if the mock has an exception defined.

    Returns:
        tuple(bool, list[Exception]): ``True`` if the mock matches the
            outgoing HTTP request, otherwise ``False``. Also returns an
            optional list of error exceptions.
    """
    # If mock already expired, fail it
    if self._times <= 0:
        raise PookExpiredMock('Mock expired')

    # Trigger mock filters
    for test in self.filters:
        if not test(request, self):
            return False, []

    # Trigger mock mappers
    for mapper in self.mappers:
        request = mapper(request, self)
        if not request:
            raise ValueError('map function must return a request object')

    # Match incoming request against registered mock matchers
    matches, errors = self.matchers.match(request)

    # If not matched, return False
    if not matches:
        return False, errors

    # Register matched request for further inspection and reference
    self._calls.append(request)

    # Increase mock call counter
    self._matches += 1
    if not self._persist:
        self._times -= 1

    # Raise simulated error
    if self._error:
        raise self._error

    # Trigger callback when matched
    for callback in self.callbacks:
        callback(request, self)

    return True, []
python
{ "resource": "" }
q272968
activate_async
test
def activate_async(fn, _engine):
    """
    Async version of the activate decorator.

    Arguments:
        fn (function): function to be wrapped by the decorator.
        _engine (Engine): pook engine instance.

    Returns:
        function: decorator wrapper function.
    """
    @coroutine
    @functools.wraps(fn)
    def wrapper(*args, **kw):
        _engine.activate()
        try:
            if iscoroutinefunction(fn):
                yield from fn(*args, **kw)  # noqa
            else:
                fn(*args, **kw)
        finally:
            _engine.disable()

    return wrapper
python
{ "resource": "" }
q272969
Engine.set_mock_engine
test
def set_mock_engine(self, engine):
    """
    Sets a custom mock engine, replacing the built-in one.

    This is particularly useful if you want to replace the built-in HTTP
    traffic mock interceptor engine with your custom one.

    For mock engine implementation details, see `pook.MockEngine`.

    Arguments:
        engine (pook.MockEngine): custom mock engine to use.
    """
    if not engine:
        raise TypeError('engine must be a valid object')

    # Instantiate mock engine
    mock_engine = engine(self)

    # Validate minimum viable interface
    methods = ('activate', 'disable')
    if not all([hasattr(mock_engine, method) for method in methods]):
        raise NotImplementedError('engine must implement the '
                                  'required methods')

    # Use the custom mock engine
    self.mock_engine = mock_engine

    # Enable mock engine, if needed
    if self.active:
        self.mock_engine.activate()
python
{ "resource": "" }
q272970
Engine.enable_network
test
def enable_network(self, *hostnames):
    """
    Enables real networking mode, optionally passing one or multiple
    hostnames that would be used as filter.

    If at least one hostname matches with the outgoing traffic, the
    request will be executed via the real network.

    Arguments:
        *hostnames: optional list of host names to enable real network
            against them. hostname value can be a regular expression.
    """
    def hostname_filter(hostname, req):
        if isregex(hostname):
            return hostname.match(req.url.hostname)
        return req.url.hostname == hostname

    for hostname in hostnames:
        self.use_network_filter(partial(hostname_filter, hostname))

    self.networking = True
python
{ "resource": "" }
q272971
Engine.mock
test
def mock(self, url=None, **kw):
    """
    Creates and registers a new HTTP mock in the current engine.

    Arguments:
        url (str): request URL to mock.
        activate (bool): force mock engine activation.
            Defaults to ``False``.
        **kw (mixed): variadic keyword arguments for ``Mock`` constructor.

    Returns:
        pook.Mock: new mock instance.
    """
    # Activate mock engine, if explicitly requested
    if kw.get('activate'):
        kw.pop('activate')
        self.activate()

    # Create the new HTTP mock expectation
    mock = Mock(url=url, **kw)
    # Expose current engine instance via mock
    mock._engine = self
    # Register the mock in the current engine
    self.add_mock(mock)
    # Return it for consumer satisfaction
    return mock
python
{ "resource": "" }
q272972
Engine.remove_mock
test
def remove_mock(self, mock):
        """
        Removes a specific mock instance by object reference.

        Arguments:
            mock (pook.Mock): mock instance to remove.
        """
        self.mocks = [m for m in self.mocks if m is not mock]
python
{ "resource": "" }
q272973
Engine.activate
test
def activate(self):
        """
        Activates the registered interceptors in the mocking engine.

        This means any HTTP traffic captured by those interceptors will
        trigger the HTTP mock matching engine in order to determine if a given
        HTTP transaction should be mocked out or not.
        """
        if self.active:
            return None

        # Activate mock engine
        self.mock_engine.activate()

        # Enable engine state
        self.active = True
python
{ "resource": "" }
q272974
Engine.disable
test
def disable(self):
        """
        Disables interceptors and stops intercepting any outgoing HTTP
        traffic.
        """
        if not self.active:
            return None

        # Disable current mock engine
        self.mock_engine.disable()

        # Disable engine state
        self.active = False
python
{ "resource": "" }
q272975
Engine.should_use_network
test
def should_use_network(self, request):
        """
        Verifies if real networking mode should be used for the given
        request, passing it to the registered network filters.

        Arguments:
            request (pook.Request): outgoing HTTP request to test.

        Returns:
            bool
        """
        return (self.networking and
                all((fn(request) for fn in self.network_filters)))
python
{ "resource": "" }
q272976
Engine.match
test
def match(self, request):
        """
        Matches a given Request instance contract against the registered
        mocks.

        If a mock passes all the matchers, its response will be returned.

        Arguments:
            request (pook.Request): Request contract to match.

        Raises:
            pook.PookNoMatches: if networking is disabled and no mock matches
                with the given request contract.

        Returns:
            pook.Response: the mock response to be used by the interceptor.
        """
        # Trigger engine-level request filters
        for test in self.filters:
            if not test(request, self):
                return False

        # Trigger engine-level request mappers
        for mapper in self.mappers:
            request = mapper(request, self)
            if not request:
                raise ValueError('map function must return a request object')

        # Store list of mock matching errors for further debugging
        match_errors = []

        # Try to match the request against registered mock definitions
        for mock in self.mocks[:]:
            try:
                # Return the first matched HTTP request mock
                matches, errors = mock.match(request.copy())
                if len(errors):
                    match_errors += errors
                if matches:
                    return mock
            except PookExpiredMock:
                # Remove the mock if already expired
                self.mocks.remove(mock)

        # Validate that we have a mock
        if not self.should_use_network(request):
            msg = 'pook error!\n\n'
            msg += (
                '=> Cannot match any mock for the '
                'following request:\n{}'.format(request)
            )

            # Compose unmatch error details, if debug mode is enabled
            if self.debug:
                err = '\n\n'.join([str(err) for err in match_errors])
                if err:
                    msg += '\n\n=> Detailed matching errors:\n{}\n'.format(err)

            # Raise no matches exception
            raise PookNoMatches(msg)

        # Register unmatched request
        self.unmatched_reqs.append(request)
python
{ "resource": "" }
q272977
Request.copy
test
def copy(self):
        """
        Copies the current Request object instance so it can be mutated
        without side effects on the original.

        Returns:
            pook.Request: copy of the current Request instance.
        """
        req = type(self)()
        req.__dict__ = self.__dict__.copy()
        req._headers = self.headers.copy()
        return req
python
{ "resource": "" }
q272978
activate
test
def activate(fn=None):
    """
    Enables the HTTP traffic interceptors.

    This function can be used as a decorator.

    Arguments:
        fn (function|coroutinefunction): optional function argument
            if used as a decorator.

    Returns:
        function: decorator wrapper function, only if called as a
            decorator, otherwise ``None``.

    Example::

        # Standard use case
        pook.activate()
        pook.mock('server.com/foo').reply(404)
        res = requests.get('server.com/foo')
        assert res.status_code == 404
        pook.disable()

        # Decorator use case
        @pook.activate
        def test_request():
            pook.mock('server.com/foo').reply(404)
            res = requests.get('server.com/foo')
            assert res.status_code == 404
    """
    # If not used as a decorator, activate the engine and exit
    if not isfunction(fn):
        _engine.activate()
        return None

    # If used as a decorator for an async coroutine, wrap it
    if iscoroutinefunction is not None and iscoroutinefunction(fn):
        return activate_async(fn, _engine)

    @functools.wraps(fn)
    def wrapper(*args, **kw):
        _engine.activate()
        try:
            fn(*args, **kw)
        finally:
            _engine.disable()

    return wrapper
python
{ "resource": "" }
q272979
use
test
from contextlib import contextmanager


# NOTE: the @contextmanager decorator (not shown in the original extract)
# is assumed here; it is required for the documented "with pook.use()" usage,
# since the function body is a generator.
@contextmanager
def use(network=False):
    """
    Creates a new isolated mock engine to be used via context manager.

    Example::

        with pook.use() as engine:
            pook.mock('server.com/foo').reply(404)
            res = requests.get('server.com/foo')
            assert res.status_code == 404
    """
    global _engine

    # Save the current engine and create a temporary one
    __engine = _engine
    activated = __engine.active
    if activated:
        __engine.disable()

    _engine = Engine(network=network)
    _engine.activate()

    # Yield the engine to be used by the context manager
    yield _engine

    # Restore engine state
    _engine.disable()
    if network:
        _engine.disable_network()

    # Restore previous engine
    _engine = __engine
    if activated:
        _engine.activate()
python
{ "resource": "" }
q272980
MockEngine.add_interceptor
test
def add_interceptor(self, *interceptors):
        """
        Adds one or multiple HTTP traffic interceptors to the current
        mocking engine.

        Interceptors are typically HTTP client specific wrapper classes that
        implement the pook interceptor interface.

        Arguments:
            interceptors (pook.interceptors.BaseInterceptor)
        """
        for interceptor in interceptors:
            self.interceptors.append(interceptor(self.engine))
python
{ "resource": "" }
q272981
MockEngine.remove_interceptor
test
def remove_interceptor(self, name):
        """
        Removes a specific interceptor by name.

        Arguments:
            name (str): interceptor name to disable.

        Returns:
            bool: `True` if the interceptor was disabled, otherwise `False`.
        """
        for index, interceptor in enumerate(self.interceptors):
            matches = (
                type(interceptor).__name__ == name or
                # default to None so interceptors without a `name`
                # attribute do not raise AttributeError
                getattr(interceptor, 'name', None) == name
            )
            if matches:
                self.interceptors.pop(index)
                return True
        return False
python
{ "resource": "" }
q272982
get_setting
test
def get_setting(connection, key):
    """Get key from connection or default to settings."""
    if key in connection.settings_dict:
        return connection.settings_dict[key]
    else:
        return getattr(settings, key)
python
{ "resource": "" }
q272983
DecryptedCol.as_sql
test
def as_sql(self, compiler, connection):
        """Build SQL with decryption and casting."""
        sql, params = super(DecryptedCol, self).as_sql(compiler, connection)
        sql = self.target.get_decrypt_sql(connection) % (sql, self.target.get_cast_sql())
        return sql, params
python
{ "resource": "" }
q272984
HashMixin.pre_save
test
def pre_save(self, model_instance, add):
        """Save the original_value."""
        if self.original:
            original_value = getattr(model_instance, self.original)
            setattr(model_instance, self.attname, original_value)
        return super(HashMixin, self).pre_save(model_instance, add)
python
{ "resource": "" }
q272985
HashMixin.get_placeholder
test
def get_placeholder(self, value=None, compiler=None, connection=None):
        """
        Tell postgres to encrypt this field with a hashing function.

        The `value` string is checked to determine if we need to hash or
        keep the current value. `compiler` is ignored here as we don't need
        custom operators; `connection` is only used to build the encryption
        SQL.
        """
        if value is None or value.startswith('\\x'):
            return '%s'
        return self.get_encrypt_sql(connection)
python
{ "resource": "" }
q272986
PGPMixin.get_col
test
def get_col(self, alias, output_field=None):
        """Get the decryption for col."""
        if output_field is None:
            output_field = self
        if alias != self.model._meta.db_table or output_field != self:
            return DecryptedCol(
                alias,
                self,
                output_field
            )
        else:
            return self.cached_col
python
{ "resource": "" }
q272987
PGPPublicKeyFieldMixin.get_placeholder
test
def get_placeholder(self, value=None, compiler=None, connection=None):
        """Tell postgres to encrypt this field using PGP."""
        return self.encrypt_sql.format(get_setting(connection, 'PUBLIC_PGP_KEY'))
python
{ "resource": "" }
q272988
hunt_repeated_yaml_keys
test
def hunt_repeated_yaml_keys(data):
    """Parse YAML and return a dict mapping each repeated key to the
    lines on which it occurs.
    """
    loader = yaml.Loader(data)

    def compose_node(parent, index):
        # the line number where the previous token has ended (plus empty lines)
        line = loader.line
        node = Composer.compose_node(loader, parent, index)
        node.__line__ = line + 1
        return node

    def construct_mapping(node, deep=False):
        mapping = dict()
        errors = dict()
        for key_node, value_node in node.value:
            key = key_node.value
            if key in mapping:
                if key in errors:
                    errors[key].append(key_node.__line__)
                else:
                    errors[key] = [mapping[key], key_node.__line__]
            mapping[key] = key_node.__line__
        return errors

    loader.compose_node = compose_node
    loader.construct_mapping = construct_mapping
    data = loader.get_single_data()
    return data
python
{ "resource": "" }
q272989
base_regression
test
def base_regression(Q, slope=None):
    """
    This function calculates the regression coefficients for a given
    vector containing the averages of tip and branch quantities.

    Parameters
    ----------
    Q : numpy.array
        vector with the weighted averages of tip and branch quantities
    slope : float, optional
        if given, the slope is fixed and only the intercept is estimated

    Returns
    -------
    dict
        regression parameters (slope, intercept, chisq and, when the slope
        is estimated, the estimator hessian and covariance)
    """
    if slope is None:
        slope = (Q[dtavgii] - Q[tavgii]*Q[davgii]/Q[sii]) \
                / (Q[tsqii] - Q[tavgii]**2/Q[sii])
        only_intercept = False
    else:
        only_intercept = True
    intercept = (Q[davgii] - Q[tavgii]*slope)/Q[sii]

    if only_intercept:
        return {'slope': slope, 'intercept': intercept,
                'chisq': 0.5*(Q[dsqii]/Q[sii] - Q[davgii]**2/Q[sii]**2)}

    chisq = 0.5*(Q[dsqii] - Q[davgii]**2/Q[sii]
                 - (Q[dtavgii] - Q[davgii]*Q[tavgii]/Q[sii])**2
                 / (Q[tsqii] - Q[tavgii]**2/Q[sii]))
    estimator_hessian = np.array([[Q[tsqii], Q[tavgii]],
                                  [Q[tavgii], Q[sii]]])

    return {'slope': slope, 'intercept': intercept, 'chisq': chisq,
            'hessian': estimator_hessian,
            'cov': np.linalg.inv(estimator_hessian)}
python
{ "resource": "" }
q272990
TreeRegression.CovInv
test
def CovInv(self):
        """
        Inverse of the covariance matrix

        Returns
        -------
        H : (np.array)
            inverse of the covariance matrix.
        """
        self.recurse(full_matrix=True)
        return self.tree.root.cinv
python
{ "resource": "" }
q272991
TreeRegression.recurse
test
def recurse(self, full_matrix=False):
        """
        recursion to calculate inverse covariance matrix

        Parameters
        ----------
        full_matrix : bool, optional
            if True, the entire inverse matrix is calculated. Otherwise,
            only the weighting vector.
        """
        for n in self.tree.get_nonterminals(order='postorder'):
            n_leaves = len(n._ii)
            if full_matrix:
                M = np.zeros((n_leaves, n_leaves), dtype=float)
            r = np.zeros(n_leaves, dtype=float)
            c_count = 0
            for c in n:
                ssq = self.branch_variance(c)
                nc = len(c._ii)
                if c.is_terminal():
                    if full_matrix:
                        M[c_count, c_count] = 1.0/ssq
                    r[c_count] = 1.0/ssq
                else:
                    if full_matrix:
                        M[c_count:c_count+nc, c_count:c_count+nc] = c.cinv - ssq*np.outer(c.r, c.r)/(1+ssq*c.s)
                    r[c_count:c_count+nc] = c.r/(1+ssq*c.s)
                c_count += nc

            if full_matrix:
                n.cinv = M
            n.r = r  # M.sum(axis=1)
            n.s = n.r.sum()
python
{ "resource": "" }
q272992
TreeRegression._calculate_averages
test
def _calculate_averages(self):
        """
        calculate the weighted sums of the tip and branch values and
        their second moments.
        """
        for n in self.tree.get_nonterminals(order='postorder'):
            Q = np.zeros(6, dtype=float)
            for c in n:
                tv = self.tip_value(c)
                bv = self.branch_value(c)
                var = self.branch_variance(c)
                Q += self.propagate_averages(c, tv, bv, var)
            n.Q = Q

        for n in self.tree.find_clades(order='preorder'):
            O = np.zeros(6, dtype=float)
            if n == self.tree.root:
                n.Qtot = n.Q
                continue
            for c in n.up:
                if c == n:
                    continue
                tv = self.tip_value(c)
                bv = self.branch_value(c)
                var = self.branch_variance(c)
                O += self.propagate_averages(c, tv, bv, var)

            if n.up != self.tree.root:
                c = n.up
                tv = self.tip_value(c)
                bv = self.branch_value(c)
                var = self.branch_variance(c)
                O += self.propagate_averages(c, tv, bv, var, outgroup=True)
            n.O = O

            if not n.is_terminal():
                tv = self.tip_value(n)
                bv = self.branch_value(n)
                var = self.branch_variance(n)
                n.Qtot = n.Q + self.propagate_averages(n, tv, bv, var, outgroup=True)
python
{ "resource": "" }
q272993
TreeRegression.propagate_averages
test
def propagate_averages(self, n, tv, bv, var, outgroup=False):
        """
        This function implements the propagation of the means,
        variance, and covariances along a branch. It operates
        both towards the root and towards the tips.

        Parameters
        ----------
        n : (node)
            the branch connecting this node to its parent is used
            for propagation
        tv : (float)
            tip value. Only required if the node is terminal.
        bv : (float)
            branch value. The increment of the tree-associated quantity.
        var : (float)
            the variance increment along the branch

        Returns
        -------
        Q : (np.array)
            a vector of length 6 containing the updated quantities
        """
        if n.is_terminal() and outgroup == False:
            if tv is None or np.isinf(tv) or np.isnan(tv):
                res = np.array([0, 0, 0, 0, 0, 0])
            elif var == 0:
                res = np.array([np.inf, np.inf, np.inf, np.inf, np.inf, np.inf])
            else:
                res = np.array([
                    tv/var,
                    bv/var,
                    tv**2/var,
                    bv*tv/var,
                    bv**2/var,
                    1.0/var], dtype=float)
        else:
            tmpQ = n.O if outgroup else n.Q
            denom = 1.0/(1+var*tmpQ[sii])
            res = np.array([
                tmpQ[tavgii]*denom,
                (tmpQ[davgii] + bv*tmpQ[sii])*denom,
                tmpQ[tsqii] - var*tmpQ[tavgii]**2*denom,
                tmpQ[dtavgii] + tmpQ[tavgii]*bv
                    - var*tmpQ[tavgii]*(tmpQ[davgii] + bv*tmpQ[sii])*denom,
                tmpQ[dsqii] + 2*bv*tmpQ[davgii] + bv**2*tmpQ[sii]
                    - var*(tmpQ[davgii]**2 + 2*bv*tmpQ[davgii]*tmpQ[sii]
                           + bv**2*tmpQ[sii]**2)*denom,
                tmpQ[sii]*denom])

        return res
python
{ "resource": "" }
q272994
TreeRegression.explained_variance
test
def explained_variance(self):
        """calculate standard explained variance

        Returns
        -------
        float
            r-value of the root-to-tip distance and time.
            independent of regression model, but dependent on root choice
        """
        self.tree.root._v = 0
        for n in self.tree.get_nonterminals(order='preorder'):
            for c in n:
                c._v = n._v + self.branch_value(c)

        raw = np.array([(self.tip_value(n), n._v)
                        for n in self.tree.get_terminals()
                        if self.tip_value(n) is not None])
        return np.corrcoef(raw.T)[0, 1]
python
{ "resource": "" }
q272995
TreeRegression.regression
test
def regression(self, slope=None):
        """regress tip values against branch values

        Parameters
        ----------
        slope : None, optional
            if given, the slope isn't optimized

        Returns
        -------
        dict
            regression parameters
        """
        self._calculate_averages()

        clock_model = base_regression(self.tree.root.Q, slope)
        clock_model['r_val'] = self.explained_variance()

        return clock_model
python
{ "resource": "" }
q272996
TreeRegression.find_best_root
test
def find_best_root(self, force_positive=True, slope=None):
        """
        determine the position on the tree that minimizes the bilinear
        product of the inverse covariance and the data vectors.

        Returns
        -------
        best_root : (dict)
            dictionary with the node, the fraction `x` at which the branch
            is to be split, and the regression parameters
        """
        self._calculate_averages()

        best_root = {"chisq": np.inf}
        for n in self.tree.find_clades():
            if n == self.tree.root:
                continue

            tv = self.tip_value(n)
            bv = self.branch_value(n)
            var = self.branch_variance(n)
            x, chisq = self._optimal_root_along_branch(n, tv, bv, var, slope=slope)

            if (chisq < best_root["chisq"]):
                tmpQ = self.propagate_averages(n, tv, bv*x, var*x) \
                     + self.propagate_averages(n, tv, bv*(1-x), var*(1-x), outgroup=True)
                reg = base_regression(tmpQ, slope=slope)
                if reg["slope"] >= 0 or (force_positive == False):
                    best_root = {"node": n, "split": x}
                    best_root.update(reg)

        if 'node' not in best_root:
            print("TreeRegression.find_best_root: No valid root found!", force_positive)
            return None

        if 'hessian' in best_root:
            # calculate differentials with respect to x
            deriv = []
            n = best_root["node"]
            tv = self.tip_value(n)
            bv = self.branch_value(n)
            var = self.branch_variance(n)
            for dx in [-0.001, 0.001]:
                y = min(1.0, max(0.0, best_root["split"]+dx))
                tmpQ = self.propagate_averages(n, tv, bv*y, var*y) \
                     + self.propagate_averages(n, tv, bv*(1-y), var*(1-y), outgroup=True)
                reg = base_regression(tmpQ, slope=slope)
                deriv.append([y, reg['chisq'], tmpQ[tavgii], tmpQ[davgii]])

            estimator_hessian = np.zeros((3, 3))
            estimator_hessian[:2, :2] = best_root['hessian']
            estimator_hessian[2, 2] = (deriv[0][1] + deriv[1][1] - 2.0*best_root['chisq'])/(deriv[0][0] - deriv[1][0])**2
            # estimator_hessian[2,0] = (deriv[0][2] - deriv[1][2])/(deriv[0][0] - deriv[1][0])
            # estimator_hessian[2,1] = (deriv[0][3] - deriv[1][3])/(deriv[0][0] - deriv[1][0])
            estimator_hessian[0, 2] = estimator_hessian[2, 0]
            estimator_hessian[1, 2] = estimator_hessian[2, 1]
            best_root['hessian'] = estimator_hessian
            best_root['cov'] = np.linalg.inv(estimator_hessian)

        return best_root
python
{ "resource": "" }
q272997
Coalescent.set_Tc
test
def set_Tc(self, Tc, T=None):
        '''
        initialize the merger model with a coalescent time

        Args:
            - Tc:   a float, or an iterable; if an iterable, the argument T
                    of the same shape is required
            - T:    an array-like of the same shape as Tc that specifies the
                    time pivots corresponding to Tc

        Returns:
            - None
        '''
        if isinstance(Tc, Iterable):
            if len(Tc) == len(T):
                x = np.concatenate(([-ttconf.BIG_NUMBER], T, [ttconf.BIG_NUMBER]))
                y = np.concatenate(([Tc[0]], Tc, [Tc[-1]]))
                self.Tc = interp1d(x, y)
            else:
                self.logger("need Tc values and Timepoints of equal length", 2, warn=True)
                self.Tc = interp1d([-ttconf.BIG_NUMBER, ttconf.BIG_NUMBER], [1e-5, 1e-5])
        else:
            self.Tc = interp1d([-ttconf.BIG_NUMBER, ttconf.BIG_NUMBER],
                               [Tc+ttconf.TINY_NUMBER, Tc+ttconf.TINY_NUMBER])
        self.calc_integral_merger_rate()
python
{ "resource": "" }
q272998
Coalescent.calc_branch_count
test
def calc_branch_count(self):
        '''
        calculates an interpolation object that maps time to the number of
        concurrent branches in the tree. The result is stored in
        self.nbranches
        '''
        # make a list of (time, merger or loss event) via a root-first iteration
        self.tree_events = np.array(sorted([(n.time_before_present, len(n.clades)-1)
                                            for n in self.tree.find_clades()
                                            if not n.bad_branch],
                                           key=lambda x: -x[0]))

        # collapse multiple events at one time point into sum of changes
        from collections import defaultdict
        dn_branch = defaultdict(int)
        for (t, dn) in self.tree_events:
            dn_branch[t] += dn
        unique_mergers = np.array(sorted(dn_branch.items(), key=lambda x: -x[0]))

        # calculate the branch count at each point summing the delta branch counts
        nbranches = [[ttconf.BIG_NUMBER, 1],
                     [unique_mergers[0, 0]+ttconf.TINY_NUMBER, 1]]
        for ti, (t, dn) in enumerate(unique_mergers[:-1]):
            new_n = nbranches[-1][1]+dn
            next_t = unique_mergers[ti+1, 0]+ttconf.TINY_NUMBER
            nbranches.append([t, new_n])
            nbranches.append([next_t, new_n])

        new_n += unique_mergers[-1, 1]
        nbranches.append([next_t, new_n])
        nbranches.append([-ttconf.BIG_NUMBER, new_n])
        nbranches = np.array(nbranches)

        self.nbranches = interp1d(nbranches[:, 0], nbranches[:, 1], kind='linear')
python
{ "resource": "" }
q272999
Coalescent.cost
test
def cost(self, t_node, branch_length, multiplicity=2.0):
        '''
        returns the cost associated with a branch starting at t_node.
        t_node is time before present, the branch goes back in time

        Args:
            - t_node:        time of the node
            - branch_length: branch length, determines when this branch
                             merges with sister
            - multiplicity:  2 if merger is binary, higher if this is a polytomy
        '''
        merger_time = t_node+branch_length
        return self.integral_merger_rate(merger_time) - self.integral_merger_rate(t_node) \
               - np.log(self.total_merger_rate(merger_time))*(multiplicity-1.0)/multiplicity
python
{ "resource": "" }